Merge pull request #109090 from sarveshr7/multicidr-rangeallocator
Enhance NodeIPAM to support multiple ClusterCIDRs
This commit is contained in:
commit
759785ea14
@ -393,6 +393,8 @@ API rule violation: names_match,k8s.io/api/core/v1,RBDVolumeSource,RBDPool
|
|||||||
API rule violation: names_match,k8s.io/api/core/v1,RBDVolumeSource,RadosUser
|
API rule violation: names_match,k8s.io/api/core/v1,RBDVolumeSource,RadosUser
|
||||||
API rule violation: names_match,k8s.io/api/core/v1,VolumeSource,CephFS
|
API rule violation: names_match,k8s.io/api/core/v1,VolumeSource,CephFS
|
||||||
API rule violation: names_match,k8s.io/api/core/v1,VolumeSource,StorageOS
|
API rule violation: names_match,k8s.io/api/core/v1,VolumeSource,StorageOS
|
||||||
|
API rule violation: names_match,k8s.io/api/networking/v1alpha1,ClusterCIDRSpec,IPv4
|
||||||
|
API rule violation: names_match,k8s.io/api/networking/v1alpha1,ClusterCIDRSpec,IPv6
|
||||||
API rule violation: names_match,k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1,JSON,Raw
|
API rule violation: names_match,k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1,JSON,Raw
|
||||||
API rule violation: names_match,k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1,JSONSchemaProps,Ref
|
API rule violation: names_match,k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1,JSONSchemaProps,Ref
|
||||||
API rule violation: names_match,k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1,JSONSchemaProps,Schema
|
API rule violation: names_match,k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1,JSONSchemaProps,Schema
|
||||||
|
961
api/openapi-spec/swagger.json
generated
961
api/openapi-spec/swagger.json
generated
@ -12081,6 +12081,96 @@
|
|||||||
},
|
},
|
||||||
"type": "object"
|
"type": "object"
|
||||||
},
|
},
|
||||||
|
"io.k8s.api.networking.v1alpha1.ClusterCIDR": {
|
||||||
|
"description": "ClusterCIDR represents a single configuration for per-Node Pod CIDR allocations when the MultiCIDRRangeAllocator is enabled (see the config for kube-controller-manager). A cluster may have any number of ClusterCIDR resources, all of which will be considered when allocating a CIDR for a Node. A ClusterCIDR is eligible to be used for a given Node when the node selector matches the node in question and has free CIDRs to allocate. In case of multiple matching ClusterCIDR resources, the allocator will attempt to break ties using internal heuristics, but any ClusterCIDR whose node selector matches the Node may be used.",
|
||||||
|
"properties": {
|
||||||
|
"apiVersion": {
|
||||||
|
"description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
|
||||||
|
"type": "string"
|
||||||
|
},
|
||||||
|
"kind": {
|
||||||
|
"description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
|
||||||
|
"type": "string"
|
||||||
|
},
|
||||||
|
"metadata": {
|
||||||
|
"$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta",
|
||||||
|
"description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata"
|
||||||
|
},
|
||||||
|
"spec": {
|
||||||
|
"$ref": "#/definitions/io.k8s.api.networking.v1alpha1.ClusterCIDRSpec",
|
||||||
|
"description": "Spec is the desired state of the ClusterCIDR. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"type": "object",
|
||||||
|
"x-kubernetes-group-version-kind": [
|
||||||
|
{
|
||||||
|
"group": "networking.k8s.io",
|
||||||
|
"kind": "ClusterCIDR",
|
||||||
|
"version": "v1alpha1"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"io.k8s.api.networking.v1alpha1.ClusterCIDRList": {
|
||||||
|
"description": "ClusterCIDRList contains a list of ClusterCIDR.",
|
||||||
|
"properties": {
|
||||||
|
"apiVersion": {
|
||||||
|
"description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
|
||||||
|
"type": "string"
|
||||||
|
},
|
||||||
|
"items": {
|
||||||
|
"description": "Items is the list of ClusterCIDRs.",
|
||||||
|
"items": {
|
||||||
|
"$ref": "#/definitions/io.k8s.api.networking.v1alpha1.ClusterCIDR"
|
||||||
|
},
|
||||||
|
"type": "array"
|
||||||
|
},
|
||||||
|
"kind": {
|
||||||
|
"description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
|
||||||
|
"type": "string"
|
||||||
|
},
|
||||||
|
"metadata": {
|
||||||
|
"$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta",
|
||||||
|
"description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"required": [
|
||||||
|
"items"
|
||||||
|
],
|
||||||
|
"type": "object",
|
||||||
|
"x-kubernetes-group-version-kind": [
|
||||||
|
{
|
||||||
|
"group": "networking.k8s.io",
|
||||||
|
"kind": "ClusterCIDRList",
|
||||||
|
"version": "v1alpha1"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"io.k8s.api.networking.v1alpha1.ClusterCIDRSpec": {
|
||||||
|
"description": "ClusterCIDRSpec defines the desired state of ClusterCIDR.",
|
||||||
|
"properties": {
|
||||||
|
"ipv4": {
|
||||||
|
"description": "IPv4 defines an IPv4 IP block in CIDR notation(e.g. \"10.0.0.0/8\"). At least one of IPv4 and IPv6 must be specified. This field is immutable.",
|
||||||
|
"type": "string"
|
||||||
|
},
|
||||||
|
"ipv6": {
|
||||||
|
"description": "IPv6 defines an IPv6 IP block in CIDR notation(e.g. \"fd12:3456:789a:1::/64\"). At least one of IPv4 and IPv6 must be specified. This field is immutable.",
|
||||||
|
"type": "string"
|
||||||
|
},
|
||||||
|
"nodeSelector": {
|
||||||
|
"$ref": "#/definitions/io.k8s.api.core.v1.NodeSelector",
|
||||||
|
"description": "NodeSelector defines which nodes the config is applicable to. An empty or nil NodeSelector selects all nodes. This field is immutable."
|
||||||
|
},
|
||||||
|
"perNodeHostBits": {
|
||||||
|
"description": "PerNodeHostBits defines the number of host bits to be configured per node. A subnet mask determines how much of the address is used for network bits and host bits. For example an IPv4 address of 192.168.0.0/24, splits the address into 24 bits for the network portion and 8 bits for the host portion. To allocate 256 IPs, set this field to 8 (a /24 mask for IPv4 or a /120 for IPv6). Minimum value is 4 (16 IPs). This field is immutable.",
|
||||||
|
"format": "int32",
|
||||||
|
"type": "integer"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"required": [
|
||||||
|
"perNodeHostBits"
|
||||||
|
],
|
||||||
|
"type": "object"
|
||||||
|
},
|
||||||
"io.k8s.api.node.v1.Overhead": {
|
"io.k8s.api.node.v1.Overhead": {
|
||||||
"description": "Overhead structure represents the resource overhead associated with running a pod.",
|
"description": "Overhead structure represents the resource overhead associated with running a pod.",
|
||||||
"properties": {
|
"properties": {
|
||||||
@ -14650,6 +14740,11 @@
|
|||||||
"kind": "DeleteOptions",
|
"kind": "DeleteOptions",
|
||||||
"version": "v1"
|
"version": "v1"
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"group": "networking.k8s.io",
|
||||||
|
"kind": "DeleteOptions",
|
||||||
|
"version": "v1alpha1"
|
||||||
|
},
|
||||||
{
|
{
|
||||||
"group": "networking.k8s.io",
|
"group": "networking.k8s.io",
|
||||||
"kind": "DeleteOptions",
|
"kind": "DeleteOptions",
|
||||||
@ -15323,6 +15418,11 @@
|
|||||||
"kind": "WatchEvent",
|
"kind": "WatchEvent",
|
||||||
"version": "v1"
|
"version": "v1"
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"group": "networking.k8s.io",
|
||||||
|
"kind": "WatchEvent",
|
||||||
|
"version": "v1alpha1"
|
||||||
|
},
|
||||||
{
|
{
|
||||||
"group": "networking.k8s.io",
|
"group": "networking.k8s.io",
|
||||||
"kind": "WatchEvent",
|
"kind": "WatchEvent",
|
||||||
@ -67706,6 +67806,867 @@
|
|||||||
}
|
}
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
|
"/apis/networking.k8s.io/v1alpha1/": {
|
||||||
|
"get": {
|
||||||
|
"consumes": [
|
||||||
|
"application/json",
|
||||||
|
"application/yaml",
|
||||||
|
"application/vnd.kubernetes.protobuf"
|
||||||
|
],
|
||||||
|
"description": "get available resources",
|
||||||
|
"operationId": "getNetworkingV1alpha1APIResources",
|
||||||
|
"produces": [
|
||||||
|
"application/json",
|
||||||
|
"application/yaml",
|
||||||
|
"application/vnd.kubernetes.protobuf"
|
||||||
|
],
|
||||||
|
"responses": {
|
||||||
|
"200": {
|
||||||
|
"description": "OK",
|
||||||
|
"schema": {
|
||||||
|
"$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.APIResourceList"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"401": {
|
||||||
|
"description": "Unauthorized"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"schemes": [
|
||||||
|
"https"
|
||||||
|
],
|
||||||
|
"tags": [
|
||||||
|
"networking_v1alpha1"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"/apis/networking.k8s.io/v1alpha1/clustercidrs": {
|
||||||
|
"delete": {
|
||||||
|
"consumes": [
|
||||||
|
"*/*"
|
||||||
|
],
|
||||||
|
"description": "delete collection of ClusterCIDR",
|
||||||
|
"operationId": "deleteNetworkingV1alpha1CollectionClusterCIDR",
|
||||||
|
"parameters": [
|
||||||
|
{
|
||||||
|
"in": "body",
|
||||||
|
"name": "body",
|
||||||
|
"schema": {
|
||||||
|
"$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptions"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"description": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.",
|
||||||
|
"in": "query",
|
||||||
|
"name": "continue",
|
||||||
|
"type": "string",
|
||||||
|
"uniqueItems": true
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
|
||||||
|
"in": "query",
|
||||||
|
"name": "dryRun",
|
||||||
|
"type": "string",
|
||||||
|
"uniqueItems": true
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.",
|
||||||
|
"in": "query",
|
||||||
|
"name": "fieldSelector",
|
||||||
|
"type": "string",
|
||||||
|
"uniqueItems": true
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"description": "The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.",
|
||||||
|
"in": "query",
|
||||||
|
"name": "gracePeriodSeconds",
|
||||||
|
"type": "integer",
|
||||||
|
"uniqueItems": true
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.",
|
||||||
|
"in": "query",
|
||||||
|
"name": "labelSelector",
|
||||||
|
"type": "string",
|
||||||
|
"uniqueItems": true
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"description": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.",
|
||||||
|
"in": "query",
|
||||||
|
"name": "limit",
|
||||||
|
"type": "integer",
|
||||||
|
"uniqueItems": true
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"description": "Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.",
|
||||||
|
"in": "query",
|
||||||
|
"name": "orphanDependents",
|
||||||
|
"type": "boolean",
|
||||||
|
"uniqueItems": true
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"description": "Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.",
|
||||||
|
"in": "query",
|
||||||
|
"name": "propagationPolicy",
|
||||||
|
"type": "string",
|
||||||
|
"uniqueItems": true
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"description": "resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset",
|
||||||
|
"in": "query",
|
||||||
|
"name": "resourceVersion",
|
||||||
|
"type": "string",
|
||||||
|
"uniqueItems": true
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"description": "resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset",
|
||||||
|
"in": "query",
|
||||||
|
"name": "resourceVersionMatch",
|
||||||
|
"type": "string",
|
||||||
|
"uniqueItems": true
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"description": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.",
|
||||||
|
"in": "query",
|
||||||
|
"name": "timeoutSeconds",
|
||||||
|
"type": "integer",
|
||||||
|
"uniqueItems": true
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"produces": [
|
||||||
|
"application/json",
|
||||||
|
"application/yaml",
|
||||||
|
"application/vnd.kubernetes.protobuf"
|
||||||
|
],
|
||||||
|
"responses": {
|
||||||
|
"200": {
|
||||||
|
"description": "OK",
|
||||||
|
"schema": {
|
||||||
|
"$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"401": {
|
||||||
|
"description": "Unauthorized"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"schemes": [
|
||||||
|
"https"
|
||||||
|
],
|
||||||
|
"tags": [
|
||||||
|
"networking_v1alpha1"
|
||||||
|
],
|
||||||
|
"x-kubernetes-action": "deletecollection",
|
||||||
|
"x-kubernetes-group-version-kind": {
|
||||||
|
"group": "networking.k8s.io",
|
||||||
|
"kind": "ClusterCIDR",
|
||||||
|
"version": "v1alpha1"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"get": {
|
||||||
|
"consumes": [
|
||||||
|
"*/*"
|
||||||
|
],
|
||||||
|
"description": "list or watch objects of kind ClusterCIDR",
|
||||||
|
"operationId": "listNetworkingV1alpha1ClusterCIDR",
|
||||||
|
"parameters": [
|
||||||
|
{
|
||||||
|
"description": "allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.",
|
||||||
|
"in": "query",
|
||||||
|
"name": "allowWatchBookmarks",
|
||||||
|
"type": "boolean",
|
||||||
|
"uniqueItems": true
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"description": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.",
|
||||||
|
"in": "query",
|
||||||
|
"name": "continue",
|
||||||
|
"type": "string",
|
||||||
|
"uniqueItems": true
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.",
|
||||||
|
"in": "query",
|
||||||
|
"name": "fieldSelector",
|
||||||
|
"type": "string",
|
||||||
|
"uniqueItems": true
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.",
|
||||||
|
"in": "query",
|
||||||
|
"name": "labelSelector",
|
||||||
|
"type": "string",
|
||||||
|
"uniqueItems": true
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"description": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.",
|
||||||
|
"in": "query",
|
||||||
|
"name": "limit",
|
||||||
|
"type": "integer",
|
||||||
|
"uniqueItems": true
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"description": "resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset",
|
||||||
|
"in": "query",
|
||||||
|
"name": "resourceVersion",
|
||||||
|
"type": "string",
|
||||||
|
"uniqueItems": true
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"description": "resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset",
|
||||||
|
"in": "query",
|
||||||
|
"name": "resourceVersionMatch",
|
||||||
|
"type": "string",
|
||||||
|
"uniqueItems": true
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"description": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.",
|
||||||
|
"in": "query",
|
||||||
|
"name": "timeoutSeconds",
|
||||||
|
"type": "integer",
|
||||||
|
"uniqueItems": true
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"description": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.",
|
||||||
|
"in": "query",
|
||||||
|
"name": "watch",
|
||||||
|
"type": "boolean",
|
||||||
|
"uniqueItems": true
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"produces": [
|
||||||
|
"application/json",
|
||||||
|
"application/yaml",
|
||||||
|
"application/vnd.kubernetes.protobuf",
|
||||||
|
"application/json;stream=watch",
|
||||||
|
"application/vnd.kubernetes.protobuf;stream=watch"
|
||||||
|
],
|
||||||
|
"responses": {
|
||||||
|
"200": {
|
||||||
|
"description": "OK",
|
||||||
|
"schema": {
|
||||||
|
"$ref": "#/definitions/io.k8s.api.networking.v1alpha1.ClusterCIDRList"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"401": {
|
||||||
|
"description": "Unauthorized"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"schemes": [
|
||||||
|
"https"
|
||||||
|
],
|
||||||
|
"tags": [
|
||||||
|
"networking_v1alpha1"
|
||||||
|
],
|
||||||
|
"x-kubernetes-action": "list",
|
||||||
|
"x-kubernetes-group-version-kind": {
|
||||||
|
"group": "networking.k8s.io",
|
||||||
|
"kind": "ClusterCIDR",
|
||||||
|
"version": "v1alpha1"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"parameters": [
|
||||||
|
{
|
||||||
|
"description": "If 'true', then the output is pretty printed.",
|
||||||
|
"in": "query",
|
||||||
|
"name": "pretty",
|
||||||
|
"type": "string",
|
||||||
|
"uniqueItems": true
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"post": {
|
||||||
|
"consumes": [
|
||||||
|
"*/*"
|
||||||
|
],
|
||||||
|
"description": "create a ClusterCIDR",
|
||||||
|
"operationId": "createNetworkingV1alpha1ClusterCIDR",
|
||||||
|
"parameters": [
|
||||||
|
{
|
||||||
|
"in": "body",
|
||||||
|
"name": "body",
|
||||||
|
"required": true,
|
||||||
|
"schema": {
|
||||||
|
"$ref": "#/definitions/io.k8s.api.networking.v1alpha1.ClusterCIDR"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
|
||||||
|
"in": "query",
|
||||||
|
"name": "dryRun",
|
||||||
|
"type": "string",
|
||||||
|
"uniqueItems": true
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"description": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.",
|
||||||
|
"in": "query",
|
||||||
|
"name": "fieldManager",
|
||||||
|
"type": "string",
|
||||||
|
"uniqueItems": true
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"description": "fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields, provided that the `ServerSideFieldValidation` feature gate is also enabled. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23 and is the default behavior when the `ServerSideFieldValidation` feature gate is disabled. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default when the `ServerSideFieldValidation` feature gate is enabled. - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.",
|
||||||
|
"in": "query",
|
||||||
|
"name": "fieldValidation",
|
||||||
|
"type": "string",
|
||||||
|
"uniqueItems": true
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"produces": [
|
||||||
|
"application/json",
|
||||||
|
"application/yaml",
|
||||||
|
"application/vnd.kubernetes.protobuf"
|
||||||
|
],
|
||||||
|
"responses": {
|
||||||
|
"200": {
|
||||||
|
"description": "OK",
|
||||||
|
"schema": {
|
||||||
|
"$ref": "#/definitions/io.k8s.api.networking.v1alpha1.ClusterCIDR"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"201": {
|
||||||
|
"description": "Created",
|
||||||
|
"schema": {
|
||||||
|
"$ref": "#/definitions/io.k8s.api.networking.v1alpha1.ClusterCIDR"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"202": {
|
||||||
|
"description": "Accepted",
|
||||||
|
"schema": {
|
||||||
|
"$ref": "#/definitions/io.k8s.api.networking.v1alpha1.ClusterCIDR"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"401": {
|
||||||
|
"description": "Unauthorized"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"schemes": [
|
||||||
|
"https"
|
||||||
|
],
|
||||||
|
"tags": [
|
||||||
|
"networking_v1alpha1"
|
||||||
|
],
|
||||||
|
"x-kubernetes-action": "post",
|
||||||
|
"x-kubernetes-group-version-kind": {
|
||||||
|
"group": "networking.k8s.io",
|
||||||
|
"kind": "ClusterCIDR",
|
||||||
|
"version": "v1alpha1"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"/apis/networking.k8s.io/v1alpha1/clustercidrs/{name}": {
|
||||||
|
"delete": {
|
||||||
|
"consumes": [
|
||||||
|
"*/*"
|
||||||
|
],
|
||||||
|
"description": "delete a ClusterCIDR",
|
||||||
|
"operationId": "deleteNetworkingV1alpha1ClusterCIDR",
|
||||||
|
"parameters": [
|
||||||
|
{
|
||||||
|
"in": "body",
|
||||||
|
"name": "body",
|
||||||
|
"schema": {
|
||||||
|
"$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptions"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
|
||||||
|
"in": "query",
|
||||||
|
"name": "dryRun",
|
||||||
|
"type": "string",
|
||||||
|
"uniqueItems": true
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"description": "The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.",
|
||||||
|
"in": "query",
|
||||||
|
"name": "gracePeriodSeconds",
|
||||||
|
"type": "integer",
|
||||||
|
"uniqueItems": true
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"description": "Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.",
|
||||||
|
"in": "query",
|
||||||
|
"name": "orphanDependents",
|
||||||
|
"type": "boolean",
|
||||||
|
"uniqueItems": true
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"description": "Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.",
|
||||||
|
"in": "query",
|
||||||
|
"name": "propagationPolicy",
|
||||||
|
"type": "string",
|
||||||
|
"uniqueItems": true
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"produces": [
|
||||||
|
"application/json",
|
||||||
|
"application/yaml",
|
||||||
|
"application/vnd.kubernetes.protobuf"
|
||||||
|
],
|
||||||
|
"responses": {
|
||||||
|
"200": {
|
||||||
|
"description": "OK",
|
||||||
|
"schema": {
|
||||||
|
"$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"202": {
|
||||||
|
"description": "Accepted",
|
||||||
|
"schema": {
|
||||||
|
"$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"401": {
|
||||||
|
"description": "Unauthorized"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"schemes": [
|
||||||
|
"https"
|
||||||
|
],
|
||||||
|
"tags": [
|
||||||
|
"networking_v1alpha1"
|
||||||
|
],
|
||||||
|
"x-kubernetes-action": "delete",
|
||||||
|
"x-kubernetes-group-version-kind": {
|
||||||
|
"group": "networking.k8s.io",
|
||||||
|
"kind": "ClusterCIDR",
|
||||||
|
"version": "v1alpha1"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"get": {
|
||||||
|
"consumes": [
|
||||||
|
"*/*"
|
||||||
|
],
|
||||||
|
"description": "read the specified ClusterCIDR",
|
||||||
|
"operationId": "readNetworkingV1alpha1ClusterCIDR",
|
||||||
|
"produces": [
|
||||||
|
"application/json",
|
||||||
|
"application/yaml",
|
||||||
|
"application/vnd.kubernetes.protobuf"
|
||||||
|
],
|
||||||
|
"responses": {
|
||||||
|
"200": {
|
||||||
|
"description": "OK",
|
||||||
|
"schema": {
|
||||||
|
"$ref": "#/definitions/io.k8s.api.networking.v1alpha1.ClusterCIDR"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"401": {
|
||||||
|
"description": "Unauthorized"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"schemes": [
|
||||||
|
"https"
|
||||||
|
],
|
||||||
|
"tags": [
|
||||||
|
"networking_v1alpha1"
|
||||||
|
],
|
||||||
|
"x-kubernetes-action": "get",
|
||||||
|
"x-kubernetes-group-version-kind": {
|
||||||
|
"group": "networking.k8s.io",
|
||||||
|
"kind": "ClusterCIDR",
|
||||||
|
"version": "v1alpha1"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"parameters": [
|
||||||
|
{
|
||||||
|
"description": "name of the ClusterCIDR",
|
||||||
|
"in": "path",
|
||||||
|
"name": "name",
|
||||||
|
"required": true,
|
||||||
|
"type": "string",
|
||||||
|
"uniqueItems": true
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"description": "If 'true', then the output is pretty printed.",
|
||||||
|
"in": "query",
|
||||||
|
"name": "pretty",
|
||||||
|
"type": "string",
|
||||||
|
"uniqueItems": true
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"patch": {
|
||||||
|
"consumes": [
|
||||||
|
"application/json-patch+json",
|
||||||
|
"application/merge-patch+json",
|
||||||
|
"application/strategic-merge-patch+json",
|
||||||
|
"application/apply-patch+yaml"
|
||||||
|
],
|
||||||
|
"description": "partially update the specified ClusterCIDR",
|
||||||
|
"operationId": "patchNetworkingV1alpha1ClusterCIDR",
|
||||||
|
"parameters": [
|
||||||
|
{
|
||||||
|
"in": "body",
|
||||||
|
"name": "body",
|
||||||
|
"required": true,
|
||||||
|
"schema": {
|
||||||
|
"$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Patch"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
|
||||||
|
"in": "query",
|
||||||
|
"name": "dryRun",
|
||||||
|
"type": "string",
|
||||||
|
"uniqueItems": true
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"description": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).",
|
||||||
|
"in": "query",
|
||||||
|
"name": "fieldManager",
|
||||||
|
"type": "string",
|
||||||
|
"uniqueItems": true
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"description": "fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields, provided that the `ServerSideFieldValidation` feature gate is also enabled. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23 and is the default behavior when the `ServerSideFieldValidation` feature gate is disabled. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default when the `ServerSideFieldValidation` feature gate is enabled. - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.",
|
||||||
|
"in": "query",
|
||||||
|
"name": "fieldValidation",
|
||||||
|
"type": "string",
|
||||||
|
"uniqueItems": true
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"description": "Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.",
|
||||||
|
"in": "query",
|
||||||
|
"name": "force",
|
||||||
|
"type": "boolean",
|
||||||
|
"uniqueItems": true
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"produces": [
|
||||||
|
"application/json",
|
||||||
|
"application/yaml",
|
||||||
|
"application/vnd.kubernetes.protobuf"
|
||||||
|
],
|
||||||
|
"responses": {
|
||||||
|
"200": {
|
||||||
|
"description": "OK",
|
||||||
|
"schema": {
|
||||||
|
"$ref": "#/definitions/io.k8s.api.networking.v1alpha1.ClusterCIDR"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"201": {
|
||||||
|
"description": "Created",
|
||||||
|
"schema": {
|
||||||
|
"$ref": "#/definitions/io.k8s.api.networking.v1alpha1.ClusterCIDR"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"401": {
|
||||||
|
"description": "Unauthorized"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"schemes": [
|
||||||
|
"https"
|
||||||
|
],
|
||||||
|
"tags": [
|
||||||
|
"networking_v1alpha1"
|
||||||
|
],
|
||||||
|
"x-kubernetes-action": "patch",
|
||||||
|
"x-kubernetes-group-version-kind": {
|
||||||
|
"group": "networking.k8s.io",
|
||||||
|
"kind": "ClusterCIDR",
|
||||||
|
"version": "v1alpha1"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"put": {
|
||||||
|
"consumes": [
|
||||||
|
"*/*"
|
||||||
|
],
|
||||||
|
"description": "replace the specified ClusterCIDR",
|
||||||
|
"operationId": "replaceNetworkingV1alpha1ClusterCIDR",
|
||||||
|
"parameters": [
|
||||||
|
{
|
||||||
|
"in": "body",
|
||||||
|
"name": "body",
|
||||||
|
"required": true,
|
||||||
|
"schema": {
|
||||||
|
"$ref": "#/definitions/io.k8s.api.networking.v1alpha1.ClusterCIDR"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
|
||||||
|
"in": "query",
|
||||||
|
"name": "dryRun",
|
||||||
|
"type": "string",
|
||||||
|
"uniqueItems": true
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"description": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.",
|
||||||
|
"in": "query",
|
||||||
|
"name": "fieldManager",
|
||||||
|
"type": "string",
|
||||||
|
"uniqueItems": true
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"description": "fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields, provided that the `ServerSideFieldValidation` feature gate is also enabled. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23 and is the default behavior when the `ServerSideFieldValidation` feature gate is disabled. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default when the `ServerSideFieldValidation` feature gate is enabled. - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.",
|
||||||
|
"in": "query",
|
||||||
|
"name": "fieldValidation",
|
||||||
|
"type": "string",
|
||||||
|
"uniqueItems": true
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"produces": [
|
||||||
|
"application/json",
|
||||||
|
"application/yaml",
|
||||||
|
"application/vnd.kubernetes.protobuf"
|
||||||
|
],
|
||||||
|
"responses": {
|
||||||
|
"200": {
|
||||||
|
"description": "OK",
|
||||||
|
"schema": {
|
||||||
|
"$ref": "#/definitions/io.k8s.api.networking.v1alpha1.ClusterCIDR"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"201": {
|
||||||
|
"description": "Created",
|
||||||
|
"schema": {
|
||||||
|
"$ref": "#/definitions/io.k8s.api.networking.v1alpha1.ClusterCIDR"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"401": {
|
||||||
|
"description": "Unauthorized"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"schemes": [
|
||||||
|
"https"
|
||||||
|
],
|
||||||
|
"tags": [
|
||||||
|
"networking_v1alpha1"
|
||||||
|
],
|
||||||
|
"x-kubernetes-action": "put",
|
||||||
|
"x-kubernetes-group-version-kind": {
|
||||||
|
"group": "networking.k8s.io",
|
||||||
|
"kind": "ClusterCIDR",
|
||||||
|
"version": "v1alpha1"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"/apis/networking.k8s.io/v1alpha1/watch/clustercidrs": {
|
||||||
|
"get": {
|
||||||
|
"consumes": [
|
||||||
|
"*/*"
|
||||||
|
],
|
||||||
|
"description": "watch individual changes to a list of ClusterCIDR. deprecated: use the 'watch' parameter with a list operation instead.",
|
||||||
|
"operationId": "watchNetworkingV1alpha1ClusterCIDRList",
|
||||||
|
"produces": [
|
||||||
|
"application/json",
|
||||||
|
"application/yaml",
|
||||||
|
"application/vnd.kubernetes.protobuf",
|
||||||
|
"application/json;stream=watch",
|
||||||
|
"application/vnd.kubernetes.protobuf;stream=watch"
|
||||||
|
],
|
||||||
|
"responses": {
|
||||||
|
"200": {
|
||||||
|
"description": "OK",
|
||||||
|
"schema": {
|
||||||
|
"$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"401": {
|
||||||
|
"description": "Unauthorized"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"schemes": [
|
||||||
|
"https"
|
||||||
|
],
|
||||||
|
"tags": [
|
||||||
|
"networking_v1alpha1"
|
||||||
|
],
|
||||||
|
"x-kubernetes-action": "watchlist",
|
||||||
|
"x-kubernetes-group-version-kind": {
|
||||||
|
"group": "networking.k8s.io",
|
||||||
|
"kind": "ClusterCIDR",
|
||||||
|
"version": "v1alpha1"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"parameters": [
|
||||||
|
{
|
||||||
|
"description": "allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.",
|
||||||
|
"in": "query",
|
||||||
|
"name": "allowWatchBookmarks",
|
||||||
|
"type": "boolean",
|
||||||
|
"uniqueItems": true
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"description": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.",
|
||||||
|
"in": "query",
|
||||||
|
"name": "continue",
|
||||||
|
"type": "string",
|
||||||
|
"uniqueItems": true
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.",
|
||||||
|
"in": "query",
|
||||||
|
"name": "fieldSelector",
|
||||||
|
"type": "string",
|
||||||
|
"uniqueItems": true
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.",
|
||||||
|
"in": "query",
|
||||||
|
"name": "labelSelector",
|
||||||
|
"type": "string",
|
||||||
|
"uniqueItems": true
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"description": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.",
|
||||||
|
"in": "query",
|
||||||
|
"name": "limit",
|
||||||
|
"type": "integer",
|
||||||
|
"uniqueItems": true
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"description": "If 'true', then the output is pretty printed.",
|
||||||
|
"in": "query",
|
||||||
|
"name": "pretty",
|
||||||
|
"type": "string",
|
||||||
|
"uniqueItems": true
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"description": "resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset",
|
||||||
|
"in": "query",
|
||||||
|
"name": "resourceVersion",
|
||||||
|
"type": "string",
|
||||||
|
"uniqueItems": true
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"description": "resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset",
|
||||||
|
"in": "query",
|
||||||
|
"name": "resourceVersionMatch",
|
||||||
|
"type": "string",
|
||||||
|
"uniqueItems": true
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"description": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.",
|
||||||
|
"in": "query",
|
||||||
|
"name": "timeoutSeconds",
|
||||||
|
"type": "integer",
|
||||||
|
"uniqueItems": true
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"description": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.",
|
||||||
|
"in": "query",
|
||||||
|
"name": "watch",
|
||||||
|
"type": "boolean",
|
||||||
|
"uniqueItems": true
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"/apis/networking.k8s.io/v1alpha1/watch/clustercidrs/{name}": {
|
||||||
|
"get": {
|
||||||
|
"consumes": [
|
||||||
|
"*/*"
|
||||||
|
],
|
||||||
|
"description": "watch changes to an object of kind ClusterCIDR. deprecated: use the 'watch' parameter with a list operation instead, filtered to a single item with the 'fieldSelector' parameter.",
|
||||||
|
"operationId": "watchNetworkingV1alpha1ClusterCIDR",
|
||||||
|
"produces": [
|
||||||
|
"application/json",
|
||||||
|
"application/yaml",
|
||||||
|
"application/vnd.kubernetes.protobuf",
|
||||||
|
"application/json;stream=watch",
|
||||||
|
"application/vnd.kubernetes.protobuf;stream=watch"
|
||||||
|
],
|
||||||
|
"responses": {
|
||||||
|
"200": {
|
||||||
|
"description": "OK",
|
||||||
|
"schema": {
|
||||||
|
"$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"401": {
|
||||||
|
"description": "Unauthorized"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"schemes": [
|
||||||
|
"https"
|
||||||
|
],
|
||||||
|
"tags": [
|
||||||
|
"networking_v1alpha1"
|
||||||
|
],
|
||||||
|
"x-kubernetes-action": "watch",
|
||||||
|
"x-kubernetes-group-version-kind": {
|
||||||
|
"group": "networking.k8s.io",
|
||||||
|
"kind": "ClusterCIDR",
|
||||||
|
"version": "v1alpha1"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"parameters": [
|
||||||
|
{
|
||||||
|
"description": "allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.",
|
||||||
|
"in": "query",
|
||||||
|
"name": "allowWatchBookmarks",
|
||||||
|
"type": "boolean",
|
||||||
|
"uniqueItems": true
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"description": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.",
|
||||||
|
"in": "query",
|
||||||
|
"name": "continue",
|
||||||
|
"type": "string",
|
||||||
|
"uniqueItems": true
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.",
|
||||||
|
"in": "query",
|
||||||
|
"name": "fieldSelector",
|
||||||
|
"type": "string",
|
||||||
|
"uniqueItems": true
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.",
|
||||||
|
"in": "query",
|
||||||
|
"name": "labelSelector",
|
||||||
|
"type": "string",
|
||||||
|
"uniqueItems": true
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"description": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.",
|
||||||
|
"in": "query",
|
||||||
|
"name": "limit",
|
||||||
|
"type": "integer",
|
||||||
|
"uniqueItems": true
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"description": "name of the ClusterCIDR",
|
||||||
|
"in": "path",
|
||||||
|
"name": "name",
|
||||||
|
"required": true,
|
||||||
|
"type": "string",
|
||||||
|
"uniqueItems": true
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"description": "If 'true', then the output is pretty printed.",
|
||||||
|
"in": "query",
|
||||||
|
"name": "pretty",
|
||||||
|
"type": "string",
|
||||||
|
"uniqueItems": true
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"description": "resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset",
|
||||||
|
"in": "query",
|
||||||
|
"name": "resourceVersion",
|
||||||
|
"type": "string",
|
||||||
|
"uniqueItems": true
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"description": "resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset",
|
||||||
|
"in": "query",
|
||||||
|
"name": "resourceVersionMatch",
|
||||||
|
"type": "string",
|
||||||
|
"uniqueItems": true
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"description": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.",
|
||||||
|
"in": "query",
|
||||||
|
"name": "timeoutSeconds",
|
||||||
|
"type": "integer",
|
||||||
|
"uniqueItems": true
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"description": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.",
|
||||||
|
"in": "query",
|
||||||
|
"name": "watch",
|
||||||
|
"type": "boolean",
|
||||||
|
"uniqueItems": true
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
"/apis/node.k8s.io/": {
|
"/apis/node.k8s.io/": {
|
||||||
"get": {
|
"get": {
|
||||||
"consumes": [
|
"consumes": [
|
||||||
|
@ -8189,6 +8189,11 @@
|
|||||||
"kind": "DeleteOptions",
|
"kind": "DeleteOptions",
|
||||||
"version": "v1"
|
"version": "v1"
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"group": "networking.k8s.io",
|
||||||
|
"kind": "DeleteOptions",
|
||||||
|
"version": "v1alpha1"
|
||||||
|
},
|
||||||
{
|
{
|
||||||
"group": "networking.k8s.io",
|
"group": "networking.k8s.io",
|
||||||
"kind": "DeleteOptions",
|
"kind": "DeleteOptions",
|
||||||
@ -8889,6 +8894,11 @@
|
|||||||
"kind": "WatchEvent",
|
"kind": "WatchEvent",
|
||||||
"version": "v1"
|
"version": "v1"
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"group": "networking.k8s.io",
|
||||||
|
"kind": "WatchEvent",
|
||||||
|
"version": "v1alpha1"
|
||||||
|
},
|
||||||
{
|
{
|
||||||
"group": "networking.k8s.io",
|
"group": "networking.k8s.io",
|
||||||
"kind": "WatchEvent",
|
"kind": "WatchEvent",
|
||||||
|
@ -774,6 +774,11 @@
|
|||||||
"kind": "DeleteOptions",
|
"kind": "DeleteOptions",
|
||||||
"version": "v1"
|
"version": "v1"
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"group": "networking.k8s.io",
|
||||||
|
"kind": "DeleteOptions",
|
||||||
|
"version": "v1alpha1"
|
||||||
|
},
|
||||||
{
|
{
|
||||||
"group": "networking.k8s.io",
|
"group": "networking.k8s.io",
|
||||||
"kind": "DeleteOptions",
|
"kind": "DeleteOptions",
|
||||||
@ -1469,6 +1474,11 @@
|
|||||||
"kind": "WatchEvent",
|
"kind": "WatchEvent",
|
||||||
"version": "v1"
|
"version": "v1"
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"group": "networking.k8s.io",
|
||||||
|
"kind": "WatchEvent",
|
||||||
|
"version": "v1alpha1"
|
||||||
|
},
|
||||||
{
|
{
|
||||||
"group": "networking.k8s.io",
|
"group": "networking.k8s.io",
|
||||||
"kind": "WatchEvent",
|
"kind": "WatchEvent",
|
||||||
|
@ -1163,6 +1163,11 @@
|
|||||||
"kind": "DeleteOptions",
|
"kind": "DeleteOptions",
|
||||||
"version": "v1"
|
"version": "v1"
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"group": "networking.k8s.io",
|
||||||
|
"kind": "DeleteOptions",
|
||||||
|
"version": "v1alpha1"
|
||||||
|
},
|
||||||
{
|
{
|
||||||
"group": "networking.k8s.io",
|
"group": "networking.k8s.io",
|
||||||
"kind": "DeleteOptions",
|
"kind": "DeleteOptions",
|
||||||
@ -1801,6 +1806,11 @@
|
|||||||
"kind": "WatchEvent",
|
"kind": "WatchEvent",
|
||||||
"version": "v1"
|
"version": "v1"
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"group": "networking.k8s.io",
|
||||||
|
"kind": "WatchEvent",
|
||||||
|
"version": "v1alpha1"
|
||||||
|
},
|
||||||
{
|
{
|
||||||
"group": "networking.k8s.io",
|
"group": "networking.k8s.io",
|
||||||
"kind": "WatchEvent",
|
"kind": "WatchEvent",
|
||||||
|
@ -5186,6 +5186,11 @@
|
|||||||
"kind": "DeleteOptions",
|
"kind": "DeleteOptions",
|
||||||
"version": "v1"
|
"version": "v1"
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"group": "networking.k8s.io",
|
||||||
|
"kind": "DeleteOptions",
|
||||||
|
"version": "v1alpha1"
|
||||||
|
},
|
||||||
{
|
{
|
||||||
"group": "networking.k8s.io",
|
"group": "networking.k8s.io",
|
||||||
"kind": "DeleteOptions",
|
"kind": "DeleteOptions",
|
||||||
@ -5881,6 +5886,11 @@
|
|||||||
"kind": "WatchEvent",
|
"kind": "WatchEvent",
|
||||||
"version": "v1"
|
"version": "v1"
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"group": "networking.k8s.io",
|
||||||
|
"kind": "WatchEvent",
|
||||||
|
"version": "v1alpha1"
|
||||||
|
},
|
||||||
{
|
{
|
||||||
"group": "networking.k8s.io",
|
"group": "networking.k8s.io",
|
||||||
"kind": "WatchEvent",
|
"kind": "WatchEvent",
|
||||||
|
@ -533,6 +533,11 @@
|
|||||||
"kind": "DeleteOptions",
|
"kind": "DeleteOptions",
|
||||||
"version": "v1"
|
"version": "v1"
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"group": "networking.k8s.io",
|
||||||
|
"kind": "DeleteOptions",
|
||||||
|
"version": "v1alpha1"
|
||||||
|
},
|
||||||
{
|
{
|
||||||
"group": "networking.k8s.io",
|
"group": "networking.k8s.io",
|
||||||
"kind": "DeleteOptions",
|
"kind": "DeleteOptions",
|
||||||
@ -1171,6 +1176,11 @@
|
|||||||
"kind": "WatchEvent",
|
"kind": "WatchEvent",
|
||||||
"version": "v1"
|
"version": "v1"
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"group": "networking.k8s.io",
|
||||||
|
"kind": "WatchEvent",
|
||||||
|
"version": "v1alpha1"
|
||||||
|
},
|
||||||
{
|
{
|
||||||
"group": "networking.k8s.io",
|
"group": "networking.k8s.io",
|
||||||
"kind": "WatchEvent",
|
"kind": "WatchEvent",
|
||||||
|
@ -1186,6 +1186,11 @@
|
|||||||
"kind": "DeleteOptions",
|
"kind": "DeleteOptions",
|
||||||
"version": "v1"
|
"version": "v1"
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"group": "networking.k8s.io",
|
||||||
|
"kind": "DeleteOptions",
|
||||||
|
"version": "v1alpha1"
|
||||||
|
},
|
||||||
{
|
{
|
||||||
"group": "networking.k8s.io",
|
"group": "networking.k8s.io",
|
||||||
"kind": "DeleteOptions",
|
"kind": "DeleteOptions",
|
||||||
@ -1881,6 +1886,11 @@
|
|||||||
"kind": "WatchEvent",
|
"kind": "WatchEvent",
|
||||||
"version": "v1"
|
"version": "v1"
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"group": "networking.k8s.io",
|
||||||
|
"kind": "WatchEvent",
|
||||||
|
"version": "v1alpha1"
|
||||||
|
},
|
||||||
{
|
{
|
||||||
"group": "networking.k8s.io",
|
"group": "networking.k8s.io",
|
||||||
"kind": "WatchEvent",
|
"kind": "WatchEvent",
|
||||||
|
@ -1177,6 +1177,11 @@
|
|||||||
"kind": "DeleteOptions",
|
"kind": "DeleteOptions",
|
||||||
"version": "v1"
|
"version": "v1"
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"group": "networking.k8s.io",
|
||||||
|
"kind": "DeleteOptions",
|
||||||
|
"version": "v1alpha1"
|
||||||
|
},
|
||||||
{
|
{
|
||||||
"group": "networking.k8s.io",
|
"group": "networking.k8s.io",
|
||||||
"kind": "DeleteOptions",
|
"kind": "DeleteOptions",
|
||||||
@ -1872,6 +1877,11 @@
|
|||||||
"kind": "WatchEvent",
|
"kind": "WatchEvent",
|
||||||
"version": "v1"
|
"version": "v1"
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"group": "networking.k8s.io",
|
||||||
|
"kind": "WatchEvent",
|
||||||
|
"version": "v1alpha1"
|
||||||
|
},
|
||||||
{
|
{
|
||||||
"group": "networking.k8s.io",
|
"group": "networking.k8s.io",
|
||||||
"kind": "WatchEvent",
|
"kind": "WatchEvent",
|
||||||
|
@ -4380,6 +4380,11 @@
|
|||||||
"kind": "DeleteOptions",
|
"kind": "DeleteOptions",
|
||||||
"version": "v1"
|
"version": "v1"
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"group": "networking.k8s.io",
|
||||||
|
"kind": "DeleteOptions",
|
||||||
|
"version": "v1alpha1"
|
||||||
|
},
|
||||||
{
|
{
|
||||||
"group": "networking.k8s.io",
|
"group": "networking.k8s.io",
|
||||||
"kind": "DeleteOptions",
|
"kind": "DeleteOptions",
|
||||||
@ -5075,6 +5080,11 @@
|
|||||||
"kind": "WatchEvent",
|
"kind": "WatchEvent",
|
||||||
"version": "v1"
|
"version": "v1"
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"group": "networking.k8s.io",
|
||||||
|
"kind": "WatchEvent",
|
||||||
|
"version": "v1alpha1"
|
||||||
|
},
|
||||||
{
|
{
|
||||||
"group": "networking.k8s.io",
|
"group": "networking.k8s.io",
|
||||||
"kind": "WatchEvent",
|
"kind": "WatchEvent",
|
||||||
|
@ -571,6 +571,11 @@
|
|||||||
"kind": "DeleteOptions",
|
"kind": "DeleteOptions",
|
||||||
"version": "v1"
|
"version": "v1"
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"group": "networking.k8s.io",
|
||||||
|
"kind": "DeleteOptions",
|
||||||
|
"version": "v1alpha1"
|
||||||
|
},
|
||||||
{
|
{
|
||||||
"group": "networking.k8s.io",
|
"group": "networking.k8s.io",
|
||||||
"kind": "DeleteOptions",
|
"kind": "DeleteOptions",
|
||||||
@ -1209,6 +1214,11 @@
|
|||||||
"kind": "WatchEvent",
|
"kind": "WatchEvent",
|
||||||
"version": "v1"
|
"version": "v1"
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"group": "networking.k8s.io",
|
||||||
|
"kind": "WatchEvent",
|
||||||
|
"version": "v1alpha1"
|
||||||
|
},
|
||||||
{
|
{
|
||||||
"group": "networking.k8s.io",
|
"group": "networking.k8s.io",
|
||||||
"kind": "WatchEvent",
|
"kind": "WatchEvent",
|
||||||
|
@ -460,6 +460,11 @@
|
|||||||
"kind": "DeleteOptions",
|
"kind": "DeleteOptions",
|
||||||
"version": "v1"
|
"version": "v1"
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"group": "networking.k8s.io",
|
||||||
|
"kind": "DeleteOptions",
|
||||||
|
"version": "v1alpha1"
|
||||||
|
},
|
||||||
{
|
{
|
||||||
"group": "networking.k8s.io",
|
"group": "networking.k8s.io",
|
||||||
"kind": "DeleteOptions",
|
"kind": "DeleteOptions",
|
||||||
@ -1103,6 +1108,11 @@
|
|||||||
"kind": "WatchEvent",
|
"kind": "WatchEvent",
|
||||||
"version": "v1"
|
"version": "v1"
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"group": "networking.k8s.io",
|
||||||
|
"kind": "WatchEvent",
|
||||||
|
"version": "v1alpha1"
|
||||||
|
},
|
||||||
{
|
{
|
||||||
"group": "networking.k8s.io",
|
"group": "networking.k8s.io",
|
||||||
"kind": "WatchEvent",
|
"kind": "WatchEvent",
|
||||||
|
@ -623,6 +623,11 @@
|
|||||||
"kind": "DeleteOptions",
|
"kind": "DeleteOptions",
|
||||||
"version": "v1"
|
"version": "v1"
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"group": "networking.k8s.io",
|
||||||
|
"kind": "DeleteOptions",
|
||||||
|
"version": "v1alpha1"
|
||||||
|
},
|
||||||
{
|
{
|
||||||
"group": "networking.k8s.io",
|
"group": "networking.k8s.io",
|
||||||
"kind": "DeleteOptions",
|
"kind": "DeleteOptions",
|
||||||
@ -1261,6 +1266,11 @@
|
|||||||
"kind": "WatchEvent",
|
"kind": "WatchEvent",
|
||||||
"version": "v1"
|
"version": "v1"
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"group": "networking.k8s.io",
|
||||||
|
"kind": "WatchEvent",
|
||||||
|
"version": "v1alpha1"
|
||||||
|
},
|
||||||
{
|
{
|
||||||
"group": "networking.k8s.io",
|
"group": "networking.k8s.io",
|
||||||
"kind": "WatchEvent",
|
"kind": "WatchEvent",
|
||||||
|
@ -582,6 +582,11 @@
|
|||||||
"kind": "DeleteOptions",
|
"kind": "DeleteOptions",
|
||||||
"version": "v1"
|
"version": "v1"
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"group": "networking.k8s.io",
|
||||||
|
"kind": "DeleteOptions",
|
||||||
|
"version": "v1alpha1"
|
||||||
|
},
|
||||||
{
|
{
|
||||||
"group": "networking.k8s.io",
|
"group": "networking.k8s.io",
|
||||||
"kind": "DeleteOptions",
|
"kind": "DeleteOptions",
|
||||||
@ -1225,6 +1230,11 @@
|
|||||||
"kind": "WatchEvent",
|
"kind": "WatchEvent",
|
||||||
"version": "v1"
|
"version": "v1"
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"group": "networking.k8s.io",
|
||||||
|
"kind": "WatchEvent",
|
||||||
|
"version": "v1alpha1"
|
||||||
|
},
|
||||||
{
|
{
|
||||||
"group": "networking.k8s.io",
|
"group": "networking.k8s.io",
|
||||||
"kind": "WatchEvent",
|
"kind": "WatchEvent",
|
||||||
|
@ -1033,6 +1033,11 @@
|
|||||||
"kind": "DeleteOptions",
|
"kind": "DeleteOptions",
|
||||||
"version": "v1"
|
"version": "v1"
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"group": "networking.k8s.io",
|
||||||
|
"kind": "DeleteOptions",
|
||||||
|
"version": "v1alpha1"
|
||||||
|
},
|
||||||
{
|
{
|
||||||
"group": "networking.k8s.io",
|
"group": "networking.k8s.io",
|
||||||
"kind": "DeleteOptions",
|
"kind": "DeleteOptions",
|
||||||
@ -1671,6 +1676,11 @@
|
|||||||
"kind": "WatchEvent",
|
"kind": "WatchEvent",
|
||||||
"version": "v1"
|
"version": "v1"
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"group": "networking.k8s.io",
|
||||||
|
"kind": "WatchEvent",
|
||||||
|
"version": "v1alpha1"
|
||||||
|
},
|
||||||
{
|
{
|
||||||
"group": "networking.k8s.io",
|
"group": "networking.k8s.io",
|
||||||
"kind": "WatchEvent",
|
"kind": "WatchEvent",
|
||||||
|
@ -1033,6 +1033,11 @@
|
|||||||
"kind": "DeleteOptions",
|
"kind": "DeleteOptions",
|
||||||
"version": "v1"
|
"version": "v1"
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"group": "networking.k8s.io",
|
||||||
|
"kind": "DeleteOptions",
|
||||||
|
"version": "v1alpha1"
|
||||||
|
},
|
||||||
{
|
{
|
||||||
"group": "networking.k8s.io",
|
"group": "networking.k8s.io",
|
||||||
"kind": "DeleteOptions",
|
"kind": "DeleteOptions",
|
||||||
@ -1671,6 +1676,11 @@
|
|||||||
"kind": "WatchEvent",
|
"kind": "WatchEvent",
|
||||||
"version": "v1"
|
"version": "v1"
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"group": "networking.k8s.io",
|
||||||
|
"kind": "WatchEvent",
|
||||||
|
"version": "v1alpha1"
|
||||||
|
},
|
||||||
{
|
{
|
||||||
"group": "networking.k8s.io",
|
"group": "networking.k8s.io",
|
||||||
"kind": "WatchEvent",
|
"kind": "WatchEvent",
|
||||||
|
@ -550,6 +550,11 @@
|
|||||||
"kind": "DeleteOptions",
|
"kind": "DeleteOptions",
|
||||||
"version": "v1"
|
"version": "v1"
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"group": "networking.k8s.io",
|
||||||
|
"kind": "DeleteOptions",
|
||||||
|
"version": "v1alpha1"
|
||||||
|
},
|
||||||
{
|
{
|
||||||
"group": "networking.k8s.io",
|
"group": "networking.k8s.io",
|
||||||
"kind": "DeleteOptions",
|
"kind": "DeleteOptions",
|
||||||
@ -1188,6 +1193,11 @@
|
|||||||
"kind": "WatchEvent",
|
"kind": "WatchEvent",
|
||||||
"version": "v1"
|
"version": "v1"
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"group": "networking.k8s.io",
|
||||||
|
"kind": "WatchEvent",
|
||||||
|
"version": "v1alpha1"
|
||||||
|
},
|
||||||
{
|
{
|
||||||
"group": "networking.k8s.io",
|
"group": "networking.k8s.io",
|
||||||
"kind": "WatchEvent",
|
"kind": "WatchEvent",
|
||||||
|
@ -1210,6 +1210,11 @@
|
|||||||
"kind": "DeleteOptions",
|
"kind": "DeleteOptions",
|
||||||
"version": "v1"
|
"version": "v1"
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"group": "networking.k8s.io",
|
||||||
|
"kind": "DeleteOptions",
|
||||||
|
"version": "v1alpha1"
|
||||||
|
},
|
||||||
{
|
{
|
||||||
"group": "networking.k8s.io",
|
"group": "networking.k8s.io",
|
||||||
"kind": "DeleteOptions",
|
"kind": "DeleteOptions",
|
||||||
@ -1905,6 +1910,11 @@
|
|||||||
"kind": "WatchEvent",
|
"kind": "WatchEvent",
|
||||||
"version": "v1"
|
"version": "v1"
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"group": "networking.k8s.io",
|
||||||
|
"kind": "WatchEvent",
|
||||||
|
"version": "v1alpha1"
|
||||||
|
},
|
||||||
{
|
{
|
||||||
"group": "networking.k8s.io",
|
"group": "networking.k8s.io",
|
||||||
"kind": "WatchEvent",
|
"kind": "WatchEvent",
|
||||||
|
2405
api/openapi-spec/v3/apis__networking.k8s.io__v1alpha1_openapi.json
Normal file
2405
api/openapi-spec/v3/apis__networking.k8s.io__v1alpha1_openapi.json
Normal file
File diff suppressed because it is too large
Load Diff
@ -523,6 +523,11 @@
|
|||||||
"kind": "DeleteOptions",
|
"kind": "DeleteOptions",
|
||||||
"version": "v1"
|
"version": "v1"
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"group": "networking.k8s.io",
|
||||||
|
"kind": "DeleteOptions",
|
||||||
|
"version": "v1alpha1"
|
||||||
|
},
|
||||||
{
|
{
|
||||||
"group": "networking.k8s.io",
|
"group": "networking.k8s.io",
|
||||||
"kind": "DeleteOptions",
|
"kind": "DeleteOptions",
|
||||||
@ -1161,6 +1166,11 @@
|
|||||||
"kind": "WatchEvent",
|
"kind": "WatchEvent",
|
||||||
"version": "v1"
|
"version": "v1"
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"group": "networking.k8s.io",
|
||||||
|
"kind": "WatchEvent",
|
||||||
|
"version": "v1alpha1"
|
||||||
|
},
|
||||||
{
|
{
|
||||||
"group": "networking.k8s.io",
|
"group": "networking.k8s.io",
|
||||||
"kind": "WatchEvent",
|
"kind": "WatchEvent",
|
||||||
|
@ -582,6 +582,11 @@
|
|||||||
"kind": "DeleteOptions",
|
"kind": "DeleteOptions",
|
||||||
"version": "v1"
|
"version": "v1"
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"group": "networking.k8s.io",
|
||||||
|
"kind": "DeleteOptions",
|
||||||
|
"version": "v1alpha1"
|
||||||
|
},
|
||||||
{
|
{
|
||||||
"group": "networking.k8s.io",
|
"group": "networking.k8s.io",
|
||||||
"kind": "DeleteOptions",
|
"kind": "DeleteOptions",
|
||||||
@ -1277,6 +1282,11 @@
|
|||||||
"kind": "WatchEvent",
|
"kind": "WatchEvent",
|
||||||
"version": "v1"
|
"version": "v1"
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"group": "networking.k8s.io",
|
||||||
|
"kind": "WatchEvent",
|
||||||
|
"version": "v1alpha1"
|
||||||
|
},
|
||||||
{
|
{
|
||||||
"group": "networking.k8s.io",
|
"group": "networking.k8s.io",
|
||||||
"kind": "WatchEvent",
|
"kind": "WatchEvent",
|
||||||
|
@ -843,6 +843,11 @@
|
|||||||
"kind": "DeleteOptions",
|
"kind": "DeleteOptions",
|
||||||
"version": "v1"
|
"version": "v1"
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"group": "networking.k8s.io",
|
||||||
|
"kind": "DeleteOptions",
|
||||||
|
"version": "v1alpha1"
|
||||||
|
},
|
||||||
{
|
{
|
||||||
"group": "networking.k8s.io",
|
"group": "networking.k8s.io",
|
||||||
"kind": "DeleteOptions",
|
"kind": "DeleteOptions",
|
||||||
@ -1538,6 +1543,11 @@
|
|||||||
"kind": "WatchEvent",
|
"kind": "WatchEvent",
|
||||||
"version": "v1"
|
"version": "v1"
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"group": "networking.k8s.io",
|
||||||
|
"kind": "WatchEvent",
|
||||||
|
"version": "v1alpha1"
|
||||||
|
},
|
||||||
{
|
{
|
||||||
"group": "networking.k8s.io",
|
"group": "networking.k8s.io",
|
||||||
"kind": "WatchEvent",
|
"kind": "WatchEvent",
|
||||||
|
@ -436,6 +436,11 @@
|
|||||||
"kind": "DeleteOptions",
|
"kind": "DeleteOptions",
|
||||||
"version": "v1"
|
"version": "v1"
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"group": "networking.k8s.io",
|
||||||
|
"kind": "DeleteOptions",
|
||||||
|
"version": "v1alpha1"
|
||||||
|
},
|
||||||
{
|
{
|
||||||
"group": "networking.k8s.io",
|
"group": "networking.k8s.io",
|
||||||
"kind": "DeleteOptions",
|
"kind": "DeleteOptions",
|
||||||
@ -1074,6 +1079,11 @@
|
|||||||
"kind": "WatchEvent",
|
"kind": "WatchEvent",
|
||||||
"version": "v1"
|
"version": "v1"
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"group": "networking.k8s.io",
|
||||||
|
"kind": "WatchEvent",
|
||||||
|
"version": "v1alpha1"
|
||||||
|
},
|
||||||
{
|
{
|
||||||
"group": "networking.k8s.io",
|
"group": "networking.k8s.io",
|
||||||
"kind": "WatchEvent",
|
"kind": "WatchEvent",
|
||||||
|
@ -2275,6 +2275,11 @@
|
|||||||
"kind": "DeleteOptions",
|
"kind": "DeleteOptions",
|
||||||
"version": "v1"
|
"version": "v1"
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"group": "networking.k8s.io",
|
||||||
|
"kind": "DeleteOptions",
|
||||||
|
"version": "v1alpha1"
|
||||||
|
},
|
||||||
{
|
{
|
||||||
"group": "networking.k8s.io",
|
"group": "networking.k8s.io",
|
||||||
"kind": "DeleteOptions",
|
"kind": "DeleteOptions",
|
||||||
@ -2970,6 +2975,11 @@
|
|||||||
"kind": "WatchEvent",
|
"kind": "WatchEvent",
|
||||||
"version": "v1"
|
"version": "v1"
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"group": "networking.k8s.io",
|
||||||
|
"kind": "WatchEvent",
|
||||||
|
"version": "v1alpha1"
|
||||||
|
},
|
||||||
{
|
{
|
||||||
"group": "networking.k8s.io",
|
"group": "networking.k8s.io",
|
||||||
"kind": "WatchEvent",
|
"kind": "WatchEvent",
|
||||||
|
@ -462,6 +462,11 @@
|
|||||||
"kind": "DeleteOptions",
|
"kind": "DeleteOptions",
|
||||||
"version": "v1"
|
"version": "v1"
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"group": "networking.k8s.io",
|
||||||
|
"kind": "DeleteOptions",
|
||||||
|
"version": "v1alpha1"
|
||||||
|
},
|
||||||
{
|
{
|
||||||
"group": "networking.k8s.io",
|
"group": "networking.k8s.io",
|
||||||
"kind": "DeleteOptions",
|
"kind": "DeleteOptions",
|
||||||
@ -1157,6 +1162,11 @@
|
|||||||
"kind": "WatchEvent",
|
"kind": "WatchEvent",
|
||||||
"version": "v1"
|
"version": "v1"
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"group": "networking.k8s.io",
|
||||||
|
"kind": "WatchEvent",
|
||||||
|
"version": "v1alpha1"
|
||||||
|
},
|
||||||
{
|
{
|
||||||
"group": "networking.k8s.io",
|
"group": "networking.k8s.io",
|
||||||
"kind": "WatchEvent",
|
"kind": "WatchEvent",
|
||||||
|
@ -39,4 +39,5 @@ rules:
|
|||||||
- k8s.io/kubernetes/pkg/util/taints
|
- k8s.io/kubernetes/pkg/util/taints
|
||||||
- k8s.io/kubernetes/pkg/proxy/util
|
- k8s.io/kubernetes/pkg/proxy/util
|
||||||
- k8s.io/kubernetes/pkg/proxy/util/testing
|
- k8s.io/kubernetes/pkg/proxy/util/testing
|
||||||
|
- k8s.io/kubernetes/pkg/util/slice
|
||||||
- k8s.io/kubernetes/pkg/util/sysctl
|
- k8s.io/kubernetes/pkg/util/sysctl
|
@ -26,6 +26,8 @@ import (
|
|||||||
"net"
|
"net"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
|
utilfeature "k8s.io/apiserver/pkg/util/feature"
|
||||||
|
"k8s.io/client-go/informers/networking/v1alpha1"
|
||||||
cloudprovider "k8s.io/cloud-provider"
|
cloudprovider "k8s.io/cloud-provider"
|
||||||
"k8s.io/cloud-provider/app"
|
"k8s.io/cloud-provider/app"
|
||||||
cloudcontrollerconfig "k8s.io/cloud-provider/app/config"
|
cloudcontrollerconfig "k8s.io/cloud-provider/app/config"
|
||||||
@ -36,6 +38,7 @@ import (
|
|||||||
nodeipamcontroller "k8s.io/kubernetes/pkg/controller/nodeipam"
|
nodeipamcontroller "k8s.io/kubernetes/pkg/controller/nodeipam"
|
||||||
nodeipamconfig "k8s.io/kubernetes/pkg/controller/nodeipam/config"
|
nodeipamconfig "k8s.io/kubernetes/pkg/controller/nodeipam/config"
|
||||||
"k8s.io/kubernetes/pkg/controller/nodeipam/ipam"
|
"k8s.io/kubernetes/pkg/controller/nodeipam/ipam"
|
||||||
|
"k8s.io/kubernetes/pkg/features"
|
||||||
netutils "k8s.io/utils/net"
|
netutils "k8s.io/utils/net"
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -120,8 +123,14 @@ func startNodeIpamController(initContext app.ControllerInitContext, ccmConfig *c
|
|||||||
return nil, false, err
|
return nil, false, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
var clusterCIDRInformer v1alpha1.ClusterCIDRInformer
|
||||||
|
if utilfeature.DefaultFeatureGate.Enabled(features.MultiCIDRRangeAllocator) {
|
||||||
|
clusterCIDRInformer = ctx.InformerFactory.Networking().V1alpha1().ClusterCIDRs()
|
||||||
|
}
|
||||||
|
|
||||||
nodeIpamController, err := nodeipamcontroller.NewNodeIpamController(
|
nodeIpamController, err := nodeipamcontroller.NewNodeIpamController(
|
||||||
ctx.InformerFactory.Core().V1().Nodes(),
|
ctx.InformerFactory.Core().V1().Nodes(),
|
||||||
|
clusterCIDRInformer,
|
||||||
cloud,
|
cloud,
|
||||||
ctx.ClientBuilder.ClientOrDie(initContext.ClientName),
|
ctx.ClientBuilder.ClientOrDie(initContext.ClientName),
|
||||||
clusterCIDRs,
|
clusterCIDRs,
|
||||||
|
@ -260,6 +260,7 @@ var apiVersionPriorities = map[schema.GroupVersion]priority{
|
|||||||
{Group: "batch", Version: "v2alpha1"}: {group: 17400, version: 9},
|
{Group: "batch", Version: "v2alpha1"}: {group: 17400, version: 9},
|
||||||
{Group: "certificates.k8s.io", Version: "v1"}: {group: 17300, version: 15},
|
{Group: "certificates.k8s.io", Version: "v1"}: {group: 17300, version: 15},
|
||||||
{Group: "networking.k8s.io", Version: "v1"}: {group: 17200, version: 15},
|
{Group: "networking.k8s.io", Version: "v1"}: {group: 17200, version: 15},
|
||||||
|
{Group: "networking.k8s.io", Version: "v1alpha1"}: {group: 17200, version: 1},
|
||||||
{Group: "policy", Version: "v1"}: {group: 17100, version: 15},
|
{Group: "policy", Version: "v1"}: {group: 17100, version: 15},
|
||||||
{Group: "policy", Version: "v1beta1"}: {group: 17100, version: 9},
|
{Group: "policy", Version: "v1beta1"}: {group: 17100, version: 9},
|
||||||
{Group: "rbac.authorization.k8s.io", Version: "v1"}: {group: 17000, version: 15},
|
{Group: "rbac.authorization.k8s.io", Version: "v1"}: {group: 17000, version: 15},
|
||||||
|
@ -27,7 +27,9 @@ import (
|
|||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"k8s.io/client-go/informers/networking/v1alpha1"
|
||||||
"k8s.io/klog/v2"
|
"k8s.io/klog/v2"
|
||||||
|
"k8s.io/kubernetes/pkg/features"
|
||||||
|
|
||||||
v1 "k8s.io/api/core/v1"
|
v1 "k8s.io/api/core/v1"
|
||||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||||
@ -153,8 +155,14 @@ func startNodeIpamController(ctx context.Context, controllerContext ControllerCo
|
|||||||
return nil, false, err
|
return nil, false, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
var clusterCIDRInformer v1alpha1.ClusterCIDRInformer
|
||||||
|
if utilfeature.DefaultFeatureGate.Enabled(features.MultiCIDRRangeAllocator) {
|
||||||
|
clusterCIDRInformer = controllerContext.InformerFactory.Networking().V1alpha1().ClusterCIDRs()
|
||||||
|
}
|
||||||
|
|
||||||
nodeIpamController, err := nodeipamcontroller.NewNodeIpamController(
|
nodeIpamController, err := nodeipamcontroller.NewNodeIpamController(
|
||||||
controllerContext.InformerFactory.Core().V1().Nodes(),
|
controllerContext.InformerFactory.Core().V1().Nodes(),
|
||||||
|
clusterCIDRInformer,
|
||||||
controllerContext.Cloud,
|
controllerContext.Cloud,
|
||||||
controllerContext.ClientBuilder.ClientOrDie("node-controller"),
|
controllerContext.ClientBuilder.ClientOrDie("node-controller"),
|
||||||
clusterCIDRs,
|
clusterCIDRs,
|
||||||
|
@ -92,6 +92,7 @@ events.k8s.io/v1 \
|
|||||||
events.k8s.io/v1beta1 \
|
events.k8s.io/v1beta1 \
|
||||||
imagepolicy.k8s.io/v1alpha1 \
|
imagepolicy.k8s.io/v1alpha1 \
|
||||||
networking.k8s.io/v1 \
|
networking.k8s.io/v1 \
|
||||||
|
networking.k8s.io/v1alpha1 \
|
||||||
networking.k8s.io/v1beta1 \
|
networking.k8s.io/v1beta1 \
|
||||||
node.k8s.io/v1 \
|
node.k8s.io/v1 \
|
||||||
node.k8s.io/v1alpha1 \
|
node.k8s.io/v1alpha1 \
|
||||||
|
@ -370,3 +370,62 @@ func ScopedResourceSelectorRequirementsAsSelector(ssr v1.ScopedResourceSelectorR
|
|||||||
selector = selector.Add(*r)
|
selector = selector.Add(*r)
|
||||||
return selector, nil
|
return selector, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// nodeSelectorRequirementsAsLabelRequirements converts the NodeSelectorRequirement
|
||||||
|
// type to a labels.Requirement type.
|
||||||
|
func nodeSelectorRequirementsAsLabelRequirements(nsr v1.NodeSelectorRequirement) (*labels.Requirement, error) {
|
||||||
|
var op selection.Operator
|
||||||
|
switch nsr.Operator {
|
||||||
|
case v1.NodeSelectorOpIn:
|
||||||
|
op = selection.In
|
||||||
|
case v1.NodeSelectorOpNotIn:
|
||||||
|
op = selection.NotIn
|
||||||
|
case v1.NodeSelectorOpExists:
|
||||||
|
op = selection.Exists
|
||||||
|
case v1.NodeSelectorOpDoesNotExist:
|
||||||
|
op = selection.DoesNotExist
|
||||||
|
case v1.NodeSelectorOpGt:
|
||||||
|
op = selection.GreaterThan
|
||||||
|
case v1.NodeSelectorOpLt:
|
||||||
|
op = selection.LessThan
|
||||||
|
default:
|
||||||
|
return nil, fmt.Errorf("%q is not a valid node selector operator", nsr.Operator)
|
||||||
|
}
|
||||||
|
return labels.NewRequirement(nsr.Key, op, nsr.Values)
|
||||||
|
}
|
||||||
|
|
||||||
|
// NodeSelectorAsSelector converts the NodeSelector api type into a struct that
|
||||||
|
// implements labels.Selector
|
||||||
|
// Note: This function should be kept in sync with the selector methods in
|
||||||
|
// pkg/labels/selector.go
|
||||||
|
func NodeSelectorAsSelector(ns *v1.NodeSelector) (labels.Selector, error) {
|
||||||
|
if ns == nil {
|
||||||
|
return labels.Nothing(), nil
|
||||||
|
}
|
||||||
|
if len(ns.NodeSelectorTerms) == 0 {
|
||||||
|
return labels.Everything(), nil
|
||||||
|
}
|
||||||
|
var requirements []labels.Requirement
|
||||||
|
|
||||||
|
for _, nsTerm := range ns.NodeSelectorTerms {
|
||||||
|
for _, expr := range nsTerm.MatchExpressions {
|
||||||
|
req, err := nodeSelectorRequirementsAsLabelRequirements(expr)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
requirements = append(requirements, *req)
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, field := range nsTerm.MatchFields {
|
||||||
|
req, err := nodeSelectorRequirementsAsLabelRequirements(field)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
requirements = append(requirements, *req)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
selector := labels.NewSelector()
|
||||||
|
selector = selector.Add(requirements...)
|
||||||
|
return selector, nil
|
||||||
|
}
|
||||||
|
@ -24,6 +24,7 @@ import (
|
|||||||
"k8s.io/kubernetes/pkg/api/legacyscheme"
|
"k8s.io/kubernetes/pkg/api/legacyscheme"
|
||||||
"k8s.io/kubernetes/pkg/apis/networking"
|
"k8s.io/kubernetes/pkg/apis/networking"
|
||||||
"k8s.io/kubernetes/pkg/apis/networking/v1"
|
"k8s.io/kubernetes/pkg/apis/networking/v1"
|
||||||
|
"k8s.io/kubernetes/pkg/apis/networking/v1alpha1"
|
||||||
"k8s.io/kubernetes/pkg/apis/networking/v1beta1"
|
"k8s.io/kubernetes/pkg/apis/networking/v1beta1"
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -36,5 +37,6 @@ func Install(scheme *runtime.Scheme) {
|
|||||||
utilruntime.Must(networking.AddToScheme(scheme))
|
utilruntime.Must(networking.AddToScheme(scheme))
|
||||||
utilruntime.Must(v1.AddToScheme(scheme))
|
utilruntime.Must(v1.AddToScheme(scheme))
|
||||||
utilruntime.Must(v1beta1.AddToScheme(scheme))
|
utilruntime.Must(v1beta1.AddToScheme(scheme))
|
||||||
utilruntime.Must(scheme.SetVersionPriority(v1.SchemeGroupVersion, v1beta1.SchemeGroupVersion))
|
utilruntime.Must(v1alpha1.AddToScheme(scheme))
|
||||||
|
utilruntime.Must(scheme.SetVersionPriority(v1.SchemeGroupVersion, v1beta1.SchemeGroupVersion, v1alpha1.SchemeGroupVersion))
|
||||||
}
|
}
|
||||||
|
@ -52,6 +52,8 @@ func addKnownTypes(scheme *runtime.Scheme) error {
|
|||||||
&IngressList{},
|
&IngressList{},
|
||||||
&IngressClass{},
|
&IngressClass{},
|
||||||
&IngressClassList{},
|
&IngressClassList{},
|
||||||
|
&ClusterCIDR{},
|
||||||
|
&ClusterCIDRList{},
|
||||||
)
|
)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
@ -583,3 +583,67 @@ type ServiceBackendPort struct {
|
|||||||
// +optional
|
// +optional
|
||||||
Number int32
|
Number int32
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// +genclient
|
||||||
|
// +genclient:nonNamespaced
|
||||||
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
|
||||||
|
// ClusterCIDR represents a single configuration for per-Node Pod CIDR
|
||||||
|
// allocations when the MultiCIDRRangeAllocator is enabled (see the config for
|
||||||
|
// kube-controller-manager). A cluster may have any number of ClusterCIDR
|
||||||
|
// resources, all of which will be considered when allocating a CIDR for a
|
||||||
|
// Node. A ClusterCIDR is eligible to be used for a given Node when the node
|
||||||
|
// selector matches the node in question and has free CIDRs to allocate. In
|
||||||
|
// case of multiple matching ClusterCIDR resources, the allocator will attempt
|
||||||
|
// to break ties using internal heuristics, but any ClusterCIDR whose node
|
||||||
|
// selector matches the Node may be used.
|
||||||
|
type ClusterCIDR struct {
|
||||||
|
metav1.TypeMeta
|
||||||
|
metav1.ObjectMeta
|
||||||
|
|
||||||
|
Spec ClusterCIDRSpec
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClusterCIDRSpec defines the desired state of ClusterCIDR.
|
||||||
|
type ClusterCIDRSpec struct {
|
||||||
|
// NodeSelector defines which nodes the config is applicable to.
|
||||||
|
// An empty or nil NodeSelector selects all nodes.
|
||||||
|
// This field is immutable.
|
||||||
|
// +optional
|
||||||
|
NodeSelector *api.NodeSelector
|
||||||
|
|
||||||
|
// PerNodeHostBits defines the number of host bits to be configured per node.
|
||||||
|
// A subnet mask determines how much of the address is used for network bits
|
||||||
|
// and host bits. For example an IPv4 address of 192.168.0.0/24, splits the
|
||||||
|
// address into 24 bits for the network portion and 8 bits for the host portion.
|
||||||
|
// To allocate 256 IPs, set this field to 8 (a /24 mask for IPv4 or a /120 for IPv6).
|
||||||
|
// Minimum value is 4 (16 IPs).
|
||||||
|
// This field is immutable.
|
||||||
|
// +required
|
||||||
|
PerNodeHostBits int32
|
||||||
|
|
||||||
|
// IPv4 defines an IPv4 IP block in CIDR notation(e.g. "10.0.0.0/8").
|
||||||
|
// At least one of IPv4 and IPv6 must be specified.
|
||||||
|
// This field is immutable.
|
||||||
|
// +optional
|
||||||
|
IPv4 string
|
||||||
|
|
||||||
|
// IPv6 defines an IPv6 IP block in CIDR notation(e.g. "fd12:3456:789a:1::/64").
|
||||||
|
// At least one of IPv4 and IPv6 must be specified.
|
||||||
|
// This field is immutable.
|
||||||
|
// +optional
|
||||||
|
IPv6 string
|
||||||
|
}
|
||||||
|
|
||||||
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
|
||||||
|
// ClusterCIDRList contains a list of ClusterCIDRs.
|
||||||
|
type ClusterCIDRList struct {
|
||||||
|
metav1.TypeMeta
|
||||||
|
|
||||||
|
// +optional
|
||||||
|
metav1.ListMeta
|
||||||
|
|
||||||
|
// Items is the list of ClusterCIDRs.
|
||||||
|
Items []ClusterCIDR
|
||||||
|
}
|
||||||
|
25
pkg/apis/networking/v1alpha1/defaults.go
Normal file
25
pkg/apis/networking/v1alpha1/defaults.go
Normal file
@ -0,0 +1,25 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2022 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package v1alpha1
|
||||||
|
|
||||||
|
import (
|
||||||
|
"k8s.io/apimachinery/pkg/runtime"
|
||||||
|
)
|
||||||
|
|
||||||
|
func addDefaultingFuncs(scheme *runtime.Scheme) error {
|
||||||
|
return RegisterDefaults(scheme)
|
||||||
|
}
|
23
pkg/apis/networking/v1alpha1/doc.go
Normal file
23
pkg/apis/networking/v1alpha1/doc.go
Normal file
@ -0,0 +1,23 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2022 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
// +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/networking
|
||||||
|
// +k8s:conversion-gen-external-types=k8s.io/api/networking/v1alpha1
|
||||||
|
// +k8s:defaulter-gen=TypeMeta
|
||||||
|
// +k8s:defaulter-gen-input=k8s.io/api/networking/v1alpha1
|
||||||
|
// +groupName=networking.k8s.io
|
||||||
|
|
||||||
|
package v1alpha1 // import "k8s.io/kubernetes/pkg/apis/networking/v1alpha1"
|
45
pkg/apis/networking/v1alpha1/register.go
Normal file
45
pkg/apis/networking/v1alpha1/register.go
Normal file
@ -0,0 +1,45 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2022 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package v1alpha1
|
||||||
|
|
||||||
|
import (
|
||||||
|
networkingv1alpha1 "k8s.io/api/networking/v1alpha1"
|
||||||
|
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||||
|
)
|
||||||
|
|
||||||
|
// GroupName is the group name use in this package.
|
||||||
|
const GroupName = "networking.k8s.io"
|
||||||
|
|
||||||
|
// SchemeGroupVersion is group version used to register these objects.
|
||||||
|
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"}
|
||||||
|
|
||||||
|
// Resource takes an unqualified resource and returns a Group qualified GroupResource.
|
||||||
|
func Resource(resource string) schema.GroupResource {
|
||||||
|
return SchemeGroupVersion.WithResource(resource).GroupResource()
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
localSchemeBuilder = &networkingv1alpha1.SchemeBuilder
|
||||||
|
AddToScheme = localSchemeBuilder.AddToScheme
|
||||||
|
)
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
// We only register manually written functions here. The registration of the
|
||||||
|
// generated functions takes place in the generated files. The separation
|
||||||
|
// makes the code compile even when the generated files are missing.
|
||||||
|
localSchemeBuilder.Register(addDefaultingFuncs)
|
||||||
|
}
|
147
pkg/apis/networking/v1alpha1/zz_generated.conversion.go
generated
Normal file
147
pkg/apis/networking/v1alpha1/zz_generated.conversion.go
generated
Normal file
@ -0,0 +1,147 @@
|
|||||||
|
//go:build !ignore_autogenerated
|
||||||
|
// +build !ignore_autogenerated
|
||||||
|
|
||||||
|
/*
|
||||||
|
Copyright The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
// Code generated by conversion-gen. DO NOT EDIT.
|
||||||
|
|
||||||
|
package v1alpha1
|
||||||
|
|
||||||
|
import (
|
||||||
|
unsafe "unsafe"
|
||||||
|
|
||||||
|
v1 "k8s.io/api/core/v1"
|
||||||
|
v1alpha1 "k8s.io/api/networking/v1alpha1"
|
||||||
|
conversion "k8s.io/apimachinery/pkg/conversion"
|
||||||
|
runtime "k8s.io/apimachinery/pkg/runtime"
|
||||||
|
core "k8s.io/kubernetes/pkg/apis/core"
|
||||||
|
networking "k8s.io/kubernetes/pkg/apis/networking"
|
||||||
|
)
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
localSchemeBuilder.Register(RegisterConversions)
|
||||||
|
}
|
||||||
|
|
||||||
|
// RegisterConversions adds conversion functions to the given scheme.
|
||||||
|
// Public to allow building arbitrary schemes.
|
||||||
|
func RegisterConversions(s *runtime.Scheme) error {
|
||||||
|
if err := s.AddGeneratedConversionFunc((*v1alpha1.ClusterCIDR)(nil), (*networking.ClusterCIDR)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||||
|
return Convert_v1alpha1_ClusterCIDR_To_networking_ClusterCIDR(a.(*v1alpha1.ClusterCIDR), b.(*networking.ClusterCIDR), scope)
|
||||||
|
}); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := s.AddGeneratedConversionFunc((*networking.ClusterCIDR)(nil), (*v1alpha1.ClusterCIDR)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||||
|
return Convert_networking_ClusterCIDR_To_v1alpha1_ClusterCIDR(a.(*networking.ClusterCIDR), b.(*v1alpha1.ClusterCIDR), scope)
|
||||||
|
}); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := s.AddGeneratedConversionFunc((*v1alpha1.ClusterCIDRList)(nil), (*networking.ClusterCIDRList)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||||
|
return Convert_v1alpha1_ClusterCIDRList_To_networking_ClusterCIDRList(a.(*v1alpha1.ClusterCIDRList), b.(*networking.ClusterCIDRList), scope)
|
||||||
|
}); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := s.AddGeneratedConversionFunc((*networking.ClusterCIDRList)(nil), (*v1alpha1.ClusterCIDRList)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||||
|
return Convert_networking_ClusterCIDRList_To_v1alpha1_ClusterCIDRList(a.(*networking.ClusterCIDRList), b.(*v1alpha1.ClusterCIDRList), scope)
|
||||||
|
}); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := s.AddGeneratedConversionFunc((*v1alpha1.ClusterCIDRSpec)(nil), (*networking.ClusterCIDRSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||||
|
return Convert_v1alpha1_ClusterCIDRSpec_To_networking_ClusterCIDRSpec(a.(*v1alpha1.ClusterCIDRSpec), b.(*networking.ClusterCIDRSpec), scope)
|
||||||
|
}); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := s.AddGeneratedConversionFunc((*networking.ClusterCIDRSpec)(nil), (*v1alpha1.ClusterCIDRSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||||
|
return Convert_networking_ClusterCIDRSpec_To_v1alpha1_ClusterCIDRSpec(a.(*networking.ClusterCIDRSpec), b.(*v1alpha1.ClusterCIDRSpec), scope)
|
||||||
|
}); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func autoConvert_v1alpha1_ClusterCIDR_To_networking_ClusterCIDR(in *v1alpha1.ClusterCIDR, out *networking.ClusterCIDR, s conversion.Scope) error {
|
||||||
|
out.ObjectMeta = in.ObjectMeta
|
||||||
|
if err := Convert_v1alpha1_ClusterCIDRSpec_To_networking_ClusterCIDRSpec(&in.Spec, &out.Spec, s); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Convert_v1alpha1_ClusterCIDR_To_networking_ClusterCIDR is an autogenerated conversion function.
|
||||||
|
func Convert_v1alpha1_ClusterCIDR_To_networking_ClusterCIDR(in *v1alpha1.ClusterCIDR, out *networking.ClusterCIDR, s conversion.Scope) error {
|
||||||
|
return autoConvert_v1alpha1_ClusterCIDR_To_networking_ClusterCIDR(in, out, s)
|
||||||
|
}
|
||||||
|
|
||||||
|
func autoConvert_networking_ClusterCIDR_To_v1alpha1_ClusterCIDR(in *networking.ClusterCIDR, out *v1alpha1.ClusterCIDR, s conversion.Scope) error {
|
||||||
|
out.ObjectMeta = in.ObjectMeta
|
||||||
|
if err := Convert_networking_ClusterCIDRSpec_To_v1alpha1_ClusterCIDRSpec(&in.Spec, &out.Spec, s); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Convert_networking_ClusterCIDR_To_v1alpha1_ClusterCIDR is an autogenerated conversion function.
|
||||||
|
func Convert_networking_ClusterCIDR_To_v1alpha1_ClusterCIDR(in *networking.ClusterCIDR, out *v1alpha1.ClusterCIDR, s conversion.Scope) error {
|
||||||
|
return autoConvert_networking_ClusterCIDR_To_v1alpha1_ClusterCIDR(in, out, s)
|
||||||
|
}
|
||||||
|
|
||||||
|
func autoConvert_v1alpha1_ClusterCIDRList_To_networking_ClusterCIDRList(in *v1alpha1.ClusterCIDRList, out *networking.ClusterCIDRList, s conversion.Scope) error {
|
||||||
|
out.ListMeta = in.ListMeta
|
||||||
|
out.Items = *(*[]networking.ClusterCIDR)(unsafe.Pointer(&in.Items))
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Convert_v1alpha1_ClusterCIDRList_To_networking_ClusterCIDRList is an autogenerated conversion function.
|
||||||
|
func Convert_v1alpha1_ClusterCIDRList_To_networking_ClusterCIDRList(in *v1alpha1.ClusterCIDRList, out *networking.ClusterCIDRList, s conversion.Scope) error {
|
||||||
|
return autoConvert_v1alpha1_ClusterCIDRList_To_networking_ClusterCIDRList(in, out, s)
|
||||||
|
}
|
||||||
|
|
||||||
|
func autoConvert_networking_ClusterCIDRList_To_v1alpha1_ClusterCIDRList(in *networking.ClusterCIDRList, out *v1alpha1.ClusterCIDRList, s conversion.Scope) error {
|
||||||
|
out.ListMeta = in.ListMeta
|
||||||
|
out.Items = *(*[]v1alpha1.ClusterCIDR)(unsafe.Pointer(&in.Items))
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Convert_networking_ClusterCIDRList_To_v1alpha1_ClusterCIDRList is an autogenerated conversion function.
|
||||||
|
func Convert_networking_ClusterCIDRList_To_v1alpha1_ClusterCIDRList(in *networking.ClusterCIDRList, out *v1alpha1.ClusterCIDRList, s conversion.Scope) error {
|
||||||
|
return autoConvert_networking_ClusterCIDRList_To_v1alpha1_ClusterCIDRList(in, out, s)
|
||||||
|
}
|
||||||
|
|
||||||
|
func autoConvert_v1alpha1_ClusterCIDRSpec_To_networking_ClusterCIDRSpec(in *v1alpha1.ClusterCIDRSpec, out *networking.ClusterCIDRSpec, s conversion.Scope) error {
|
||||||
|
out.NodeSelector = (*core.NodeSelector)(unsafe.Pointer(in.NodeSelector))
|
||||||
|
out.PerNodeHostBits = in.PerNodeHostBits
|
||||||
|
out.IPv4 = in.IPv4
|
||||||
|
out.IPv6 = in.IPv6
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Convert_v1alpha1_ClusterCIDRSpec_To_networking_ClusterCIDRSpec is an autogenerated conversion function.
|
||||||
|
func Convert_v1alpha1_ClusterCIDRSpec_To_networking_ClusterCIDRSpec(in *v1alpha1.ClusterCIDRSpec, out *networking.ClusterCIDRSpec, s conversion.Scope) error {
|
||||||
|
return autoConvert_v1alpha1_ClusterCIDRSpec_To_networking_ClusterCIDRSpec(in, out, s)
|
||||||
|
}
|
||||||
|
|
||||||
|
func autoConvert_networking_ClusterCIDRSpec_To_v1alpha1_ClusterCIDRSpec(in *networking.ClusterCIDRSpec, out *v1alpha1.ClusterCIDRSpec, s conversion.Scope) error {
|
||||||
|
out.NodeSelector = (*v1.NodeSelector)(unsafe.Pointer(in.NodeSelector))
|
||||||
|
out.PerNodeHostBits = in.PerNodeHostBits
|
||||||
|
out.IPv4 = in.IPv4
|
||||||
|
out.IPv6 = in.IPv6
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Convert_networking_ClusterCIDRSpec_To_v1alpha1_ClusterCIDRSpec is an autogenerated conversion function.
|
||||||
|
func Convert_networking_ClusterCIDRSpec_To_v1alpha1_ClusterCIDRSpec(in *networking.ClusterCIDRSpec, out *v1alpha1.ClusterCIDRSpec, s conversion.Scope) error {
|
||||||
|
return autoConvert_networking_ClusterCIDRSpec_To_v1alpha1_ClusterCIDRSpec(in, out, s)
|
||||||
|
}
|
33
pkg/apis/networking/v1alpha1/zz_generated.defaults.go
generated
Normal file
33
pkg/apis/networking/v1alpha1/zz_generated.defaults.go
generated
Normal file
@ -0,0 +1,33 @@
|
|||||||
|
//go:build !ignore_autogenerated
|
||||||
|
// +build !ignore_autogenerated
|
||||||
|
|
||||||
|
/*
|
||||||
|
Copyright The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
// Code generated by defaulter-gen. DO NOT EDIT.
|
||||||
|
|
||||||
|
package v1alpha1
|
||||||
|
|
||||||
|
import (
|
||||||
|
runtime "k8s.io/apimachinery/pkg/runtime"
|
||||||
|
)
|
||||||
|
|
||||||
|
// RegisterDefaults adds defaulters functions to the given scheme.
|
||||||
|
// Public to allow building arbitrary schemes.
|
||||||
|
// All generated defaulters are covering - they call all nested defaulters.
|
||||||
|
func RegisterDefaults(scheme *runtime.Scheme) error {
|
||||||
|
return nil
|
||||||
|
}
|
@ -20,6 +20,7 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
|
v1 "k8s.io/api/core/v1"
|
||||||
apimachineryvalidation "k8s.io/apimachinery/pkg/api/validation"
|
apimachineryvalidation "k8s.io/apimachinery/pkg/api/validation"
|
||||||
pathvalidation "k8s.io/apimachinery/pkg/api/validation/path"
|
pathvalidation "k8s.io/apimachinery/pkg/api/validation/path"
|
||||||
unversionedvalidation "k8s.io/apimachinery/pkg/apis/meta/v1/validation"
|
unversionedvalidation "k8s.io/apimachinery/pkg/apis/meta/v1/validation"
|
||||||
@ -602,3 +603,89 @@ func allowInvalidWildcardHostRule(oldIngress *networking.Ingress) bool {
|
|||||||
}
|
}
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ValidateClusterCIDRName validates that the given name can be used as an
|
||||||
|
// ClusterCIDR name.
|
||||||
|
var ValidateClusterCIDRName = apimachineryvalidation.NameIsDNSLabel
|
||||||
|
|
||||||
|
// ValidateClusterCIDR validates a ClusterCIDR.
|
||||||
|
func ValidateClusterCIDR(cc *networking.ClusterCIDR) field.ErrorList {
|
||||||
|
allErrs := apivalidation.ValidateObjectMeta(&cc.ObjectMeta, false, ValidateClusterCIDRName, field.NewPath("metadata"))
|
||||||
|
allErrs = append(allErrs, ValidateClusterCIDRSpec(&cc.Spec, field.NewPath("spec"))...)
|
||||||
|
return allErrs
|
||||||
|
}
|
||||||
|
|
||||||
|
// ValidateClusterCIDRSpec validates ClusterCIDR Spec.
|
||||||
|
func ValidateClusterCIDRSpec(spec *networking.ClusterCIDRSpec, fldPath *field.Path) field.ErrorList {
|
||||||
|
var allErrs field.ErrorList
|
||||||
|
if spec.NodeSelector != nil {
|
||||||
|
allErrs = append(allErrs, apivalidation.ValidateNodeSelector(spec.NodeSelector, fldPath.Child("nodeSelector"))...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Validate if CIDR is specified for at least one IP Family(IPv4/IPv6).
|
||||||
|
if spec.IPv4 == "" && spec.IPv6 == "" {
|
||||||
|
allErrs = append(allErrs, field.Required(fldPath, "one or both of `ipv4` and `ipv6` must be specified"))
|
||||||
|
return allErrs
|
||||||
|
}
|
||||||
|
|
||||||
|
// Validate specified IPv4 CIDR and PerNodeHostBits.
|
||||||
|
if spec.IPv4 != "" {
|
||||||
|
allErrs = append(allErrs, validateCIDRConfig(spec.IPv4, spec.PerNodeHostBits, 32, v1.IPv4Protocol, fldPath)...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Validate specified IPv6 CIDR and PerNodeHostBits.
|
||||||
|
if spec.IPv6 != "" {
|
||||||
|
allErrs = append(allErrs, validateCIDRConfig(spec.IPv6, spec.PerNodeHostBits, 128, v1.IPv6Protocol, fldPath)...)
|
||||||
|
}
|
||||||
|
|
||||||
|
return allErrs
|
||||||
|
}
|
||||||
|
|
||||||
|
func validateCIDRConfig(configCIDR string, perNodeHostBits, maxMaskSize int32, ipFamily v1.IPFamily, fldPath *field.Path) field.ErrorList {
|
||||||
|
var allErrs field.ErrorList
|
||||||
|
minPerNodeHostBits := int32(4)
|
||||||
|
|
||||||
|
ip, ipNet, err := netutils.ParseCIDRSloppy(configCIDR)
|
||||||
|
if err != nil {
|
||||||
|
allErrs = append(allErrs, field.Invalid(fldPath.Child(string(ipFamily)), configCIDR, fmt.Sprintf("must be a valid CIDR: %s", configCIDR)))
|
||||||
|
return allErrs
|
||||||
|
}
|
||||||
|
|
||||||
|
if ipFamily == v1.IPv4Protocol && !netutils.IsIPv4(ip) {
|
||||||
|
allErrs = append(allErrs, field.Invalid(fldPath.Child(string(ipFamily)), configCIDR, "must be a valid IPv4 CIDR"))
|
||||||
|
}
|
||||||
|
if ipFamily == v1.IPv6Protocol && !netutils.IsIPv6(ip) {
|
||||||
|
allErrs = append(allErrs, field.Invalid(fldPath.Child(string(ipFamily)), configCIDR, "must be a valid IPv6 CIDR"))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Validate PerNodeHostBits
|
||||||
|
maskSize, _ := ipNet.Mask.Size()
|
||||||
|
maxPerNodeHostBits := maxMaskSize - int32(maskSize)
|
||||||
|
|
||||||
|
if perNodeHostBits < minPerNodeHostBits {
|
||||||
|
allErrs = append(allErrs, field.Invalid(fldPath.Child("perNodeHostBits"), perNodeHostBits, fmt.Sprintf("must be greater than or equal to %d", minPerNodeHostBits)))
|
||||||
|
}
|
||||||
|
if perNodeHostBits > maxPerNodeHostBits {
|
||||||
|
allErrs = append(allErrs, field.Invalid(fldPath.Child("perNodeHostBits"), perNodeHostBits, fmt.Sprintf("must be less than or equal to %d", maxPerNodeHostBits)))
|
||||||
|
}
|
||||||
|
return allErrs
|
||||||
|
}
|
||||||
|
|
||||||
|
// ValidateClusterCIDRUpdate tests if an update to a ClusterCIDR is valid.
|
||||||
|
func ValidateClusterCIDRUpdate(update, old *networking.ClusterCIDR) field.ErrorList {
|
||||||
|
var allErrs field.ErrorList
|
||||||
|
allErrs = append(allErrs, apivalidation.ValidateObjectMetaUpdate(&update.ObjectMeta, &old.ObjectMeta, field.NewPath("metadata"))...)
|
||||||
|
allErrs = append(allErrs, validateClusterCIDRUpdateSpec(&update.Spec, &old.Spec, field.NewPath("spec"))...)
|
||||||
|
return allErrs
|
||||||
|
}
|
||||||
|
|
||||||
|
func validateClusterCIDRUpdateSpec(update, old *networking.ClusterCIDRSpec, fldPath *field.Path) field.ErrorList {
|
||||||
|
var allErrs field.ErrorList
|
||||||
|
|
||||||
|
allErrs = append(allErrs, apivalidation.ValidateImmutableField(update.NodeSelector, old.NodeSelector, fldPath.Child("nodeSelector"))...)
|
||||||
|
allErrs = append(allErrs, apivalidation.ValidateImmutableField(update.PerNodeHostBits, old.PerNodeHostBits, fldPath.Child("perNodeHostBits"))...)
|
||||||
|
allErrs = append(allErrs, apivalidation.ValidateImmutableField(update.IPv4, old.IPv4, fldPath.Child("ipv4"))...)
|
||||||
|
allErrs = append(allErrs, apivalidation.ValidateImmutableField(update.IPv6, old.IPv6, fldPath.Child("ipv6"))...)
|
||||||
|
|
||||||
|
return allErrs
|
||||||
|
}
|
||||||
|
@ -1982,3 +1982,216 @@ func TestValidateIngressStatusUpdate(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func makeNodeSelector(key string, op api.NodeSelectorOperator, values []string) *api.NodeSelector {
|
||||||
|
return &api.NodeSelector{
|
||||||
|
NodeSelectorTerms: []api.NodeSelectorTerm{
|
||||||
|
{
|
||||||
|
MatchExpressions: []api.NodeSelectorRequirement{
|
||||||
|
{
|
||||||
|
Key: key,
|
||||||
|
Operator: op,
|
||||||
|
Values: values,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func makeClusterCIDR(perNodeHostBits int32, ipv4, ipv6 string, nodeSelector *api.NodeSelector) *networking.ClusterCIDR {
|
||||||
|
return &networking.ClusterCIDR{
|
||||||
|
ObjectMeta: metav1.ObjectMeta{
|
||||||
|
Name: "foo",
|
||||||
|
ResourceVersion: "9",
|
||||||
|
},
|
||||||
|
Spec: networking.ClusterCIDRSpec{
|
||||||
|
PerNodeHostBits: perNodeHostBits,
|
||||||
|
IPv4: ipv4,
|
||||||
|
IPv6: ipv6,
|
||||||
|
NodeSelector: nodeSelector,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestValidateClusterCIDR(t *testing.T) {
|
||||||
|
testCases := []struct {
|
||||||
|
name string
|
||||||
|
cc *networking.ClusterCIDR
|
||||||
|
expectErr bool
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "valid SingleStack IPv4 ClusterCIDR",
|
||||||
|
cc: makeClusterCIDR(8, "10.1.0.0/16", "", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})),
|
||||||
|
expectErr: false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "valid SingleStack IPv4 ClusterCIDR, perNodeHostBits = maxPerNodeHostBits",
|
||||||
|
cc: makeClusterCIDR(16, "10.1.0.0/16", "", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})),
|
||||||
|
expectErr: false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "valid SingleStack IPv4 ClusterCIDR, perNodeHostBits > minPerNodeHostBits",
|
||||||
|
cc: makeClusterCIDR(4, "10.1.0.0/16", "", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})),
|
||||||
|
expectErr: false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "valid SingleStack IPv6 ClusterCIDR",
|
||||||
|
cc: makeClusterCIDR(8, "", "fd00:1:1::/64", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})),
|
||||||
|
expectErr: false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "valid SingleStack IPv6 ClusterCIDR, perNodeHostBits = maxPerNodeHostBit",
|
||||||
|
cc: makeClusterCIDR(64, "", "fd00:1:1::/64", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})),
|
||||||
|
expectErr: false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "valid SingleStack IPv6 ClusterCIDR, perNodeHostBits > minPerNodeHostBit",
|
||||||
|
cc: makeClusterCIDR(4, "", "fd00:1:1::/64", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})),
|
||||||
|
expectErr: false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "valid SingleStack IPv6 ClusterCIDR perNodeHostBits=100",
|
||||||
|
cc: makeClusterCIDR(100, "", "fd00:1:1::/16", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})),
|
||||||
|
expectErr: false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "valid DualStack ClusterCIDR",
|
||||||
|
cc: makeClusterCIDR(8, "10.1.0.0/16", "fd00:1:1::/64", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})),
|
||||||
|
expectErr: false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "valid DualStack ClusterCIDR, no NodeSelector",
|
||||||
|
cc: makeClusterCIDR(8, "10.1.0.0/16", "fd00:1:1::/64", nil),
|
||||||
|
expectErr: false,
|
||||||
|
},
|
||||||
|
// Failure cases.
|
||||||
|
{
|
||||||
|
name: "invalid ClusterCIDR, no IPv4 or IPv6 CIDR",
|
||||||
|
cc: makeClusterCIDR(8, "", "", nil),
|
||||||
|
expectErr: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "invalid ClusterCIDR, invalid nodeSelector",
|
||||||
|
cc: makeClusterCIDR(8, "10.1.0.0/16", "fd00:1:1::/64", makeNodeSelector("NoUppercaseOrSpecialCharsLike=Equals", api.NodeSelectorOpIn, []string{"bar"})),
|
||||||
|
expectErr: true,
|
||||||
|
},
|
||||||
|
// IPv4 tests.
|
||||||
|
{
|
||||||
|
name: "invalid SingleStack IPv4 ClusterCIDR, invalid spec.IPv4",
|
||||||
|
cc: makeClusterCIDR(8, "test", "", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})),
|
||||||
|
expectErr: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "invalid Singlestack IPv4 ClusterCIDR, perNodeHostBits > maxPerNodeHostBits",
|
||||||
|
cc: makeClusterCIDR(100, "10.1.0.0/16", "", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})),
|
||||||
|
expectErr: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "invalid SingleStack IPv4 ClusterCIDR, perNodeHostBits < minPerNodeHostBits",
|
||||||
|
cc: makeClusterCIDR(2, "10.1.0.0/16", "", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})),
|
||||||
|
expectErr: true,
|
||||||
|
},
|
||||||
|
// IPv6 tests.
|
||||||
|
{
|
||||||
|
name: "invalid SingleStack IPv6 ClusterCIDR, invalid spec.IPv6",
|
||||||
|
cc: makeClusterCIDR(8, "", "testv6", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})),
|
||||||
|
expectErr: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "invalid SingleStack IPv6 ClusterCIDR, valid IPv4 CIDR in spec.IPv6",
|
||||||
|
cc: makeClusterCIDR(8, "", "10.2.0.0/16", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})),
|
||||||
|
expectErr: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "invalid SingleStack IPv6 ClusterCIDR, invalid perNodeHostBits > maxPerNodeHostBits",
|
||||||
|
cc: makeClusterCIDR(12, "", "fd00::/120", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})),
|
||||||
|
expectErr: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "invalid SingleStack IPv6 ClusterCIDR, invalid perNodeHostBits < minPerNodeHostBits",
|
||||||
|
cc: makeClusterCIDR(3, "", "fd00::/120", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})),
|
||||||
|
expectErr: true,
|
||||||
|
},
|
||||||
|
// DualStack tests
|
||||||
|
{
|
||||||
|
name: "invalid DualStack ClusterCIDR, valid spec.IPv4, invalid spec.IPv6",
|
||||||
|
cc: makeClusterCIDR(8, "10.1.0.0/16", "testv6", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})),
|
||||||
|
expectErr: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "invalid DualStack ClusterCIDR, valid spec.IPv6, invalid spec.IPv4",
|
||||||
|
cc: makeClusterCIDR(8, "testv4", "fd00::/120", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})),
|
||||||
|
expectErr: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "invalid DualStack ClusterCIDR, invalid perNodeHostBits > maxPerNodeHostBits",
|
||||||
|
cc: makeClusterCIDR(24, "10.1.0.0/16", "fd00:1:1::/64", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})),
|
||||||
|
expectErr: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "invalid DualStack ClusterCIDR, valid IPv6 CIDR in spec.IPv4",
|
||||||
|
cc: makeClusterCIDR(8, "fd00::/120", "fd00:1:1::/64", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})),
|
||||||
|
expectErr: true,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, testCase := range testCases {
|
||||||
|
t.Run(testCase.name, func(t *testing.T) {
|
||||||
|
err := ValidateClusterCIDR(testCase.cc)
|
||||||
|
if !testCase.expectErr && err != nil {
|
||||||
|
t.Errorf("ValidateClusterCIDR(%+v) must be successful for test '%s', got %v", testCase.cc, testCase.name, err)
|
||||||
|
}
|
||||||
|
if testCase.expectErr && err == nil {
|
||||||
|
t.Errorf("ValidateClusterCIDR(%+v) must return an error for test: %s, but got nil", testCase.cc, testCase.name)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestValidateClusterConfigUpdate(t *testing.T) {
|
||||||
|
oldCCC := makeClusterCIDR(8, "10.1.0.0/16", "fd00:1:1::/64", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"}))
|
||||||
|
|
||||||
|
testCases := []struct {
|
||||||
|
name string
|
||||||
|
cc *networking.ClusterCIDR
|
||||||
|
expectErr bool
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "Successful update, no changes to ClusterCIDR.Spec",
|
||||||
|
cc: makeClusterCIDR(8, "10.1.0.0/16", "fd00:1:1::/64", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})),
|
||||||
|
expectErr: false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Failed update, update spec.PerNodeHostBits",
|
||||||
|
cc: makeClusterCIDR(12, "10.1.0.0/16", "fd00:1:1::/64", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})),
|
||||||
|
expectErr: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Failed update, update spec.IPv4",
|
||||||
|
cc: makeClusterCIDR(8, "10.2.0.0/16", "fd00:1:1::/64", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})),
|
||||||
|
expectErr: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Failed update, update spec.IPv6",
|
||||||
|
cc: makeClusterCIDR(8, "10.1.0.0/16", "fd00:2:/112", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})),
|
||||||
|
expectErr: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Failed update, update spec.NodeSelector",
|
||||||
|
cc: makeClusterCIDR(8, "10.1.0.0/16", "fd00:1:1::/64", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar2"})),
|
||||||
|
expectErr: true,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for _, testCase := range testCases {
|
||||||
|
t.Run(testCase.name, func(t *testing.T) {
|
||||||
|
err := ValidateClusterCIDRUpdate(testCase.cc, oldCCC)
|
||||||
|
if !testCase.expectErr && err != nil {
|
||||||
|
t.Errorf("ValidateClusterCIDRUpdate(%+v) must be successful for test '%s', got %v", testCase.cc, testCase.name, err)
|
||||||
|
}
|
||||||
|
if testCase.expectErr && err == nil {
|
||||||
|
t.Errorf("ValidateClusterCIDRUpdate(%+v) must return error for test: %s, but got nil", testCase.cc, testCase.name)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
81
pkg/apis/networking/zz_generated.deepcopy.go
generated
81
pkg/apis/networking/zz_generated.deepcopy.go
generated
@ -28,6 +28,87 @@ import (
|
|||||||
core "k8s.io/kubernetes/pkg/apis/core"
|
core "k8s.io/kubernetes/pkg/apis/core"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
|
func (in *ClusterCIDR) DeepCopyInto(out *ClusterCIDR) {
|
||||||
|
*out = *in
|
||||||
|
out.TypeMeta = in.TypeMeta
|
||||||
|
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
|
||||||
|
in.Spec.DeepCopyInto(&out.Spec)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterCIDR.
|
||||||
|
func (in *ClusterCIDR) DeepCopy() *ClusterCIDR {
|
||||||
|
if in == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
out := new(ClusterCIDR)
|
||||||
|
in.DeepCopyInto(out)
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||||
|
func (in *ClusterCIDR) DeepCopyObject() runtime.Object {
|
||||||
|
if c := in.DeepCopy(); c != nil {
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
|
func (in *ClusterCIDRList) DeepCopyInto(out *ClusterCIDRList) {
|
||||||
|
*out = *in
|
||||||
|
out.TypeMeta = in.TypeMeta
|
||||||
|
in.ListMeta.DeepCopyInto(&out.ListMeta)
|
||||||
|
if in.Items != nil {
|
||||||
|
in, out := &in.Items, &out.Items
|
||||||
|
*out = make([]ClusterCIDR, len(*in))
|
||||||
|
for i := range *in {
|
||||||
|
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterCIDRList.
|
||||||
|
func (in *ClusterCIDRList) DeepCopy() *ClusterCIDRList {
|
||||||
|
if in == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
out := new(ClusterCIDRList)
|
||||||
|
in.DeepCopyInto(out)
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||||
|
func (in *ClusterCIDRList) DeepCopyObject() runtime.Object {
|
||||||
|
if c := in.DeepCopy(); c != nil {
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
|
func (in *ClusterCIDRSpec) DeepCopyInto(out *ClusterCIDRSpec) {
|
||||||
|
*out = *in
|
||||||
|
if in.NodeSelector != nil {
|
||||||
|
in, out := &in.NodeSelector, &out.NodeSelector
|
||||||
|
*out = new(core.NodeSelector)
|
||||||
|
(*in).DeepCopyInto(*out)
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterCIDRSpec.
|
||||||
|
func (in *ClusterCIDRSpec) DeepCopy() *ClusterCIDRSpec {
|
||||||
|
if in == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
out := new(ClusterCIDRSpec)
|
||||||
|
in.DeepCopyInto(out)
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
func (in *HTTPIngressPath) DeepCopyInto(out *HTTPIngressPath) {
|
func (in *HTTPIngressPath) DeepCopyInto(out *HTTPIngressPath) {
|
||||||
*out = *in
|
*out = *in
|
||||||
|
@ -22,16 +22,18 @@ import (
|
|||||||
"net"
|
"net"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"k8s.io/klog/v2"
|
|
||||||
|
|
||||||
"k8s.io/api/core/v1"
|
"k8s.io/api/core/v1"
|
||||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
"k8s.io/apimachinery/pkg/fields"
|
"k8s.io/apimachinery/pkg/fields"
|
||||||
"k8s.io/apimachinery/pkg/labels"
|
"k8s.io/apimachinery/pkg/labels"
|
||||||
"k8s.io/apimachinery/pkg/util/wait"
|
"k8s.io/apimachinery/pkg/util/wait"
|
||||||
|
utilfeature "k8s.io/apiserver/pkg/util/feature"
|
||||||
informers "k8s.io/client-go/informers/core/v1"
|
informers "k8s.io/client-go/informers/core/v1"
|
||||||
|
networkinginformers "k8s.io/client-go/informers/networking/v1alpha1"
|
||||||
clientset "k8s.io/client-go/kubernetes"
|
clientset "k8s.io/client-go/kubernetes"
|
||||||
cloudprovider "k8s.io/cloud-provider"
|
cloudprovider "k8s.io/cloud-provider"
|
||||||
|
"k8s.io/klog/v2"
|
||||||
|
"k8s.io/kubernetes/pkg/features"
|
||||||
)
|
)
|
||||||
|
|
||||||
// CIDRAllocatorType is the type of the allocator to use.
|
// CIDRAllocatorType is the type of the allocator to use.
|
||||||
@ -41,6 +43,9 @@ const (
|
|||||||
// RangeAllocatorType is the allocator that uses an internal CIDR
|
// RangeAllocatorType is the allocator that uses an internal CIDR
|
||||||
// range allocator to do node CIDR range allocations.
|
// range allocator to do node CIDR range allocations.
|
||||||
RangeAllocatorType CIDRAllocatorType = "RangeAllocator"
|
RangeAllocatorType CIDRAllocatorType = "RangeAllocator"
|
||||||
|
// MultiCIDRRangeAllocatorType is the allocator that uses an internal CIDR
|
||||||
|
// range allocator to do node CIDR range allocations.
|
||||||
|
MultiCIDRRangeAllocatorType CIDRAllocatorType = "MultiCIDRRangeAllocator"
|
||||||
// CloudAllocatorType is the allocator that uses cloud platform
|
// CloudAllocatorType is the allocator that uses cloud platform
|
||||||
// support to do node CIDR range allocations.
|
// support to do node CIDR range allocations.
|
||||||
CloudAllocatorType CIDRAllocatorType = "CloudAllocator"
|
CloudAllocatorType CIDRAllocatorType = "CloudAllocator"
|
||||||
@ -87,7 +92,7 @@ type CIDRAllocator interface {
|
|||||||
// CIDR if it doesn't currently have one or mark the CIDR as used if
|
// CIDR if it doesn't currently have one or mark the CIDR as used if
|
||||||
// the node already have one.
|
// the node already have one.
|
||||||
AllocateOrOccupyCIDR(node *v1.Node) error
|
AllocateOrOccupyCIDR(node *v1.Node) error
|
||||||
// ReleaseCIDR releases the CIDR of the removed node
|
// ReleaseCIDR releases the CIDR of the removed node.
|
||||||
ReleaseCIDR(node *v1.Node) error
|
ReleaseCIDR(node *v1.Node) error
|
||||||
// Run starts all the working logic of the allocator.
|
// Run starts all the working logic of the allocator.
|
||||||
Run(stopCh <-chan struct{})
|
Run(stopCh <-chan struct{})
|
||||||
@ -96,18 +101,25 @@ type CIDRAllocator interface {
|
|||||||
// CIDRAllocatorParams is parameters that's required for creating new
|
// CIDRAllocatorParams is parameters that's required for creating new
|
||||||
// cidr range allocator.
|
// cidr range allocator.
|
||||||
type CIDRAllocatorParams struct {
|
type CIDRAllocatorParams struct {
|
||||||
// ClusterCIDRs is list of cluster cidrs
|
// ClusterCIDRs is list of cluster cidrs.
|
||||||
ClusterCIDRs []*net.IPNet
|
ClusterCIDRs []*net.IPNet
|
||||||
// ServiceCIDR is primary service cidr for cluster
|
// ServiceCIDR is primary service cidr for cluster.
|
||||||
ServiceCIDR *net.IPNet
|
ServiceCIDR *net.IPNet
|
||||||
// SecondaryServiceCIDR is secondary service cidr for cluster
|
// SecondaryServiceCIDR is secondary service cidr for cluster.
|
||||||
SecondaryServiceCIDR *net.IPNet
|
SecondaryServiceCIDR *net.IPNet
|
||||||
// NodeCIDRMaskSizes is list of node cidr mask sizes
|
// NodeCIDRMaskSizes is list of node cidr mask sizes.
|
||||||
NodeCIDRMaskSizes []int
|
NodeCIDRMaskSizes []int
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// CIDRs are reserved, then node resource is patched with them.
|
||||||
|
// nodeReservedCIDRs holds the reservation info for a node.
|
||||||
|
type nodeReservedCIDRs struct {
|
||||||
|
allocatedCIDRs []*net.IPNet
|
||||||
|
nodeName string
|
||||||
|
}
|
||||||
|
|
||||||
// New creates a new CIDR range allocator.
|
// New creates a new CIDR range allocator.
|
||||||
func New(kubeClient clientset.Interface, cloud cloudprovider.Interface, nodeInformer informers.NodeInformer, allocatorType CIDRAllocatorType, allocatorParams CIDRAllocatorParams) (CIDRAllocator, error) {
|
func New(kubeClient clientset.Interface, cloud cloudprovider.Interface, nodeInformer informers.NodeInformer, clusterCIDRInformer networkinginformers.ClusterCIDRInformer, allocatorType CIDRAllocatorType, allocatorParams CIDRAllocatorParams) (CIDRAllocator, error) {
|
||||||
nodeList, err := listNodes(kubeClient)
|
nodeList, err := listNodes(kubeClient)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
@ -116,6 +128,12 @@ func New(kubeClient clientset.Interface, cloud cloudprovider.Interface, nodeInfo
|
|||||||
switch allocatorType {
|
switch allocatorType {
|
||||||
case RangeAllocatorType:
|
case RangeAllocatorType:
|
||||||
return NewCIDRRangeAllocator(kubeClient, nodeInformer, allocatorParams, nodeList)
|
return NewCIDRRangeAllocator(kubeClient, nodeInformer, allocatorParams, nodeList)
|
||||||
|
case MultiCIDRRangeAllocatorType:
|
||||||
|
if !utilfeature.DefaultFeatureGate.Enabled(features.MultiCIDRRangeAllocator) {
|
||||||
|
return nil, fmt.Errorf("invalid CIDR allocator type: %v, feature gate %v must be enabled", allocatorType, features.MultiCIDRRangeAllocator)
|
||||||
|
}
|
||||||
|
return NewMultiCIDRRangeAllocator(kubeClient, nodeInformer, clusterCIDRInformer, allocatorParams, nodeList, nil)
|
||||||
|
|
||||||
case CloudAllocatorType:
|
case CloudAllocatorType:
|
||||||
return NewCloudCIDRAllocator(kubeClient, cloud, nodeInformer)
|
return NewCloudCIDRAllocator(kubeClient, cloud, nodeInformer)
|
||||||
default:
|
default:
|
||||||
@ -144,3 +162,12 @@ func listNodes(kubeClient clientset.Interface) (*v1.NodeList, error) {
|
|||||||
}
|
}
|
||||||
return nodeList, nil
|
return nodeList, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ipnetToStringList converts a slice of net.IPNet into a list of CIDR in string format
|
||||||
|
func ipnetToStringList(inCIDRs []*net.IPNet) []string {
|
||||||
|
outCIDRs := make([]string, len(inCIDRs))
|
||||||
|
for idx, inCIDR := range inCIDRs {
|
||||||
|
outCIDRs[idx] = inCIDR.String()
|
||||||
|
}
|
||||||
|
return outCIDRs
|
||||||
|
}
|
||||||
|
140
pkg/controller/nodeipam/ipam/multi_cidr_priority_queue.go
Normal file
140
pkg/controller/nodeipam/ipam/multi_cidr_priority_queue.go
Normal file
@ -0,0 +1,140 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2022 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package ipam
|
||||||
|
|
||||||
|
import (
|
||||||
|
"math"
|
||||||
|
|
||||||
|
cidrset "k8s.io/kubernetes/pkg/controller/nodeipam/ipam/multicidrset"
|
||||||
|
)
|
||||||
|
|
||||||
|
// A PriorityQueue implementation based on https://pkg.go.dev/container/heap#example-package-PriorityQueue
|
||||||
|
|
||||||
|
// An PriorityQueueItem is something we manage in a priority queue.
|
||||||
|
type PriorityQueueItem struct {
|
||||||
|
clusterCIDR *cidrset.ClusterCIDR
|
||||||
|
// labelMatchCount is the first determinant of priority.
|
||||||
|
labelMatchCount int
|
||||||
|
// selectorString is a string representation of the labelSelector associated with the cidrSet.
|
||||||
|
selectorString string
|
||||||
|
// index is needed by update and is maintained by the heap.Interface methods.
|
||||||
|
index int // The index of the item in the heap.
|
||||||
|
}
|
||||||
|
|
||||||
|
// A PriorityQueue implements heap.Interface and holds PriorityQueueItems.
|
||||||
|
type PriorityQueue []*PriorityQueueItem
|
||||||
|
|
||||||
|
func (pq PriorityQueue) Len() int { return len(pq) }
|
||||||
|
|
||||||
|
// Less compares the priority queue items, to store in a min heap.
|
||||||
|
// Less(i,j) == true denotes i has higher priority than j.
|
||||||
|
func (pq PriorityQueue) Less(i, j int) bool {
|
||||||
|
if pq[i].labelMatchCount != pq[j].labelMatchCount {
|
||||||
|
// P0: CidrSet with higher number of matching labels has the highest priority.
|
||||||
|
return pq[i].labelMatchCount > pq[j].labelMatchCount
|
||||||
|
}
|
||||||
|
|
||||||
|
// If the count of matching labels is equal, compare the max allocatable pod CIDRs.
|
||||||
|
if pq[i].maxAllocatable() != pq[j].maxAllocatable() {
|
||||||
|
// P1: CidrSet with fewer allocatable pod CIDRs has higher priority.
|
||||||
|
return pq[i].maxAllocatable() < pq[j].maxAllocatable()
|
||||||
|
}
|
||||||
|
|
||||||
|
// If the value of allocatable pod CIDRs is equal, compare the node mask size.
|
||||||
|
if pq[i].nodeMaskSize() != pq[j].nodeMaskSize() {
|
||||||
|
// P2: CidrSet with a PerNodeMaskSize having fewer IPs has higher priority.
|
||||||
|
// For example, `27` (32 IPs) picked before `25` (128 IPs).
|
||||||
|
return pq[i].nodeMaskSize() > pq[j].nodeMaskSize()
|
||||||
|
}
|
||||||
|
|
||||||
|
// If the per node mask size are equal compare the CIDR labels.
|
||||||
|
if pq[i].selectorString != pq[j].selectorString {
|
||||||
|
// P3: CidrSet having label with lower alphanumeric value has higher priority.
|
||||||
|
return pq[i].selectorString < pq[j].selectorString
|
||||||
|
}
|
||||||
|
|
||||||
|
// P4: CidrSet having an alpha-numerically smaller IP address value has a higher priority.
|
||||||
|
return pq[i].cidrLabel() < pq[j].cidrLabel()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (pq PriorityQueue) Swap(i, j int) {
|
||||||
|
pq[i], pq[j] = pq[j], pq[i]
|
||||||
|
pq[i].index = i
|
||||||
|
pq[j].index = j
|
||||||
|
}
|
||||||
|
|
||||||
|
func (pq *PriorityQueue) Push(x interface{}) {
|
||||||
|
n := len(*pq)
|
||||||
|
if item, ok := x.(*PriorityQueueItem); ok {
|
||||||
|
item.index = n
|
||||||
|
*pq = append(*pq, item)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (pq *PriorityQueue) Pop() interface{} {
|
||||||
|
old := *pq
|
||||||
|
n := len(old)
|
||||||
|
item := old[n-1]
|
||||||
|
old[n-1] = nil // avoid memory leak.
|
||||||
|
item.index = -1 // for safety.
|
||||||
|
*pq = old[0 : n-1]
|
||||||
|
return item
|
||||||
|
}
|
||||||
|
|
||||||
|
// maxAllocatable computes the minimum value of the MaxCIDRs for a ClusterCIDR.
|
||||||
|
// It compares the MaxCIDRs for each CIDR family and returns the minimum.
|
||||||
|
// e.g. IPv4 - 10.0.0.0/16 PerNodeMaskSize: 24 MaxCIDRs = 256
|
||||||
|
// IPv6 - ff:ff::/120 PerNodeMaskSize: 120 MaxCIDRs = 1
|
||||||
|
// MaxAllocatable for this ClusterCIDR = 1
|
||||||
|
func (pqi *PriorityQueueItem) maxAllocatable() int {
|
||||||
|
ipv4Allocatable := math.MaxInt
|
||||||
|
ipv6Allocatable := math.MaxInt
|
||||||
|
|
||||||
|
if pqi.clusterCIDR.IPv4CIDRSet != nil {
|
||||||
|
ipv4Allocatable = pqi.clusterCIDR.IPv4CIDRSet.MaxCIDRs
|
||||||
|
}
|
||||||
|
|
||||||
|
if pqi.clusterCIDR.IPv6CIDRSet != nil {
|
||||||
|
ipv6Allocatable = pqi.clusterCIDR.IPv6CIDRSet.MaxCIDRs
|
||||||
|
}
|
||||||
|
|
||||||
|
if ipv4Allocatable < ipv6Allocatable {
|
||||||
|
return ipv4Allocatable
|
||||||
|
}
|
||||||
|
|
||||||
|
return ipv6Allocatable
|
||||||
|
}
|
||||||
|
|
||||||
|
// nodeMaskSize returns IPv4 NodeMaskSize if present, else returns IPv6 NodeMaskSize.
|
||||||
|
// Note the requirement: 32 - IPv4 NodeMaskSize == 128 - IPv6 NodeMaskSize
|
||||||
|
// Due to the above requirement it does not matter which NodeMaskSize we compare.
|
||||||
|
func (pqi *PriorityQueueItem) nodeMaskSize() int {
|
||||||
|
if pqi.clusterCIDR.IPv4CIDRSet != nil {
|
||||||
|
return pqi.clusterCIDR.IPv4CIDRSet.NodeMaskSize
|
||||||
|
}
|
||||||
|
|
||||||
|
return pqi.clusterCIDR.IPv6CIDRSet.NodeMaskSize
|
||||||
|
}
|
||||||
|
|
||||||
|
// cidrLabel returns IPv4 CIDR if present, else returns IPv6 CIDR.
|
||||||
|
func (pqi *PriorityQueueItem) cidrLabel() string {
|
||||||
|
if pqi.clusterCIDR.IPv4CIDRSet != nil {
|
||||||
|
return pqi.clusterCIDR.IPv4CIDRSet.Label
|
||||||
|
}
|
||||||
|
|
||||||
|
return pqi.clusterCIDR.IPv6CIDRSet.Label
|
||||||
|
}
|
170
pkg/controller/nodeipam/ipam/multi_cidr_priority_queue_test.go
Normal file
170
pkg/controller/nodeipam/ipam/multi_cidr_priority_queue_test.go
Normal file
@ -0,0 +1,170 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2022 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package ipam
|
||||||
|
|
||||||
|
import (
|
||||||
|
"container/heap"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"k8s.io/kubernetes/pkg/controller/nodeipam/ipam/multicidrset"
|
||||||
|
utilnet "k8s.io/utils/net"
|
||||||
|
)
|
||||||
|
|
||||||
|
func createTestPriorityQueueItem(name, cidr, selectorString string, labelMatchCount, perNodeHostBits int) *PriorityQueueItem {
|
||||||
|
_, clusterCIDR, _ := utilnet.ParseCIDRSloppy(cidr)
|
||||||
|
cidrSet, _ := multicidrset.NewMultiCIDRSet(clusterCIDR, perNodeHostBits)
|
||||||
|
|
||||||
|
return &PriorityQueueItem{
|
||||||
|
clusterCIDR: &multicidrset.ClusterCIDR{
|
||||||
|
Name: name,
|
||||||
|
IPv4CIDRSet: cidrSet,
|
||||||
|
},
|
||||||
|
labelMatchCount: labelMatchCount,
|
||||||
|
selectorString: selectorString,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestPriorityQueue(t *testing.T) {
|
||||||
|
|
||||||
|
pqi1 := createTestPriorityQueueItem("cidr1", "192.168.0.0/16", "foo=bar,name=test1", 1, 8)
|
||||||
|
pqi2 := createTestPriorityQueueItem("cidr2", "10.1.0.0/24", "foo=bar,name=test2", 2, 8)
|
||||||
|
pqi3 := createTestPriorityQueueItem("cidr3", "172.16.0.0/16", "foo=bar,name=test3", 2, 8)
|
||||||
|
pqi4 := createTestPriorityQueueItem("cidr4", "10.1.1.0/26", "abc=bar,name=test4", 2, 6)
|
||||||
|
pqi5 := createTestPriorityQueueItem("cidr5", "10.1.2.0/26", "foo=bar,name=test5", 2, 6)
|
||||||
|
pqi6 := createTestPriorityQueueItem("cidr6", "10.1.3.0/26", "abc=bar,name=test4", 2, 6)
|
||||||
|
|
||||||
|
for _, testQueue := range []struct {
|
||||||
|
name string
|
||||||
|
items []*PriorityQueueItem
|
||||||
|
want *PriorityQueueItem
|
||||||
|
}{
|
||||||
|
{"Test queue with single item", []*PriorityQueueItem{pqi1}, pqi1},
|
||||||
|
{"Test queue with items having different labelMatchCount", []*PriorityQueueItem{pqi1, pqi2}, pqi2},
|
||||||
|
{"Test queue with items having same labelMatchCount, different max Allocatable Pod CIDRs", []*PriorityQueueItem{pqi1, pqi2, pqi3}, pqi2},
|
||||||
|
{"Test queue with items having same labelMatchCount, max Allocatable Pod CIDRs, different PerNodeMaskSize", []*PriorityQueueItem{pqi1, pqi2, pqi4}, pqi4},
|
||||||
|
{"Test queue with items having same labelMatchCount, max Allocatable Pod CIDRs, PerNodeMaskSize, different labels", []*PriorityQueueItem{pqi1, pqi2, pqi4, pqi5}, pqi4},
|
||||||
|
{"Test queue with items having same labelMatchCount, max Allocatable Pod CIDRs, PerNodeMaskSize, labels, different IP addresses", []*PriorityQueueItem{pqi1, pqi2, pqi4, pqi5, pqi6}, pqi4},
|
||||||
|
} {
|
||||||
|
pq := make(PriorityQueue, 0)
|
||||||
|
for _, pqi := range testQueue.items {
|
||||||
|
heap.Push(&pq, pqi)
|
||||||
|
}
|
||||||
|
|
||||||
|
got := heap.Pop(&pq)
|
||||||
|
|
||||||
|
if got != testQueue.want {
|
||||||
|
t.Errorf("Error, wanted: %+v, got: %+v", testQueue.want, got)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestLess(t *testing.T) {
|
||||||
|
|
||||||
|
for _, testQueue := range []struct {
|
||||||
|
name string
|
||||||
|
items []*PriorityQueueItem
|
||||||
|
want bool
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "different labelMatchCount, i higher priority than j",
|
||||||
|
items: []*PriorityQueueItem{
|
||||||
|
createTestPriorityQueueItem("cidr1", "192.168.0.0/16", "foo=bar,name=test1", 2, 8),
|
||||||
|
createTestPriorityQueueItem("cidr2", "10.1.0.0/24", "foo=bar,name=test2", 1, 8),
|
||||||
|
},
|
||||||
|
want: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "different labelMatchCount, i lower priority than j",
|
||||||
|
items: []*PriorityQueueItem{
|
||||||
|
createTestPriorityQueueItem("cidr1", "192.168.0.0/16", "foo=bar,name=test1", 1, 8),
|
||||||
|
createTestPriorityQueueItem("cidr2", "10.1.0.0/24", "foo=bar,name=test2", 2, 8),
|
||||||
|
},
|
||||||
|
want: false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "same labelMatchCount, different max allocatable cidrs, i higher priority than j",
|
||||||
|
items: []*PriorityQueueItem{
|
||||||
|
createTestPriorityQueueItem("cidr2", "10.1.0.0/24", "foo=bar,name=test2", 2, 8),
|
||||||
|
createTestPriorityQueueItem("cidr3", "172.16.0.0/16", "foo=bar,name=test3", 2, 8),
|
||||||
|
},
|
||||||
|
want: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "same labelMatchCount, different max allocatable cidrs, i lower priority than j",
|
||||||
|
items: []*PriorityQueueItem{
|
||||||
|
createTestPriorityQueueItem("cidr2", "10.1.0.0/16", "foo=bar,name=test2", 2, 8),
|
||||||
|
createTestPriorityQueueItem("cidr3", "172.16.0.0/24", "foo=bar,name=test3", 2, 8),
|
||||||
|
},
|
||||||
|
want: false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "same labelMatchCount, max allocatable cidrs, different PerNodeMaskSize i higher priority than j",
|
||||||
|
items: []*PriorityQueueItem{
|
||||||
|
createTestPriorityQueueItem("cidr2", "10.1.0.0/26", "foo=bar,name=test2", 2, 6),
|
||||||
|
createTestPriorityQueueItem("cidr4", "10.1.1.0/24", "abc=bar,name=test4", 2, 8),
|
||||||
|
},
|
||||||
|
want: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "same labelMatchCount, max allocatable cidrs, different PerNodeMaskSize i lower priority than j",
|
||||||
|
items: []*PriorityQueueItem{
|
||||||
|
createTestPriorityQueueItem("cidr2", "10.1.0.0/24", "foo=bar,name=test2", 2, 8),
|
||||||
|
createTestPriorityQueueItem("cidr4", "10.1.1.0/26", "abc=bar,name=test4", 2, 6),
|
||||||
|
},
|
||||||
|
want: false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "same labelMatchCount, max Allocatable Pod CIDRs, PerNodeMaskSize, different labels i higher priority than j",
|
||||||
|
items: []*PriorityQueueItem{
|
||||||
|
createTestPriorityQueueItem("cidr4", "10.1.1.0/26", "abc=bar,name=test4", 2, 6),
|
||||||
|
createTestPriorityQueueItem("cidr5", "10.1.2.0/26", "foo=bar,name=test5", 2, 6),
|
||||||
|
},
|
||||||
|
want: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "same labelMatchCount, max Allocatable Pod CIDRs, PerNodeMaskSize, different labels i lower priority than j",
|
||||||
|
items: []*PriorityQueueItem{
|
||||||
|
createTestPriorityQueueItem("cidr4", "10.1.1.0/26", "xyz=bar,name=test4", 2, 6),
|
||||||
|
createTestPriorityQueueItem("cidr5", "10.1.2.0/26", "foo=bar,name=test5", 2, 6),
|
||||||
|
},
|
||||||
|
want: false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "same labelMatchCount, max Allocatable Pod CIDRs, PerNodeMaskSize, labels, different IP addresses i higher priority than j",
|
||||||
|
items: []*PriorityQueueItem{
|
||||||
|
createTestPriorityQueueItem("cidr4", "10.1.1.0/26", "abc=bar,name=test4", 2, 6),
|
||||||
|
createTestPriorityQueueItem("cidr6", "10.1.3.0/26", "abc=bar,name=test4", 2, 6),
|
||||||
|
},
|
||||||
|
want: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "same labelMatchCount, max Allocatable Pod CIDRs, PerNodeMaskSize, labels, different IP addresses i lower priority than j",
|
||||||
|
items: []*PriorityQueueItem{
|
||||||
|
createTestPriorityQueueItem("cidr4", "10.1.1.0/26", "xyz=bar,name=test4", 2, 6),
|
||||||
|
createTestPriorityQueueItem("cidr6", "10.0.3.0/26", "abc=bar,name=test4", 2, 6),
|
||||||
|
},
|
||||||
|
want: false,
|
||||||
|
},
|
||||||
|
} {
|
||||||
|
var pq PriorityQueue
|
||||||
|
pq = testQueue.items
|
||||||
|
got := pq.Less(0, 1)
|
||||||
|
if got != testQueue.want {
|
||||||
|
t.Errorf("Error, wanted: %v, got: %v\nTest %q \npq[0]: %+v \npq[1]: %+v ", testQueue.want, got, testQueue.name, pq[0], pq[1])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
1205
pkg/controller/nodeipam/ipam/multi_cidr_range_allocator.go
Normal file
1205
pkg/controller/nodeipam/ipam/multi_cidr_range_allocator.go
Normal file
File diff suppressed because it is too large
Load Diff
1868
pkg/controller/nodeipam/ipam/multi_cidr_range_allocator_test.go
Normal file
1868
pkg/controller/nodeipam/ipam/multi_cidr_range_allocator_test.go
Normal file
File diff suppressed because it is too large
Load Diff
78
pkg/controller/nodeipam/ipam/multicidrset/metrics.go
Normal file
78
pkg/controller/nodeipam/ipam/multicidrset/metrics.go
Normal file
@ -0,0 +1,78 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2022 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package multicidrset
|
||||||
|
|
||||||
|
import (
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
"k8s.io/component-base/metrics"
|
||||||
|
"k8s.io/component-base/metrics/legacyregistry"
|
||||||
|
)
|
||||||
|
|
||||||
|
const nodeIpamSubsystem = "node_ipam_controller"
|
||||||
|
|
||||||
|
var (
|
||||||
|
cidrSetAllocations = metrics.NewCounterVec(
|
||||||
|
&metrics.CounterOpts{
|
||||||
|
Subsystem: nodeIpamSubsystem,
|
||||||
|
Name: "multicidrset_cidrs_allocations_total",
|
||||||
|
Help: "Counter measuring total number of CIDR allocations.",
|
||||||
|
StabilityLevel: metrics.ALPHA,
|
||||||
|
},
|
||||||
|
[]string{"clusterCIDR"},
|
||||||
|
)
|
||||||
|
cidrSetReleases = metrics.NewCounterVec(
|
||||||
|
&metrics.CounterOpts{
|
||||||
|
Subsystem: nodeIpamSubsystem,
|
||||||
|
Name: "multicidrset_cidrs_releases_total",
|
||||||
|
Help: "Counter measuring total number of CIDR releases.",
|
||||||
|
StabilityLevel: metrics.ALPHA,
|
||||||
|
},
|
||||||
|
[]string{"clusterCIDR"},
|
||||||
|
)
|
||||||
|
cidrSetUsage = metrics.NewGaugeVec(
|
||||||
|
&metrics.GaugeOpts{
|
||||||
|
Subsystem: nodeIpamSubsystem,
|
||||||
|
Name: "multicidrset_usage_cidrs",
|
||||||
|
Help: "Gauge measuring percentage of allocated CIDRs.",
|
||||||
|
StabilityLevel: metrics.ALPHA,
|
||||||
|
},
|
||||||
|
[]string{"clusterCIDR"},
|
||||||
|
)
|
||||||
|
cidrSetAllocationTriesPerRequest = metrics.NewHistogramVec(
|
||||||
|
&metrics.HistogramOpts{
|
||||||
|
Subsystem: nodeIpamSubsystem,
|
||||||
|
Name: "multicidrset_allocation_tries_per_request",
|
||||||
|
Help: "Histogram measuring CIDR allocation tries per request.",
|
||||||
|
StabilityLevel: metrics.ALPHA,
|
||||||
|
Buckets: metrics.ExponentialBuckets(1, 5, 5),
|
||||||
|
},
|
||||||
|
[]string{"clusterCIDR"},
|
||||||
|
)
|
||||||
|
)
|
||||||
|
|
||||||
|
var registerMetrics sync.Once
|
||||||
|
|
||||||
|
// registerCidrsetMetrics the metrics that are to be monitored.
|
||||||
|
func registerCidrsetMetrics() {
|
||||||
|
registerMetrics.Do(func() {
|
||||||
|
legacyregistry.MustRegister(cidrSetAllocations)
|
||||||
|
legacyregistry.MustRegister(cidrSetReleases)
|
||||||
|
legacyregistry.MustRegister(cidrSetUsage)
|
||||||
|
legacyregistry.MustRegister(cidrSetAllocationTriesPerRequest)
|
||||||
|
})
|
||||||
|
}
|
361
pkg/controller/nodeipam/ipam/multicidrset/multi_cidr_set.go
Normal file
361
pkg/controller/nodeipam/ipam/multicidrset/multi_cidr_set.go
Normal file
@ -0,0 +1,361 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2022 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package multicidrset
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/binary"
|
||||||
|
"fmt"
|
||||||
|
"math/big"
|
||||||
|
"math/bits"
|
||||||
|
"net"
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
netutils "k8s.io/utils/net"
|
||||||
|
)
|
||||||
|
|
||||||
|
// MultiCIDRSet manages a set of CIDR ranges from which blocks of IPs can
|
||||||
|
// be allocated from.
|
||||||
|
type MultiCIDRSet struct {
|
||||||
|
sync.Mutex
|
||||||
|
// ClusterCIDR is the CIDR assigned to the cluster.
|
||||||
|
ClusterCIDR *net.IPNet
|
||||||
|
// NodeMaskSize is the mask size, in bits,assigned to the nodes
|
||||||
|
// caches the mask size to avoid the penalty of calling nodeMask.Size().
|
||||||
|
NodeMaskSize int
|
||||||
|
// MaxCIDRs is the maximum number of CIDRs that can be allocated.
|
||||||
|
MaxCIDRs int
|
||||||
|
// Label stores the CIDR in a string, it is used to identify the metrics such
|
||||||
|
// as Number of allocations, Total number of CIDR releases, Percentage of
|
||||||
|
// allocated CIDRs, Tries required for allocating a CIDR for a particular CIDRSet.
|
||||||
|
Label string
|
||||||
|
// AllocatedCIDRMap stores all the allocated CIDRs from the current CIDRSet.
|
||||||
|
// Stores a mapping of the next candidate CIDR for allocation to it's
|
||||||
|
// allocation status. Next candidate is used only if allocation status is false.
|
||||||
|
AllocatedCIDRMap map[string]bool
|
||||||
|
|
||||||
|
// clusterMaskSize is the mask size, in bits, assigned to the cluster.
|
||||||
|
// caches the mask size to avoid the penalty of calling clusterCIDR.Mask.Size().
|
||||||
|
clusterMaskSize int
|
||||||
|
// nodeMask is the network mask assigned to the nodes.
|
||||||
|
nodeMask net.IPMask
|
||||||
|
// allocatedCIDRs counts the number of CIDRs allocated.
|
||||||
|
allocatedCIDRs int
|
||||||
|
// nextCandidate points to the next CIDR that should be free.
|
||||||
|
nextCandidate int
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClusterCIDR is an internal representation of the ClusterCIDR API object.
|
||||||
|
type ClusterCIDR struct {
|
||||||
|
// Name of the associated ClusterCIDR API object.
|
||||||
|
Name string
|
||||||
|
// IPv4CIDRSet is the MultiCIDRSet representation of ClusterCIDR.spec.ipv4
|
||||||
|
// of the associated ClusterCIDR API object.
|
||||||
|
IPv4CIDRSet *MultiCIDRSet
|
||||||
|
// IPv6CIDRSet is the MultiCIDRSet representation of ClusterCIDR.spec.ipv6
|
||||||
|
// of the associated ClusterCIDR API object.
|
||||||
|
IPv6CIDRSet *MultiCIDRSet
|
||||||
|
// AssociatedNodes is used to identify which nodes have CIDRs allocated from this ClusterCIDR.
|
||||||
|
// Stores a mapping of node name to association status.
|
||||||
|
AssociatedNodes map[string]bool
|
||||||
|
// Terminating is used to identify whether ClusterCIDR has been marked for termination.
|
||||||
|
Terminating bool
|
||||||
|
}
|
||||||
|
|
||||||
|
const (
|
||||||
|
// The subnet mask size cannot be greater than 16 more than the cluster mask size
|
||||||
|
// TODO: https://github.com/kubernetes/kubernetes/issues/44918
|
||||||
|
// clusterSubnetMaxDiff limited to 16 due to the uncompressed bitmap.
|
||||||
|
// Due to this limitation the subnet mask for IPv6 cluster cidr needs to be >= 48
|
||||||
|
// as default mask size for IPv6 is 64.
|
||||||
|
clusterSubnetMaxDiff = 16
|
||||||
|
// halfIPv6Len is the half of the IPv6 length.
|
||||||
|
halfIPv6Len = net.IPv6len / 2
|
||||||
|
)
|
||||||
|
|
||||||
|
// CIDRRangeNoCIDRsRemainingErr is an error type used to denote there is no more
|
||||||
|
// space to allocate CIDR ranges from the given CIDR.
|
||||||
|
type CIDRRangeNoCIDRsRemainingErr struct {
|
||||||
|
// CIDR represents the CIDR which is exhausted.
|
||||||
|
CIDR string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (err *CIDRRangeNoCIDRsRemainingErr) Error() string {
|
||||||
|
return fmt.Sprintf("CIDR allocation failed; there are no remaining CIDRs left to allocate in the range %s", err.CIDR)
|
||||||
|
}
|
||||||
|
|
||||||
|
// CIDRSetSubNetTooBigErr is an error type to denote that subnet mask size is too
|
||||||
|
// big compared to the CIDR mask size.
|
||||||
|
type CIDRSetSubNetTooBigErr struct {
|
||||||
|
cidr string
|
||||||
|
subnetMaskSize int
|
||||||
|
clusterMaskSize int
|
||||||
|
}
|
||||||
|
|
||||||
|
func (err *CIDRSetSubNetTooBigErr) Error() string {
|
||||||
|
return fmt.Sprintf("Creation of New CIDR Set failed for %s. "+
|
||||||
|
"PerNodeMaskSize %d is too big for CIDR Mask %d, Maximum difference allowed "+
|
||||||
|
"is %d", err.cidr, err.subnetMaskSize, err.clusterMaskSize, clusterSubnetMaxDiff)
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewMultiCIDRSet creates a new MultiCIDRSet.
|
||||||
|
func NewMultiCIDRSet(cidrConfig *net.IPNet, perNodeHostBits int) (*MultiCIDRSet, error) {
|
||||||
|
clusterMask := cidrConfig.Mask
|
||||||
|
clusterMaskSize, bits := clusterMask.Size()
|
||||||
|
|
||||||
|
var subNetMaskSize int
|
||||||
|
switch /*v4 or v6*/ {
|
||||||
|
case netutils.IsIPv4(cidrConfig.IP):
|
||||||
|
subNetMaskSize = 32 - perNodeHostBits
|
||||||
|
case netutils.IsIPv6(cidrConfig.IP):
|
||||||
|
subNetMaskSize = 128 - perNodeHostBits
|
||||||
|
}
|
||||||
|
|
||||||
|
if netutils.IsIPv6(cidrConfig.IP) && (subNetMaskSize-clusterMaskSize > clusterSubnetMaxDiff) {
|
||||||
|
return nil, &CIDRSetSubNetTooBigErr{
|
||||||
|
cidr: cidrConfig.String(),
|
||||||
|
subnetMaskSize: subNetMaskSize,
|
||||||
|
clusterMaskSize: clusterMaskSize,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Register MultiCIDRSet metrics.
|
||||||
|
registerCidrsetMetrics()
|
||||||
|
|
||||||
|
return &MultiCIDRSet{
|
||||||
|
ClusterCIDR: cidrConfig,
|
||||||
|
nodeMask: net.CIDRMask(subNetMaskSize, bits),
|
||||||
|
clusterMaskSize: clusterMaskSize,
|
||||||
|
MaxCIDRs: 1 << uint32(subNetMaskSize-clusterMaskSize),
|
||||||
|
NodeMaskSize: subNetMaskSize,
|
||||||
|
Label: cidrConfig.String(),
|
||||||
|
AllocatedCIDRMap: make(map[string]bool, 0),
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *MultiCIDRSet) indexToCIDRBlock(index int) (*net.IPNet, error) {
|
||||||
|
var ip []byte
|
||||||
|
switch /*v4 or v6*/ {
|
||||||
|
case netutils.IsIPv4(s.ClusterCIDR.IP):
|
||||||
|
j := uint32(index) << uint32(32-s.NodeMaskSize)
|
||||||
|
ipInt := (binary.BigEndian.Uint32(s.ClusterCIDR.IP)) | j
|
||||||
|
ip = make([]byte, net.IPv4len)
|
||||||
|
binary.BigEndian.PutUint32(ip, ipInt)
|
||||||
|
case netutils.IsIPv6(s.ClusterCIDR.IP):
|
||||||
|
// leftClusterIP | rightClusterIP
|
||||||
|
// 2001:0DB8:1234:0000:0000:0000:0000:0000
|
||||||
|
const v6NBits = 128
|
||||||
|
const halfV6NBits = v6NBits / 2
|
||||||
|
leftClusterIP := binary.BigEndian.Uint64(s.ClusterCIDR.IP[:halfIPv6Len])
|
||||||
|
rightClusterIP := binary.BigEndian.Uint64(s.ClusterCIDR.IP[halfIPv6Len:])
|
||||||
|
|
||||||
|
ip = make([]byte, net.IPv6len)
|
||||||
|
|
||||||
|
if s.NodeMaskSize <= halfV6NBits {
|
||||||
|
// We only care about left side IP.
|
||||||
|
leftClusterIP |= uint64(index) << uint(halfV6NBits-s.NodeMaskSize)
|
||||||
|
} else {
|
||||||
|
if s.clusterMaskSize < halfV6NBits {
|
||||||
|
// see how many bits are needed to reach the left side.
|
||||||
|
btl := uint(s.NodeMaskSize - halfV6NBits)
|
||||||
|
indexMaxBit := uint(64 - bits.LeadingZeros64(uint64(index)))
|
||||||
|
if indexMaxBit > btl {
|
||||||
|
leftClusterIP |= uint64(index) >> btl
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// the right side will be calculated the same way either the
|
||||||
|
// subNetMaskSize affects both left and right sides.
|
||||||
|
rightClusterIP |= uint64(index) << uint(v6NBits-s.NodeMaskSize)
|
||||||
|
}
|
||||||
|
binary.BigEndian.PutUint64(ip[:halfIPv6Len], leftClusterIP)
|
||||||
|
binary.BigEndian.PutUint64(ip[halfIPv6Len:], rightClusterIP)
|
||||||
|
default:
|
||||||
|
return nil, fmt.Errorf("invalid IP: %s", s.ClusterCIDR.IP)
|
||||||
|
}
|
||||||
|
return &net.IPNet{
|
||||||
|
IP: ip,
|
||||||
|
Mask: s.nodeMask,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// NextCandidate returns the next candidate and the last evaluated index
|
||||||
|
// for the current cidrSet. Returns nil if the candidate is already allocated.
|
||||||
|
func (s *MultiCIDRSet) NextCandidate() (*net.IPNet, int, error) {
|
||||||
|
s.Lock()
|
||||||
|
defer s.Unlock()
|
||||||
|
|
||||||
|
if s.allocatedCIDRs == s.MaxCIDRs {
|
||||||
|
return nil, 0, &CIDRRangeNoCIDRsRemainingErr{
|
||||||
|
CIDR: s.Label,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
candidate := s.nextCandidate
|
||||||
|
for i := 0; i < s.MaxCIDRs; i++ {
|
||||||
|
nextCandidateCIDR, err := s.indexToCIDRBlock(candidate)
|
||||||
|
if err != nil {
|
||||||
|
return nil, i, err
|
||||||
|
}
|
||||||
|
// Check if the nextCandidate is not already allocated.
|
||||||
|
if _, ok := s.AllocatedCIDRMap[nextCandidateCIDR.String()]; !ok {
|
||||||
|
s.nextCandidate = (candidate + 1) % s.MaxCIDRs
|
||||||
|
return nextCandidateCIDR, i, nil
|
||||||
|
}
|
||||||
|
candidate = (candidate + 1) % s.MaxCIDRs
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil, s.MaxCIDRs, &CIDRRangeNoCIDRsRemainingErr{
|
||||||
|
CIDR: s.Label,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// getBeginningAndEndIndices returns the indices for the given CIDR, returned
|
||||||
|
// values are inclusive indices [beginning, end].
|
||||||
|
func (s *MultiCIDRSet) getBeginningAndEndIndices(cidr *net.IPNet) (int, int, error) {
|
||||||
|
if cidr == nil {
|
||||||
|
return -1, -1, fmt.Errorf("error getting indices for cluster cidr %v, cidr is nil", s.ClusterCIDR)
|
||||||
|
}
|
||||||
|
begin, end := 0, s.MaxCIDRs-1
|
||||||
|
cidrMask := cidr.Mask
|
||||||
|
maskSize, _ := cidrMask.Size()
|
||||||
|
var ipSize int
|
||||||
|
|
||||||
|
if !s.ClusterCIDR.Contains(cidr.IP.Mask(s.ClusterCIDR.Mask)) && !cidr.Contains(s.ClusterCIDR.IP.Mask(cidr.Mask)) {
|
||||||
|
return -1, -1, fmt.Errorf("cidr %v is out the range of cluster cidr %v", cidr, s.ClusterCIDR)
|
||||||
|
}
|
||||||
|
|
||||||
|
if s.clusterMaskSize < maskSize {
|
||||||
|
var err error
|
||||||
|
ipSize = net.IPv4len
|
||||||
|
if netutils.IsIPv6(cidr.IP) {
|
||||||
|
ipSize = net.IPv6len
|
||||||
|
}
|
||||||
|
begin, err = s.getIndexForCIDR(&net.IPNet{
|
||||||
|
IP: cidr.IP.Mask(s.nodeMask),
|
||||||
|
Mask: s.nodeMask,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return -1, -1, err
|
||||||
|
}
|
||||||
|
ip := make([]byte, ipSize)
|
||||||
|
if netutils.IsIPv4(cidr.IP) {
|
||||||
|
ipInt := binary.BigEndian.Uint32(cidr.IP) | (^binary.BigEndian.Uint32(cidr.Mask))
|
||||||
|
binary.BigEndian.PutUint32(ip, ipInt)
|
||||||
|
} else {
|
||||||
|
// ipIntLeft | ipIntRight
|
||||||
|
// 2001:0DB8:1234:0000:0000:0000:0000:0000
|
||||||
|
ipIntLeft := binary.BigEndian.Uint64(cidr.IP[:net.IPv6len/2]) | (^binary.BigEndian.Uint64(cidr.Mask[:net.IPv6len/2]))
|
||||||
|
ipIntRight := binary.BigEndian.Uint64(cidr.IP[net.IPv6len/2:]) | (^binary.BigEndian.Uint64(cidr.Mask[net.IPv6len/2:]))
|
||||||
|
binary.BigEndian.PutUint64(ip[:net.IPv6len/2], ipIntLeft)
|
||||||
|
binary.BigEndian.PutUint64(ip[net.IPv6len/2:], ipIntRight)
|
||||||
|
}
|
||||||
|
end, err = s.getIndexForCIDR(&net.IPNet{
|
||||||
|
IP: net.IP(ip).Mask(s.nodeMask),
|
||||||
|
Mask: s.nodeMask,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return -1, -1, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return begin, end, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Release releases the given CIDR range.
|
||||||
|
func (s *MultiCIDRSet) Release(cidr *net.IPNet) error {
|
||||||
|
begin, end, err := s.getBeginningAndEndIndices(cidr)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
s.Lock()
|
||||||
|
defer s.Unlock()
|
||||||
|
|
||||||
|
for i := begin; i <= end; i++ {
|
||||||
|
// Remove from the allocated CIDR Map and decrement the counter only if currently
|
||||||
|
// marked allocated. Avoids double counting.
|
||||||
|
currCIDR, err := s.indexToCIDRBlock(i)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if _, ok := s.AllocatedCIDRMap[currCIDR.String()]; ok {
|
||||||
|
delete(s.AllocatedCIDRMap, currCIDR.String())
|
||||||
|
s.allocatedCIDRs--
|
||||||
|
cidrSetReleases.WithLabelValues(s.Label).Inc()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
cidrSetUsage.WithLabelValues(s.Label).Set(float64(s.allocatedCIDRs) / float64(s.MaxCIDRs))
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Occupy marks the given CIDR range as used. Occupy succeeds even if the CIDR
|
||||||
|
// range was previously used.
|
||||||
|
func (s *MultiCIDRSet) Occupy(cidr *net.IPNet) (err error) {
|
||||||
|
begin, end, err := s.getBeginningAndEndIndices(cidr)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
s.Lock()
|
||||||
|
defer s.Unlock()
|
||||||
|
|
||||||
|
for i := begin; i <= end; i++ {
|
||||||
|
// Add to the allocated CIDR Map and increment the counter only if not already
|
||||||
|
// marked allocated. Prevents double counting.
|
||||||
|
currCIDR, err := s.indexToCIDRBlock(i)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if _, ok := s.AllocatedCIDRMap[currCIDR.String()]; !ok {
|
||||||
|
s.AllocatedCIDRMap[currCIDR.String()] = true
|
||||||
|
cidrSetAllocations.WithLabelValues(s.Label).Inc()
|
||||||
|
s.allocatedCIDRs++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
cidrSetUsage.WithLabelValues(s.Label).Set(float64(s.allocatedCIDRs) / float64(s.MaxCIDRs))
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// getIndexForCIDR returns the index of the per-node CIDR block that contains
// the base address of the given CIDR.
func (s *MultiCIDRSet) getIndexForCIDR(cidr *net.IPNet) (int, error) {
	return s.getIndexForIP(cidr.IP)
}
|
||||||
|
|
||||||
|
func (s *MultiCIDRSet) getIndexForIP(ip net.IP) (int, error) {
|
||||||
|
if ip.To4() != nil {
|
||||||
|
cidrIndex := (binary.BigEndian.Uint32(s.ClusterCIDR.IP) ^ binary.BigEndian.Uint32(ip.To4())) >> uint32(32-s.NodeMaskSize)
|
||||||
|
if cidrIndex >= uint32(s.MaxCIDRs) {
|
||||||
|
return 0, fmt.Errorf("CIDR: %v/%v is out of the range of CIDR allocator", ip, s.NodeMaskSize)
|
||||||
|
}
|
||||||
|
return int(cidrIndex), nil
|
||||||
|
}
|
||||||
|
if netutils.IsIPv6(ip) {
|
||||||
|
bigIP := big.NewInt(0).SetBytes(s.ClusterCIDR.IP)
|
||||||
|
bigIP = bigIP.Xor(bigIP, big.NewInt(0).SetBytes(ip))
|
||||||
|
cidrIndexBig := bigIP.Rsh(bigIP, uint(net.IPv6len*8-s.NodeMaskSize))
|
||||||
|
cidrIndex := cidrIndexBig.Uint64()
|
||||||
|
if cidrIndex >= uint64(s.MaxCIDRs) {
|
||||||
|
return 0, fmt.Errorf("CIDR: %v/%v is out of the range of CIDR allocator", ip, s.NodeMaskSize)
|
||||||
|
}
|
||||||
|
return int(cidrIndex), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return 0, fmt.Errorf("invalid IP: %v", ip)
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateEvaluatedCount increments the evaluated count.
|
||||||
|
func (s *MultiCIDRSet) UpdateEvaluatedCount(evaluated int) {
|
||||||
|
cidrSetAllocationTriesPerRequest.WithLabelValues(s.Label).Observe(float64(evaluated))
|
||||||
|
}
|
874
pkg/controller/nodeipam/ipam/multicidrset/multi_cidr_set_test.go
Normal file
874
pkg/controller/nodeipam/ipam/multicidrset/multi_cidr_set_test.go
Normal file
@ -0,0 +1,874 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2022 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package multicidrset
|
||||||
|
|
||||||
|
import (
|
||||||
|
"net"
|
||||||
|
"reflect"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"k8s.io/component-base/metrics/testutil"
|
||||||
|
"k8s.io/klog/v2"
|
||||||
|
utilnet "k8s.io/utils/net"
|
||||||
|
)
|
||||||
|
|
||||||
|
func allocateNext(s *MultiCIDRSet) (*net.IPNet, error) {
|
||||||
|
candidate, _, err := s.NextCandidate()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
err = s.Occupy(candidate)
|
||||||
|
|
||||||
|
return candidate, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestCIDRSetFullyAllocated(t *testing.T) {
|
||||||
|
cases := []struct {
|
||||||
|
clusterCIDRStr string
|
||||||
|
perNodeHostBits int
|
||||||
|
expectedCIDR string
|
||||||
|
description string
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
clusterCIDRStr: "127.123.234.0/28",
|
||||||
|
perNodeHostBits: 4,
|
||||||
|
expectedCIDR: "127.123.234.0/28",
|
||||||
|
description: "Fully allocated CIDR with IPv4",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
clusterCIDRStr: "beef:1234::/112",
|
||||||
|
perNodeHostBits: 16,
|
||||||
|
expectedCIDR: "beef:1234::/112",
|
||||||
|
description: "Fully allocated CIDR with IPv6",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for _, tc := range cases {
|
||||||
|
_, clusterCIDR, _ := utilnet.ParseCIDRSloppy(tc.clusterCIDRStr)
|
||||||
|
a, err := NewMultiCIDRSet(clusterCIDR, tc.perNodeHostBits)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("unexpected error: %v for %v", err, tc.description)
|
||||||
|
}
|
||||||
|
p, err := allocateNext(a)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("unexpected error: %v for %v", err, tc.description)
|
||||||
|
}
|
||||||
|
if p.String() != tc.expectedCIDR {
|
||||||
|
t.Fatalf("unexpected allocated cidr: %v, expecting %v for %v",
|
||||||
|
p.String(), tc.expectedCIDR, tc.description)
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err = allocateNext(a)
|
||||||
|
if err == nil {
|
||||||
|
t.Fatalf("expected error because of fully-allocated range for %v", tc.description)
|
||||||
|
}
|
||||||
|
|
||||||
|
a.Release(p)
|
||||||
|
|
||||||
|
p, err = allocateNext(a)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("unexpected error: %v for %v", err, tc.description)
|
||||||
|
}
|
||||||
|
if p.String() != tc.expectedCIDR {
|
||||||
|
t.Fatalf("unexpected allocated cidr: %v, expecting %v for %v",
|
||||||
|
p.String(), tc.expectedCIDR, tc.description)
|
||||||
|
}
|
||||||
|
_, err = allocateNext(a)
|
||||||
|
if err == nil {
|
||||||
|
t.Fatalf("expected error because of fully-allocated range for %v", tc.description)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestIndexToCIDRBlock(t *testing.T) {
|
||||||
|
cases := []struct {
|
||||||
|
clusterCIDRStr string
|
||||||
|
perNodeHostBits int
|
||||||
|
index int
|
||||||
|
CIDRBlock string
|
||||||
|
description string
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
clusterCIDRStr: "127.123.3.0/16",
|
||||||
|
perNodeHostBits: 8,
|
||||||
|
index: 0,
|
||||||
|
CIDRBlock: "127.123.0.0/24",
|
||||||
|
description: "1st IP address indexed with IPv4",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
clusterCIDRStr: "127.123.0.0/16",
|
||||||
|
perNodeHostBits: 8,
|
||||||
|
index: 15,
|
||||||
|
CIDRBlock: "127.123.15.0/24",
|
||||||
|
description: "16th IP address indexed with IPv4",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
clusterCIDRStr: "192.168.5.219/28",
|
||||||
|
perNodeHostBits: 0,
|
||||||
|
index: 5,
|
||||||
|
CIDRBlock: "192.168.5.213/32",
|
||||||
|
description: "5th IP address indexed with IPv4",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
clusterCIDRStr: "2001:0db8:1234:3::/48",
|
||||||
|
perNodeHostBits: 64,
|
||||||
|
index: 0,
|
||||||
|
CIDRBlock: "2001:db8:1234::/64",
|
||||||
|
description: "1st IP address indexed with IPv6 /64",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
clusterCIDRStr: "2001:0db8:1234::/48",
|
||||||
|
perNodeHostBits: 64,
|
||||||
|
index: 15,
|
||||||
|
CIDRBlock: "2001:db8:1234:f::/64",
|
||||||
|
description: "16th IP address indexed with IPv6 /64",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
clusterCIDRStr: "2001:0db8:85a3::8a2e:0370:7334/50",
|
||||||
|
perNodeHostBits: 65,
|
||||||
|
index: 6425,
|
||||||
|
CIDRBlock: "2001:db8:85a3:3232::/63",
|
||||||
|
description: "6426th IP address indexed with IPv6 /63",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
clusterCIDRStr: "2001:0db8::/32",
|
||||||
|
perNodeHostBits: 80,
|
||||||
|
index: 0,
|
||||||
|
CIDRBlock: "2001:db8::/48",
|
||||||
|
description: "1st IP address indexed with IPv6 /48",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
clusterCIDRStr: "2001:0db8::/32",
|
||||||
|
perNodeHostBits: 80,
|
||||||
|
index: 15,
|
||||||
|
CIDRBlock: "2001:db8:f::/48",
|
||||||
|
description: "16th IP address indexed with IPv6 /48",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
clusterCIDRStr: "2001:0db8:85a3::8a2e:0370:7334/32",
|
||||||
|
perNodeHostBits: 80,
|
||||||
|
index: 6425,
|
||||||
|
CIDRBlock: "2001:db8:1919::/48",
|
||||||
|
description: "6426th IP address indexed with IPv6 /48",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
clusterCIDRStr: "2001:0db8:1234:ff00::/56",
|
||||||
|
perNodeHostBits: 56,
|
||||||
|
index: 0,
|
||||||
|
CIDRBlock: "2001:db8:1234:ff00::/72",
|
||||||
|
description: "1st IP address indexed with IPv6 /72",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
clusterCIDRStr: "2001:0db8:1234:ff00::/56",
|
||||||
|
perNodeHostBits: 56,
|
||||||
|
index: 15,
|
||||||
|
CIDRBlock: "2001:db8:1234:ff00:f00::/72",
|
||||||
|
description: "16th IP address indexed with IPv6 /72",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
clusterCIDRStr: "2001:0db8:1234:ff00::0370:7334/56",
|
||||||
|
perNodeHostBits: 56,
|
||||||
|
index: 6425,
|
||||||
|
CIDRBlock: "2001:db8:1234:ff19:1900::/72",
|
||||||
|
description: "6426th IP address indexed with IPv6 /72",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
clusterCIDRStr: "2001:0db8:1234:0:1234::/80",
|
||||||
|
perNodeHostBits: 32,
|
||||||
|
index: 0,
|
||||||
|
CIDRBlock: "2001:db8:1234:0:1234::/96",
|
||||||
|
description: "1st IP address indexed with IPv6 /96",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
clusterCIDRStr: "2001:0db8:1234:0:1234::/80",
|
||||||
|
perNodeHostBits: 32,
|
||||||
|
index: 15,
|
||||||
|
CIDRBlock: "2001:db8:1234:0:1234:f::/96",
|
||||||
|
description: "16th IP address indexed with IPv6 /96",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
clusterCIDRStr: "2001:0db8:1234:ff00::0370:7334/80",
|
||||||
|
perNodeHostBits: 32,
|
||||||
|
index: 6425,
|
||||||
|
CIDRBlock: "2001:db8:1234:ff00:0:1919::/96",
|
||||||
|
description: "6426th IP address indexed with IPv6 /96",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for _, tc := range cases {
|
||||||
|
_, clusterCIDR, _ := utilnet.ParseCIDRSloppy(tc.clusterCIDRStr)
|
||||||
|
a, err := NewMultiCIDRSet(clusterCIDR, tc.perNodeHostBits)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("error for %v ", tc.description)
|
||||||
|
}
|
||||||
|
cidr, err := a.indexToCIDRBlock(tc.index)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("error for %v ", tc.description)
|
||||||
|
}
|
||||||
|
if cidr.String() != tc.CIDRBlock {
|
||||||
|
t.Fatalf("error for %v index %d %s", tc.description, tc.index, cidr.String())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestCIDRSet_RandomishAllocation(t *testing.T) {
|
||||||
|
cases := []struct {
|
||||||
|
clusterCIDRStr string
|
||||||
|
description string
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
clusterCIDRStr: "127.123.234.0/16",
|
||||||
|
description: "RandomishAllocation with IPv4",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
clusterCIDRStr: "beef:1234::/112",
|
||||||
|
description: "RandomishAllocation with IPv6",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for _, tc := range cases {
|
||||||
|
_, clusterCIDR, _ := utilnet.ParseCIDRSloppy(tc.clusterCIDRStr)
|
||||||
|
a, err := NewMultiCIDRSet(clusterCIDR, 8)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Error allocating CIDRSet for %v", tc.description)
|
||||||
|
}
|
||||||
|
// allocate all the CIDRs.
|
||||||
|
var cidrs []*net.IPNet
|
||||||
|
|
||||||
|
for i := 0; i < 256; i++ {
|
||||||
|
if c, err := allocateNext(a); err == nil {
|
||||||
|
cidrs = append(cidrs, c)
|
||||||
|
} else {
|
||||||
|
t.Fatalf("unexpected error: %v for %v", err, tc.description)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err = allocateNext(a)
|
||||||
|
if err == nil {
|
||||||
|
t.Fatalf("expected error because of fully-allocated range for %v", tc.description)
|
||||||
|
}
|
||||||
|
// release all the CIDRs.
|
||||||
|
for i := 0; i < len(cidrs); i++ {
|
||||||
|
a.Release(cidrs[i])
|
||||||
|
}
|
||||||
|
|
||||||
|
// allocate the CIDRs again.
|
||||||
|
var rcidrs []*net.IPNet
|
||||||
|
for i := 0; i < 256; i++ {
|
||||||
|
if c, err := allocateNext(a); err == nil {
|
||||||
|
rcidrs = append(rcidrs, c)
|
||||||
|
} else {
|
||||||
|
t.Fatalf("unexpected error: %d, %v for %v", i, err, tc.description)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
_, err = allocateNext(a)
|
||||||
|
if err == nil {
|
||||||
|
t.Fatalf("expected error because of fully-allocated range for %v", tc.description)
|
||||||
|
}
|
||||||
|
|
||||||
|
if !reflect.DeepEqual(cidrs, rcidrs) {
|
||||||
|
t.Fatalf("expected re-allocated cidrs are the same collection for %v", tc.description)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestCIDRSet_AllocationOccupied(t *testing.T) {
|
||||||
|
cases := []struct {
|
||||||
|
clusterCIDRStr string
|
||||||
|
description string
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
clusterCIDRStr: "127.123.234.0/16",
|
||||||
|
description: "AllocationOccupied with IPv4",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
clusterCIDRStr: "beef:1234::/112",
|
||||||
|
description: "AllocationOccupied with IPv6",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for _, tc := range cases {
|
||||||
|
_, clusterCIDR, _ := utilnet.ParseCIDRSloppy(tc.clusterCIDRStr)
|
||||||
|
a, err := NewMultiCIDRSet(clusterCIDR, 8)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Error allocating CIDRSet for %v", tc.description)
|
||||||
|
}
|
||||||
|
// allocate all the CIDRs.
|
||||||
|
var cidrs []*net.IPNet
|
||||||
|
var numCIDRs = 256
|
||||||
|
|
||||||
|
for i := 0; i < numCIDRs; i++ {
|
||||||
|
if c, err := allocateNext(a); err == nil {
|
||||||
|
cidrs = append(cidrs, c)
|
||||||
|
} else {
|
||||||
|
t.Fatalf("unexpected error: %v for %v", err, tc.description)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err = allocateNext(a)
|
||||||
|
if err == nil {
|
||||||
|
t.Fatalf("expected error because of fully-allocated range for %v", tc.description)
|
||||||
|
}
|
||||||
|
// release all the CIDRs.
|
||||||
|
for i := 0; i < len(cidrs); i++ {
|
||||||
|
a.Release(cidrs[i])
|
||||||
|
}
|
||||||
|
// occupy the last 128 CIDRs.
|
||||||
|
for i := numCIDRs / 2; i < numCIDRs; i++ {
|
||||||
|
a.Occupy(cidrs[i])
|
||||||
|
}
|
||||||
|
// occupy the first of the last 128 again.
|
||||||
|
a.Occupy(cidrs[numCIDRs/2])
|
||||||
|
|
||||||
|
// allocate the first 128 CIDRs again.
|
||||||
|
var rcidrs []*net.IPNet
|
||||||
|
for i := 0; i < numCIDRs/2; i++ {
|
||||||
|
if c, err := allocateNext(a); err == nil {
|
||||||
|
rcidrs = append(rcidrs, c)
|
||||||
|
} else {
|
||||||
|
t.Fatalf("unexpected error: %d, %v for %v", i, err, tc.description)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
_, err = allocateNext(a)
|
||||||
|
if err == nil {
|
||||||
|
t.Fatalf("expected error because of fully-allocated range for %v", tc.description)
|
||||||
|
}
|
||||||
|
|
||||||
|
// check Occupy() works properly.
|
||||||
|
for i := numCIDRs / 2; i < numCIDRs; i++ {
|
||||||
|
rcidrs = append(rcidrs, cidrs[i])
|
||||||
|
}
|
||||||
|
if !reflect.DeepEqual(cidrs, rcidrs) {
|
||||||
|
t.Fatalf("expected re-allocated cidrs are the same collection for %v", tc.description)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestDoubleOccupyRelease(t *testing.T) {
|
||||||
|
// Run a sequence of operations and check the number of occupied CIDRs
|
||||||
|
// after each one.
|
||||||
|
clusterCIDRStr := "10.42.0.0/16"
|
||||||
|
operations := []struct {
|
||||||
|
cidrStr string
|
||||||
|
operation string
|
||||||
|
numOccupied int
|
||||||
|
}{
|
||||||
|
// Occupy 1 element: +1
|
||||||
|
{
|
||||||
|
cidrStr: "10.42.5.0/24",
|
||||||
|
operation: "occupy",
|
||||||
|
numOccupied: 1,
|
||||||
|
},
|
||||||
|
// Occupy 1 more element: +1
|
||||||
|
{
|
||||||
|
cidrStr: "10.42.9.0/24",
|
||||||
|
operation: "occupy",
|
||||||
|
numOccupied: 2,
|
||||||
|
},
|
||||||
|
// Occupy 4 elements overlapping with one from the above: +3
|
||||||
|
{
|
||||||
|
cidrStr: "10.42.8.0/22",
|
||||||
|
operation: "occupy",
|
||||||
|
numOccupied: 5,
|
||||||
|
},
|
||||||
|
// Occupy an already-occupied element: no change
|
||||||
|
{
|
||||||
|
cidrStr: "10.42.9.0/24",
|
||||||
|
operation: "occupy",
|
||||||
|
numOccupied: 5,
|
||||||
|
},
|
||||||
|
// Release an coccupied element: -1
|
||||||
|
{
|
||||||
|
cidrStr: "10.42.9.0/24",
|
||||||
|
operation: "release",
|
||||||
|
numOccupied: 4,
|
||||||
|
},
|
||||||
|
// Release an unoccupied element: no change
|
||||||
|
{
|
||||||
|
cidrStr: "10.42.9.0/24",
|
||||||
|
operation: "release",
|
||||||
|
numOccupied: 4,
|
||||||
|
},
|
||||||
|
// Release 4 elements, only one of which is occupied: -1
|
||||||
|
{
|
||||||
|
cidrStr: "10.42.4.0/22",
|
||||||
|
operation: "release",
|
||||||
|
numOccupied: 3,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
// Check that there are exactly that many allocatable CIDRs after all
|
||||||
|
// operations have been executed.
|
||||||
|
numAllocatable24s := (1 << 8) - 3
|
||||||
|
|
||||||
|
_, clusterCIDR, _ := utilnet.ParseCIDRSloppy(clusterCIDRStr)
|
||||||
|
a, err := NewMultiCIDRSet(clusterCIDR, 8)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Error allocating CIDRSet")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Execute the operations.
|
||||||
|
for _, op := range operations {
|
||||||
|
_, cidr, _ := utilnet.ParseCIDRSloppy(op.cidrStr)
|
||||||
|
switch op.operation {
|
||||||
|
case "occupy":
|
||||||
|
a.Occupy(cidr)
|
||||||
|
case "release":
|
||||||
|
a.Release(cidr)
|
||||||
|
default:
|
||||||
|
t.Fatalf("test error: unknown operation %v", op.operation)
|
||||||
|
}
|
||||||
|
if a.allocatedCIDRs != op.numOccupied {
|
||||||
|
t.Fatalf("CIDR %v Expected %d occupied CIDRS, got %d", cidr, op.numOccupied, a.allocatedCIDRs)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Make sure that we can allocate exactly `numAllocatable24s` elements.
|
||||||
|
for i := 0; i < numAllocatable24s; i++ {
|
||||||
|
_, err := allocateNext(a)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Expected to be able to allocate %d CIDRS, failed after %d", numAllocatable24s, i)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err = allocateNext(a)
|
||||||
|
if err == nil {
|
||||||
|
t.Fatalf("Expected to be able to allocate exactly %d CIDRS, got one more", numAllocatable24s)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestGetBitforCIDR(t *testing.T) {
|
||||||
|
cases := []struct {
|
||||||
|
clusterCIDRStr string
|
||||||
|
perNodeHostBits int
|
||||||
|
subNetCIDRStr string
|
||||||
|
expectedBit int
|
||||||
|
expectErr bool
|
||||||
|
description string
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
clusterCIDRStr: "127.0.0.0/8",
|
||||||
|
perNodeHostBits: 16,
|
||||||
|
subNetCIDRStr: "127.0.0.0/16",
|
||||||
|
expectedBit: 0,
|
||||||
|
expectErr: false,
|
||||||
|
description: "Get 0 Bit with IPv4",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
clusterCIDRStr: "be00::/8",
|
||||||
|
perNodeHostBits: 112,
|
||||||
|
subNetCIDRStr: "be00::/16",
|
||||||
|
expectedBit: 0,
|
||||||
|
expectErr: false,
|
||||||
|
description: "Get 0 Bit with IPv6",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
clusterCIDRStr: "127.0.0.0/8",
|
||||||
|
perNodeHostBits: 16,
|
||||||
|
subNetCIDRStr: "127.123.0.0/16",
|
||||||
|
expectedBit: 123,
|
||||||
|
expectErr: false,
|
||||||
|
description: "Get 123rd Bit with IPv4",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
clusterCIDRStr: "be00::/8",
|
||||||
|
perNodeHostBits: 112,
|
||||||
|
subNetCIDRStr: "beef::/16",
|
||||||
|
expectedBit: 0xef,
|
||||||
|
expectErr: false,
|
||||||
|
description: "Get xef Bit with IPv6",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
clusterCIDRStr: "127.0.0.0/8",
|
||||||
|
perNodeHostBits: 16,
|
||||||
|
subNetCIDRStr: "127.168.0.0/16",
|
||||||
|
expectedBit: 168,
|
||||||
|
expectErr: false,
|
||||||
|
description: "Get 168th Bit with IPv4",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
clusterCIDRStr: "be00::/8",
|
||||||
|
perNodeHostBits: 112,
|
||||||
|
subNetCIDRStr: "be68::/16",
|
||||||
|
expectedBit: 0x68,
|
||||||
|
expectErr: false,
|
||||||
|
description: "Get x68th Bit with IPv6",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
clusterCIDRStr: "127.0.0.0/8",
|
||||||
|
perNodeHostBits: 16,
|
||||||
|
subNetCIDRStr: "127.224.0.0/16",
|
||||||
|
expectedBit: 224,
|
||||||
|
expectErr: false,
|
||||||
|
description: "Get 224th Bit with IPv4",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
clusterCIDRStr: "be00::/8",
|
||||||
|
perNodeHostBits: 112,
|
||||||
|
subNetCIDRStr: "be24::/16",
|
||||||
|
expectedBit: 0x24,
|
||||||
|
expectErr: false,
|
||||||
|
description: "Get x24th Bit with IPv6",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
clusterCIDRStr: "192.168.0.0/16",
|
||||||
|
perNodeHostBits: 8,
|
||||||
|
subNetCIDRStr: "192.168.12.0/24",
|
||||||
|
expectedBit: 12,
|
||||||
|
expectErr: false,
|
||||||
|
description: "Get 12th Bit with IPv4",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
clusterCIDRStr: "beef::/16",
|
||||||
|
perNodeHostBits: 104,
|
||||||
|
subNetCIDRStr: "beef:1200::/24",
|
||||||
|
expectedBit: 0x12,
|
||||||
|
expectErr: false,
|
||||||
|
description: "Get x12th Bit with IPv6",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
clusterCIDRStr: "192.168.0.0/16",
|
||||||
|
perNodeHostBits: 8,
|
||||||
|
subNetCIDRStr: "192.168.151.0/24",
|
||||||
|
expectedBit: 151,
|
||||||
|
expectErr: false,
|
||||||
|
description: "Get 151st Bit with IPv4",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
clusterCIDRStr: "beef::/16",
|
||||||
|
perNodeHostBits: 104,
|
||||||
|
subNetCIDRStr: "beef:9700::/24",
|
||||||
|
expectedBit: 0x97,
|
||||||
|
expectErr: false,
|
||||||
|
description: "Get x97st Bit with IPv6",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
clusterCIDRStr: "192.168.0.0/16",
|
||||||
|
perNodeHostBits: 8,
|
||||||
|
subNetCIDRStr: "127.168.224.0/24",
|
||||||
|
expectErr: true,
|
||||||
|
description: "Get error with IPv4",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
clusterCIDRStr: "beef::/16",
|
||||||
|
perNodeHostBits: 104,
|
||||||
|
subNetCIDRStr: "2001:db00::/24",
|
||||||
|
expectErr: true,
|
||||||
|
description: "Get error with IPv6",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tc := range cases {
|
||||||
|
_, clusterCIDR, err := utilnet.ParseCIDRSloppy(tc.clusterCIDRStr)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("unexpected error: %v for %v", err, tc.description)
|
||||||
|
}
|
||||||
|
|
||||||
|
cs, err := NewMultiCIDRSet(clusterCIDR, tc.perNodeHostBits)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Error allocating CIDRSet for %v", tc.description)
|
||||||
|
}
|
||||||
|
_, subnetCIDR, err := utilnet.ParseCIDRSloppy(tc.subNetCIDRStr)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("unexpected error: %v for %v", err, tc.description)
|
||||||
|
}
|
||||||
|
|
||||||
|
got, err := cs.getIndexForCIDR(subnetCIDR)
|
||||||
|
if err == nil && tc.expectErr {
|
||||||
|
klog.Errorf("expected error but got null for %v", tc.description)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if err != nil && !tc.expectErr {
|
||||||
|
klog.Errorf("unexpected error: %v for %v", err, tc.description)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if got != tc.expectedBit {
|
||||||
|
klog.Errorf("expected %v, but got %v for %v", tc.expectedBit, got, tc.description)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestCIDRSetv6(t *testing.T) {
|
||||||
|
cases := []struct {
|
||||||
|
clusterCIDRStr string
|
||||||
|
perNodeHostBits int
|
||||||
|
expectedCIDR string
|
||||||
|
expectedCIDR2 string
|
||||||
|
expectErr bool
|
||||||
|
description string
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
clusterCIDRStr: "127.0.0.0/8",
|
||||||
|
perNodeHostBits: 0,
|
||||||
|
expectErr: false,
|
||||||
|
expectedCIDR: "127.0.0.0/32",
|
||||||
|
expectedCIDR2: "127.0.0.1/32",
|
||||||
|
description: "Max cluster subnet size with IPv4",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
clusterCIDRStr: "beef:1234::/32",
|
||||||
|
perNodeHostBits: 79,
|
||||||
|
expectErr: true,
|
||||||
|
description: "Max cluster subnet size with IPv6",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
clusterCIDRStr: "2001:beef:1234:369b::/60",
|
||||||
|
perNodeHostBits: 64,
|
||||||
|
expectedCIDR: "2001:beef:1234:3690::/64",
|
||||||
|
expectedCIDR2: "2001:beef:1234:3691::/64",
|
||||||
|
expectErr: false,
|
||||||
|
description: "Allocate a few IPv6",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for _, tc := range cases {
|
||||||
|
t.Run(tc.description, func(t *testing.T) {
|
||||||
|
_, clusterCIDR, _ := utilnet.ParseCIDRSloppy(tc.clusterCIDRStr)
|
||||||
|
a, err := NewMultiCIDRSet(clusterCIDR, tc.perNodeHostBits)
|
||||||
|
if gotErr := err != nil; gotErr != tc.expectErr {
|
||||||
|
t.Fatalf("NewMultiCIDRSet(%v, %v) = %v, %v; gotErr = %t, want %t", clusterCIDR, tc.perNodeHostBits, a, err, gotErr, tc.expectErr)
|
||||||
|
}
|
||||||
|
if a == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
p, err := allocateNext(a)
|
||||||
|
if err == nil && tc.expectErr {
|
||||||
|
t.Errorf("allocateNext(a) = nil, want error")
|
||||||
|
}
|
||||||
|
if err != nil && !tc.expectErr {
|
||||||
|
t.Errorf("allocateNext(a) = %+v, want no error", err)
|
||||||
|
}
|
||||||
|
if !tc.expectErr {
|
||||||
|
if p != nil && p.String() != tc.expectedCIDR {
|
||||||
|
t.Fatalf("allocateNext(a) got %+v, want %+v", p.String(), tc.expectedCIDR)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
p2, err := allocateNext(a)
|
||||||
|
if err == nil && tc.expectErr {
|
||||||
|
t.Errorf("allocateNext(a) = nil, want error")
|
||||||
|
}
|
||||||
|
if err != nil && !tc.expectErr {
|
||||||
|
t.Errorf("allocateNext(a) = %+v, want no error", err)
|
||||||
|
}
|
||||||
|
if !tc.expectErr {
|
||||||
|
if p2 != nil && p2.String() != tc.expectedCIDR2 {
|
||||||
|
t.Fatalf("allocateNext(a) got %+v, want %+v", p2.String(), tc.expectedCIDR)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestMultiCIDRSetMetrics(t *testing.T) {
|
||||||
|
cidr := "10.0.0.0/16"
|
||||||
|
_, clusterCIDR, _ := utilnet.ParseCIDRSloppy(cidr)
|
||||||
|
// We have 256 free cidrs
|
||||||
|
a, err := NewMultiCIDRSet(clusterCIDR, 8)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("unexpected error creating MultiCIDRSet: %v", err)
|
||||||
|
}
|
||||||
|
clearMetrics(map[string]string{"clusterCIDR": cidr})
|
||||||
|
|
||||||
|
// Allocate next all.
|
||||||
|
for i := 1; i <= 256; i++ {
|
||||||
|
_, err := allocateNext(a)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("unexpected error allocating a new CIDR: %v", err)
|
||||||
|
}
|
||||||
|
em := testMetrics{
|
||||||
|
usage: float64(i) / float64(256),
|
||||||
|
allocs: float64(i),
|
||||||
|
releases: 0,
|
||||||
|
allocTries: 0,
|
||||||
|
}
|
||||||
|
expectMetrics(t, cidr, em)
|
||||||
|
}
|
||||||
|
// Release all CIDRs.
|
||||||
|
a.Release(clusterCIDR)
|
||||||
|
em := testMetrics{
|
||||||
|
usage: 0,
|
||||||
|
allocs: 256,
|
||||||
|
releases: 256,
|
||||||
|
allocTries: 0,
|
||||||
|
}
|
||||||
|
expectMetrics(t, cidr, em)
|
||||||
|
|
||||||
|
// Allocate all CIDRs.
|
||||||
|
a.Occupy(clusterCIDR)
|
||||||
|
em = testMetrics{
|
||||||
|
usage: 1,
|
||||||
|
allocs: 512,
|
||||||
|
releases: 256,
|
||||||
|
allocTries: 0,
|
||||||
|
}
|
||||||
|
expectMetrics(t, cidr, em)
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestMultiCIDRSetMetricsHistogram(t *testing.T) {
|
||||||
|
cidr := "10.0.0.0/16"
|
||||||
|
_, clusterCIDR, _ := utilnet.ParseCIDRSloppy(cidr)
|
||||||
|
// We have 256 free cidrs.
|
||||||
|
a, err := NewMultiCIDRSet(clusterCIDR, 8)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("unexpected error creating MultiCIDRSet: %v", err)
|
||||||
|
}
|
||||||
|
clearMetrics(map[string]string{"clusterCIDR": cidr})
|
||||||
|
|
||||||
|
// Allocate half of the range.
|
||||||
|
// Occupy does not update the nextCandidate.
|
||||||
|
_, halfClusterCIDR, _ := utilnet.ParseCIDRSloppy("10.0.0.0/17")
|
||||||
|
a.Occupy(halfClusterCIDR)
|
||||||
|
em := testMetrics{
|
||||||
|
usage: 0.5,
|
||||||
|
allocs: 128,
|
||||||
|
releases: 0,
|
||||||
|
}
|
||||||
|
expectMetrics(t, cidr, em)
|
||||||
|
// Allocate next should iterate until the next free cidr
|
||||||
|
// that is exactly the same number we allocated previously.
|
||||||
|
_, err = allocateNext(a)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("unexpected error allocating a new CIDR: %v", err)
|
||||||
|
}
|
||||||
|
em = testMetrics{
|
||||||
|
usage: float64(129) / float64(256),
|
||||||
|
allocs: 129,
|
||||||
|
releases: 0,
|
||||||
|
}
|
||||||
|
expectMetrics(t, cidr, em)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestMultiCIDRSetMetricsDual(t *testing.T) {
|
||||||
|
// create IPv4 cidrSet.
|
||||||
|
cidrIPv4 := "10.0.0.0/16"
|
||||||
|
_, clusterCIDRv4, _ := utilnet.ParseCIDRSloppy(cidrIPv4)
|
||||||
|
a, err := NewMultiCIDRSet(clusterCIDRv4, 8)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("unexpected error creating MultiCIDRSet: %v", err)
|
||||||
|
}
|
||||||
|
clearMetrics(map[string]string{"clusterCIDR": cidrIPv4})
|
||||||
|
// create IPv6 cidrSet.
|
||||||
|
cidrIPv6 := "2001:db8::/48"
|
||||||
|
_, clusterCIDRv6, _ := utilnet.ParseCIDRSloppy(cidrIPv6)
|
||||||
|
b, err := NewMultiCIDRSet(clusterCIDRv6, 64)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("unexpected error creating MultiCIDRSet: %v", err)
|
||||||
|
}
|
||||||
|
clearMetrics(map[string]string{"clusterCIDR": cidrIPv6})
|
||||||
|
// Allocate all.
|
||||||
|
a.Occupy(clusterCIDRv4)
|
||||||
|
em := testMetrics{
|
||||||
|
usage: 1,
|
||||||
|
allocs: 256,
|
||||||
|
releases: 0,
|
||||||
|
allocTries: 0,
|
||||||
|
}
|
||||||
|
expectMetrics(t, cidrIPv4, em)
|
||||||
|
|
||||||
|
b.Occupy(clusterCIDRv6)
|
||||||
|
em = testMetrics{
|
||||||
|
usage: 1,
|
||||||
|
allocs: 65536,
|
||||||
|
releases: 0,
|
||||||
|
allocTries: 0,
|
||||||
|
}
|
||||||
|
expectMetrics(t, cidrIPv6, em)
|
||||||
|
|
||||||
|
// Release all.
|
||||||
|
a.Release(clusterCIDRv4)
|
||||||
|
em = testMetrics{
|
||||||
|
usage: 0,
|
||||||
|
allocs: 256,
|
||||||
|
releases: 256,
|
||||||
|
allocTries: 0,
|
||||||
|
}
|
||||||
|
expectMetrics(t, cidrIPv4, em)
|
||||||
|
b.Release(clusterCIDRv6)
|
||||||
|
em = testMetrics{
|
||||||
|
usage: 0,
|
||||||
|
allocs: 65536,
|
||||||
|
releases: 65536,
|
||||||
|
allocTries: 0,
|
||||||
|
}
|
||||||
|
expectMetrics(t, cidrIPv6, em)
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
// Metrics helpers.

// clearMetrics deletes the metric children carrying the given label set so
// that each test observes counters starting from zero.
func clearMetrics(labels map[string]string) {
	cidrSetAllocations.Delete(labels)
	cidrSetReleases.Delete(labels)
	cidrSetUsage.Delete(labels)
	cidrSetAllocationTriesPerRequest.Delete(labels)
}
|
||||||
|
|
||||||
|
// testMetrics mirrors the four cidrset metrics so that an expected/observed
// pair can be compared with a single struct equality check.
type testMetrics struct {
	usage      float64 // cidrSetUsage gauge value
	allocs     float64 // cidrSetAllocations counter value
	releases   float64 // cidrSetReleases counter value
	allocTries float64 // cidrSetAllocationTriesPerRequest histogram value
}
|
||||||
|
|
||||||
|
func expectMetrics(t *testing.T, label string, em testMetrics) {
|
||||||
|
var m testMetrics
|
||||||
|
var err error
|
||||||
|
m.usage, err = testutil.GetGaugeMetricValue(cidrSetUsage.WithLabelValues(label))
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("failed to get %s value, err: %v", cidrSetUsage.Name, err)
|
||||||
|
}
|
||||||
|
m.allocs, err = testutil.GetCounterMetricValue(cidrSetAllocations.WithLabelValues(label))
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("failed to get %s value, err: %v", cidrSetAllocations.Name, err)
|
||||||
|
}
|
||||||
|
m.releases, err = testutil.GetCounterMetricValue(cidrSetReleases.WithLabelValues(label))
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("failed to get %s value, err: %v", cidrSetReleases.Name, err)
|
||||||
|
}
|
||||||
|
m.allocTries, err = testutil.GetHistogramMetricValue(cidrSetAllocationTriesPerRequest.WithLabelValues(label))
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("failed to get %s value, err: %v", cidrSetAllocationTriesPerRequest.Name, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if m != em {
|
||||||
|
t.Fatalf("metrics error: expected %v, received %v", em, m)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Benchmarks
|
||||||
|
func benchmarkAllocateAllIPv6(cidr string, perNodeHostBits int, b *testing.B) {
|
||||||
|
_, clusterCIDR, _ := utilnet.ParseCIDRSloppy(cidr)
|
||||||
|
a, _ := NewMultiCIDRSet(clusterCIDR, perNodeHostBits)
|
||||||
|
for n := 0; n < b.N; n++ {
|
||||||
|
// Allocate the whole range + 1.
|
||||||
|
for i := 0; i <= a.MaxCIDRs; i++ {
|
||||||
|
allocateNext(a)
|
||||||
|
}
|
||||||
|
// Release all.
|
||||||
|
a.Release(clusterCIDR)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func BenchmarkAllocateAll_48_52(b *testing.B) { benchmarkAllocateAllIPv6("2001:db8::/48", 52, b) }
|
||||||
|
func BenchmarkAllocateAll_48_56(b *testing.B) { benchmarkAllocateAllIPv6("2001:db8::/48", 56, b) }
|
||||||
|
|
||||||
|
func BenchmarkAllocateAll_48_60(b *testing.B) { benchmarkAllocateAllIPv6("2001:db8::/48", 60, b) }
|
||||||
|
func BenchmarkAllocateAll_48_64(b *testing.B) { benchmarkAllocateAllIPv6("2001:db8::/48", 64, b) }
|
||||||
|
|
||||||
|
func BenchmarkAllocateAll_64_68(b *testing.B) { benchmarkAllocateAllIPv6("2001:db8::/64", 68, b) }
|
||||||
|
|
||||||
|
func BenchmarkAllocateAll_64_72(b *testing.B) { benchmarkAllocateAllIPv6("2001:db8::/64", 72, b) }
|
||||||
|
func BenchmarkAllocateAll_64_76(b *testing.B) { benchmarkAllocateAllIPv6("2001:db8::/64", 76, b) }
|
||||||
|
|
||||||
|
func BenchmarkAllocateAll_64_80(b *testing.B) { benchmarkAllocateAllIPv6("2001:db8::/64", 80, b) }
|
@ -41,13 +41,6 @@ import (
|
|||||||
controllerutil "k8s.io/kubernetes/pkg/controller/util/node"
|
controllerutil "k8s.io/kubernetes/pkg/controller/util/node"
|
||||||
)
|
)
|
||||||
|
|
||||||
// cidrs are reserved, then node resource is patched with them
|
|
||||||
// this type holds the reservation info for a node
|
|
||||||
type nodeReservedCIDRs struct {
|
|
||||||
allocatedCIDRs []*net.IPNet
|
|
||||||
nodeName string
|
|
||||||
}
|
|
||||||
|
|
||||||
type rangeAllocator struct {
|
type rangeAllocator struct {
|
||||||
client clientset.Interface
|
client clientset.Interface
|
||||||
// cluster cidrs as passed in during controller creation
|
// cluster cidrs as passed in during controller creation
|
||||||
@ -333,7 +326,7 @@ func (r *rangeAllocator) updateCIDRsAllocation(data nodeReservedCIDRs) error {
|
|||||||
var err error
|
var err error
|
||||||
var node *v1.Node
|
var node *v1.Node
|
||||||
defer r.removeNodeFromProcessing(data.nodeName)
|
defer r.removeNodeFromProcessing(data.nodeName)
|
||||||
cidrsString := cidrsAsString(data.allocatedCIDRs)
|
cidrsString := ipnetToStringList(data.allocatedCIDRs)
|
||||||
node, err = r.nodeLister.Get(data.nodeName)
|
node, err = r.nodeLister.Get(data.nodeName)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
klog.Errorf("Failed while getting node %v for updating Node.Spec.PodCIDRs: %v", data.nodeName, err)
|
klog.Errorf("Failed while getting node %v for updating Node.Spec.PodCIDRs: %v", data.nodeName, err)
|
||||||
@ -391,12 +384,3 @@ func (r *rangeAllocator) updateCIDRsAllocation(data nodeReservedCIDRs) error {
|
|||||||
}
|
}
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
// converts a slice of cidrs into <c-1>,<c-2>,<c-n>
|
|
||||||
func cidrsAsString(inCIDRs []*net.IPNet) []string {
|
|
||||||
outCIDRs := make([]string, len(inCIDRs))
|
|
||||||
for idx, inCIDR := range inCIDRs {
|
|
||||||
outCIDRs[idx] = inCIDR.String()
|
|
||||||
}
|
|
||||||
return outCIDRs
|
|
||||||
}
|
|
||||||
|
@ -25,40 +25,12 @@ import (
|
|||||||
v1 "k8s.io/api/core/v1"
|
v1 "k8s.io/api/core/v1"
|
||||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
"k8s.io/apimachinery/pkg/util/wait"
|
"k8s.io/apimachinery/pkg/util/wait"
|
||||||
"k8s.io/client-go/informers"
|
|
||||||
coreinformers "k8s.io/client-go/informers/core/v1"
|
|
||||||
"k8s.io/client-go/kubernetes/fake"
|
"k8s.io/client-go/kubernetes/fake"
|
||||||
"k8s.io/kubernetes/pkg/controller"
|
"k8s.io/kubernetes/pkg/controller/nodeipam/ipam/test"
|
||||||
"k8s.io/kubernetes/pkg/controller/testutil"
|
"k8s.io/kubernetes/pkg/controller/testutil"
|
||||||
netutils "k8s.io/utils/net"
|
netutils "k8s.io/utils/net"
|
||||||
)
|
)
|
||||||
|
|
||||||
const testNodePollInterval = 10 * time.Millisecond
|
|
||||||
|
|
||||||
var alwaysReady = func() bool { return true }
|
|
||||||
|
|
||||||
func waitForUpdatedNodeWithTimeout(nodeHandler *testutil.FakeNodeHandler, number int, timeout time.Duration) error {
|
|
||||||
return wait.Poll(nodePollInterval, timeout, func() (bool, error) {
|
|
||||||
if len(nodeHandler.GetUpdatedNodesCopy()) >= number {
|
|
||||||
return true, nil
|
|
||||||
}
|
|
||||||
return false, nil
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// Creates a fakeNodeInformer using the provided fakeNodeHandler.
|
|
||||||
func getFakeNodeInformer(fakeNodeHandler *testutil.FakeNodeHandler) coreinformers.NodeInformer {
|
|
||||||
fakeClient := &fake.Clientset{}
|
|
||||||
fakeInformerFactory := informers.NewSharedInformerFactory(fakeClient, controller.NoResyncPeriodFunc())
|
|
||||||
fakeNodeInformer := fakeInformerFactory.Core().V1().Nodes()
|
|
||||||
|
|
||||||
for _, node := range fakeNodeHandler.Existing {
|
|
||||||
fakeNodeInformer.Informer().GetStore().Add(node)
|
|
||||||
}
|
|
||||||
|
|
||||||
return fakeNodeInformer
|
|
||||||
}
|
|
||||||
|
|
||||||
type testCase struct {
|
type testCase struct {
|
||||||
description string
|
description string
|
||||||
fakeNodeHandler *testutil.FakeNodeHandler
|
fakeNodeHandler *testutil.FakeNodeHandler
|
||||||
@ -305,7 +277,7 @@ func TestOccupyPreExistingCIDR(t *testing.T) {
|
|||||||
for _, tc := range testCases {
|
for _, tc := range testCases {
|
||||||
t.Run(tc.description, func(t *testing.T) {
|
t.Run(tc.description, func(t *testing.T) {
|
||||||
// Initialize the range allocator.
|
// Initialize the range allocator.
|
||||||
fakeNodeInformer := getFakeNodeInformer(tc.fakeNodeHandler)
|
fakeNodeInformer := test.FakeNodeInformer(tc.fakeNodeHandler)
|
||||||
nodeList, _ := tc.fakeNodeHandler.List(context.TODO(), metav1.ListOptions{})
|
nodeList, _ := tc.fakeNodeHandler.List(context.TODO(), metav1.ListOptions{})
|
||||||
_, err := NewCIDRRangeAllocator(tc.fakeNodeHandler, fakeNodeInformer, tc.allocatorParams, nodeList)
|
_, err := NewCIDRRangeAllocator(tc.fakeNodeHandler, fakeNodeInformer, tc.allocatorParams, nodeList)
|
||||||
if err == nil && tc.ctrlCreateFail {
|
if err == nil && tc.ctrlCreateFail {
|
||||||
@ -321,7 +293,7 @@ func TestOccupyPreExistingCIDR(t *testing.T) {
|
|||||||
func TestAllocateOrOccupyCIDRSuccess(t *testing.T) {
|
func TestAllocateOrOccupyCIDRSuccess(t *testing.T) {
|
||||||
// Non-parallel test (overrides global var)
|
// Non-parallel test (overrides global var)
|
||||||
oldNodePollInterval := nodePollInterval
|
oldNodePollInterval := nodePollInterval
|
||||||
nodePollInterval = testNodePollInterval
|
nodePollInterval = test.NodePollInterval
|
||||||
defer func() {
|
defer func() {
|
||||||
nodePollInterval = oldNodePollInterval
|
nodePollInterval = oldNodePollInterval
|
||||||
}()
|
}()
|
||||||
@ -537,7 +509,7 @@ func TestAllocateOrOccupyCIDRSuccess(t *testing.T) {
|
|||||||
|
|
||||||
// test function
|
// test function
|
||||||
testFunc := func(tc testCase) {
|
testFunc := func(tc testCase) {
|
||||||
fakeNodeInformer := getFakeNodeInformer(tc.fakeNodeHandler)
|
fakeNodeInformer := test.FakeNodeInformer(tc.fakeNodeHandler)
|
||||||
nodeList, _ := tc.fakeNodeHandler.List(context.TODO(), metav1.ListOptions{})
|
nodeList, _ := tc.fakeNodeHandler.List(context.TODO(), metav1.ListOptions{})
|
||||||
// Initialize the range allocator.
|
// Initialize the range allocator.
|
||||||
allocator, err := NewCIDRRangeAllocator(tc.fakeNodeHandler, fakeNodeInformer, tc.allocatorParams, nodeList)
|
allocator, err := NewCIDRRangeAllocator(tc.fakeNodeHandler, fakeNodeInformer, tc.allocatorParams, nodeList)
|
||||||
@ -550,7 +522,7 @@ func TestAllocateOrOccupyCIDRSuccess(t *testing.T) {
|
|||||||
t.Logf("%v: found non-default implementation of CIDRAllocator, skipping white-box test...", tc.description)
|
t.Logf("%v: found non-default implementation of CIDRAllocator, skipping white-box test...", tc.description)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
rangeAllocator.nodesSynced = alwaysReady
|
rangeAllocator.nodesSynced = test.AlwaysReady
|
||||||
rangeAllocator.recorder = testutil.NewFakeRecorder()
|
rangeAllocator.recorder = testutil.NewFakeRecorder()
|
||||||
go allocator.Run(wait.NeverStop)
|
go allocator.Run(wait.NeverStop)
|
||||||
|
|
||||||
@ -580,7 +552,7 @@ func TestAllocateOrOccupyCIDRSuccess(t *testing.T) {
|
|||||||
if updateCount != 1 {
|
if updateCount != 1 {
|
||||||
t.Fatalf("test error: all tests must update exactly one node")
|
t.Fatalf("test error: all tests must update exactly one node")
|
||||||
}
|
}
|
||||||
if err := waitForUpdatedNodeWithTimeout(tc.fakeNodeHandler, updateCount, wait.ForeverTestTimeout); err != nil {
|
if err := test.WaitForUpdatedNodeWithTimeout(tc.fakeNodeHandler, updateCount, wait.ForeverTestTimeout); err != nil {
|
||||||
t.Fatalf("%v: timeout while waiting for Node update: %v", tc.description, err)
|
t.Fatalf("%v: timeout while waiting for Node update: %v", tc.description, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -639,7 +611,7 @@ func TestAllocateOrOccupyCIDRFailure(t *testing.T) {
|
|||||||
|
|
||||||
testFunc := func(tc testCase) {
|
testFunc := func(tc testCase) {
|
||||||
// Initialize the range allocator.
|
// Initialize the range allocator.
|
||||||
allocator, err := NewCIDRRangeAllocator(tc.fakeNodeHandler, getFakeNodeInformer(tc.fakeNodeHandler), tc.allocatorParams, nil)
|
allocator, err := NewCIDRRangeAllocator(tc.fakeNodeHandler, test.FakeNodeInformer(tc.fakeNodeHandler), tc.allocatorParams, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Logf("%v: failed to create CIDRRangeAllocator with error %v", tc.description, err)
|
t.Logf("%v: failed to create CIDRRangeAllocator with error %v", tc.description, err)
|
||||||
}
|
}
|
||||||
@ -648,7 +620,7 @@ func TestAllocateOrOccupyCIDRFailure(t *testing.T) {
|
|||||||
t.Logf("%v: found non-default implementation of CIDRAllocator, skipping white-box test...", tc.description)
|
t.Logf("%v: found non-default implementation of CIDRAllocator, skipping white-box test...", tc.description)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
rangeAllocator.nodesSynced = alwaysReady
|
rangeAllocator.nodesSynced = test.AlwaysReady
|
||||||
rangeAllocator.recorder = testutil.NewFakeRecorder()
|
rangeAllocator.recorder = testutil.NewFakeRecorder()
|
||||||
go allocator.Run(wait.NeverStop)
|
go allocator.Run(wait.NeverStop)
|
||||||
|
|
||||||
@ -708,7 +680,7 @@ type releaseTestCase struct {
|
|||||||
func TestReleaseCIDRSuccess(t *testing.T) {
|
func TestReleaseCIDRSuccess(t *testing.T) {
|
||||||
// Non-parallel test (overrides global var)
|
// Non-parallel test (overrides global var)
|
||||||
oldNodePollInterval := nodePollInterval
|
oldNodePollInterval := nodePollInterval
|
||||||
nodePollInterval = testNodePollInterval
|
nodePollInterval = test.NodePollInterval
|
||||||
defer func() {
|
defer func() {
|
||||||
nodePollInterval = oldNodePollInterval
|
nodePollInterval = oldNodePollInterval
|
||||||
}()
|
}()
|
||||||
@ -784,13 +756,13 @@ func TestReleaseCIDRSuccess(t *testing.T) {
|
|||||||
|
|
||||||
testFunc := func(tc releaseTestCase) {
|
testFunc := func(tc releaseTestCase) {
|
||||||
// Initialize the range allocator.
|
// Initialize the range allocator.
|
||||||
allocator, _ := NewCIDRRangeAllocator(tc.fakeNodeHandler, getFakeNodeInformer(tc.fakeNodeHandler), tc.allocatorParams, nil)
|
allocator, _ := NewCIDRRangeAllocator(tc.fakeNodeHandler, test.FakeNodeInformer(tc.fakeNodeHandler), tc.allocatorParams, nil)
|
||||||
rangeAllocator, ok := allocator.(*rangeAllocator)
|
rangeAllocator, ok := allocator.(*rangeAllocator)
|
||||||
if !ok {
|
if !ok {
|
||||||
t.Logf("%v: found non-default implementation of CIDRAllocator, skipping white-box test...", tc.description)
|
t.Logf("%v: found non-default implementation of CIDRAllocator, skipping white-box test...", tc.description)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
rangeAllocator.nodesSynced = alwaysReady
|
rangeAllocator.nodesSynced = test.AlwaysReady
|
||||||
rangeAllocator.recorder = testutil.NewFakeRecorder()
|
rangeAllocator.recorder = testutil.NewFakeRecorder()
|
||||||
go allocator.Run(wait.NeverStop)
|
go allocator.Run(wait.NeverStop)
|
||||||
|
|
||||||
@ -813,7 +785,7 @@ func TestReleaseCIDRSuccess(t *testing.T) {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("%v: unexpected error in AllocateOrOccupyCIDR: %v", tc.description, err)
|
t.Fatalf("%v: unexpected error in AllocateOrOccupyCIDR: %v", tc.description, err)
|
||||||
}
|
}
|
||||||
if err := waitForUpdatedNodeWithTimeout(tc.fakeNodeHandler, 1, wait.ForeverTestTimeout); err != nil {
|
if err := test.WaitForUpdatedNodeWithTimeout(tc.fakeNodeHandler, 1, wait.ForeverTestTimeout); err != nil {
|
||||||
t.Fatalf("%v: timeout while waiting for Node update: %v", tc.description, err)
|
t.Fatalf("%v: timeout while waiting for Node update: %v", tc.description, err)
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
@ -841,7 +813,7 @@ func TestReleaseCIDRSuccess(t *testing.T) {
|
|||||||
if err = allocator.AllocateOrOccupyCIDR(tc.fakeNodeHandler.Existing[0]); err != nil {
|
if err = allocator.AllocateOrOccupyCIDR(tc.fakeNodeHandler.Existing[0]); err != nil {
|
||||||
t.Fatalf("%v: unexpected error in AllocateOrOccupyCIDR: %v", tc.description, err)
|
t.Fatalf("%v: unexpected error in AllocateOrOccupyCIDR: %v", tc.description, err)
|
||||||
}
|
}
|
||||||
if err := waitForUpdatedNodeWithTimeout(tc.fakeNodeHandler, 1, wait.ForeverTestTimeout); err != nil {
|
if err := test.WaitForUpdatedNodeWithTimeout(tc.fakeNodeHandler, 1, wait.ForeverTestTimeout); err != nil {
|
||||||
t.Fatalf("%v: timeout while waiting for Node update: %v", tc.description, err)
|
t.Fatalf("%v: timeout while waiting for Node update: %v", tc.description, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -18,10 +18,21 @@ package test
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"net"
|
"net"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"k8s.io/apimachinery/pkg/util/wait"
|
||||||
|
"k8s.io/client-go/informers"
|
||||||
|
coreinformers "k8s.io/client-go/informers/core/v1"
|
||||||
|
"k8s.io/client-go/kubernetes/fake"
|
||||||
|
"k8s.io/kubernetes/pkg/controller"
|
||||||
|
"k8s.io/kubernetes/pkg/controller/testutil"
|
||||||
netutils "k8s.io/utils/net"
|
netutils "k8s.io/utils/net"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
const NodePollInterval = 10 * time.Millisecond
|
||||||
|
|
||||||
|
var AlwaysReady = func() bool { return true }
|
||||||
|
|
||||||
// MustParseCIDR returns the CIDR range parsed from s or panics if the string
|
// MustParseCIDR returns the CIDR range parsed from s or panics if the string
|
||||||
// cannot be parsed.
|
// cannot be parsed.
|
||||||
func MustParseCIDR(s string) *net.IPNet {
|
func MustParseCIDR(s string) *net.IPNet {
|
||||||
@ -31,3 +42,25 @@ func MustParseCIDR(s string) *net.IPNet {
|
|||||||
}
|
}
|
||||||
return ret
|
return ret
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// FakeNodeInformer creates a fakeNodeInformer using the provided fakeNodeHandler.
|
||||||
|
func FakeNodeInformer(fakeNodeHandler *testutil.FakeNodeHandler) coreinformers.NodeInformer {
|
||||||
|
fakeClient := &fake.Clientset{}
|
||||||
|
fakeInformerFactory := informers.NewSharedInformerFactory(fakeClient, controller.NoResyncPeriodFunc())
|
||||||
|
fakeNodeInformer := fakeInformerFactory.Core().V1().Nodes()
|
||||||
|
|
||||||
|
for _, node := range fakeNodeHandler.Existing {
|
||||||
|
fakeNodeInformer.Informer().GetStore().Add(node)
|
||||||
|
}
|
||||||
|
|
||||||
|
return fakeNodeInformer
|
||||||
|
}
|
||||||
|
|
||||||
|
func WaitForUpdatedNodeWithTimeout(nodeHandler *testutil.FakeNodeHandler, number int, timeout time.Duration) error {
|
||||||
|
return wait.Poll(NodePollInterval, timeout, func() (bool, error) {
|
||||||
|
if len(nodeHandler.GetUpdatedNodesCopy()) >= number {
|
||||||
|
return true, nil
|
||||||
|
}
|
||||||
|
return false, nil
|
||||||
|
})
|
||||||
|
}
|
||||||
|
@ -20,20 +20,18 @@ import (
|
|||||||
"net"
|
"net"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"k8s.io/klog/v2"
|
|
||||||
|
|
||||||
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
|
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
|
||||||
|
coreinformers "k8s.io/client-go/informers/core/v1"
|
||||||
|
networkinginformers "k8s.io/client-go/informers/networking/v1alpha1"
|
||||||
|
clientset "k8s.io/client-go/kubernetes"
|
||||||
v1core "k8s.io/client-go/kubernetes/typed/core/v1"
|
v1core "k8s.io/client-go/kubernetes/typed/core/v1"
|
||||||
|
corelisters "k8s.io/client-go/listers/core/v1"
|
||||||
"k8s.io/client-go/tools/cache"
|
"k8s.io/client-go/tools/cache"
|
||||||
"k8s.io/client-go/tools/record"
|
"k8s.io/client-go/tools/record"
|
||||||
|
|
||||||
coreinformers "k8s.io/client-go/informers/core/v1"
|
|
||||||
clientset "k8s.io/client-go/kubernetes"
|
|
||||||
corelisters "k8s.io/client-go/listers/core/v1"
|
|
||||||
cloudprovider "k8s.io/cloud-provider"
|
cloudprovider "k8s.io/cloud-provider"
|
||||||
controllersmetrics "k8s.io/component-base/metrics/prometheus/controllers"
|
controllersmetrics "k8s.io/component-base/metrics/prometheus/controllers"
|
||||||
"k8s.io/component-base/metrics/prometheus/ratelimiter"
|
"k8s.io/component-base/metrics/prometheus/ratelimiter"
|
||||||
|
"k8s.io/klog/v2"
|
||||||
"k8s.io/kubernetes/pkg/controller/nodeipam/ipam"
|
"k8s.io/kubernetes/pkg/controller/nodeipam/ipam"
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -74,6 +72,7 @@ type Controller struct {
|
|||||||
// currently, this should be handled as a fatal error.
|
// currently, this should be handled as a fatal error.
|
||||||
func NewNodeIpamController(
|
func NewNodeIpamController(
|
||||||
nodeInformer coreinformers.NodeInformer,
|
nodeInformer coreinformers.NodeInformer,
|
||||||
|
clusterCIDRInformer networkinginformers.ClusterCIDRInformer,
|
||||||
cloud cloudprovider.Interface,
|
cloud cloudprovider.Interface,
|
||||||
kubeClient clientset.Interface,
|
kubeClient clientset.Interface,
|
||||||
clusterCIDRs []*net.IPNet,
|
clusterCIDRs []*net.IPNet,
|
||||||
@ -136,7 +135,7 @@ func NewNodeIpamController(
|
|||||||
NodeCIDRMaskSizes: nodeCIDRMaskSizes,
|
NodeCIDRMaskSizes: nodeCIDRMaskSizes,
|
||||||
}
|
}
|
||||||
|
|
||||||
ic.cidrAllocator, err = ipam.New(kubeClient, cloud, nodeInformer, ic.allocatorType, allocatorParams)
|
ic.cidrAllocator, err = ipam.New(kubeClient, cloud, nodeInformer, clusterCIDRInformer, ic.allocatorType, allocatorParams)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
@ -48,6 +48,7 @@ func newTestNodeIpamController(clusterCIDR []*net.IPNet, serviceCIDR *net.IPNet,
|
|||||||
fakeClient := &fake.Clientset{}
|
fakeClient := &fake.Clientset{}
|
||||||
fakeInformerFactory := informers.NewSharedInformerFactory(fakeClient, controller.NoResyncPeriodFunc())
|
fakeInformerFactory := informers.NewSharedInformerFactory(fakeClient, controller.NoResyncPeriodFunc())
|
||||||
fakeNodeInformer := fakeInformerFactory.Core().V1().Nodes()
|
fakeNodeInformer := fakeInformerFactory.Core().V1().Nodes()
|
||||||
|
fakeClusterCIDRInformer := fakeInformerFactory.Networking().V1alpha1().ClusterCIDRs()
|
||||||
|
|
||||||
for _, node := range fakeNodeHandler.Existing {
|
for _, node := range fakeNodeHandler.Existing {
|
||||||
fakeNodeInformer.Informer().GetStore().Add(node)
|
fakeNodeInformer.Informer().GetStore().Add(node)
|
||||||
@ -55,7 +56,7 @@ func newTestNodeIpamController(clusterCIDR []*net.IPNet, serviceCIDR *net.IPNet,
|
|||||||
|
|
||||||
fakeGCE := gce.NewFakeGCECloud(gce.DefaultTestClusterValues())
|
fakeGCE := gce.NewFakeGCECloud(gce.DefaultTestClusterValues())
|
||||||
return NewNodeIpamController(
|
return NewNodeIpamController(
|
||||||
fakeNodeInformer, fakeGCE, clientSet,
|
fakeNodeInformer, fakeClusterCIDRInformer, fakeGCE, clientSet,
|
||||||
clusterCIDR, serviceCIDR, secondaryServiceCIDR, nodeCIDRMaskSizes, allocatorType,
|
clusterCIDR, serviceCIDR, secondaryServiceCIDR, nodeCIDRMaskSizes, allocatorType,
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
@ -78,6 +79,9 @@ func TestNewNodeIpamControllerWithCIDRMasks(t *testing.T) {
|
|||||||
{"valid_range_allocator_dualstack", "10.0.0.0/21,2000::/10", "10.1.0.0/21", emptyServiceCIDR, []int{24, 98}, ipam.RangeAllocatorType, false},
|
{"valid_range_allocator_dualstack", "10.0.0.0/21,2000::/10", "10.1.0.0/21", emptyServiceCIDR, []int{24, 98}, ipam.RangeAllocatorType, false},
|
||||||
{"valid_range_allocator_dualstack_dualstackservice", "10.0.0.0/21,2000::/10", "10.1.0.0/21", "3000::/10", []int{24, 98}, ipam.RangeAllocatorType, false},
|
{"valid_range_allocator_dualstack_dualstackservice", "10.0.0.0/21,2000::/10", "10.1.0.0/21", "3000::/10", []int{24, 98}, ipam.RangeAllocatorType, false},
|
||||||
|
|
||||||
|
{"valid_multi_cidr_range_allocator", "10.0.0.0/21", "10.1.0.0/21", emptyServiceCIDR, []int{24}, ipam.MultiCIDRRangeAllocatorType, false},
|
||||||
|
{"valid_multi_cidr_range_allocator_dualstack", "10.0.0.0/21,2000::/10", "10.1.0.0/21", emptyServiceCIDR, []int{24, 98}, ipam.MultiCIDRRangeAllocatorType, false},
|
||||||
|
|
||||||
{"valid_cloud_allocator", "10.0.0.0/21", "10.1.0.0/21", emptyServiceCIDR, []int{24}, ipam.CloudAllocatorType, false},
|
{"valid_cloud_allocator", "10.0.0.0/21", "10.1.0.0/21", emptyServiceCIDR, []int{24}, ipam.CloudAllocatorType, false},
|
||||||
{"valid_ipam_from_cluster", "10.0.0.0/21", "10.1.0.0/21", emptyServiceCIDR, []int{24}, ipam.IPAMFromClusterAllocatorType, false},
|
{"valid_ipam_from_cluster", "10.0.0.0/21", "10.1.0.0/21", emptyServiceCIDR, []int{24}, ipam.IPAMFromClusterAllocatorType, false},
|
||||||
{"valid_ipam_from_cloud", "10.0.0.0/21", "10.1.0.0/21", emptyServiceCIDR, []int{24}, ipam.IPAMFromCloudAllocatorType, false},
|
{"valid_ipam_from_cloud", "10.0.0.0/21", "10.1.0.0/21", emptyServiceCIDR, []int{24}, ipam.IPAMFromCloudAllocatorType, false},
|
||||||
|
@ -45,6 +45,7 @@ import (
|
|||||||
eventsv1beta1 "k8s.io/api/events/v1beta1"
|
eventsv1beta1 "k8s.io/api/events/v1beta1"
|
||||||
flowcontrolv1alpha1 "k8s.io/api/flowcontrol/v1alpha1"
|
flowcontrolv1alpha1 "k8s.io/api/flowcontrol/v1alpha1"
|
||||||
networkingapiv1 "k8s.io/api/networking/v1"
|
networkingapiv1 "k8s.io/api/networking/v1"
|
||||||
|
networkingapiv1alpha1 "k8s.io/api/networking/v1alpha1"
|
||||||
nodev1 "k8s.io/api/node/v1"
|
nodev1 "k8s.io/api/node/v1"
|
||||||
nodev1beta1 "k8s.io/api/node/v1beta1"
|
nodev1beta1 "k8s.io/api/node/v1beta1"
|
||||||
policyapiv1 "k8s.io/api/policy/v1"
|
policyapiv1 "k8s.io/api/policy/v1"
|
||||||
@ -689,6 +690,7 @@ var (
|
|||||||
// alphaAPIGroupVersionsDisabledByDefault holds the alpha APIs we have. They are always disabled by default.
|
// alphaAPIGroupVersionsDisabledByDefault holds the alpha APIs we have. They are always disabled by default.
|
||||||
alphaAPIGroupVersionsDisabledByDefault = []schema.GroupVersion{
|
alphaAPIGroupVersionsDisabledByDefault = []schema.GroupVersion{
|
||||||
apiserverinternalv1alpha1.SchemeGroupVersion,
|
apiserverinternalv1alpha1.SchemeGroupVersion,
|
||||||
|
networkingapiv1alpha1.SchemeGroupVersion,
|
||||||
storageapiv1alpha1.SchemeGroupVersion,
|
storageapiv1alpha1.SchemeGroupVersion,
|
||||||
flowcontrolv1alpha1.SchemeGroupVersion,
|
flowcontrolv1alpha1.SchemeGroupVersion,
|
||||||
}
|
}
|
||||||
|
@ -588,6 +588,13 @@ const (
|
|||||||
// Enables the usage of different protocols in the same Service with type=LoadBalancer
|
// Enables the usage of different protocols in the same Service with type=LoadBalancer
|
||||||
MixedProtocolLBService featuregate.Feature = "MixedProtocolLBService"
|
MixedProtocolLBService featuregate.Feature = "MixedProtocolLBService"
|
||||||
|
|
||||||
|
// owner: @sarveshr7
|
||||||
|
// kep: http://kep.k8s.io/2593
|
||||||
|
// alpha: v1.25
|
||||||
|
//
|
||||||
|
// Enables the MultiCIDR Range allocator.
|
||||||
|
MultiCIDRRangeAllocator featuregate.Feature = "MultiCIDRRangeAllocator"
|
||||||
|
|
||||||
// owner: @rikatz
|
// owner: @rikatz
|
||||||
// kep: http://kep.k8s.io/2079
|
// kep: http://kep.k8s.io/2079
|
||||||
// alpha: v1.21
|
// alpha: v1.21
|
||||||
@ -1042,6 +1049,8 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS
|
|||||||
|
|
||||||
MixedProtocolLBService: {Default: true, PreRelease: featuregate.Beta},
|
MixedProtocolLBService: {Default: true, PreRelease: featuregate.Beta},
|
||||||
|
|
||||||
|
MultiCIDRRangeAllocator: {Default: false, PreRelease: featuregate.Alpha},
|
||||||
|
|
||||||
NetworkPolicyEndPort: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.27
|
NetworkPolicyEndPort: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.27
|
||||||
|
|
||||||
NetworkPolicyStatus: {Default: false, PreRelease: featuregate.Alpha},
|
NetworkPolicyStatus: {Default: false, PreRelease: featuregate.Alpha},
|
||||||
|
143
pkg/generated/openapi/zz_generated.openapi.go
generated
143
pkg/generated/openapi/zz_generated.openapi.go
generated
@ -687,6 +687,9 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA
|
|||||||
"k8s.io/api/networking/v1.NetworkPolicySpec": schema_k8sio_api_networking_v1_NetworkPolicySpec(ref),
|
"k8s.io/api/networking/v1.NetworkPolicySpec": schema_k8sio_api_networking_v1_NetworkPolicySpec(ref),
|
||||||
"k8s.io/api/networking/v1.NetworkPolicyStatus": schema_k8sio_api_networking_v1_NetworkPolicyStatus(ref),
|
"k8s.io/api/networking/v1.NetworkPolicyStatus": schema_k8sio_api_networking_v1_NetworkPolicyStatus(ref),
|
||||||
"k8s.io/api/networking/v1.ServiceBackendPort": schema_k8sio_api_networking_v1_ServiceBackendPort(ref),
|
"k8s.io/api/networking/v1.ServiceBackendPort": schema_k8sio_api_networking_v1_ServiceBackendPort(ref),
|
||||||
|
"k8s.io/api/networking/v1alpha1.ClusterCIDR": schema_k8sio_api_networking_v1alpha1_ClusterCIDR(ref),
|
||||||
|
"k8s.io/api/networking/v1alpha1.ClusterCIDRList": schema_k8sio_api_networking_v1alpha1_ClusterCIDRList(ref),
|
||||||
|
"k8s.io/api/networking/v1alpha1.ClusterCIDRSpec": schema_k8sio_api_networking_v1alpha1_ClusterCIDRSpec(ref),
|
||||||
"k8s.io/api/networking/v1beta1.HTTPIngressPath": schema_k8sio_api_networking_v1beta1_HTTPIngressPath(ref),
|
"k8s.io/api/networking/v1beta1.HTTPIngressPath": schema_k8sio_api_networking_v1beta1_HTTPIngressPath(ref),
|
||||||
"k8s.io/api/networking/v1beta1.HTTPIngressRuleValue": schema_k8sio_api_networking_v1beta1_HTTPIngressRuleValue(ref),
|
"k8s.io/api/networking/v1beta1.HTTPIngressRuleValue": schema_k8sio_api_networking_v1beta1_HTTPIngressRuleValue(ref),
|
||||||
"k8s.io/api/networking/v1beta1.Ingress": schema_k8sio_api_networking_v1beta1_Ingress(ref),
|
"k8s.io/api/networking/v1beta1.Ingress": schema_k8sio_api_networking_v1beta1_Ingress(ref),
|
||||||
@ -34339,6 +34342,146 @@ func schema_k8sio_api_networking_v1_ServiceBackendPort(ref common.ReferenceCallb
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func schema_k8sio_api_networking_v1alpha1_ClusterCIDR(ref common.ReferenceCallback) common.OpenAPIDefinition {
|
||||||
|
return common.OpenAPIDefinition{
|
||||||
|
Schema: spec.Schema{
|
||||||
|
SchemaProps: spec.SchemaProps{
|
||||||
|
Description: "ClusterCIDR represents a single configuration for per-Node Pod CIDR allocations when the MultiCIDRRangeAllocator is enabled (see the config for kube-controller-manager). A cluster may have any number of ClusterCIDR resources, all of which will be considered when allocating a CIDR for a Node. A ClusterCIDR is eligible to be used for a given Node when the node selector matches the node in question and has free CIDRs to allocate. In case of multiple matching ClusterCIDR resources, the allocator will attempt to break ties using internal heuristics, but any ClusterCIDR whose node selector matches the Node may be used.",
|
||||||
|
Type: []string{"object"},
|
||||||
|
Properties: map[string]spec.Schema{
|
||||||
|
"kind": {
|
||||||
|
SchemaProps: spec.SchemaProps{
|
||||||
|
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
|
||||||
|
Type: []string{"string"},
|
||||||
|
Format: "",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
"apiVersion": {
|
||||||
|
SchemaProps: spec.SchemaProps{
|
||||||
|
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
|
||||||
|
Type: []string{"string"},
|
||||||
|
Format: "",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
"metadata": {
|
||||||
|
SchemaProps: spec.SchemaProps{
|
||||||
|
Description: "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
|
||||||
|
Default: map[string]interface{}{},
|
||||||
|
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
"spec": {
|
||||||
|
SchemaProps: spec.SchemaProps{
|
||||||
|
Description: "Spec is the desired state of the ClusterCIDR. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status",
|
||||||
|
Default: map[string]interface{}{},
|
||||||
|
Ref: ref("k8s.io/api/networking/v1alpha1.ClusterCIDRSpec"),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Dependencies: []string{
|
||||||
|
"k8s.io/api/networking/v1alpha1.ClusterCIDRSpec", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func schema_k8sio_api_networking_v1alpha1_ClusterCIDRList(ref common.ReferenceCallback) common.OpenAPIDefinition {
|
||||||
|
return common.OpenAPIDefinition{
|
||||||
|
Schema: spec.Schema{
|
||||||
|
SchemaProps: spec.SchemaProps{
|
||||||
|
Description: "ClusterCIDRList contains a list of ClusterCIDR.",
|
||||||
|
Type: []string{"object"},
|
||||||
|
Properties: map[string]spec.Schema{
|
||||||
|
"kind": {
|
||||||
|
SchemaProps: spec.SchemaProps{
|
||||||
|
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
|
||||||
|
Type: []string{"string"},
|
||||||
|
Format: "",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
"apiVersion": {
|
||||||
|
SchemaProps: spec.SchemaProps{
|
||||||
|
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
|
||||||
|
Type: []string{"string"},
|
||||||
|
Format: "",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
"metadata": {
|
||||||
|
SchemaProps: spec.SchemaProps{
|
||||||
|
Description: "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
|
||||||
|
Default: map[string]interface{}{},
|
||||||
|
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
"items": {
|
||||||
|
SchemaProps: spec.SchemaProps{
|
||||||
|
Description: "Items is the list of ClusterCIDRs.",
|
||||||
|
Type: []string{"array"},
|
||||||
|
Items: &spec.SchemaOrArray{
|
||||||
|
Schema: &spec.Schema{
|
||||||
|
SchemaProps: spec.SchemaProps{
|
||||||
|
Default: map[string]interface{}{},
|
||||||
|
Ref: ref("k8s.io/api/networking/v1alpha1.ClusterCIDR"),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Required: []string{"items"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Dependencies: []string{
|
||||||
|
"k8s.io/api/networking/v1alpha1.ClusterCIDR", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func schema_k8sio_api_networking_v1alpha1_ClusterCIDRSpec(ref common.ReferenceCallback) common.OpenAPIDefinition {
|
||||||
|
return common.OpenAPIDefinition{
|
||||||
|
Schema: spec.Schema{
|
||||||
|
SchemaProps: spec.SchemaProps{
|
||||||
|
Description: "ClusterCIDRSpec defines the desired state of ClusterCIDR.",
|
||||||
|
Type: []string{"object"},
|
||||||
|
Properties: map[string]spec.Schema{
|
||||||
|
"nodeSelector": {
|
||||||
|
SchemaProps: spec.SchemaProps{
|
||||||
|
Description: "NodeSelector defines which nodes the config is applicable to. An empty or nil NodeSelector selects all nodes. This field is immutable.",
|
||||||
|
Ref: ref("k8s.io/api/core/v1.NodeSelector"),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
"perNodeHostBits": {
|
||||||
|
SchemaProps: spec.SchemaProps{
|
||||||
|
Description: "PerNodeHostBits defines the number of host bits to be configured per node. A subnet mask determines how much of the address is used for network bits and host bits. For example an IPv4 address of 192.168.0.0/24, splits the address into 24 bits for the network portion and 8 bits for the host portion. To allocate 256 IPs, set this field to 8 (a /24 mask for IPv4 or a /120 for IPv6). Minimum value is 4 (16 IPs). This field is immutable.",
|
||||||
|
Default: 0,
|
||||||
|
Type: []string{"integer"},
|
||||||
|
Format: "int32",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
"ipv4": {
|
||||||
|
SchemaProps: spec.SchemaProps{
|
||||||
|
Description: "IPv4 defines an IPv4 IP block in CIDR notation(e.g. \"10.0.0.0/8\"). At least one of IPv4 and IPv6 must be specified. This field is immutable.",
|
||||||
|
Default: "",
|
||||||
|
Type: []string{"string"},
|
||||||
|
Format: "",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
"ipv6": {
|
||||||
|
SchemaProps: spec.SchemaProps{
|
||||||
|
Description: "IPv6 defines an IPv6 IP block in CIDR notation(e.g. \"fd12:3456:789a:1::/64\"). At least one of IPv4 and IPv6 must be specified. This field is immutable.",
|
||||||
|
Default: "",
|
||||||
|
Type: []string{"string"},
|
||||||
|
Format: "",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Required: []string{"perNodeHostBits"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Dependencies: []string{
|
||||||
|
"k8s.io/api/core/v1.NodeSelector"},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func schema_k8sio_api_networking_v1beta1_HTTPIngressPath(ref common.ReferenceCallback) common.OpenAPIDefinition {
|
func schema_k8sio_api_networking_v1beta1_HTTPIngressPath(ref common.ReferenceCallback) common.OpenAPIDefinition {
|
||||||
return common.OpenAPIDefinition{
|
return common.OpenAPIDefinition{
|
||||||
Schema: spec.Schema{
|
Schema: spec.Schema{
|
||||||
|
@ -71,6 +71,7 @@ func NewStorageFactoryConfig() *StorageFactoryConfig {
|
|||||||
//
|
//
|
||||||
// TODO (https://github.com/kubernetes/kubernetes/issues/108451): remove the override in 1.25.
|
// TODO (https://github.com/kubernetes/kubernetes/issues/108451): remove the override in 1.25.
|
||||||
// apisstorage.Resource("csistoragecapacities").WithVersion("v1beta1"),
|
// apisstorage.Resource("csistoragecapacities").WithVersion("v1beta1"),
|
||||||
|
networking.Resource("clustercidrs").WithVersion("v1alpha1"),
|
||||||
}
|
}
|
||||||
|
|
||||||
return &StorageFactoryConfig{
|
return &StorageFactoryConfig{
|
||||||
|
@ -37,6 +37,7 @@ import (
|
|||||||
discoveryv1beta1 "k8s.io/api/discovery/v1beta1"
|
discoveryv1beta1 "k8s.io/api/discovery/v1beta1"
|
||||||
extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
|
extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
|
||||||
flowcontrolv1beta2 "k8s.io/api/flowcontrol/v1beta2"
|
flowcontrolv1beta2 "k8s.io/api/flowcontrol/v1beta2"
|
||||||
|
networkingv1alpha1 "k8s.io/api/networking/v1alpha1"
|
||||||
policyv1beta1 "k8s.io/api/policy/v1beta1"
|
policyv1beta1 "k8s.io/api/policy/v1beta1"
|
||||||
rbacv1beta1 "k8s.io/api/rbac/v1beta1"
|
rbacv1beta1 "k8s.io/api/rbac/v1beta1"
|
||||||
schedulingv1 "k8s.io/api/scheduling/v1"
|
schedulingv1 "k8s.io/api/scheduling/v1"
|
||||||
@ -591,6 +592,18 @@ func AddHandlers(h printers.PrintHandler) {
|
|||||||
{Name: "Age", Type: "string", Description: metav1.ObjectMeta{}.SwaggerDoc()["creationTimestamp"]},
|
{Name: "Age", Type: "string", Description: metav1.ObjectMeta{}.SwaggerDoc()["creationTimestamp"]},
|
||||||
}
|
}
|
||||||
h.TableHandler(scaleColumnDefinitions, printScale)
|
h.TableHandler(scaleColumnDefinitions, printScale)
|
||||||
|
|
||||||
|
clusterCIDRColumnDefinitions := []metav1.TableColumnDefinition{
|
||||||
|
{Name: "Name", Type: "string", Format: "name", Description: metav1.ObjectMeta{}.SwaggerDoc()["name"]},
|
||||||
|
{Name: "PerNodeHostBits", Type: "string", Description: networkingv1alpha1.ClusterCIDRSpec{}.SwaggerDoc()["perNodeHostBits"]},
|
||||||
|
{Name: "IPv4", Type: "string", Description: networkingv1alpha1.ClusterCIDRSpec{}.SwaggerDoc()["ipv4"]},
|
||||||
|
{Name: "IPv6", Type: "string", Description: networkingv1alpha1.ClusterCIDRSpec{}.SwaggerDoc()["ipv6"]},
|
||||||
|
{Name: "Age", Type: "string", Description: metav1.ObjectMeta{}.SwaggerDoc()["creationTimestamp"]},
|
||||||
|
{Name: "NodeSelector", Type: "string", Priority: 1, Description: networkingv1alpha1.ClusterCIDRSpec{}.SwaggerDoc()["nodeSelector"]},
|
||||||
|
}
|
||||||
|
|
||||||
|
h.TableHandler(clusterCIDRColumnDefinitions, printClusterCIDR)
|
||||||
|
h.TableHandler(clusterCIDRColumnDefinitions, printClusterCIDRList)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Pass ports=nil for all ports.
|
// Pass ports=nil for all ports.
|
||||||
@ -2624,6 +2637,57 @@ func printPriorityLevelConfigurationList(list *flowcontrol.PriorityLevelConfigur
|
|||||||
return rows, nil
|
return rows, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func printClusterCIDR(obj *networking.ClusterCIDR, options printers.GenerateOptions) ([]metav1.TableRow, error) {
|
||||||
|
row := metav1.TableRow{
|
||||||
|
Object: runtime.RawExtension{Object: obj},
|
||||||
|
}
|
||||||
|
ipv4 := "<none>"
|
||||||
|
ipv6 := "<none>"
|
||||||
|
|
||||||
|
if obj.Spec.IPv4 != "" {
|
||||||
|
ipv4 = obj.Spec.IPv4
|
||||||
|
}
|
||||||
|
if obj.Spec.IPv6 != "" {
|
||||||
|
ipv6 = obj.Spec.IPv6
|
||||||
|
}
|
||||||
|
|
||||||
|
row.Cells = append(row.Cells, obj.Name, fmt.Sprint(obj.Spec.PerNodeHostBits), ipv4, ipv6, translateTimestampSince(obj.CreationTimestamp))
|
||||||
|
if options.Wide {
|
||||||
|
nodeSelector := "<none>"
|
||||||
|
if obj.Spec.NodeSelector != nil {
|
||||||
|
allTerms := make([]string, 0)
|
||||||
|
for _, term := range obj.Spec.NodeSelector.NodeSelectorTerms {
|
||||||
|
if len(term.MatchExpressions) > 0 {
|
||||||
|
matchExpressions := fmt.Sprintf("MatchExpressions: %v", term.MatchExpressions)
|
||||||
|
allTerms = append(allTerms, matchExpressions)
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(term.MatchFields) > 0 {
|
||||||
|
matchFields := fmt.Sprintf("MatchFields: %v", term.MatchFields)
|
||||||
|
allTerms = append(allTerms, matchFields)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
nodeSelector = strings.Join(allTerms, ",")
|
||||||
|
}
|
||||||
|
|
||||||
|
row.Cells = append(row.Cells, nodeSelector)
|
||||||
|
}
|
||||||
|
|
||||||
|
return []metav1.TableRow{row}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func printClusterCIDRList(list *networking.ClusterCIDRList, options printers.GenerateOptions) ([]metav1.TableRow, error) {
|
||||||
|
rows := make([]metav1.TableRow, 0, len(list.Items))
|
||||||
|
for i := range list.Items {
|
||||||
|
r, err := printClusterCIDR(&list.Items[i], options)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
rows = append(rows, r...)
|
||||||
|
}
|
||||||
|
return rows, nil
|
||||||
|
}
|
||||||
|
|
||||||
func printScale(obj *autoscaling.Scale, options printers.GenerateOptions) ([]metav1.TableRow, error) {
|
func printScale(obj *autoscaling.Scale, options printers.GenerateOptions) ([]metav1.TableRow, error) {
|
||||||
row := metav1.TableRow{
|
row := metav1.TableRow{
|
||||||
Object: runtime.RawExtension{Object: obj},
|
Object: runtime.RawExtension{Object: obj},
|
||||||
|
@ -6184,3 +6184,277 @@ func TestTableRowDeepCopyShouldNotPanic(t *testing.T) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestPrintClusterCIDR(t *testing.T) {
|
||||||
|
ipv4CIDR := "10.1.0.0/16"
|
||||||
|
perNodeHostBits := int32(8)
|
||||||
|
ipv6CIDR := "fd00:1:1::/64"
|
||||||
|
|
||||||
|
tests := []struct {
|
||||||
|
ccc networking.ClusterCIDR
|
||||||
|
options printers.GenerateOptions
|
||||||
|
expected []metav1.TableRow
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
// Test name, IPv4 only with no node selector.
|
||||||
|
ccc: networking.ClusterCIDR{
|
||||||
|
ObjectMeta: metav1.ObjectMeta{Name: "test1"},
|
||||||
|
Spec: networking.ClusterCIDRSpec{
|
||||||
|
PerNodeHostBits: perNodeHostBits,
|
||||||
|
IPv4: ipv4CIDR,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
options: printers.GenerateOptions{},
|
||||||
|
// Columns: Name, PerNodeHostBits, IPv4, IPv6, Age.
|
||||||
|
expected: []metav1.TableRow{{Cells: []interface{}{"test1", "8", ipv4CIDR, "<none>", "<unknown>"}}},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
// Test name, IPv4 only with node selector, Not wide.
|
||||||
|
ccc: networking.ClusterCIDR{
|
||||||
|
ObjectMeta: metav1.ObjectMeta{Name: "test2"},
|
||||||
|
Spec: networking.ClusterCIDRSpec{
|
||||||
|
PerNodeHostBits: perNodeHostBits,
|
||||||
|
IPv4: ipv4CIDR,
|
||||||
|
// Does NOT get printed.
|
||||||
|
NodeSelector: makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"}),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
options: printers.GenerateOptions{},
|
||||||
|
// Columns: Name, PerNodeHostBits, IPv4, IPv6, Age.
|
||||||
|
expected: []metav1.TableRow{{Cells: []interface{}{"test2", "8", ipv4CIDR, "<none>", "<unknown>"}}},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
// Test name, IPv4 only with no node selector, wide.
|
||||||
|
ccc: networking.ClusterCIDR{
|
||||||
|
ObjectMeta: metav1.ObjectMeta{Name: "test3"},
|
||||||
|
Spec: networking.ClusterCIDRSpec{
|
||||||
|
PerNodeHostBits: perNodeHostBits,
|
||||||
|
IPv4: ipv4CIDR,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
options: printers.GenerateOptions{Wide: true},
|
||||||
|
// Columns: Name, PerNodeHostBits, IPv4, IPv6, Age, NodeSelector .
|
||||||
|
expected: []metav1.TableRow{{Cells: []interface{}{"test3", "8", ipv4CIDR, "<none>", "<unknown>", "<none>"}}},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
// Test name, IPv4 only with node selector, wide.
|
||||||
|
ccc: networking.ClusterCIDR{
|
||||||
|
ObjectMeta: metav1.ObjectMeta{Name: "test4"},
|
||||||
|
Spec: networking.ClusterCIDRSpec{
|
||||||
|
PerNodeHostBits: perNodeHostBits,
|
||||||
|
IPv4: ipv4CIDR,
|
||||||
|
NodeSelector: makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"}),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
options: printers.GenerateOptions{Wide: true},
|
||||||
|
// Columns: Name, PerNodeHostBits, IPv4, IPv6, Age, NodeSelector .
|
||||||
|
expected: []metav1.TableRow{{Cells: []interface{}{"test4", "8", ipv4CIDR, "<none>", "<unknown>", "MatchExpressions: [{foo In [bar]}]"}}},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
// Test name, IPv6 only with no node selector.
|
||||||
|
ccc: networking.ClusterCIDR{
|
||||||
|
ObjectMeta: metav1.ObjectMeta{Name: "test5"},
|
||||||
|
Spec: networking.ClusterCIDRSpec{
|
||||||
|
PerNodeHostBits: perNodeHostBits,
|
||||||
|
IPv6: ipv6CIDR,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
options: printers.GenerateOptions{},
|
||||||
|
// Columns: Name, PerNodeHostBits, IPv4, IPv6, Age
|
||||||
|
expected: []metav1.TableRow{{Cells: []interface{}{"test5", "8", "<none>", ipv6CIDR, "<unknown>"}}},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
// Test name, IPv6 only with node selector, Not wide.
|
||||||
|
ccc: networking.ClusterCIDR{
|
||||||
|
ObjectMeta: metav1.ObjectMeta{Name: "test6"},
|
||||||
|
Spec: networking.ClusterCIDRSpec{
|
||||||
|
PerNodeHostBits: perNodeHostBits,
|
||||||
|
IPv6: ipv6CIDR,
|
||||||
|
// Does NOT get printed.
|
||||||
|
NodeSelector: makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"}),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
options: printers.GenerateOptions{},
|
||||||
|
// Columns: Name, PerNodeHostBits, IPv4, IPv6, Age.
|
||||||
|
expected: []metav1.TableRow{{Cells: []interface{}{"test6", "8", "<none>", ipv6CIDR, "<unknown>"}}},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
// Test name, IPv6 only with no node selector, wide.
|
||||||
|
ccc: networking.ClusterCIDR{
|
||||||
|
ObjectMeta: metav1.ObjectMeta{Name: "test7"},
|
||||||
|
Spec: networking.ClusterCIDRSpec{
|
||||||
|
PerNodeHostBits: perNodeHostBits,
|
||||||
|
IPv6: ipv6CIDR,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
options: printers.GenerateOptions{Wide: true},
|
||||||
|
// Columns: Name, PerNodeHostBits, IPv4, IPv6, Age, NodeSelector .
|
||||||
|
expected: []metav1.TableRow{{Cells: []interface{}{"test7", "8", "<none>", ipv6CIDR, "<unknown>", "<none>"}}},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
// Test name, IPv6 only with node selector, wide.
|
||||||
|
ccc: networking.ClusterCIDR{
|
||||||
|
ObjectMeta: metav1.ObjectMeta{Name: "test8"},
|
||||||
|
Spec: networking.ClusterCIDRSpec{
|
||||||
|
PerNodeHostBits: perNodeHostBits,
|
||||||
|
IPv6: ipv6CIDR,
|
||||||
|
NodeSelector: makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"}),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
options: printers.GenerateOptions{Wide: true},
|
||||||
|
// Columns: Name, PerNodeHostBits, IPv4, IPv6, Age, NodeSelector .
|
||||||
|
expected: []metav1.TableRow{{Cells: []interface{}{"test8", "8", "<none>", ipv6CIDR, "<unknown>", "MatchExpressions: [{foo In [bar]}]"}}},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
// Test name, DualStack with no node selector.
|
||||||
|
ccc: networking.ClusterCIDR{
|
||||||
|
ObjectMeta: metav1.ObjectMeta{Name: "test9"},
|
||||||
|
Spec: networking.ClusterCIDRSpec{
|
||||||
|
PerNodeHostBits: perNodeHostBits,
|
||||||
|
IPv4: ipv4CIDR,
|
||||||
|
IPv6: ipv6CIDR,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
options: printers.GenerateOptions{},
|
||||||
|
// Columns: Name, PerNodeHostBits, IPv4, IPv6, Age.
|
||||||
|
expected: []metav1.TableRow{{Cells: []interface{}{"test9", "8", ipv4CIDR, ipv6CIDR, "<unknown>"}}},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
// Test name,DualStack with node selector, Not wide.
|
||||||
|
ccc: networking.ClusterCIDR{
|
||||||
|
ObjectMeta: metav1.ObjectMeta{Name: "test10"},
|
||||||
|
Spec: networking.ClusterCIDRSpec{
|
||||||
|
PerNodeHostBits: perNodeHostBits,
|
||||||
|
IPv4: ipv4CIDR,
|
||||||
|
IPv6: ipv6CIDR,
|
||||||
|
// Does NOT get printed.
|
||||||
|
NodeSelector: makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"}),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
options: printers.GenerateOptions{},
|
||||||
|
// Columns: Name, PerNodeHostBits, IPv4, IPv6, Age.
|
||||||
|
expected: []metav1.TableRow{{Cells: []interface{}{"test10", "8", ipv4CIDR, ipv6CIDR, "<unknown>"}}},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
// Test name, DualStack with no node selector, wide.
|
||||||
|
ccc: networking.ClusterCIDR{
|
||||||
|
ObjectMeta: metav1.ObjectMeta{Name: "test11"},
|
||||||
|
Spec: networking.ClusterCIDRSpec{
|
||||||
|
PerNodeHostBits: perNodeHostBits,
|
||||||
|
IPv4: ipv4CIDR,
|
||||||
|
IPv6: ipv6CIDR,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
options: printers.GenerateOptions{Wide: true},
|
||||||
|
// Columns: Name, PerNodeHostBits, IPv4, IPv6, Age, NodeSelector.
|
||||||
|
expected: []metav1.TableRow{{Cells: []interface{}{"test11", "8", ipv4CIDR, ipv6CIDR, "<unknown>", "<none>"}}},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
// Test name, DualStack with node selector, wide.
|
||||||
|
ccc: networking.ClusterCIDR{
|
||||||
|
ObjectMeta: metav1.ObjectMeta{Name: "test12"},
|
||||||
|
Spec: networking.ClusterCIDRSpec{
|
||||||
|
PerNodeHostBits: perNodeHostBits,
|
||||||
|
IPv4: ipv4CIDR,
|
||||||
|
IPv6: ipv6CIDR,
|
||||||
|
NodeSelector: makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"}),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
options: printers.GenerateOptions{Wide: true},
|
||||||
|
// Columns: Name, PerNodeHostBits, IPv4, IPv6, Age, NodeSelector .
|
||||||
|
expected: []metav1.TableRow{{Cells: []interface{}{"test12", "8", ipv4CIDR, ipv6CIDR, "<unknown>", "MatchExpressions: [{foo In [bar]}]"}}},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for i, test := range tests {
|
||||||
|
rows, err := printClusterCIDR(&test.ccc, test.options)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
for i := range rows {
|
||||||
|
rows[i].Object.Object = nil
|
||||||
|
}
|
||||||
|
if !reflect.DeepEqual(test.expected, rows) {
|
||||||
|
t.Errorf("%d mismatch: %s", i, diff.ObjectReflectDiff(test.expected, rows))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func makeNodeSelector(key string, op api.NodeSelectorOperator, values []string) *api.NodeSelector {
|
||||||
|
return &api.NodeSelector{
|
||||||
|
NodeSelectorTerms: []api.NodeSelectorTerm{
|
||||||
|
{
|
||||||
|
MatchExpressions: []api.NodeSelectorRequirement{
|
||||||
|
{
|
||||||
|
Key: key,
|
||||||
|
Operator: op,
|
||||||
|
Values: values,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestPrintClusterCIDRList(t *testing.T) {
|
||||||
|
|
||||||
|
cccList := networking.ClusterCIDRList{
|
||||||
|
Items: []networking.ClusterCIDR{
|
||||||
|
{
|
||||||
|
ObjectMeta: metav1.ObjectMeta{Name: "ccc1"},
|
||||||
|
Spec: networking.ClusterCIDRSpec{
|
||||||
|
PerNodeHostBits: int32(8),
|
||||||
|
IPv4: "10.1.0.0/16",
|
||||||
|
IPv6: "fd00:1:1::/64",
|
||||||
|
NodeSelector: makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"}),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
ObjectMeta: metav1.ObjectMeta{Name: "ccc2"},
|
||||||
|
Spec: networking.ClusterCIDRSpec{
|
||||||
|
PerNodeHostBits: int32(8),
|
||||||
|
IPv4: "10.2.0.0/16",
|
||||||
|
IPv6: "fd00:2:1::/64",
|
||||||
|
NodeSelector: makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"}),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
tests := []struct {
|
||||||
|
options printers.GenerateOptions
|
||||||
|
expected []metav1.TableRow
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
// Test name, DualStack with node selector, wide.
|
||||||
|
options: printers.GenerateOptions{Wide: false},
|
||||||
|
expected: []metav1.TableRow{
|
||||||
|
// Columns: Name, PerNodeHostBits, IPv4, IPv6, Age.
|
||||||
|
{Cells: []interface{}{"ccc1", "8", "10.1.0.0/16", "fd00:1:1::/64", "<unknown>"}},
|
||||||
|
{Cells: []interface{}{"ccc2", "8", "10.2.0.0/16", "fd00:2:1::/64", "<unknown>"}},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
// Test name, DualStack with node selector, wide.
|
||||||
|
options: printers.GenerateOptions{Wide: true},
|
||||||
|
expected: []metav1.TableRow{
|
||||||
|
// Columns: Name, PerNodeHostBits, IPv4, IPv6, Age, NodeSelector.
|
||||||
|
{Cells: []interface{}{"ccc1", "8", "10.1.0.0/16", "fd00:1:1::/64", "<unknown>", "MatchExpressions: [{foo In [bar]}]"}},
|
||||||
|
{Cells: []interface{}{"ccc2", "8", "10.2.0.0/16", "fd00:2:1::/64", "<unknown>", "MatchExpressions: [{foo In [bar]}]"}},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, test := range tests {
|
||||||
|
rows, err := printClusterCIDRList(&cccList, test.options)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Error printing service list: %#v", err)
|
||||||
|
}
|
||||||
|
for i := range rows {
|
||||||
|
rows[i].Object.Object = nil
|
||||||
|
}
|
||||||
|
if !reflect.DeepEqual(test.expected, rows) {
|
||||||
|
t.Errorf("mismatch: %s", diff.ObjectReflectDiff(test.expected, rows))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
17
pkg/registry/networking/clustercidr/doc.go
Normal file
17
pkg/registry/networking/clustercidr/doc.go
Normal file
@ -0,0 +1,17 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2022 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package clustercidr // import "k8s.io/kubernetes/pkg/registry/networking/clustercidr"
|
63
pkg/registry/networking/clustercidr/storage/storage.go
Normal file
63
pkg/registry/networking/clustercidr/storage/storage.go
Normal file
@ -0,0 +1,63 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2022 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package storage
|
||||||
|
|
||||||
|
import (
|
||||||
|
"k8s.io/apimachinery/pkg/runtime"
|
||||||
|
"k8s.io/apiserver/pkg/registry/generic"
|
||||||
|
genericregistry "k8s.io/apiserver/pkg/registry/generic/registry"
|
||||||
|
"k8s.io/apiserver/pkg/registry/rest"
|
||||||
|
networkingapi "k8s.io/kubernetes/pkg/apis/networking"
|
||||||
|
"k8s.io/kubernetes/pkg/printers"
|
||||||
|
printersinternal "k8s.io/kubernetes/pkg/printers/internalversion"
|
||||||
|
printerstorage "k8s.io/kubernetes/pkg/printers/storage"
|
||||||
|
"k8s.io/kubernetes/pkg/registry/networking/clustercidr"
|
||||||
|
)
|
||||||
|
|
||||||
|
// REST implements a RESTStorage for ClusterCIDRs against etcd.
|
||||||
|
type REST struct {
|
||||||
|
*genericregistry.Store
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewREST returns a RESTStorage object that will work against ClusterCIDRs.
|
||||||
|
func NewREST(optsGetter generic.RESTOptionsGetter) (*REST, error) {
|
||||||
|
store := &genericregistry.Store{
|
||||||
|
NewFunc: func() runtime.Object { return &networkingapi.ClusterCIDR{} },
|
||||||
|
NewListFunc: func() runtime.Object { return &networkingapi.ClusterCIDRList{} },
|
||||||
|
DefaultQualifiedResource: networkingapi.Resource("clustercidrs"),
|
||||||
|
|
||||||
|
CreateStrategy: clustercidr.Strategy,
|
||||||
|
UpdateStrategy: clustercidr.Strategy,
|
||||||
|
DeleteStrategy: clustercidr.Strategy,
|
||||||
|
|
||||||
|
TableConvertor: printerstorage.TableConvertor{TableGenerator: printers.NewTableGenerator().With(printersinternal.AddHandlers)},
|
||||||
|
}
|
||||||
|
options := &generic.StoreOptions{RESTOptions: optsGetter}
|
||||||
|
if err := store.CompleteWithOptions(options); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return &REST{store}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Implement ShortNamesProvider.
|
||||||
|
var _ rest.ShortNamesProvider = &REST{}
|
||||||
|
|
||||||
|
// ShortNames implements the ShortNamesProvider interface. Returns a list of short names for a resource.
|
||||||
|
func (r *REST) ShortNames() []string {
|
||||||
|
return []string{"cc"}
|
||||||
|
}
|
196
pkg/registry/networking/clustercidr/storage/storage_test.go
Normal file
196
pkg/registry/networking/clustercidr/storage/storage_test.go
Normal file
@ -0,0 +1,196 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2022 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package storage
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
|
"k8s.io/apimachinery/pkg/fields"
|
||||||
|
"k8s.io/apimachinery/pkg/labels"
|
||||||
|
"k8s.io/apimachinery/pkg/runtime"
|
||||||
|
"k8s.io/apiserver/pkg/registry/generic"
|
||||||
|
genericregistrytest "k8s.io/apiserver/pkg/registry/generic/testing"
|
||||||
|
etcd3testing "k8s.io/apiserver/pkg/storage/etcd3/testing"
|
||||||
|
api "k8s.io/kubernetes/pkg/apis/core"
|
||||||
|
"k8s.io/kubernetes/pkg/apis/networking"
|
||||||
|
_ "k8s.io/kubernetes/pkg/apis/networking/install"
|
||||||
|
"k8s.io/kubernetes/pkg/registry/registrytest"
|
||||||
|
)
|
||||||
|
|
||||||
|
func newStorage(t *testing.T) (*REST, *etcd3testing.EtcdTestServer) {
|
||||||
|
etcdStorage, server := registrytest.NewEtcdStorageForResource(t, networking.Resource("clustercidrs"))
|
||||||
|
restOptions := generic.RESTOptions{
|
||||||
|
StorageConfig: etcdStorage,
|
||||||
|
Decorator: generic.UndecoratedStorage,
|
||||||
|
DeleteCollectionWorkers: 1,
|
||||||
|
ResourcePrefix: "clustercidrs",
|
||||||
|
}
|
||||||
|
clusterCIDRStorage, err := NewREST(restOptions)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("unexpected error from REST storage: %v", err)
|
||||||
|
}
|
||||||
|
return clusterCIDRStorage, server
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
namespace = metav1.NamespaceNone
|
||||||
|
name = "foo-clustercidr"
|
||||||
|
)
|
||||||
|
|
||||||
|
func newClusterCIDR() *networking.ClusterCIDR {
|
||||||
|
return &networking.ClusterCIDR{
|
||||||
|
ObjectMeta: metav1.ObjectMeta{
|
||||||
|
Name: name,
|
||||||
|
Namespace: namespace,
|
||||||
|
},
|
||||||
|
Spec: networking.ClusterCIDRSpec{
|
||||||
|
PerNodeHostBits: int32(8),
|
||||||
|
IPv4: "10.1.0.0/16",
|
||||||
|
IPv6: "fd00:1:1::/64",
|
||||||
|
NodeSelector: &api.NodeSelector{
|
||||||
|
NodeSelectorTerms: []api.NodeSelectorTerm{
|
||||||
|
{
|
||||||
|
MatchExpressions: []api.NodeSelectorRequirement{
|
||||||
|
{
|
||||||
|
Key: "foo",
|
||||||
|
Operator: api.NodeSelectorOpIn,
|
||||||
|
Values: []string{"bar"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func validClusterCIDR() *networking.ClusterCIDR {
|
||||||
|
return newClusterCIDR()
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestCreate(t *testing.T) {
|
||||||
|
storage, server := newStorage(t)
|
||||||
|
defer server.Terminate(t)
|
||||||
|
defer storage.Store.DestroyFunc()
|
||||||
|
|
||||||
|
test := genericregistrytest.New(t, storage.Store)
|
||||||
|
test = test.ClusterScope()
|
||||||
|
validCC := validClusterCIDR()
|
||||||
|
noCIDRCC := validClusterCIDR()
|
||||||
|
noCIDRCC.Spec.IPv4 = ""
|
||||||
|
noCIDRCC.Spec.IPv6 = ""
|
||||||
|
invalidCCPerNodeHostBits := validClusterCIDR()
|
||||||
|
invalidCCPerNodeHostBits.Spec.PerNodeHostBits = 100
|
||||||
|
invalidCCCIDR := validClusterCIDR()
|
||||||
|
invalidCCCIDR.Spec.IPv6 = "10.1.0.0/16"
|
||||||
|
|
||||||
|
test.TestCreate(
|
||||||
|
// valid
|
||||||
|
validCC,
|
||||||
|
//invalid
|
||||||
|
noCIDRCC,
|
||||||
|
invalidCCPerNodeHostBits,
|
||||||
|
invalidCCCIDR,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestUpdate(t *testing.T) {
|
||||||
|
storage, server := newStorage(t)
|
||||||
|
defer server.Terminate(t)
|
||||||
|
defer storage.Store.DestroyFunc()
|
||||||
|
test := genericregistrytest.New(t, storage.Store)
|
||||||
|
test = test.ClusterScope()
|
||||||
|
test.TestUpdate(
|
||||||
|
// valid
|
||||||
|
validClusterCIDR(),
|
||||||
|
// updateFunc
|
||||||
|
func(obj runtime.Object) runtime.Object {
|
||||||
|
object := obj.(*networking.ClusterCIDR)
|
||||||
|
object.Finalizers = []string{"test.k8s.io/test-finalizer"}
|
||||||
|
return object
|
||||||
|
},
|
||||||
|
// invalid updateFunc: ObjectMeta is not to be tampered with.
|
||||||
|
func(obj runtime.Object) runtime.Object {
|
||||||
|
object := obj.(*networking.ClusterCIDR)
|
||||||
|
object.Name = ""
|
||||||
|
return object
|
||||||
|
},
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestDelete(t *testing.T) {
|
||||||
|
storage, server := newStorage(t)
|
||||||
|
defer server.Terminate(t)
|
||||||
|
defer storage.Store.DestroyFunc()
|
||||||
|
test := genericregistrytest.New(t, storage.Store)
|
||||||
|
test = test.ClusterScope()
|
||||||
|
test.TestDelete(validClusterCIDR())
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestGet(t *testing.T) {
|
||||||
|
storage, server := newStorage(t)
|
||||||
|
defer server.Terminate(t)
|
||||||
|
defer storage.Store.DestroyFunc()
|
||||||
|
test := genericregistrytest.New(t, storage.Store)
|
||||||
|
test = test.ClusterScope()
|
||||||
|
test.TestGet(validClusterCIDR())
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestList(t *testing.T) {
|
||||||
|
storage, server := newStorage(t)
|
||||||
|
defer server.Terminate(t)
|
||||||
|
defer storage.Store.DestroyFunc()
|
||||||
|
test := genericregistrytest.New(t, storage.Store)
|
||||||
|
test = test.ClusterScope()
|
||||||
|
test.TestList(validClusterCIDR())
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestWatch(t *testing.T) {
|
||||||
|
storage, server := newStorage(t)
|
||||||
|
defer server.Terminate(t)
|
||||||
|
defer storage.Store.DestroyFunc()
|
||||||
|
test := genericregistrytest.New(t, storage.Store)
|
||||||
|
test = test.ClusterScope()
|
||||||
|
test.TestWatch(
|
||||||
|
validClusterCIDR(),
|
||||||
|
// matching labels
|
||||||
|
[]labels.Set{},
|
||||||
|
// not matching labels
|
||||||
|
[]labels.Set{
|
||||||
|
{"a": "c"},
|
||||||
|
{"foo": "bar"},
|
||||||
|
},
|
||||||
|
// matching fields
|
||||||
|
[]fields.Set{
|
||||||
|
{"metadata.name": name},
|
||||||
|
},
|
||||||
|
// not matching fields
|
||||||
|
[]fields.Set{
|
||||||
|
{"metadata.name": "bar"},
|
||||||
|
{"name": name},
|
||||||
|
},
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestShortNames(t *testing.T) {
|
||||||
|
storage, server := newStorage(t)
|
||||||
|
defer server.Terminate(t)
|
||||||
|
defer storage.Store.DestroyFunc()
|
||||||
|
expected := []string{"cc"}
|
||||||
|
registrytest.AssertShortNames(t, storage, expected)
|
||||||
|
}
|
82
pkg/registry/networking/clustercidr/strategy.go
Normal file
82
pkg/registry/networking/clustercidr/strategy.go
Normal file
@ -0,0 +1,82 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2022 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package clustercidr
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
|
||||||
|
"k8s.io/apimachinery/pkg/runtime"
|
||||||
|
"k8s.io/apimachinery/pkg/util/validation/field"
|
||||||
|
"k8s.io/apiserver/pkg/storage/names"
|
||||||
|
"k8s.io/kubernetes/pkg/api/legacyscheme"
|
||||||
|
"k8s.io/kubernetes/pkg/apis/networking"
|
||||||
|
"k8s.io/kubernetes/pkg/apis/networking/validation"
|
||||||
|
)
|
||||||
|
|
||||||
|
// clusterCIDRStrategy implements verification logic for ClusterCIDRs.
|
||||||
|
type clusterCIDRStrategy struct {
|
||||||
|
runtime.ObjectTyper
|
||||||
|
names.NameGenerator
|
||||||
|
}
|
||||||
|
|
||||||
|
// Strategy is the default logic that applies when creating and updating clusterCIDR objects.
|
||||||
|
var Strategy = clusterCIDRStrategy{legacyscheme.Scheme, names.SimpleNameGenerator}
|
||||||
|
|
||||||
|
// NamespaceScoped returns false because all clusterCIDRs do not need to be within a namespace.
|
||||||
|
func (clusterCIDRStrategy) NamespaceScoped() bool {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func (clusterCIDRStrategy) PrepareForCreate(ctx context.Context, obj runtime.Object) {}
|
||||||
|
|
||||||
|
func (clusterCIDRStrategy) PrepareForUpdate(ctx context.Context, obj, old runtime.Object) {}
|
||||||
|
|
||||||
|
// Validate validates a new ClusterCIDR.
|
||||||
|
func (clusterCIDRStrategy) Validate(ctx context.Context, obj runtime.Object) field.ErrorList {
|
||||||
|
clusterCIDR := obj.(*networking.ClusterCIDR)
|
||||||
|
return validation.ValidateClusterCIDR(clusterCIDR)
|
||||||
|
}
|
||||||
|
|
||||||
|
// WarningsOnCreate returns warnings for the creation of the given object.
|
||||||
|
func (clusterCIDRStrategy) WarningsOnCreate(ctx context.Context, obj runtime.Object) []string {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Canonicalize normalizes the object after validation.
|
||||||
|
func (clusterCIDRStrategy) Canonicalize(obj runtime.Object) {}
|
||||||
|
|
||||||
|
// AllowCreateOnUpdate is false for ClusterCIDR; this means POST is needed to create one.
|
||||||
|
func (clusterCIDRStrategy) AllowCreateOnUpdate() bool {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// ValidateUpdate is the default update validation for an end user.
|
||||||
|
func (clusterCIDRStrategy) ValidateUpdate(ctx context.Context, obj, old runtime.Object) field.ErrorList {
|
||||||
|
validationErrorList := validation.ValidateClusterCIDR(obj.(*networking.ClusterCIDR))
|
||||||
|
updateErrorList := validation.ValidateClusterCIDRUpdate(obj.(*networking.ClusterCIDR), old.(*networking.ClusterCIDR))
|
||||||
|
return append(validationErrorList, updateErrorList...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// WarningsOnUpdate returns warnings for the given update.
|
||||||
|
func (clusterCIDRStrategy) WarningsOnUpdate(ctx context.Context, obj, old runtime.Object) []string {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// AllowUnconditionalUpdate is the default update policy for ClusterCIDR objects.
|
||||||
|
func (clusterCIDRStrategy) AllowUnconditionalUpdate() bool {
|
||||||
|
return true
|
||||||
|
}
|
86
pkg/registry/networking/clustercidr/strategy_test.go
Normal file
86
pkg/registry/networking/clustercidr/strategy_test.go
Normal file
@ -0,0 +1,86 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2022 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package clustercidr
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
|
genericapirequest "k8s.io/apiserver/pkg/endpoints/request"
|
||||||
|
api "k8s.io/kubernetes/pkg/apis/core"
|
||||||
|
"k8s.io/kubernetes/pkg/apis/networking"
|
||||||
|
)
|
||||||
|
|
||||||
|
func newClusterCIDR() networking.ClusterCIDR {
|
||||||
|
return networking.ClusterCIDR{
|
||||||
|
ObjectMeta: metav1.ObjectMeta{
|
||||||
|
Name: "foo",
|
||||||
|
},
|
||||||
|
Spec: networking.ClusterCIDRSpec{
|
||||||
|
PerNodeHostBits: int32(8),
|
||||||
|
IPv4: "10.1.0.0/16",
|
||||||
|
IPv6: "fd00:1:1::/64",
|
||||||
|
NodeSelector: &api.NodeSelector{
|
||||||
|
NodeSelectorTerms: []api.NodeSelectorTerm{
|
||||||
|
{
|
||||||
|
MatchExpressions: []api.NodeSelectorRequirement{
|
||||||
|
{
|
||||||
|
Key: "foo",
|
||||||
|
Operator: api.NodeSelectorOpIn,
|
||||||
|
Values: []string{"bar"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestClusterCIDRStrategy(t *testing.T) {
|
||||||
|
ctx := genericapirequest.NewDefaultContext()
|
||||||
|
apiRequest := genericapirequest.RequestInfo{APIGroup: "networking.k8s.io",
|
||||||
|
APIVersion: "v1alpha1",
|
||||||
|
Resource: "clustercidrs",
|
||||||
|
}
|
||||||
|
ctx = genericapirequest.WithRequestInfo(ctx, &apiRequest)
|
||||||
|
if Strategy.NamespaceScoped() {
|
||||||
|
t.Errorf("ClusterCIDRs must be cluster scoped")
|
||||||
|
}
|
||||||
|
if Strategy.AllowCreateOnUpdate() {
|
||||||
|
t.Errorf("ClusterCIDRs should not allow create on update")
|
||||||
|
}
|
||||||
|
|
||||||
|
ccc := newClusterCIDR()
|
||||||
|
Strategy.PrepareForCreate(ctx, &ccc)
|
||||||
|
|
||||||
|
errs := Strategy.Validate(ctx, &ccc)
|
||||||
|
if len(errs) != 0 {
|
||||||
|
t.Errorf("Unexpected error validating %v", errs)
|
||||||
|
}
|
||||||
|
invalidCCC := newClusterCIDR()
|
||||||
|
invalidCCC.ResourceVersion = "4"
|
||||||
|
invalidCCC.Spec = networking.ClusterCIDRSpec{}
|
||||||
|
Strategy.PrepareForUpdate(ctx, &invalidCCC, &ccc)
|
||||||
|
errs = Strategy.ValidateUpdate(ctx, &invalidCCC, &ccc)
|
||||||
|
if len(errs) == 0 {
|
||||||
|
t.Errorf("Expected a validation error")
|
||||||
|
}
|
||||||
|
if invalidCCC.ResourceVersion != "4" {
|
||||||
|
t.Errorf("Incoming resource version on update should not be mutated")
|
||||||
|
}
|
||||||
|
}
|
@ -18,12 +18,14 @@ package rest
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
networkingapiv1 "k8s.io/api/networking/v1"
|
networkingapiv1 "k8s.io/api/networking/v1"
|
||||||
|
networkingapiv1alpha1 "k8s.io/api/networking/v1alpha1"
|
||||||
"k8s.io/apiserver/pkg/registry/generic"
|
"k8s.io/apiserver/pkg/registry/generic"
|
||||||
"k8s.io/apiserver/pkg/registry/rest"
|
"k8s.io/apiserver/pkg/registry/rest"
|
||||||
genericapiserver "k8s.io/apiserver/pkg/server"
|
genericapiserver "k8s.io/apiserver/pkg/server"
|
||||||
serverstorage "k8s.io/apiserver/pkg/server/storage"
|
serverstorage "k8s.io/apiserver/pkg/server/storage"
|
||||||
"k8s.io/kubernetes/pkg/api/legacyscheme"
|
"k8s.io/kubernetes/pkg/api/legacyscheme"
|
||||||
"k8s.io/kubernetes/pkg/apis/networking"
|
"k8s.io/kubernetes/pkg/apis/networking"
|
||||||
|
clustercidrstore "k8s.io/kubernetes/pkg/registry/networking/clustercidr/storage"
|
||||||
ingressstore "k8s.io/kubernetes/pkg/registry/networking/ingress/storage"
|
ingressstore "k8s.io/kubernetes/pkg/registry/networking/ingress/storage"
|
||||||
ingressclassstore "k8s.io/kubernetes/pkg/registry/networking/ingressclass/storage"
|
ingressclassstore "k8s.io/kubernetes/pkg/registry/networking/ingressclass/storage"
|
||||||
networkpolicystore "k8s.io/kubernetes/pkg/registry/networking/networkpolicy/storage"
|
networkpolicystore "k8s.io/kubernetes/pkg/registry/networking/networkpolicy/storage"
|
||||||
@ -36,6 +38,12 @@ func (p RESTStorageProvider) NewRESTStorage(apiResourceConfigSource serverstorag
|
|||||||
// If you add a version here, be sure to add an entry in `k8s.io/kubernetes/cmd/kube-apiserver/app/aggregator.go with specific priorities.
|
// If you add a version here, be sure to add an entry in `k8s.io/kubernetes/cmd/kube-apiserver/app/aggregator.go with specific priorities.
|
||||||
// TODO refactor the plumbing to provide the information in the APIGroupInfo
|
// TODO refactor the plumbing to provide the information in the APIGroupInfo
|
||||||
|
|
||||||
|
if storageMap, err := p.v1alpha1Storage(apiResourceConfigSource, restOptionsGetter); err != nil {
|
||||||
|
return genericapiserver.APIGroupInfo{}, err
|
||||||
|
} else if len(storageMap) > 0 {
|
||||||
|
apiGroupInfo.VersionedResourcesStorageMap[networkingapiv1alpha1.SchemeGroupVersion.Version] = storageMap
|
||||||
|
}
|
||||||
|
|
||||||
if storageMap, err := p.v1Storage(apiResourceConfigSource, restOptionsGetter); err != nil {
|
if storageMap, err := p.v1Storage(apiResourceConfigSource, restOptionsGetter); err != nil {
|
||||||
return genericapiserver.APIGroupInfo{}, err
|
return genericapiserver.APIGroupInfo{}, err
|
||||||
} else if len(storageMap) > 0 {
|
} else if len(storageMap) > 0 {
|
||||||
@ -80,6 +88,20 @@ func (p RESTStorageProvider) v1Storage(apiResourceConfigSource serverstorage.API
|
|||||||
return storage, nil
|
return storage, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (p RESTStorageProvider) v1alpha1Storage(apiResourceConfigSource serverstorage.APIResourceConfigSource, restOptionsGetter generic.RESTOptionsGetter) (map[string]rest.Storage, error) {
|
||||||
|
storage := map[string]rest.Storage{}
|
||||||
|
// clustercidrs
|
||||||
|
if resource := "clustercidrs"; apiResourceConfigSource.ResourceEnabled(networkingapiv1alpha1.SchemeGroupVersion.WithResource(resource)) {
|
||||||
|
clusterCIDRCStorage, err := clustercidrstore.NewREST(restOptionsGetter)
|
||||||
|
if err != nil {
|
||||||
|
return storage, err
|
||||||
|
}
|
||||||
|
storage[resource] = clusterCIDRCStorage
|
||||||
|
}
|
||||||
|
|
||||||
|
return storage, nil
|
||||||
|
}
|
||||||
|
|
||||||
func (p RESTStorageProvider) GroupName() string {
|
func (p RESTStorageProvider) GroupName() string {
|
||||||
return networking.GroupName
|
return networking.GroupName
|
||||||
}
|
}
|
||||||
|
@ -250,6 +250,7 @@ func buildControllerRoles() ([]rbacv1.ClusterRole, []rbacv1.ClusterRoleBinding)
|
|||||||
// used for pod deletion
|
// used for pod deletion
|
||||||
rbacv1helpers.NewRule("update").Groups(legacyGroup).Resources("pods/status").RuleOrDie(),
|
rbacv1helpers.NewRule("update").Groups(legacyGroup).Resources("pods/status").RuleOrDie(),
|
||||||
rbacv1helpers.NewRule("list", "delete").Groups(legacyGroup).Resources("pods").RuleOrDie(),
|
rbacv1helpers.NewRule("list", "delete").Groups(legacyGroup).Resources("pods").RuleOrDie(),
|
||||||
|
rbacv1helpers.NewRule("get", "list", "create", "update").Groups(networkingGroup).Resources("clustercidrs").RuleOrDie(),
|
||||||
eventsRule(),
|
eventsRule(),
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
@ -907,6 +907,15 @@ items:
|
|||||||
verbs:
|
verbs:
|
||||||
- delete
|
- delete
|
||||||
- list
|
- list
|
||||||
|
- apiGroups:
|
||||||
|
- networking.k8s.io
|
||||||
|
resources:
|
||||||
|
- clustercidrs
|
||||||
|
verbs:
|
||||||
|
- create
|
||||||
|
- get
|
||||||
|
- list
|
||||||
|
- update
|
||||||
- apiGroups:
|
- apiGroups:
|
||||||
- ""
|
- ""
|
||||||
- events.k8s.io
|
- events.k8s.io
|
||||||
|
23
staging/src/k8s.io/api/networking/v1alpha1/doc.go
Normal file
23
staging/src/k8s.io/api/networking/v1alpha1/doc.go
Normal file
@ -0,0 +1,23 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2022 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
// +k8s:deepcopy-gen=package
|
||||||
|
// +k8s:protobuf-gen=package
|
||||||
|
// +k8s:openapi-gen=true
|
||||||
|
// +k8s:prerelease-lifecycle-gen=true
|
||||||
|
// +groupName=networking.k8s.io
|
||||||
|
|
||||||
|
package v1alpha1 // import "k8s.io/api/networking/v1alpha1"
|
913
staging/src/k8s.io/api/networking/v1alpha1/generated.pb.go
generated
Normal file
913
staging/src/k8s.io/api/networking/v1alpha1/generated.pb.go
generated
Normal file
@ -0,0 +1,913 @@
|
|||||||
|
/*
|
||||||
|
Copyright The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
// Code generated by protoc-gen-gogo. DO NOT EDIT.
|
||||||
|
// source: k8s.io/kubernetes/vendor/k8s.io/api/networking/v1alpha1/generated.proto
|
||||||
|
|
||||||
|
package v1alpha1
|
||||||
|
|
||||||
|
import (
|
||||||
|
fmt "fmt"
|
||||||
|
|
||||||
|
io "io"
|
||||||
|
|
||||||
|
proto "github.com/gogo/protobuf/proto"
|
||||||
|
v11 "k8s.io/api/core/v1"
|
||||||
|
|
||||||
|
math "math"
|
||||||
|
math_bits "math/bits"
|
||||||
|
reflect "reflect"
|
||||||
|
strings "strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Reference imports to suppress errors if they are not otherwise used.
|
||||||
|
var _ = proto.Marshal
|
||||||
|
var _ = fmt.Errorf
|
||||||
|
var _ = math.Inf
|
||||||
|
|
||||||
|
// This is a compile-time assertion to ensure that this generated file
|
||||||
|
// is compatible with the proto package it is being compiled against.
|
||||||
|
// A compilation error at this line likely means your copy of the
|
||||||
|
// proto package needs to be updated.
|
||||||
|
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
|
||||||
|
|
||||||
|
func (m *ClusterCIDR) Reset() { *m = ClusterCIDR{} }
|
||||||
|
func (*ClusterCIDR) ProtoMessage() {}
|
||||||
|
func (*ClusterCIDR) Descriptor() ([]byte, []int) {
|
||||||
|
return fileDescriptor_c1b7ac8d7d97acec, []int{0}
|
||||||
|
}
|
||||||
|
func (m *ClusterCIDR) XXX_Unmarshal(b []byte) error {
|
||||||
|
return m.Unmarshal(b)
|
||||||
|
}
|
||||||
|
func (m *ClusterCIDR) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||||
|
b = b[:cap(b)]
|
||||||
|
n, err := m.MarshalToSizedBuffer(b)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return b[:n], nil
|
||||||
|
}
|
||||||
|
func (m *ClusterCIDR) XXX_Merge(src proto.Message) {
|
||||||
|
xxx_messageInfo_ClusterCIDR.Merge(m, src)
|
||||||
|
}
|
||||||
|
func (m *ClusterCIDR) XXX_Size() int {
|
||||||
|
return m.Size()
|
||||||
|
}
|
||||||
|
func (m *ClusterCIDR) XXX_DiscardUnknown() {
|
||||||
|
xxx_messageInfo_ClusterCIDR.DiscardUnknown(m)
|
||||||
|
}
|
||||||
|
|
||||||
|
var xxx_messageInfo_ClusterCIDR proto.InternalMessageInfo
|
||||||
|
|
||||||
|
func (m *ClusterCIDRList) Reset() { *m = ClusterCIDRList{} }
|
||||||
|
func (*ClusterCIDRList) ProtoMessage() {}
|
||||||
|
func (*ClusterCIDRList) Descriptor() ([]byte, []int) {
|
||||||
|
return fileDescriptor_c1b7ac8d7d97acec, []int{1}
|
||||||
|
}
|
||||||
|
func (m *ClusterCIDRList) XXX_Unmarshal(b []byte) error {
|
||||||
|
return m.Unmarshal(b)
|
||||||
|
}
|
||||||
|
func (m *ClusterCIDRList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||||
|
b = b[:cap(b)]
|
||||||
|
n, err := m.MarshalToSizedBuffer(b)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return b[:n], nil
|
||||||
|
}
|
||||||
|
func (m *ClusterCIDRList) XXX_Merge(src proto.Message) {
|
||||||
|
xxx_messageInfo_ClusterCIDRList.Merge(m, src)
|
||||||
|
}
|
||||||
|
func (m *ClusterCIDRList) XXX_Size() int {
|
||||||
|
return m.Size()
|
||||||
|
}
|
||||||
|
func (m *ClusterCIDRList) XXX_DiscardUnknown() {
|
||||||
|
xxx_messageInfo_ClusterCIDRList.DiscardUnknown(m)
|
||||||
|
}
|
||||||
|
|
||||||
|
var xxx_messageInfo_ClusterCIDRList proto.InternalMessageInfo
|
||||||
|
|
||||||
|
func (m *ClusterCIDRSpec) Reset() { *m = ClusterCIDRSpec{} }
|
||||||
|
func (*ClusterCIDRSpec) ProtoMessage() {}
|
||||||
|
func (*ClusterCIDRSpec) Descriptor() ([]byte, []int) {
|
||||||
|
return fileDescriptor_c1b7ac8d7d97acec, []int{2}
|
||||||
|
}
|
||||||
|
func (m *ClusterCIDRSpec) XXX_Unmarshal(b []byte) error {
|
||||||
|
return m.Unmarshal(b)
|
||||||
|
}
|
||||||
|
func (m *ClusterCIDRSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||||
|
b = b[:cap(b)]
|
||||||
|
n, err := m.MarshalToSizedBuffer(b)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return b[:n], nil
|
||||||
|
}
|
||||||
|
func (m *ClusterCIDRSpec) XXX_Merge(src proto.Message) {
|
||||||
|
xxx_messageInfo_ClusterCIDRSpec.Merge(m, src)
|
||||||
|
}
|
||||||
|
func (m *ClusterCIDRSpec) XXX_Size() int {
|
||||||
|
return m.Size()
|
||||||
|
}
|
||||||
|
func (m *ClusterCIDRSpec) XXX_DiscardUnknown() {
|
||||||
|
xxx_messageInfo_ClusterCIDRSpec.DiscardUnknown(m)
|
||||||
|
}
|
||||||
|
|
||||||
|
var xxx_messageInfo_ClusterCIDRSpec proto.InternalMessageInfo
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
proto.RegisterType((*ClusterCIDR)(nil), "k8s.io.api.networking.v1alpha1.ClusterCIDR")
|
||||||
|
proto.RegisterType((*ClusterCIDRList)(nil), "k8s.io.api.networking.v1alpha1.ClusterCIDRList")
|
||||||
|
proto.RegisterType((*ClusterCIDRSpec)(nil), "k8s.io.api.networking.v1alpha1.ClusterCIDRSpec")
|
||||||
|
}
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/networking/v1alpha1/generated.proto", fileDescriptor_c1b7ac8d7d97acec)
|
||||||
|
}
|
||||||
|
|
||||||
|
var fileDescriptor_c1b7ac8d7d97acec = []byte{
|
||||||
|
// 506 bytes of a gzipped FileDescriptorProto
|
||||||
|
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x93, 0x4f, 0x8f, 0xd2, 0x40,
|
||||||
|
0x18, 0xc6, 0xe9, 0x2e, 0x24, 0x6b, 0xc1, 0xb0, 0xe9, 0x45, 0xc2, 0x61, 0x20, 0x9c, 0x48, 0x8c,
|
||||||
|
0x33, 0xb2, 0x21, 0xc4, 0xab, 0xdd, 0x4d, 0x94, 0xc4, 0x3f, 0xd8, 0x4d, 0x3c, 0x18, 0x0f, 0x0e,
|
||||||
|
0xe5, 0xb5, 0x8c, 0xd0, 0xce, 0x64, 0x66, 0xa8, 0xf1, 0xe6, 0x47, 0xf0, 0x2b, 0xe9, 0x89, 0xe3,
|
||||||
|
0x1e, 0xf7, 0x44, 0xa4, 0x7e, 0x01, 0x3f, 0x82, 0x99, 0xa1, 0xbb, 0x94, 0x45, 0x57, 0xbd, 0x75,
|
||||||
|
0xde, 0xf9, 0x3d, 0xcf, 0xfb, 0x3e, 0x7d, 0x5b, 0xf7, 0xc9, 0xec, 0x91, 0xc2, 0x8c, 0x93, 0xd9,
|
||||||
|
0x62, 0x0c, 0x32, 0x01, 0x0d, 0x8a, 0xa4, 0x90, 0x4c, 0xb8, 0x24, 0xf9, 0x05, 0x15, 0x8c, 0x24,
|
||||||
|
0xa0, 0x3f, 0x72, 0x39, 0x63, 0x49, 0x44, 0xd2, 0x1e, 0x9d, 0x8b, 0x29, 0xed, 0x91, 0x08, 0x12,
|
||||||
|
0x90, 0x54, 0xc3, 0x04, 0x0b, 0xc9, 0x35, 0xf7, 0xd0, 0x86, 0xc7, 0x54, 0x30, 0xbc, 0xe5, 0xf1,
|
||||||
|
0x15, 0xdf, 0x7c, 0x10, 0x31, 0x3d, 0x5d, 0x8c, 0x71, 0xc8, 0x63, 0x12, 0xf1, 0x88, 0x13, 0x2b,
|
||||||
|
0x1b, 0x2f, 0xde, 0xdb, 0x93, 0x3d, 0xd8, 0xa7, 0x8d, 0x5d, 0xb3, 0x53, 0x68, 0x1f, 0x72, 0x09,
|
||||||
|
0x24, 0xdd, 0x6b, 0xd9, 0xec, 0x6f, 0x99, 0x98, 0x86, 0x53, 0x96, 0x80, 0xfc, 0x44, 0xc4, 0x2c,
|
||||||
|
0x32, 0x05, 0x45, 0x62, 0xd0, 0xf4, 0x77, 0x2a, 0xf2, 0x27, 0x95, 0x5c, 0x24, 0x9a, 0xc5, 0xb0,
|
||||||
|
0x27, 0x18, 0xfc, 0x4d, 0xa0, 0xc2, 0x29, 0xc4, 0xf4, 0xa6, 0xae, 0xf3, 0xcd, 0x71, 0xab, 0xa7,
|
||||||
|
0xf3, 0x85, 0xd2, 0x20, 0x4f, 0x87, 0x67, 0x81, 0xf7, 0xce, 0x3d, 0x32, 0x33, 0x4d, 0xa8, 0xa6,
|
||||||
|
0x0d, 0xa7, 0xed, 0x74, 0xab, 0x27, 0x0f, 0xf1, 0xf6, 0xa5, 0x5d, 0x5b, 0x63, 0x31, 0x8b, 0x4c,
|
||||||
|
0x41, 0x61, 0x43, 0xe3, 0xb4, 0x87, 0x5f, 0x8e, 0x3f, 0x40, 0xa8, 0x9f, 0x83, 0xa6, 0xbe, 0xb7,
|
||||||
|
0x5c, 0xb5, 0x4a, 0xd9, 0xaa, 0xe5, 0x6e, 0x6b, 0xc1, 0xb5, 0xab, 0xf7, 0xca, 0x2d, 0x2b, 0x01,
|
||||||
|
0x61, 0xe3, 0xc0, 0xba, 0x13, 0x7c, 0xfb, 0x4a, 0x70, 0x61, 0xb8, 0x73, 0x01, 0xa1, 0x5f, 0xcb,
|
||||||
|
0xcd, 0xcb, 0xe6, 0x14, 0x58, 0xab, 0xce, 0x57, 0xc7, 0xad, 0x17, 0xb8, 0x67, 0x4c, 0x69, 0xef,
|
||||||
|
0xed, 0x5e, 0x10, 0xfc, 0x6f, 0x41, 0x8c, 0xda, 0xc6, 0x38, 0xce, 0x3b, 0x1d, 0x5d, 0x55, 0x0a,
|
||||||
|
0x21, 0x46, 0x6e, 0x85, 0x69, 0x88, 0x55, 0xe3, 0xa0, 0x7d, 0xd8, 0xad, 0x9e, 0xdc, 0xff, 0x8f,
|
||||||
|
0x14, 0xfe, 0xdd, 0xdc, 0xb7, 0x32, 0x34, 0x0e, 0xc1, 0xc6, 0xa8, 0xf3, 0x73, 0x37, 0x83, 0x49,
|
||||||
|
0xe7, 0xbd, 0x76, 0x6b, 0x09, 0x9f, 0xc0, 0x39, 0xcc, 0x21, 0xd4, 0x5c, 0xe6, 0x39, 0xda, 0xc5,
|
||||||
|
0x66, 0xe6, 0xb3, 0x33, 0x53, 0xbf, 0x28, 0x70, 0xfe, 0x71, 0xb6, 0x6a, 0xd5, 0x8a, 0x95, 0x60,
|
||||||
|
0xc7, 0xc7, 0x7b, 0xec, 0xd6, 0x05, 0x48, 0x03, 0x3c, 0xe5, 0x4a, 0xfb, 0x4c, 0x2b, 0xbb, 0x8d,
|
||||||
|
0x8a, 0x7f, 0x2f, 0x1f, 0xad, 0x3e, 0xda, 0xbd, 0x0e, 0x6e, 0xf2, 0x5e, 0xdb, 0x2d, 0x33, 0x91,
|
||||||
|
0xf6, 0x1b, 0x87, 0x6d, 0xa7, 0x7b, 0x67, 0xbb, 0x94, 0xe1, 0x28, 0xed, 0x07, 0xf6, 0x26, 0x27,
|
||||||
|
0x06, 0x8d, 0xf2, 0x1e, 0x31, 0xb0, 0xc4, 0xc0, 0x3f, 0x5b, 0xae, 0x51, 0xe9, 0x62, 0x8d, 0x4a,
|
||||||
|
0x97, 0x6b, 0x54, 0xfa, 0x9c, 0x21, 0x67, 0x99, 0x21, 0xe7, 0x22, 0x43, 0xce, 0x65, 0x86, 0x9c,
|
||||||
|
0xef, 0x19, 0x72, 0xbe, 0xfc, 0x40, 0xa5, 0x37, 0xe8, 0xf6, 0x7f, 0xfc, 0x57, 0x00, 0x00, 0x00,
|
||||||
|
0xff, 0xff, 0xdf, 0x1d, 0xe9, 0x86, 0x1d, 0x04, 0x00, 0x00,
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *ClusterCIDR) Marshal() (dAtA []byte, err error) {
|
||||||
|
size := m.Size()
|
||||||
|
dAtA = make([]byte, size)
|
||||||
|
n, err := m.MarshalToSizedBuffer(dAtA[:size])
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return dAtA[:n], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *ClusterCIDR) MarshalTo(dAtA []byte) (int, error) {
|
||||||
|
size := m.Size()
|
||||||
|
return m.MarshalToSizedBuffer(dAtA[:size])
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *ClusterCIDR) MarshalToSizedBuffer(dAtA []byte) (int, error) {
|
||||||
|
i := len(dAtA)
|
||||||
|
_ = i
|
||||||
|
var l int
|
||||||
|
_ = l
|
||||||
|
{
|
||||||
|
size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
i -= size
|
||||||
|
i = encodeVarintGenerated(dAtA, i, uint64(size))
|
||||||
|
}
|
||||||
|
i--
|
||||||
|
dAtA[i] = 0x12
|
||||||
|
{
|
||||||
|
size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
i -= size
|
||||||
|
i = encodeVarintGenerated(dAtA, i, uint64(size))
|
||||||
|
}
|
||||||
|
i--
|
||||||
|
dAtA[i] = 0xa
|
||||||
|
return len(dAtA) - i, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *ClusterCIDRList) Marshal() (dAtA []byte, err error) {
|
||||||
|
size := m.Size()
|
||||||
|
dAtA = make([]byte, size)
|
||||||
|
n, err := m.MarshalToSizedBuffer(dAtA[:size])
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return dAtA[:n], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *ClusterCIDRList) MarshalTo(dAtA []byte) (int, error) {
|
||||||
|
size := m.Size()
|
||||||
|
return m.MarshalToSizedBuffer(dAtA[:size])
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *ClusterCIDRList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
|
||||||
|
i := len(dAtA)
|
||||||
|
_ = i
|
||||||
|
var l int
|
||||||
|
_ = l
|
||||||
|
if len(m.Items) > 0 {
|
||||||
|
for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
|
||||||
|
{
|
||||||
|
size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
i -= size
|
||||||
|
i = encodeVarintGenerated(dAtA, i, uint64(size))
|
||||||
|
}
|
||||||
|
i--
|
||||||
|
dAtA[i] = 0x12
|
||||||
|
}
|
||||||
|
}
|
||||||
|
{
|
||||||
|
size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
i -= size
|
||||||
|
i = encodeVarintGenerated(dAtA, i, uint64(size))
|
||||||
|
}
|
||||||
|
i--
|
||||||
|
dAtA[i] = 0xa
|
||||||
|
return len(dAtA) - i, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *ClusterCIDRSpec) Marshal() (dAtA []byte, err error) {
|
||||||
|
size := m.Size()
|
||||||
|
dAtA = make([]byte, size)
|
||||||
|
n, err := m.MarshalToSizedBuffer(dAtA[:size])
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return dAtA[:n], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *ClusterCIDRSpec) MarshalTo(dAtA []byte) (int, error) {
|
||||||
|
size := m.Size()
|
||||||
|
return m.MarshalToSizedBuffer(dAtA[:size])
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *ClusterCIDRSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
|
||||||
|
i := len(dAtA)
|
||||||
|
_ = i
|
||||||
|
var l int
|
||||||
|
_ = l
|
||||||
|
i -= len(m.IPv6)
|
||||||
|
copy(dAtA[i:], m.IPv6)
|
||||||
|
i = encodeVarintGenerated(dAtA, i, uint64(len(m.IPv6)))
|
||||||
|
i--
|
||||||
|
dAtA[i] = 0x22
|
||||||
|
i -= len(m.IPv4)
|
||||||
|
copy(dAtA[i:], m.IPv4)
|
||||||
|
i = encodeVarintGenerated(dAtA, i, uint64(len(m.IPv4)))
|
||||||
|
i--
|
||||||
|
dAtA[i] = 0x1a
|
||||||
|
i = encodeVarintGenerated(dAtA, i, uint64(m.PerNodeHostBits))
|
||||||
|
i--
|
||||||
|
dAtA[i] = 0x10
|
||||||
|
if m.NodeSelector != nil {
|
||||||
|
{
|
||||||
|
size, err := m.NodeSelector.MarshalToSizedBuffer(dAtA[:i])
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
i -= size
|
||||||
|
i = encodeVarintGenerated(dAtA, i, uint64(size))
|
||||||
|
}
|
||||||
|
i--
|
||||||
|
dAtA[i] = 0xa
|
||||||
|
}
|
||||||
|
return len(dAtA) - i, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
|
||||||
|
offset -= sovGenerated(v)
|
||||||
|
base := offset
|
||||||
|
for v >= 1<<7 {
|
||||||
|
dAtA[offset] = uint8(v&0x7f | 0x80)
|
||||||
|
v >>= 7
|
||||||
|
offset++
|
||||||
|
}
|
||||||
|
dAtA[offset] = uint8(v)
|
||||||
|
return base
|
||||||
|
}
|
||||||
|
func (m *ClusterCIDR) Size() (n int) {
|
||||||
|
if m == nil {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
var l int
|
||||||
|
_ = l
|
||||||
|
l = m.ObjectMeta.Size()
|
||||||
|
n += 1 + l + sovGenerated(uint64(l))
|
||||||
|
l = m.Spec.Size()
|
||||||
|
n += 1 + l + sovGenerated(uint64(l))
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *ClusterCIDRList) Size() (n int) {
|
||||||
|
if m == nil {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
var l int
|
||||||
|
_ = l
|
||||||
|
l = m.ListMeta.Size()
|
||||||
|
n += 1 + l + sovGenerated(uint64(l))
|
||||||
|
if len(m.Items) > 0 {
|
||||||
|
for _, e := range m.Items {
|
||||||
|
l = e.Size()
|
||||||
|
n += 1 + l + sovGenerated(uint64(l))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *ClusterCIDRSpec) Size() (n int) {
|
||||||
|
if m == nil {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
var l int
|
||||||
|
_ = l
|
||||||
|
if m.NodeSelector != nil {
|
||||||
|
l = m.NodeSelector.Size()
|
||||||
|
n += 1 + l + sovGenerated(uint64(l))
|
||||||
|
}
|
||||||
|
n += 1 + sovGenerated(uint64(m.PerNodeHostBits))
|
||||||
|
l = len(m.IPv4)
|
||||||
|
n += 1 + l + sovGenerated(uint64(l))
|
||||||
|
l = len(m.IPv6)
|
||||||
|
n += 1 + l + sovGenerated(uint64(l))
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
|
||||||
|
func sovGenerated(x uint64) (n int) {
|
||||||
|
return (math_bits.Len64(x|1) + 6) / 7
|
||||||
|
}
|
||||||
|
func sozGenerated(x uint64) (n int) {
|
||||||
|
return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
|
||||||
|
}
|
||||||
|
func (this *ClusterCIDR) String() string {
|
||||||
|
if this == nil {
|
||||||
|
return "nil"
|
||||||
|
}
|
||||||
|
s := strings.Join([]string{`&ClusterCIDR{`,
|
||||||
|
`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
|
||||||
|
`Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ClusterCIDRSpec", "ClusterCIDRSpec", 1), `&`, ``, 1) + `,`,
|
||||||
|
`}`,
|
||||||
|
}, "")
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
func (this *ClusterCIDRList) String() string {
|
||||||
|
if this == nil {
|
||||||
|
return "nil"
|
||||||
|
}
|
||||||
|
repeatedStringForItems := "[]ClusterCIDR{"
|
||||||
|
for _, f := range this.Items {
|
||||||
|
repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ClusterCIDR", "ClusterCIDR", 1), `&`, ``, 1) + ","
|
||||||
|
}
|
||||||
|
repeatedStringForItems += "}"
|
||||||
|
s := strings.Join([]string{`&ClusterCIDRList{`,
|
||||||
|
`ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
|
||||||
|
`Items:` + repeatedStringForItems + `,`,
|
||||||
|
`}`,
|
||||||
|
}, "")
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
func (this *ClusterCIDRSpec) String() string {
|
||||||
|
if this == nil {
|
||||||
|
return "nil"
|
||||||
|
}
|
||||||
|
s := strings.Join([]string{`&ClusterCIDRSpec{`,
|
||||||
|
`NodeSelector:` + strings.Replace(fmt.Sprintf("%v", this.NodeSelector), "NodeSelector", "v11.NodeSelector", 1) + `,`,
|
||||||
|
`PerNodeHostBits:` + fmt.Sprintf("%v", this.PerNodeHostBits) + `,`,
|
||||||
|
`IPv4:` + fmt.Sprintf("%v", this.IPv4) + `,`,
|
||||||
|
`IPv6:` + fmt.Sprintf("%v", this.IPv6) + `,`,
|
||||||
|
`}`,
|
||||||
|
}, "")
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
func valueToStringGenerated(v interface{}) string {
|
||||||
|
rv := reflect.ValueOf(v)
|
||||||
|
if rv.IsNil() {
|
||||||
|
return "nil"
|
||||||
|
}
|
||||||
|
pv := reflect.Indirect(rv).Interface()
|
||||||
|
return fmt.Sprintf("*%v", pv)
|
||||||
|
}
|
||||||
|
func (m *ClusterCIDR) Unmarshal(dAtA []byte) error {
|
||||||
|
l := len(dAtA)
|
||||||
|
iNdEx := 0
|
||||||
|
for iNdEx < l {
|
||||||
|
preIndex := iNdEx
|
||||||
|
var wire uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowGenerated
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
wire |= uint64(b&0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
fieldNum := int32(wire >> 3)
|
||||||
|
wireType := int(wire & 0x7)
|
||||||
|
if wireType == 4 {
|
||||||
|
return fmt.Errorf("proto: ClusterCIDR: wiretype end group for non-group")
|
||||||
|
}
|
||||||
|
if fieldNum <= 0 {
|
||||||
|
return fmt.Errorf("proto: ClusterCIDR: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||||
|
}
|
||||||
|
switch fieldNum {
|
||||||
|
case 1:
|
||||||
|
if wireType != 2 {
|
||||||
|
return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
|
||||||
|
}
|
||||||
|
var msglen int
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowGenerated
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
msglen |= int(b&0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if msglen < 0 {
|
||||||
|
return ErrInvalidLengthGenerated
|
||||||
|
}
|
||||||
|
postIndex := iNdEx + msglen
|
||||||
|
if postIndex < 0 {
|
||||||
|
return ErrInvalidLengthGenerated
|
||||||
|
}
|
||||||
|
if postIndex > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
iNdEx = postIndex
|
||||||
|
case 2:
|
||||||
|
if wireType != 2 {
|
||||||
|
return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
|
||||||
|
}
|
||||||
|
var msglen int
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowGenerated
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
msglen |= int(b&0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if msglen < 0 {
|
||||||
|
return ErrInvalidLengthGenerated
|
||||||
|
}
|
||||||
|
postIndex := iNdEx + msglen
|
||||||
|
if postIndex < 0 {
|
||||||
|
return ErrInvalidLengthGenerated
|
||||||
|
}
|
||||||
|
if postIndex > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
iNdEx = postIndex
|
||||||
|
default:
|
||||||
|
iNdEx = preIndex
|
||||||
|
skippy, err := skipGenerated(dAtA[iNdEx:])
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if (skippy < 0) || (iNdEx+skippy) < 0 {
|
||||||
|
return ErrInvalidLengthGenerated
|
||||||
|
}
|
||||||
|
if (iNdEx + skippy) > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
iNdEx += skippy
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if iNdEx > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
func (m *ClusterCIDRList) Unmarshal(dAtA []byte) error {
|
||||||
|
l := len(dAtA)
|
||||||
|
iNdEx := 0
|
||||||
|
for iNdEx < l {
|
||||||
|
preIndex := iNdEx
|
||||||
|
var wire uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowGenerated
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
wire |= uint64(b&0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
fieldNum := int32(wire >> 3)
|
||||||
|
wireType := int(wire & 0x7)
|
||||||
|
if wireType == 4 {
|
||||||
|
return fmt.Errorf("proto: ClusterCIDRList: wiretype end group for non-group")
|
||||||
|
}
|
||||||
|
if fieldNum <= 0 {
|
||||||
|
return fmt.Errorf("proto: ClusterCIDRList: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||||
|
}
|
||||||
|
switch fieldNum {
|
||||||
|
case 1:
|
||||||
|
if wireType != 2 {
|
||||||
|
return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
|
||||||
|
}
|
||||||
|
var msglen int
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowGenerated
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
msglen |= int(b&0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if msglen < 0 {
|
||||||
|
return ErrInvalidLengthGenerated
|
||||||
|
}
|
||||||
|
postIndex := iNdEx + msglen
|
||||||
|
if postIndex < 0 {
|
||||||
|
return ErrInvalidLengthGenerated
|
||||||
|
}
|
||||||
|
if postIndex > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
iNdEx = postIndex
|
||||||
|
case 2:
|
||||||
|
if wireType != 2 {
|
||||||
|
return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
|
||||||
|
}
|
||||||
|
var msglen int
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowGenerated
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
msglen |= int(b&0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if msglen < 0 {
|
||||||
|
return ErrInvalidLengthGenerated
|
||||||
|
}
|
||||||
|
postIndex := iNdEx + msglen
|
||||||
|
if postIndex < 0 {
|
||||||
|
return ErrInvalidLengthGenerated
|
||||||
|
}
|
||||||
|
if postIndex > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
m.Items = append(m.Items, ClusterCIDR{})
|
||||||
|
if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
iNdEx = postIndex
|
||||||
|
default:
|
||||||
|
iNdEx = preIndex
|
||||||
|
skippy, err := skipGenerated(dAtA[iNdEx:])
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if (skippy < 0) || (iNdEx+skippy) < 0 {
|
||||||
|
return ErrInvalidLengthGenerated
|
||||||
|
}
|
||||||
|
if (iNdEx + skippy) > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
iNdEx += skippy
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if iNdEx > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
func (m *ClusterCIDRSpec) Unmarshal(dAtA []byte) error {
|
||||||
|
l := len(dAtA)
|
||||||
|
iNdEx := 0
|
||||||
|
for iNdEx < l {
|
||||||
|
preIndex := iNdEx
|
||||||
|
var wire uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowGenerated
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
wire |= uint64(b&0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
fieldNum := int32(wire >> 3)
|
||||||
|
wireType := int(wire & 0x7)
|
||||||
|
if wireType == 4 {
|
||||||
|
return fmt.Errorf("proto: ClusterCIDRSpec: wiretype end group for non-group")
|
||||||
|
}
|
||||||
|
if fieldNum <= 0 {
|
||||||
|
return fmt.Errorf("proto: ClusterCIDRSpec: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||||
|
}
|
||||||
|
switch fieldNum {
|
||||||
|
case 1:
|
||||||
|
if wireType != 2 {
|
||||||
|
return fmt.Errorf("proto: wrong wireType = %d for field NodeSelector", wireType)
|
||||||
|
}
|
||||||
|
var msglen int
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowGenerated
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
msglen |= int(b&0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if msglen < 0 {
|
||||||
|
return ErrInvalidLengthGenerated
|
||||||
|
}
|
||||||
|
postIndex := iNdEx + msglen
|
||||||
|
if postIndex < 0 {
|
||||||
|
return ErrInvalidLengthGenerated
|
||||||
|
}
|
||||||
|
if postIndex > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
if m.NodeSelector == nil {
|
||||||
|
m.NodeSelector = &v11.NodeSelector{}
|
||||||
|
}
|
||||||
|
if err := m.NodeSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
iNdEx = postIndex
|
||||||
|
case 2:
|
||||||
|
if wireType != 0 {
|
||||||
|
return fmt.Errorf("proto: wrong wireType = %d for field PerNodeHostBits", wireType)
|
||||||
|
}
|
||||||
|
m.PerNodeHostBits = 0
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowGenerated
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
m.PerNodeHostBits |= int32(b&0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
case 3:
|
||||||
|
if wireType != 2 {
|
||||||
|
return fmt.Errorf("proto: wrong wireType = %d for field IPv4", wireType)
|
||||||
|
}
|
||||||
|
var stringLen uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowGenerated
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
stringLen |= uint64(b&0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
intStringLen := int(stringLen)
|
||||||
|
if intStringLen < 0 {
|
||||||
|
return ErrInvalidLengthGenerated
|
||||||
|
}
|
||||||
|
postIndex := iNdEx + intStringLen
|
||||||
|
if postIndex < 0 {
|
||||||
|
return ErrInvalidLengthGenerated
|
||||||
|
}
|
||||||
|
if postIndex > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
m.IPv4 = string(dAtA[iNdEx:postIndex])
|
||||||
|
iNdEx = postIndex
|
||||||
|
case 4:
|
||||||
|
if wireType != 2 {
|
||||||
|
return fmt.Errorf("proto: wrong wireType = %d for field IPv6", wireType)
|
||||||
|
}
|
||||||
|
var stringLen uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowGenerated
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
stringLen |= uint64(b&0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
intStringLen := int(stringLen)
|
||||||
|
if intStringLen < 0 {
|
||||||
|
return ErrInvalidLengthGenerated
|
||||||
|
}
|
||||||
|
postIndex := iNdEx + intStringLen
|
||||||
|
if postIndex < 0 {
|
||||||
|
return ErrInvalidLengthGenerated
|
||||||
|
}
|
||||||
|
if postIndex > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
m.IPv6 = string(dAtA[iNdEx:postIndex])
|
||||||
|
iNdEx = postIndex
|
||||||
|
default:
|
||||||
|
iNdEx = preIndex
|
||||||
|
skippy, err := skipGenerated(dAtA[iNdEx:])
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if (skippy < 0) || (iNdEx+skippy) < 0 {
|
||||||
|
return ErrInvalidLengthGenerated
|
||||||
|
}
|
||||||
|
if (iNdEx + skippy) > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
iNdEx += skippy
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if iNdEx > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
func skipGenerated(dAtA []byte) (n int, err error) {
|
||||||
|
l := len(dAtA)
|
||||||
|
iNdEx := 0
|
||||||
|
depth := 0
|
||||||
|
for iNdEx < l {
|
||||||
|
var wire uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return 0, ErrIntOverflowGenerated
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
wire |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
wireType := int(wire & 0x7)
|
||||||
|
switch wireType {
|
||||||
|
case 0:
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return 0, ErrIntOverflowGenerated
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
iNdEx++
|
||||||
|
if dAtA[iNdEx-1] < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
case 1:
|
||||||
|
iNdEx += 8
|
||||||
|
case 2:
|
||||||
|
var length int
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return 0, ErrIntOverflowGenerated
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
length |= (int(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if length < 0 {
|
||||||
|
return 0, ErrInvalidLengthGenerated
|
||||||
|
}
|
||||||
|
iNdEx += length
|
||||||
|
case 3:
|
||||||
|
depth++
|
||||||
|
case 4:
|
||||||
|
if depth == 0 {
|
||||||
|
return 0, ErrUnexpectedEndOfGroupGenerated
|
||||||
|
}
|
||||||
|
depth--
|
||||||
|
case 5:
|
||||||
|
iNdEx += 4
|
||||||
|
default:
|
||||||
|
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
|
||||||
|
}
|
||||||
|
if iNdEx < 0 {
|
||||||
|
return 0, ErrInvalidLengthGenerated
|
||||||
|
}
|
||||||
|
if depth == 0 {
|
||||||
|
return iNdEx, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling")
|
||||||
|
ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow")
|
||||||
|
ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group")
|
||||||
|
)
|
94
staging/src/k8s.io/api/networking/v1alpha1/generated.proto
Normal file
94
staging/src/k8s.io/api/networking/v1alpha1/generated.proto
Normal file
@ -0,0 +1,94 @@
|
|||||||
|
/*
|
||||||
|
Copyright The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
|
||||||
|
// This file was autogenerated by go-to-protobuf. Do not edit it manually!
|
||||||
|
|
||||||
|
syntax = "proto2";
|
||||||
|
|
||||||
|
package k8s.io.api.networking.v1alpha1;
|
||||||
|
|
||||||
|
import "k8s.io/api/core/v1/generated.proto";
|
||||||
|
import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
|
||||||
|
import "k8s.io/apimachinery/pkg/runtime/generated.proto";
|
||||||
|
import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
|
||||||
|
|
||||||
|
// Package-wide variables from generator "generated".
|
||||||
|
option go_package = "k8s.io/api/networking/v1alpha1";
|
||||||
|
|
||||||
|
// ClusterCIDR represents a single configuration for per-Node Pod CIDR
|
||||||
|
// allocations when the MultiCIDRRangeAllocator is enabled (see the config for
|
||||||
|
// kube-controller-manager). A cluster may have any number of ClusterCIDR
|
||||||
|
// resources, all of which will be considered when allocating a CIDR for a
|
||||||
|
// Node. A ClusterCIDR is eligible to be used for a given Node when the node
|
||||||
|
// selector matches the node in question and has free CIDRs to allocate. In
|
||||||
|
// case of multiple matching ClusterCIDR resources, the allocator will attempt
|
||||||
|
// to break ties using internal heuristics, but any ClusterCIDR whose node
|
||||||
|
// selector matches the Node may be used.
|
||||||
|
message ClusterCIDR {
|
||||||
|
// Standard object's metadata.
|
||||||
|
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
|
||||||
|
// +optional
|
||||||
|
optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
|
||||||
|
|
||||||
|
// Spec is the desired state of the ClusterCIDR.
|
||||||
|
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
|
||||||
|
// +optional
|
||||||
|
optional ClusterCIDRSpec spec = 2;
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClusterCIDRList contains a list of ClusterCIDR.
|
||||||
|
message ClusterCIDRList {
|
||||||
|
// Standard object's metadata.
|
||||||
|
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
|
||||||
|
// +optional
|
||||||
|
optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
|
||||||
|
|
||||||
|
// Items is the list of ClusterCIDRs.
|
||||||
|
repeated ClusterCIDR items = 2;
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClusterCIDRSpec defines the desired state of ClusterCIDR.
|
||||||
|
message ClusterCIDRSpec {
|
||||||
|
// NodeSelector defines which nodes the config is applicable to.
|
||||||
|
// An empty or nil NodeSelector selects all nodes.
|
||||||
|
// This field is immutable.
|
||||||
|
// +optional
|
||||||
|
optional k8s.io.api.core.v1.NodeSelector nodeSelector = 1;
|
||||||
|
|
||||||
|
// PerNodeHostBits defines the number of host bits to be configured per node.
|
||||||
|
// A subnet mask determines how much of the address is used for network bits
|
||||||
|
// and host bits. For example an IPv4 address of 192.168.0.0/24, splits the
|
||||||
|
// address into 24 bits for the network portion and 8 bits for the host portion.
|
||||||
|
// To allocate 256 IPs, set this field to 8 (a /24 mask for IPv4 or a /120 for IPv6).
|
||||||
|
// Minimum value is 4 (16 IPs).
|
||||||
|
// This field is immutable.
|
||||||
|
// +required
|
||||||
|
optional int32 perNodeHostBits = 2;
|
||||||
|
|
||||||
|
// IPv4 defines an IPv4 IP block in CIDR notation(e.g. "10.0.0.0/8").
|
||||||
|
// At least one of IPv4 and IPv6 must be specified.
|
||||||
|
// This field is immutable.
|
||||||
|
// +optional
|
||||||
|
optional string ipv4 = 3;
|
||||||
|
|
||||||
|
// IPv6 defines an IPv6 IP block in CIDR notation(e.g. "fd12:3456:789a:1::/64").
|
||||||
|
// At least one of IPv4 and IPv6 must be specified.
|
||||||
|
// This field is immutable.
|
||||||
|
// +optional
|
||||||
|
optional string ipv6 = 4;
|
||||||
|
}
|
||||||
|
|
56
staging/src/k8s.io/api/networking/v1alpha1/register.go
Normal file
56
staging/src/k8s.io/api/networking/v1alpha1/register.go
Normal file
@ -0,0 +1,56 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2022 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package v1alpha1
|
||||||
|
|
||||||
|
import (
|
||||||
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
|
"k8s.io/apimachinery/pkg/runtime"
|
||||||
|
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||||
|
)
|
||||||
|
|
||||||
|
// GroupName is the group name use in this package.
|
||||||
|
const GroupName = "networking.k8s.io"
|
||||||
|
|
||||||
|
// SchemeGroupVersion is group version used to register these objects.
|
||||||
|
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"}
|
||||||
|
|
||||||
|
// Resource takes an unqualified resource and returns a Group qualified GroupResource.
|
||||||
|
func Resource(resource string) schema.GroupResource {
|
||||||
|
return SchemeGroupVersion.WithResource(resource).GroupResource()
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
// SchemeBuilder holds functions that add things to a scheme.
|
||||||
|
// TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api.
|
||||||
|
// localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
|
||||||
|
SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
|
||||||
|
localSchemeBuilder = &SchemeBuilder
|
||||||
|
|
||||||
|
// AddToScheme adds the types of this group into the given scheme.
|
||||||
|
AddToScheme = localSchemeBuilder.AddToScheme
|
||||||
|
)
|
||||||
|
|
||||||
|
// Adds the list of known types to the given scheme.
|
||||||
|
func addKnownTypes(scheme *runtime.Scheme) error {
|
||||||
|
scheme.AddKnownTypes(SchemeGroupVersion,
|
||||||
|
&ClusterCIDR{},
|
||||||
|
&ClusterCIDRList{},
|
||||||
|
)
|
||||||
|
// Add the watch version that applies.
|
||||||
|
metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
|
||||||
|
return nil
|
||||||
|
}
|
95
staging/src/k8s.io/api/networking/v1alpha1/types.go
Normal file
95
staging/src/k8s.io/api/networking/v1alpha1/types.go
Normal file
@ -0,0 +1,95 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2022 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package v1alpha1
|
||||||
|
|
||||||
|
import (
|
||||||
|
v1 "k8s.io/api/core/v1"
|
||||||
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
|
)
|
||||||
|
|
||||||
|
// +genclient
|
||||||
|
// +genclient:nonNamespaced
|
||||||
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
// +k8s:prerelease-lifecycle-gen:introduced=1.25
|
||||||
|
|
||||||
|
// ClusterCIDR represents a single configuration for per-Node Pod CIDR
|
||||||
|
// allocations when the MultiCIDRRangeAllocator is enabled (see the config for
|
||||||
|
// kube-controller-manager). A cluster may have any number of ClusterCIDR
|
||||||
|
// resources, all of which will be considered when allocating a CIDR for a
|
||||||
|
// Node. A ClusterCIDR is eligible to be used for a given Node when the node
|
||||||
|
// selector matches the node in question and has free CIDRs to allocate. In
|
||||||
|
// case of multiple matching ClusterCIDR resources, the allocator will attempt
|
||||||
|
// to break ties using internal heuristics, but any ClusterCIDR whose node
|
||||||
|
// selector matches the Node may be used.
|
||||||
|
type ClusterCIDR struct {
|
||||||
|
metav1.TypeMeta `json:",inline"`
|
||||||
|
// Standard object's metadata.
|
||||||
|
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
|
||||||
|
// +optional
|
||||||
|
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
|
||||||
|
|
||||||
|
// Spec is the desired state of the ClusterCIDR.
|
||||||
|
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
|
||||||
|
// +optional
|
||||||
|
Spec ClusterCIDRSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClusterCIDRSpec defines the desired state of ClusterCIDR.
|
||||||
|
type ClusterCIDRSpec struct {
|
||||||
|
// NodeSelector defines which nodes the config is applicable to.
|
||||||
|
// An empty or nil NodeSelector selects all nodes.
|
||||||
|
// This field is immutable.
|
||||||
|
// +optional
|
||||||
|
NodeSelector *v1.NodeSelector `json:"nodeSelector,omitempty" protobuf:"bytes,1,opt,name=nodeSelector"`
|
||||||
|
|
||||||
|
// PerNodeHostBits defines the number of host bits to be configured per node.
|
||||||
|
// A subnet mask determines how much of the address is used for network bits
|
||||||
|
// and host bits. For example an IPv4 address of 192.168.0.0/24, splits the
|
||||||
|
// address into 24 bits for the network portion and 8 bits for the host portion.
|
||||||
|
// To allocate 256 IPs, set this field to 8 (a /24 mask for IPv4 or a /120 for IPv6).
|
||||||
|
// Minimum value is 4 (16 IPs).
|
||||||
|
// This field is immutable.
|
||||||
|
// +required
|
||||||
|
PerNodeHostBits int32 `json:"perNodeHostBits" protobuf:"varint,2,opt,name=perNodeHostBits"`
|
||||||
|
|
||||||
|
// IPv4 defines an IPv4 IP block in CIDR notation(e.g. "10.0.0.0/8").
|
||||||
|
// At least one of IPv4 and IPv6 must be specified.
|
||||||
|
// This field is immutable.
|
||||||
|
// +optional
|
||||||
|
IPv4 string `json:"ipv4" protobuf:"bytes,3,opt,name=ipv4"`
|
||||||
|
|
||||||
|
// IPv6 defines an IPv6 IP block in CIDR notation(e.g. "fd12:3456:789a:1::/64").
|
||||||
|
// At least one of IPv4 and IPv6 must be specified.
|
||||||
|
// This field is immutable.
|
||||||
|
// +optional
|
||||||
|
IPv6 string `json:"ipv6" protobuf:"bytes,4,opt,name=ipv6"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
// +k8s:prerelease-lifecycle-gen:introduced=1.25
|
||||||
|
|
||||||
|
// ClusterCIDRList contains a list of ClusterCIDR.
|
||||||
|
type ClusterCIDRList struct {
|
||||||
|
metav1.TypeMeta `json:",inline"`
|
||||||
|
// Standard object's metadata.
|
||||||
|
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
|
||||||
|
// +optional
|
||||||
|
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
|
||||||
|
|
||||||
|
// Items is the list of ClusterCIDRs.
|
||||||
|
Items []ClusterCIDR `json:"items" protobuf:"bytes,2,rep,name=items"`
|
||||||
|
}
|
62
staging/src/k8s.io/api/networking/v1alpha1/types_swagger_doc_generated.go
generated
Normal file
62
staging/src/k8s.io/api/networking/v1alpha1/types_swagger_doc_generated.go
generated
Normal file
@ -0,0 +1,62 @@
|
|||||||
|
/*
|
||||||
|
Copyright The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package v1alpha1
|
||||||
|
|
||||||
|
// This file contains a collection of methods that can be used from go-restful to
|
||||||
|
// generate Swagger API documentation for its models. Please read this PR for more
|
||||||
|
// information on the implementation: https://github.com/emicklei/go-restful/pull/215
|
||||||
|
//
|
||||||
|
// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
|
||||||
|
// they are on one line! For multiple line or blocks that you want to ignore use ---.
|
||||||
|
// Any context after a --- is ignored.
|
||||||
|
//
|
||||||
|
// Those methods can be generated by using hack/update-generated-swagger-docs.sh
|
||||||
|
|
||||||
|
// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
|
||||||
|
var map_ClusterCIDR = map[string]string{
|
||||||
|
"": "ClusterCIDR represents a single configuration for per-Node Pod CIDR allocations when the MultiCIDRRangeAllocator is enabled (see the config for kube-controller-manager). A cluster may have any number of ClusterCIDR resources, all of which will be considered when allocating a CIDR for a Node. A ClusterCIDR is eligible to be used for a given Node when the node selector matches the node in question and has free CIDRs to allocate. In case of multiple matching ClusterCIDR resources, the allocator will attempt to break ties using internal heuristics, but any ClusterCIDR whose node selector matches the Node may be used.",
|
||||||
|
"metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
|
||||||
|
"spec": "Spec is the desired state of the ClusterCIDR. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status",
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ClusterCIDR) SwaggerDoc() map[string]string {
|
||||||
|
return map_ClusterCIDR
|
||||||
|
}
|
||||||
|
|
||||||
|
var map_ClusterCIDRList = map[string]string{
|
||||||
|
"": "ClusterCIDRList contains a list of ClusterCIDR.",
|
||||||
|
"metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
|
||||||
|
"items": "Items is the list of ClusterCIDRs.",
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ClusterCIDRList) SwaggerDoc() map[string]string {
|
||||||
|
return map_ClusterCIDRList
|
||||||
|
}
|
||||||
|
|
||||||
|
var map_ClusterCIDRSpec = map[string]string{
|
||||||
|
"": "ClusterCIDRSpec defines the desired state of ClusterCIDR.",
|
||||||
|
"nodeSelector": "NodeSelector defines which nodes the config is applicable to. An empty or nil NodeSelector selects all nodes. This field is immutable.",
|
||||||
|
"perNodeHostBits": "PerNodeHostBits defines the number of host bits to be configured per node. A subnet mask determines how much of the address is used for network bits and host bits. For example an IPv4 address of 192.168.0.0/24, splits the address into 24 bits for the network portion and 8 bits for the host portion. To allocate 256 IPs, set this field to 8 (a /24 mask for IPv4 or a /120 for IPv6). Minimum value is 4 (16 IPs). This field is immutable.",
|
||||||
|
"ipv4": "IPv4 defines an IPv4 IP block in CIDR notation(e.g. \"10.0.0.0/8\"). At least one of IPv4 and IPv6 must be specified. This field is immutable.",
|
||||||
|
"ipv6": "IPv6 defines an IPv6 IP block in CIDR notation(e.g. \"fd12:3456:789a:1::/64\"). At least one of IPv4 and IPv6 must be specified. This field is immutable.",
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ClusterCIDRSpec) SwaggerDoc() map[string]string {
|
||||||
|
return map_ClusterCIDRSpec
|
||||||
|
}
|
||||||
|
|
||||||
|
// AUTO-GENERATED FUNCTIONS END HERE
|
108
staging/src/k8s.io/api/networking/v1alpha1/zz_generated.deepcopy.go
generated
Normal file
108
staging/src/k8s.io/api/networking/v1alpha1/zz_generated.deepcopy.go
generated
Normal file
@ -0,0 +1,108 @@
|
|||||||
|
//go:build !ignore_autogenerated
|
||||||
|
// +build !ignore_autogenerated
|
||||||
|
|
||||||
|
/*
|
||||||
|
Copyright The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
// Code generated by deepcopy-gen. DO NOT EDIT.
|
||||||
|
|
||||||
|
package v1alpha1
|
||||||
|
|
||||||
|
import (
|
||||||
|
v1 "k8s.io/api/core/v1"
|
||||||
|
runtime "k8s.io/apimachinery/pkg/runtime"
|
||||||
|
)
|
||||||
|
|
||||||
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
|
func (in *ClusterCIDR) DeepCopyInto(out *ClusterCIDR) {
|
||||||
|
*out = *in
|
||||||
|
out.TypeMeta = in.TypeMeta
|
||||||
|
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
|
||||||
|
in.Spec.DeepCopyInto(&out.Spec)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterCIDR.
|
||||||
|
func (in *ClusterCIDR) DeepCopy() *ClusterCIDR {
|
||||||
|
if in == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
out := new(ClusterCIDR)
|
||||||
|
in.DeepCopyInto(out)
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||||
|
func (in *ClusterCIDR) DeepCopyObject() runtime.Object {
|
||||||
|
if c := in.DeepCopy(); c != nil {
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
|
func (in *ClusterCIDRList) DeepCopyInto(out *ClusterCIDRList) {
|
||||||
|
*out = *in
|
||||||
|
out.TypeMeta = in.TypeMeta
|
||||||
|
in.ListMeta.DeepCopyInto(&out.ListMeta)
|
||||||
|
if in.Items != nil {
|
||||||
|
in, out := &in.Items, &out.Items
|
||||||
|
*out = make([]ClusterCIDR, len(*in))
|
||||||
|
for i := range *in {
|
||||||
|
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterCIDRList.
|
||||||
|
func (in *ClusterCIDRList) DeepCopy() *ClusterCIDRList {
|
||||||
|
if in == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
out := new(ClusterCIDRList)
|
||||||
|
in.DeepCopyInto(out)
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||||
|
func (in *ClusterCIDRList) DeepCopyObject() runtime.Object {
|
||||||
|
if c := in.DeepCopy(); c != nil {
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
|
func (in *ClusterCIDRSpec) DeepCopyInto(out *ClusterCIDRSpec) {
|
||||||
|
*out = *in
|
||||||
|
if in.NodeSelector != nil {
|
||||||
|
in, out := &in.NodeSelector, &out.NodeSelector
|
||||||
|
*out = new(v1.NodeSelector)
|
||||||
|
(*in).DeepCopyInto(*out)
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterCIDRSpec.
|
||||||
|
func (in *ClusterCIDRSpec) DeepCopy() *ClusterCIDRSpec {
|
||||||
|
if in == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
out := new(ClusterCIDRSpec)
|
||||||
|
in.DeepCopyInto(out)
|
||||||
|
return out
|
||||||
|
}
|
58
staging/src/k8s.io/api/networking/v1alpha1/zz_generated.prerelease-lifecycle.go
generated
Normal file
58
staging/src/k8s.io/api/networking/v1alpha1/zz_generated.prerelease-lifecycle.go
generated
Normal file
@ -0,0 +1,58 @@
|
|||||||
|
//go:build !ignore_autogenerated
|
||||||
|
// +build !ignore_autogenerated
|
||||||
|
|
||||||
|
/*
|
||||||
|
Copyright The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
// Code generated by prerelease-lifecycle-gen. DO NOT EDIT.
|
||||||
|
|
||||||
|
package v1alpha1
|
||||||
|
|
||||||
|
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||||
|
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||||
|
func (in *ClusterCIDR) APILifecycleIntroduced() (major, minor int) {
|
||||||
|
return 1, 25
|
||||||
|
}
|
||||||
|
|
||||||
|
// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
|
||||||
|
// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
|
||||||
|
func (in *ClusterCIDR) APILifecycleDeprecated() (major, minor int) {
|
||||||
|
return 1, 28
|
||||||
|
}
|
||||||
|
|
||||||
|
// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
|
||||||
|
// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
|
||||||
|
func (in *ClusterCIDR) APILifecycleRemoved() (major, minor int) {
|
||||||
|
return 1, 31
|
||||||
|
}
|
||||||
|
|
||||||
|
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||||
|
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||||
|
func (in *ClusterCIDRList) APILifecycleIntroduced() (major, minor int) {
|
||||||
|
return 1, 25
|
||||||
|
}
|
||||||
|
|
||||||
|
// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
|
||||||
|
// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
|
||||||
|
func (in *ClusterCIDRList) APILifecycleDeprecated() (major, minor int) {
|
||||||
|
return 1, 28
|
||||||
|
}
|
||||||
|
|
||||||
|
// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
|
||||||
|
// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
|
||||||
|
func (in *ClusterCIDRList) APILifecycleRemoved() (major, minor int) {
|
||||||
|
return 1, 31
|
||||||
|
}
|
@ -52,6 +52,7 @@ import (
|
|||||||
flowcontrolv1beta1 "k8s.io/api/flowcontrol/v1beta1"
|
flowcontrolv1beta1 "k8s.io/api/flowcontrol/v1beta1"
|
||||||
imagepolicyv1alpha1 "k8s.io/api/imagepolicy/v1alpha1"
|
imagepolicyv1alpha1 "k8s.io/api/imagepolicy/v1alpha1"
|
||||||
networkingv1 "k8s.io/api/networking/v1"
|
networkingv1 "k8s.io/api/networking/v1"
|
||||||
|
networkingv1alpha1 "k8s.io/api/networking/v1alpha1"
|
||||||
networkingv1beta1 "k8s.io/api/networking/v1beta1"
|
networkingv1beta1 "k8s.io/api/networking/v1beta1"
|
||||||
nodev1 "k8s.io/api/node/v1"
|
nodev1 "k8s.io/api/node/v1"
|
||||||
nodev1alpha1 "k8s.io/api/node/v1alpha1"
|
nodev1alpha1 "k8s.io/api/node/v1alpha1"
|
||||||
@ -110,6 +111,7 @@ var groups = []runtime.SchemeBuilder{
|
|||||||
imagepolicyv1alpha1.SchemeBuilder,
|
imagepolicyv1alpha1.SchemeBuilder,
|
||||||
networkingv1.SchemeBuilder,
|
networkingv1.SchemeBuilder,
|
||||||
networkingv1beta1.SchemeBuilder,
|
networkingv1beta1.SchemeBuilder,
|
||||||
|
networkingv1alpha1.SchemeBuilder,
|
||||||
nodev1.SchemeBuilder,
|
nodev1.SchemeBuilder,
|
||||||
nodev1alpha1.SchemeBuilder,
|
nodev1alpha1.SchemeBuilder,
|
||||||
nodev1beta1.SchemeBuilder,
|
nodev1beta1.SchemeBuilder,
|
||||||
|
75
staging/src/k8s.io/api/testdata/HEAD/networking.k8s.io.v1alpha1.ClusterCIDR.json
vendored
Normal file
75
staging/src/k8s.io/api/testdata/HEAD/networking.k8s.io.v1alpha1.ClusterCIDR.json
vendored
Normal file
@ -0,0 +1,75 @@
|
|||||||
|
{
|
||||||
|
"kind": "ClusterCIDR",
|
||||||
|
"apiVersion": "networking.k8s.io/v1alpha1",
|
||||||
|
"metadata": {
|
||||||
|
"name": "nameValue",
|
||||||
|
"generateName": "generateNameValue",
|
||||||
|
"namespace": "namespaceValue",
|
||||||
|
"selfLink": "selfLinkValue",
|
||||||
|
"uid": "uidValue",
|
||||||
|
"resourceVersion": "resourceVersionValue",
|
||||||
|
"generation": 7,
|
||||||
|
"creationTimestamp": "2008-01-01T01:01:01Z",
|
||||||
|
"deletionTimestamp": "2009-01-01T01:01:01Z",
|
||||||
|
"deletionGracePeriodSeconds": 10,
|
||||||
|
"labels": {
|
||||||
|
"labelsKey": "labelsValue"
|
||||||
|
},
|
||||||
|
"annotations": {
|
||||||
|
"annotationsKey": "annotationsValue"
|
||||||
|
},
|
||||||
|
"ownerReferences": [
|
||||||
|
{
|
||||||
|
"apiVersion": "apiVersionValue",
|
||||||
|
"kind": "kindValue",
|
||||||
|
"name": "nameValue",
|
||||||
|
"uid": "uidValue",
|
||||||
|
"controller": true,
|
||||||
|
"blockOwnerDeletion": true
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"finalizers": [
|
||||||
|
"finalizersValue"
|
||||||
|
],
|
||||||
|
"managedFields": [
|
||||||
|
{
|
||||||
|
"manager": "managerValue",
|
||||||
|
"operation": "operationValue",
|
||||||
|
"apiVersion": "apiVersionValue",
|
||||||
|
"time": "2004-01-01T01:01:01Z",
|
||||||
|
"fieldsType": "fieldsTypeValue",
|
||||||
|
"fieldsV1": {},
|
||||||
|
"subresource": "subresourceValue"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"spec": {
|
||||||
|
"nodeSelector": {
|
||||||
|
"nodeSelectorTerms": [
|
||||||
|
{
|
||||||
|
"matchExpressions": [
|
||||||
|
{
|
||||||
|
"key": "keyValue",
|
||||||
|
"operator": "operatorValue",
|
||||||
|
"values": [
|
||||||
|
"valuesValue"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"matchFields": [
|
||||||
|
{
|
||||||
|
"key": "keyValue",
|
||||||
|
"operator": "operatorValue",
|
||||||
|
"values": [
|
||||||
|
"valuesValue"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"perNodeHostBits": 2,
|
||||||
|
"ipv4": "ipv4Value",
|
||||||
|
"ipv6": "ipv6Value"
|
||||||
|
}
|
||||||
|
}
|
BIN
staging/src/k8s.io/api/testdata/HEAD/networking.k8s.io.v1alpha1.ClusterCIDR.pb
vendored
Normal file
BIN
staging/src/k8s.io/api/testdata/HEAD/networking.k8s.io.v1alpha1.ClusterCIDR.pb
vendored
Normal file
Binary file not shown.
50
staging/src/k8s.io/api/testdata/HEAD/networking.k8s.io.v1alpha1.ClusterCIDR.yaml
vendored
Normal file
50
staging/src/k8s.io/api/testdata/HEAD/networking.k8s.io.v1alpha1.ClusterCIDR.yaml
vendored
Normal file
@ -0,0 +1,50 @@
|
|||||||
|
apiVersion: networking.k8s.io/v1alpha1
|
||||||
|
kind: ClusterCIDR
|
||||||
|
metadata:
|
||||||
|
annotations:
|
||||||
|
annotationsKey: annotationsValue
|
||||||
|
creationTimestamp: "2008-01-01T01:01:01Z"
|
||||||
|
deletionGracePeriodSeconds: 10
|
||||||
|
deletionTimestamp: "2009-01-01T01:01:01Z"
|
||||||
|
finalizers:
|
||||||
|
- finalizersValue
|
||||||
|
generateName: generateNameValue
|
||||||
|
generation: 7
|
||||||
|
labels:
|
||||||
|
labelsKey: labelsValue
|
||||||
|
managedFields:
|
||||||
|
- apiVersion: apiVersionValue
|
||||||
|
fieldsType: fieldsTypeValue
|
||||||
|
fieldsV1: {}
|
||||||
|
manager: managerValue
|
||||||
|
operation: operationValue
|
||||||
|
subresource: subresourceValue
|
||||||
|
time: "2004-01-01T01:01:01Z"
|
||||||
|
name: nameValue
|
||||||
|
namespace: namespaceValue
|
||||||
|
ownerReferences:
|
||||||
|
- apiVersion: apiVersionValue
|
||||||
|
blockOwnerDeletion: true
|
||||||
|
controller: true
|
||||||
|
kind: kindValue
|
||||||
|
name: nameValue
|
||||||
|
uid: uidValue
|
||||||
|
resourceVersion: resourceVersionValue
|
||||||
|
selfLink: selfLinkValue
|
||||||
|
uid: uidValue
|
||||||
|
spec:
|
||||||
|
ipv4: ipv4Value
|
||||||
|
ipv6: ipv6Value
|
||||||
|
nodeSelector:
|
||||||
|
nodeSelectorTerms:
|
||||||
|
- matchExpressions:
|
||||||
|
- key: keyValue
|
||||||
|
operator: operatorValue
|
||||||
|
values:
|
||||||
|
- valuesValue
|
||||||
|
matchFields:
|
||||||
|
- key: keyValue
|
||||||
|
operator: operatorValue
|
||||||
|
values:
|
||||||
|
- valuesValue
|
||||||
|
perNodeHostBits: 2
|
@ -9593,6 +9593,41 @@ var schemaYAML = typed.YAMLObject(`types:
|
|||||||
- name: number
|
- name: number
|
||||||
type:
|
type:
|
||||||
scalar: numeric
|
scalar: numeric
|
||||||
|
- name: io.k8s.api.networking.v1alpha1.ClusterCIDR
|
||||||
|
map:
|
||||||
|
fields:
|
||||||
|
- name: apiVersion
|
||||||
|
type:
|
||||||
|
scalar: string
|
||||||
|
- name: kind
|
||||||
|
type:
|
||||||
|
scalar: string
|
||||||
|
- name: metadata
|
||||||
|
type:
|
||||||
|
namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta
|
||||||
|
default: {}
|
||||||
|
- name: spec
|
||||||
|
type:
|
||||||
|
namedType: io.k8s.api.networking.v1alpha1.ClusterCIDRSpec
|
||||||
|
default: {}
|
||||||
|
- name: io.k8s.api.networking.v1alpha1.ClusterCIDRSpec
|
||||||
|
map:
|
||||||
|
fields:
|
||||||
|
- name: ipv4
|
||||||
|
type:
|
||||||
|
scalar: string
|
||||||
|
default: ""
|
||||||
|
- name: ipv6
|
||||||
|
type:
|
||||||
|
scalar: string
|
||||||
|
default: ""
|
||||||
|
- name: nodeSelector
|
||||||
|
type:
|
||||||
|
namedType: io.k8s.api.core.v1.NodeSelector
|
||||||
|
- name: perNodeHostBits
|
||||||
|
type:
|
||||||
|
scalar: numeric
|
||||||
|
default: 0
|
||||||
- name: io.k8s.api.networking.v1beta1.HTTPIngressPath
|
- name: io.k8s.api.networking.v1beta1.HTTPIngressPath
|
||||||
map:
|
map:
|
||||||
fields:
|
fields:
|
||||||
|
@ -0,0 +1,66 @@
|
|||||||
|
/*
|
||||||
|
Copyright The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
// Code generated by applyconfiguration-gen. DO NOT EDIT.
|
||||||
|
|
||||||
|
package v1
|
||||||
|
|
||||||
|
// ListMetaApplyConfiguration represents an declarative configuration of the ListMeta type for use
|
||||||
|
// with apply.
|
||||||
|
type ListMetaApplyConfiguration struct {
|
||||||
|
SelfLink *string `json:"selfLink,omitempty"`
|
||||||
|
ResourceVersion *string `json:"resourceVersion,omitempty"`
|
||||||
|
Continue *string `json:"continue,omitempty"`
|
||||||
|
RemainingItemCount *int64 `json:"remainingItemCount,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// ListMetaApplyConfiguration constructs an declarative configuration of the ListMeta type for use with
|
||||||
|
// apply.
|
||||||
|
func ListMeta() *ListMetaApplyConfiguration {
|
||||||
|
return &ListMetaApplyConfiguration{}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithSelfLink sets the SelfLink field in the declarative configuration to the given value
|
||||||
|
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
|
||||||
|
// If called multiple times, the SelfLink field is set to the value of the last call.
|
||||||
|
func (b *ListMetaApplyConfiguration) WithSelfLink(value string) *ListMetaApplyConfiguration {
|
||||||
|
b.SelfLink = &value
|
||||||
|
return b
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value
|
||||||
|
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
|
||||||
|
// If called multiple times, the ResourceVersion field is set to the value of the last call.
|
||||||
|
func (b *ListMetaApplyConfiguration) WithResourceVersion(value string) *ListMetaApplyConfiguration {
|
||||||
|
b.ResourceVersion = &value
|
||||||
|
return b
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithContinue sets the Continue field in the declarative configuration to the given value
|
||||||
|
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
|
||||||
|
// If called multiple times, the Continue field is set to the value of the last call.
|
||||||
|
func (b *ListMetaApplyConfiguration) WithContinue(value string) *ListMetaApplyConfiguration {
|
||||||
|
b.Continue = &value
|
||||||
|
return b
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithRemainingItemCount sets the RemainingItemCount field in the declarative configuration to the given value
|
||||||
|
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
|
||||||
|
// If called multiple times, the RemainingItemCount field is set to the value of the last call.
|
||||||
|
func (b *ListMetaApplyConfiguration) WithRemainingItemCount(value int64) *ListMetaApplyConfiguration {
|
||||||
|
b.RemainingItemCount = &value
|
||||||
|
return b
|
||||||
|
}
|
@ -0,0 +1,247 @@
|
|||||||
|
/*
|
||||||
|
Copyright The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
// Code generated by applyconfiguration-gen. DO NOT EDIT.
|
||||||
|
|
||||||
|
package v1alpha1
|
||||||
|
|
||||||
|
import (
|
||||||
|
networkingv1alpha1 "k8s.io/api/networking/v1alpha1"
|
||||||
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
|
types "k8s.io/apimachinery/pkg/types"
|
||||||
|
managedfields "k8s.io/apimachinery/pkg/util/managedfields"
|
||||||
|
internal "k8s.io/client-go/applyconfigurations/internal"
|
||||||
|
v1 "k8s.io/client-go/applyconfigurations/meta/v1"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ClusterCIDRApplyConfiguration represents an declarative configuration of the ClusterCIDR type for use
|
||||||
|
// with apply.
|
||||||
|
type ClusterCIDRApplyConfiguration struct {
|
||||||
|
v1.TypeMetaApplyConfiguration `json:",inline"`
|
||||||
|
*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
|
||||||
|
Spec *ClusterCIDRSpecApplyConfiguration `json:"spec,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClusterCIDR constructs an declarative configuration of the ClusterCIDR type for use with
|
||||||
|
// apply.
|
||||||
|
func ClusterCIDR(name string) *ClusterCIDRApplyConfiguration {
|
||||||
|
b := &ClusterCIDRApplyConfiguration{}
|
||||||
|
b.WithName(name)
|
||||||
|
b.WithKind("ClusterCIDR")
|
||||||
|
b.WithAPIVersion("networking.k8s.io/v1alpha1")
|
||||||
|
return b
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExtractClusterCIDR extracts the applied configuration owned by fieldManager from
|
||||||
|
// clusterCIDR. If no managedFields are found in clusterCIDR for fieldManager, a
|
||||||
|
// ClusterCIDRApplyConfiguration is returned with only the Name, Namespace (if applicable),
|
||||||
|
// APIVersion and Kind populated. It is possible that no managed fields were found for because other
|
||||||
|
// field managers have taken ownership of all the fields previously owned by fieldManager, or because
|
||||||
|
// the fieldManager never owned fields any fields.
|
||||||
|
// clusterCIDR must be a unmodified ClusterCIDR API object that was retrieved from the Kubernetes API.
|
||||||
|
// ExtractClusterCIDR provides a way to perform a extract/modify-in-place/apply workflow.
|
||||||
|
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
|
||||||
|
// applied if another fieldManager has updated or force applied any of the previously applied fields.
|
||||||
|
// Experimental!
|
||||||
|
func ExtractClusterCIDR(clusterCIDR *networkingv1alpha1.ClusterCIDR, fieldManager string) (*ClusterCIDRApplyConfiguration, error) {
|
||||||
|
return extractClusterCIDR(clusterCIDR, fieldManager, "")
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExtractClusterCIDRStatus is the same as ExtractClusterCIDR except
|
||||||
|
// that it extracts the status subresource applied configuration.
|
||||||
|
// Experimental!
|
||||||
|
func ExtractClusterCIDRStatus(clusterCIDR *networkingv1alpha1.ClusterCIDR, fieldManager string) (*ClusterCIDRApplyConfiguration, error) {
|
||||||
|
return extractClusterCIDR(clusterCIDR, fieldManager, "status")
|
||||||
|
}
|
||||||
|
|
||||||
|
func extractClusterCIDR(clusterCIDR *networkingv1alpha1.ClusterCIDR, fieldManager string, subresource string) (*ClusterCIDRApplyConfiguration, error) {
|
||||||
|
b := &ClusterCIDRApplyConfiguration{}
|
||||||
|
err := managedfields.ExtractInto(clusterCIDR, internal.Parser().Type("io.k8s.api.networking.v1alpha1.ClusterCIDR"), fieldManager, b, subresource)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
b.WithName(clusterCIDR.Name)
|
||||||
|
|
||||||
|
b.WithKind("ClusterCIDR")
|
||||||
|
b.WithAPIVersion("networking.k8s.io/v1alpha1")
|
||||||
|
return b, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithKind sets the Kind field in the declarative configuration to the given value
|
||||||
|
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
|
||||||
|
// If called multiple times, the Kind field is set to the value of the last call.
|
||||||
|
func (b *ClusterCIDRApplyConfiguration) WithKind(value string) *ClusterCIDRApplyConfiguration {
|
||||||
|
b.Kind = &value
|
||||||
|
return b
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value
|
||||||
|
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
|
||||||
|
// If called multiple times, the APIVersion field is set to the value of the last call.
|
||||||
|
func (b *ClusterCIDRApplyConfiguration) WithAPIVersion(value string) *ClusterCIDRApplyConfiguration {
|
||||||
|
b.APIVersion = &value
|
||||||
|
return b
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithName sets the Name field in the declarative configuration to the given value
|
||||||
|
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
|
||||||
|
// If called multiple times, the Name field is set to the value of the last call.
|
||||||
|
func (b *ClusterCIDRApplyConfiguration) WithName(value string) *ClusterCIDRApplyConfiguration {
|
||||||
|
b.ensureObjectMetaApplyConfigurationExists()
|
||||||
|
b.Name = &value
|
||||||
|
return b
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithGenerateName sets the GenerateName field in the declarative configuration to the given value
|
||||||
|
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
|
||||||
|
// If called multiple times, the GenerateName field is set to the value of the last call.
|
||||||
|
func (b *ClusterCIDRApplyConfiguration) WithGenerateName(value string) *ClusterCIDRApplyConfiguration {
|
||||||
|
b.ensureObjectMetaApplyConfigurationExists()
|
||||||
|
b.GenerateName = &value
|
||||||
|
return b
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithNamespace sets the Namespace field in the declarative configuration to the given value
|
||||||
|
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
|
||||||
|
// If called multiple times, the Namespace field is set to the value of the last call.
|
||||||
|
func (b *ClusterCIDRApplyConfiguration) WithNamespace(value string) *ClusterCIDRApplyConfiguration {
|
||||||
|
b.ensureObjectMetaApplyConfigurationExists()
|
||||||
|
b.Namespace = &value
|
||||||
|
return b
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithUID sets the UID field in the declarative configuration to the given value
|
||||||
|
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
|
||||||
|
// If called multiple times, the UID field is set to the value of the last call.
|
||||||
|
func (b *ClusterCIDRApplyConfiguration) WithUID(value types.UID) *ClusterCIDRApplyConfiguration {
|
||||||
|
b.ensureObjectMetaApplyConfigurationExists()
|
||||||
|
b.UID = &value
|
||||||
|
return b
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value
|
||||||
|
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
|
||||||
|
// If called multiple times, the ResourceVersion field is set to the value of the last call.
|
||||||
|
func (b *ClusterCIDRApplyConfiguration) WithResourceVersion(value string) *ClusterCIDRApplyConfiguration {
|
||||||
|
b.ensureObjectMetaApplyConfigurationExists()
|
||||||
|
b.ResourceVersion = &value
|
||||||
|
return b
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithGeneration sets the Generation field in the declarative configuration to the given value
|
||||||
|
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
|
||||||
|
// If called multiple times, the Generation field is set to the value of the last call.
|
||||||
|
func (b *ClusterCIDRApplyConfiguration) WithGeneration(value int64) *ClusterCIDRApplyConfiguration {
|
||||||
|
b.ensureObjectMetaApplyConfigurationExists()
|
||||||
|
b.Generation = &value
|
||||||
|
return b
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
|
||||||
|
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
|
||||||
|
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
|
||||||
|
func (b *ClusterCIDRApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ClusterCIDRApplyConfiguration {
|
||||||
|
b.ensureObjectMetaApplyConfigurationExists()
|
||||||
|
b.CreationTimestamp = &value
|
||||||
|
return b
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
|
||||||
|
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
|
||||||
|
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
|
||||||
|
func (b *ClusterCIDRApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ClusterCIDRApplyConfiguration {
|
||||||
|
b.ensureObjectMetaApplyConfigurationExists()
|
||||||
|
b.DeletionTimestamp = &value
|
||||||
|
return b
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value
|
||||||
|
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
|
||||||
|
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
|
||||||
|
func (b *ClusterCIDRApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ClusterCIDRApplyConfiguration {
|
||||||
|
b.ensureObjectMetaApplyConfigurationExists()
|
||||||
|
b.DeletionGracePeriodSeconds = &value
|
||||||
|
return b
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithLabels puts the entries into the Labels field in the declarative configuration
|
||||||
|
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
|
||||||
|
// If called multiple times, the entries provided by each call will be put on the Labels field,
|
||||||
|
// overwriting an existing map entries in Labels field with the same key.
|
||||||
|
func (b *ClusterCIDRApplyConfiguration) WithLabels(entries map[string]string) *ClusterCIDRApplyConfiguration {
|
||||||
|
b.ensureObjectMetaApplyConfigurationExists()
|
||||||
|
if b.Labels == nil && len(entries) > 0 {
|
||||||
|
b.Labels = make(map[string]string, len(entries))
|
||||||
|
}
|
||||||
|
for k, v := range entries {
|
||||||
|
b.Labels[k] = v
|
||||||
|
}
|
||||||
|
return b
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithAnnotations puts the entries into the Annotations field in the declarative configuration
|
||||||
|
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
|
||||||
|
// If called multiple times, the entries provided by each call will be put on the Annotations field,
|
||||||
|
// overwriting an existing map entries in Annotations field with the same key.
|
||||||
|
func (b *ClusterCIDRApplyConfiguration) WithAnnotations(entries map[string]string) *ClusterCIDRApplyConfiguration {
|
||||||
|
b.ensureObjectMetaApplyConfigurationExists()
|
||||||
|
if b.Annotations == nil && len(entries) > 0 {
|
||||||
|
b.Annotations = make(map[string]string, len(entries))
|
||||||
|
}
|
||||||
|
for k, v := range entries {
|
||||||
|
b.Annotations[k] = v
|
||||||
|
}
|
||||||
|
return b
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
|
||||||
|
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
|
||||||
|
// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
|
||||||
|
func (b *ClusterCIDRApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ClusterCIDRApplyConfiguration {
|
||||||
|
b.ensureObjectMetaApplyConfigurationExists()
|
||||||
|
for i := range values {
|
||||||
|
if values[i] == nil {
|
||||||
|
panic("nil value passed to WithOwnerReferences")
|
||||||
|
}
|
||||||
|
b.OwnerReferences = append(b.OwnerReferences, *values[i])
|
||||||
|
}
|
||||||
|
return b
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithFinalizers adds the given value to the Finalizers field in the declarative configuration
|
||||||
|
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
|
||||||
|
// If called multiple times, values provided by each call will be appended to the Finalizers field.
|
||||||
|
func (b *ClusterCIDRApplyConfiguration) WithFinalizers(values ...string) *ClusterCIDRApplyConfiguration {
|
||||||
|
b.ensureObjectMetaApplyConfigurationExists()
|
||||||
|
for i := range values {
|
||||||
|
b.Finalizers = append(b.Finalizers, values[i])
|
||||||
|
}
|
||||||
|
return b
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *ClusterCIDRApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
|
||||||
|
if b.ObjectMetaApplyConfiguration == nil {
|
||||||
|
b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithSpec sets the Spec field in the declarative configuration to the given value
|
||||||
|
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
|
||||||
|
// If called multiple times, the Spec field is set to the value of the last call.
|
||||||
|
func (b *ClusterCIDRApplyConfiguration) WithSpec(value *ClusterCIDRSpecApplyConfiguration) *ClusterCIDRApplyConfiguration {
|
||||||
|
b.Spec = value
|
||||||
|
return b
|
||||||
|
}
|
@ -0,0 +1,70 @@
|
|||||||
|
/*
|
||||||
|
Copyright The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
// Code generated by applyconfiguration-gen. DO NOT EDIT.
|
||||||
|
|
||||||
|
package v1alpha1
|
||||||
|
|
||||||
|
import (
|
||||||
|
v1 "k8s.io/client-go/applyconfigurations/core/v1"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ClusterCIDRSpecApplyConfiguration represents an declarative configuration of the ClusterCIDRSpec type for use
|
||||||
|
// with apply.
|
||||||
|
type ClusterCIDRSpecApplyConfiguration struct {
|
||||||
|
NodeSelector *v1.NodeSelectorApplyConfiguration `json:"nodeSelector,omitempty"`
|
||||||
|
PerNodeHostBits *int32 `json:"perNodeHostBits,omitempty"`
|
||||||
|
IPv4 *string `json:"ipv4,omitempty"`
|
||||||
|
IPv6 *string `json:"ipv6,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClusterCIDRSpecApplyConfiguration constructs an declarative configuration of the ClusterCIDRSpec type for use with
|
||||||
|
// apply.
|
||||||
|
func ClusterCIDRSpec() *ClusterCIDRSpecApplyConfiguration {
|
||||||
|
return &ClusterCIDRSpecApplyConfiguration{}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithNodeSelector sets the NodeSelector field in the declarative configuration to the given value
|
||||||
|
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
|
||||||
|
// If called multiple times, the NodeSelector field is set to the value of the last call.
|
||||||
|
func (b *ClusterCIDRSpecApplyConfiguration) WithNodeSelector(value *v1.NodeSelectorApplyConfiguration) *ClusterCIDRSpecApplyConfiguration {
|
||||||
|
b.NodeSelector = value
|
||||||
|
return b
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithPerNodeHostBits sets the PerNodeHostBits field in the declarative configuration to the given value
|
||||||
|
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
|
||||||
|
// If called multiple times, the PerNodeHostBits field is set to the value of the last call.
|
||||||
|
func (b *ClusterCIDRSpecApplyConfiguration) WithPerNodeHostBits(value int32) *ClusterCIDRSpecApplyConfiguration {
|
||||||
|
b.PerNodeHostBits = &value
|
||||||
|
return b
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithIPv4 sets the IPv4 field in the declarative configuration to the given value
|
||||||
|
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
|
||||||
|
// If called multiple times, the IPv4 field is set to the value of the last call.
|
||||||
|
func (b *ClusterCIDRSpecApplyConfiguration) WithIPv4(value string) *ClusterCIDRSpecApplyConfiguration {
|
||||||
|
b.IPv4 = &value
|
||||||
|
return b
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithIPv6 sets the IPv6 field in the declarative configuration to the given value
|
||||||
|
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
|
||||||
|
// If called multiple times, the IPv6 field is set to the value of the last call.
|
||||||
|
func (b *ClusterCIDRSpecApplyConfiguration) WithIPv6(value string) *ClusterCIDRSpecApplyConfiguration {
|
||||||
|
b.IPv6 = &value
|
||||||
|
return b
|
||||||
|
}
|
@ -46,6 +46,7 @@ import (
|
|||||||
flowcontrolv1beta2 "k8s.io/api/flowcontrol/v1beta2"
|
flowcontrolv1beta2 "k8s.io/api/flowcontrol/v1beta2"
|
||||||
imagepolicyv1alpha1 "k8s.io/api/imagepolicy/v1alpha1"
|
imagepolicyv1alpha1 "k8s.io/api/imagepolicy/v1alpha1"
|
||||||
networkingv1 "k8s.io/api/networking/v1"
|
networkingv1 "k8s.io/api/networking/v1"
|
||||||
|
networkingv1alpha1 "k8s.io/api/networking/v1alpha1"
|
||||||
networkingv1beta1 "k8s.io/api/networking/v1beta1"
|
networkingv1beta1 "k8s.io/api/networking/v1beta1"
|
||||||
nodev1 "k8s.io/api/node/v1"
|
nodev1 "k8s.io/api/node/v1"
|
||||||
nodev1alpha1 "k8s.io/api/node/v1alpha1"
|
nodev1alpha1 "k8s.io/api/node/v1alpha1"
|
||||||
@ -91,6 +92,7 @@ import (
|
|||||||
applyconfigurationsimagepolicyv1alpha1 "k8s.io/client-go/applyconfigurations/imagepolicy/v1alpha1"
|
applyconfigurationsimagepolicyv1alpha1 "k8s.io/client-go/applyconfigurations/imagepolicy/v1alpha1"
|
||||||
applyconfigurationsmetav1 "k8s.io/client-go/applyconfigurations/meta/v1"
|
applyconfigurationsmetav1 "k8s.io/client-go/applyconfigurations/meta/v1"
|
||||||
applyconfigurationsnetworkingv1 "k8s.io/client-go/applyconfigurations/networking/v1"
|
applyconfigurationsnetworkingv1 "k8s.io/client-go/applyconfigurations/networking/v1"
|
||||||
|
applyconfigurationsnetworkingv1alpha1 "k8s.io/client-go/applyconfigurations/networking/v1alpha1"
|
||||||
applyconfigurationsnetworkingv1beta1 "k8s.io/client-go/applyconfigurations/networking/v1beta1"
|
applyconfigurationsnetworkingv1beta1 "k8s.io/client-go/applyconfigurations/networking/v1beta1"
|
||||||
applyconfigurationsnodev1 "k8s.io/client-go/applyconfigurations/node/v1"
|
applyconfigurationsnodev1 "k8s.io/client-go/applyconfigurations/node/v1"
|
||||||
applyconfigurationsnodev1alpha1 "k8s.io/client-go/applyconfigurations/node/v1alpha1"
|
applyconfigurationsnodev1alpha1 "k8s.io/client-go/applyconfigurations/node/v1alpha1"
|
||||||
@ -1206,6 +1208,12 @@ func ForKind(kind schema.GroupVersionKind) interface{} {
|
|||||||
case networkingv1.SchemeGroupVersion.WithKind("ServiceBackendPort"):
|
case networkingv1.SchemeGroupVersion.WithKind("ServiceBackendPort"):
|
||||||
return &applyconfigurationsnetworkingv1.ServiceBackendPortApplyConfiguration{}
|
return &applyconfigurationsnetworkingv1.ServiceBackendPortApplyConfiguration{}
|
||||||
|
|
||||||
|
// Group=networking.k8s.io, Version=v1alpha1
|
||||||
|
case networkingv1alpha1.SchemeGroupVersion.WithKind("ClusterCIDR"):
|
||||||
|
return &applyconfigurationsnetworkingv1alpha1.ClusterCIDRApplyConfiguration{}
|
||||||
|
case networkingv1alpha1.SchemeGroupVersion.WithKind("ClusterCIDRSpec"):
|
||||||
|
return &applyconfigurationsnetworkingv1alpha1.ClusterCIDRSpecApplyConfiguration{}
|
||||||
|
|
||||||
// Group=networking.k8s.io, Version=v1beta1
|
// Group=networking.k8s.io, Version=v1beta1
|
||||||
case networkingv1beta1.SchemeGroupVersion.WithKind("HTTPIngressPath"):
|
case networkingv1beta1.SchemeGroupVersion.WithKind("HTTPIngressPath"):
|
||||||
return &applyconfigurationsnetworkingv1beta1.HTTPIngressPathApplyConfiguration{}
|
return &applyconfigurationsnetworkingv1beta1.HTTPIngressPathApplyConfiguration{}
|
||||||
|
@ -47,6 +47,7 @@ import (
|
|||||||
flowcontrolv1beta1 "k8s.io/api/flowcontrol/v1beta1"
|
flowcontrolv1beta1 "k8s.io/api/flowcontrol/v1beta1"
|
||||||
flowcontrolv1beta2 "k8s.io/api/flowcontrol/v1beta2"
|
flowcontrolv1beta2 "k8s.io/api/flowcontrol/v1beta2"
|
||||||
networkingv1 "k8s.io/api/networking/v1"
|
networkingv1 "k8s.io/api/networking/v1"
|
||||||
|
networkingv1alpha1 "k8s.io/api/networking/v1alpha1"
|
||||||
networkingv1beta1 "k8s.io/api/networking/v1beta1"
|
networkingv1beta1 "k8s.io/api/networking/v1beta1"
|
||||||
nodev1 "k8s.io/api/node/v1"
|
nodev1 "k8s.io/api/node/v1"
|
||||||
nodev1alpha1 "k8s.io/api/node/v1alpha1"
|
nodev1alpha1 "k8s.io/api/node/v1alpha1"
|
||||||
@ -272,6 +273,10 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource
|
|||||||
case networkingv1.SchemeGroupVersion.WithResource("networkpolicies"):
|
case networkingv1.SchemeGroupVersion.WithResource("networkpolicies"):
|
||||||
return &genericInformer{resource: resource.GroupResource(), informer: f.Networking().V1().NetworkPolicies().Informer()}, nil
|
return &genericInformer{resource: resource.GroupResource(), informer: f.Networking().V1().NetworkPolicies().Informer()}, nil
|
||||||
|
|
||||||
|
// Group=networking.k8s.io, Version=v1alpha1
|
||||||
|
case networkingv1alpha1.SchemeGroupVersion.WithResource("clustercidrs"):
|
||||||
|
return &genericInformer{resource: resource.GroupResource(), informer: f.Networking().V1alpha1().ClusterCIDRs().Informer()}, nil
|
||||||
|
|
||||||
// Group=networking.k8s.io, Version=v1beta1
|
// Group=networking.k8s.io, Version=v1beta1
|
||||||
case networkingv1beta1.SchemeGroupVersion.WithResource("ingresses"):
|
case networkingv1beta1.SchemeGroupVersion.WithResource("ingresses"):
|
||||||
return &genericInformer{resource: resource.GroupResource(), informer: f.Networking().V1beta1().Ingresses().Informer()}, nil
|
return &genericInformer{resource: resource.GroupResource(), informer: f.Networking().V1beta1().Ingresses().Informer()}, nil
|
||||||
|
@ -21,6 +21,7 @@ package networking
|
|||||||
import (
|
import (
|
||||||
internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
|
internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
|
||||||
v1 "k8s.io/client-go/informers/networking/v1"
|
v1 "k8s.io/client-go/informers/networking/v1"
|
||||||
|
v1alpha1 "k8s.io/client-go/informers/networking/v1alpha1"
|
||||||
v1beta1 "k8s.io/client-go/informers/networking/v1beta1"
|
v1beta1 "k8s.io/client-go/informers/networking/v1beta1"
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -28,6 +29,8 @@ import (
|
|||||||
type Interface interface {
|
type Interface interface {
|
||||||
// V1 provides access to shared informers for resources in V1.
|
// V1 provides access to shared informers for resources in V1.
|
||||||
V1() v1.Interface
|
V1() v1.Interface
|
||||||
|
// V1alpha1 provides access to shared informers for resources in V1alpha1.
|
||||||
|
V1alpha1() v1alpha1.Interface
|
||||||
// V1beta1 provides access to shared informers for resources in V1beta1.
|
// V1beta1 provides access to shared informers for resources in V1beta1.
|
||||||
V1beta1() v1beta1.Interface
|
V1beta1() v1beta1.Interface
|
||||||
}
|
}
|
||||||
@ -48,6 +51,11 @@ func (g *group) V1() v1.Interface {
|
|||||||
return v1.New(g.factory, g.namespace, g.tweakListOptions)
|
return v1.New(g.factory, g.namespace, g.tweakListOptions)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// V1alpha1 returns a new v1alpha1.Interface.
|
||||||
|
func (g *group) V1alpha1() v1alpha1.Interface {
|
||||||
|
return v1alpha1.New(g.factory, g.namespace, g.tweakListOptions)
|
||||||
|
}
|
||||||
|
|
||||||
// V1beta1 returns a new v1beta1.Interface.
|
// V1beta1 returns a new v1beta1.Interface.
|
||||||
func (g *group) V1beta1() v1beta1.Interface {
|
func (g *group) V1beta1() v1beta1.Interface {
|
||||||
return v1beta1.New(g.factory, g.namespace, g.tweakListOptions)
|
return v1beta1.New(g.factory, g.namespace, g.tweakListOptions)
|
||||||
|
@ -0,0 +1,89 @@
|
|||||||
|
/*
|
||||||
|
Copyright The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
// Code generated by informer-gen. DO NOT EDIT.
|
||||||
|
|
||||||
|
package v1alpha1
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
time "time"
|
||||||
|
|
||||||
|
networkingv1alpha1 "k8s.io/api/networking/v1alpha1"
|
||||||
|
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
|
runtime "k8s.io/apimachinery/pkg/runtime"
|
||||||
|
watch "k8s.io/apimachinery/pkg/watch"
|
||||||
|
internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
|
||||||
|
kubernetes "k8s.io/client-go/kubernetes"
|
||||||
|
v1alpha1 "k8s.io/client-go/listers/networking/v1alpha1"
|
||||||
|
cache "k8s.io/client-go/tools/cache"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ClusterCIDRInformer provides access to a shared informer and lister for
|
||||||
|
// ClusterCIDRs.
|
||||||
|
type ClusterCIDRInformer interface {
|
||||||
|
Informer() cache.SharedIndexInformer
|
||||||
|
Lister() v1alpha1.ClusterCIDRLister
|
||||||
|
}
|
||||||
|
|
||||||
|
type clusterCIDRInformer struct {
|
||||||
|
factory internalinterfaces.SharedInformerFactory
|
||||||
|
tweakListOptions internalinterfaces.TweakListOptionsFunc
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewClusterCIDRInformer constructs a new informer for ClusterCIDR type.
|
||||||
|
// Always prefer using an informer factory to get a shared informer instead of getting an independent
|
||||||
|
// one. This reduces memory footprint and number of connections to the server.
|
||||||
|
func NewClusterCIDRInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
|
||||||
|
return NewFilteredClusterCIDRInformer(client, resyncPeriod, indexers, nil)
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewFilteredClusterCIDRInformer constructs a new informer for ClusterCIDR type.
|
||||||
|
// Always prefer using an informer factory to get a shared informer instead of getting an independent
|
||||||
|
// one. This reduces memory footprint and number of connections to the server.
|
||||||
|
func NewFilteredClusterCIDRInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
|
||||||
|
return cache.NewSharedIndexInformer(
|
||||||
|
&cache.ListWatch{
|
||||||
|
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
|
||||||
|
if tweakListOptions != nil {
|
||||||
|
tweakListOptions(&options)
|
||||||
|
}
|
||||||
|
return client.NetworkingV1alpha1().ClusterCIDRs().List(context.TODO(), options)
|
||||||
|
},
|
||||||
|
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
|
||||||
|
if tweakListOptions != nil {
|
||||||
|
tweakListOptions(&options)
|
||||||
|
}
|
||||||
|
return client.NetworkingV1alpha1().ClusterCIDRs().Watch(context.TODO(), options)
|
||||||
|
},
|
||||||
|
},
|
||||||
|
&networkingv1alpha1.ClusterCIDR{},
|
||||||
|
resyncPeriod,
|
||||||
|
indexers,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *clusterCIDRInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
|
||||||
|
return NewFilteredClusterCIDRInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *clusterCIDRInformer) Informer() cache.SharedIndexInformer {
|
||||||
|
return f.factory.InformerFor(&networkingv1alpha1.ClusterCIDR{}, f.defaultInformer)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *clusterCIDRInformer) Lister() v1alpha1.ClusterCIDRLister {
|
||||||
|
return v1alpha1.NewClusterCIDRLister(f.Informer().GetIndexer())
|
||||||
|
}
|
@ -0,0 +1,45 @@
|
|||||||
|
/*
|
||||||
|
Copyright The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
// Code generated by informer-gen. DO NOT EDIT.
|
||||||
|
|
||||||
|
package v1alpha1
|
||||||
|
|
||||||
|
import (
|
||||||
|
internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Interface provides access to all the informers in this group version.
|
||||||
|
type Interface interface {
|
||||||
|
// ClusterCIDRs returns a ClusterCIDRInformer.
|
||||||
|
ClusterCIDRs() ClusterCIDRInformer
|
||||||
|
}
|
||||||
|
|
||||||
|
type version struct {
|
||||||
|
factory internalinterfaces.SharedInformerFactory
|
||||||
|
namespace string
|
||||||
|
tweakListOptions internalinterfaces.TweakListOptionsFunc
|
||||||
|
}
|
||||||
|
|
||||||
|
// New returns a new Interface.
|
||||||
|
func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
|
||||||
|
return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClusterCIDRs returns a ClusterCIDRInformer.
|
||||||
|
func (v *version) ClusterCIDRs() ClusterCIDRInformer {
|
||||||
|
return &clusterCIDRInformer{factory: v.factory, tweakListOptions: v.tweakListOptions}
|
||||||
|
}
|
@ -53,6 +53,7 @@ import (
|
|||||||
flowcontrolv1beta1 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1"
|
flowcontrolv1beta1 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1"
|
||||||
flowcontrolv1beta2 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2"
|
flowcontrolv1beta2 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2"
|
||||||
networkingv1 "k8s.io/client-go/kubernetes/typed/networking/v1"
|
networkingv1 "k8s.io/client-go/kubernetes/typed/networking/v1"
|
||||||
|
networkingv1alpha1 "k8s.io/client-go/kubernetes/typed/networking/v1alpha1"
|
||||||
networkingv1beta1 "k8s.io/client-go/kubernetes/typed/networking/v1beta1"
|
networkingv1beta1 "k8s.io/client-go/kubernetes/typed/networking/v1beta1"
|
||||||
nodev1 "k8s.io/client-go/kubernetes/typed/node/v1"
|
nodev1 "k8s.io/client-go/kubernetes/typed/node/v1"
|
||||||
nodev1alpha1 "k8s.io/client-go/kubernetes/typed/node/v1alpha1"
|
nodev1alpha1 "k8s.io/client-go/kubernetes/typed/node/v1alpha1"
|
||||||
@ -104,6 +105,7 @@ type Interface interface {
|
|||||||
FlowcontrolV1beta1() flowcontrolv1beta1.FlowcontrolV1beta1Interface
|
FlowcontrolV1beta1() flowcontrolv1beta1.FlowcontrolV1beta1Interface
|
||||||
FlowcontrolV1beta2() flowcontrolv1beta2.FlowcontrolV1beta2Interface
|
FlowcontrolV1beta2() flowcontrolv1beta2.FlowcontrolV1beta2Interface
|
||||||
NetworkingV1() networkingv1.NetworkingV1Interface
|
NetworkingV1() networkingv1.NetworkingV1Interface
|
||||||
|
NetworkingV1alpha1() networkingv1alpha1.NetworkingV1alpha1Interface
|
||||||
NetworkingV1beta1() networkingv1beta1.NetworkingV1beta1Interface
|
NetworkingV1beta1() networkingv1beta1.NetworkingV1beta1Interface
|
||||||
NodeV1() nodev1.NodeV1Interface
|
NodeV1() nodev1.NodeV1Interface
|
||||||
NodeV1alpha1() nodev1alpha1.NodeV1alpha1Interface
|
NodeV1alpha1() nodev1alpha1.NodeV1alpha1Interface
|
||||||
@ -155,6 +157,7 @@ type Clientset struct {
|
|||||||
flowcontrolV1beta1 *flowcontrolv1beta1.FlowcontrolV1beta1Client
|
flowcontrolV1beta1 *flowcontrolv1beta1.FlowcontrolV1beta1Client
|
||||||
flowcontrolV1beta2 *flowcontrolv1beta2.FlowcontrolV1beta2Client
|
flowcontrolV1beta2 *flowcontrolv1beta2.FlowcontrolV1beta2Client
|
||||||
networkingV1 *networkingv1.NetworkingV1Client
|
networkingV1 *networkingv1.NetworkingV1Client
|
||||||
|
networkingV1alpha1 *networkingv1alpha1.NetworkingV1alpha1Client
|
||||||
networkingV1beta1 *networkingv1beta1.NetworkingV1beta1Client
|
networkingV1beta1 *networkingv1beta1.NetworkingV1beta1Client
|
||||||
nodeV1 *nodev1.NodeV1Client
|
nodeV1 *nodev1.NodeV1Client
|
||||||
nodeV1alpha1 *nodev1alpha1.NodeV1alpha1Client
|
nodeV1alpha1 *nodev1alpha1.NodeV1alpha1Client
|
||||||
@ -322,6 +325,11 @@ func (c *Clientset) NetworkingV1() networkingv1.NetworkingV1Interface {
|
|||||||
return c.networkingV1
|
return c.networkingV1
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// NetworkingV1alpha1 retrieves the NetworkingV1alpha1Client
|
||||||
|
func (c *Clientset) NetworkingV1alpha1() networkingv1alpha1.NetworkingV1alpha1Interface {
|
||||||
|
return c.networkingV1alpha1
|
||||||
|
}
|
||||||
|
|
||||||
// NetworkingV1beta1 retrieves the NetworkingV1beta1Client
|
// NetworkingV1beta1 retrieves the NetworkingV1beta1Client
|
||||||
func (c *Clientset) NetworkingV1beta1() networkingv1beta1.NetworkingV1beta1Interface {
|
func (c *Clientset) NetworkingV1beta1() networkingv1beta1.NetworkingV1beta1Interface {
|
||||||
return c.networkingV1beta1
|
return c.networkingV1beta1
|
||||||
@ -561,6 +569,10 @@ func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset,
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
cs.networkingV1alpha1, err = networkingv1alpha1.NewForConfigAndClient(&configShallowCopy, httpClient)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
cs.networkingV1beta1, err = networkingv1beta1.NewForConfigAndClient(&configShallowCopy, httpClient)
|
cs.networkingV1beta1, err = networkingv1beta1.NewForConfigAndClient(&configShallowCopy, httpClient)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
@ -672,6 +684,7 @@ func New(c rest.Interface) *Clientset {
|
|||||||
cs.flowcontrolV1beta1 = flowcontrolv1beta1.New(c)
|
cs.flowcontrolV1beta1 = flowcontrolv1beta1.New(c)
|
||||||
cs.flowcontrolV1beta2 = flowcontrolv1beta2.New(c)
|
cs.flowcontrolV1beta2 = flowcontrolv1beta2.New(c)
|
||||||
cs.networkingV1 = networkingv1.New(c)
|
cs.networkingV1 = networkingv1.New(c)
|
||||||
|
cs.networkingV1alpha1 = networkingv1alpha1.New(c)
|
||||||
cs.networkingV1beta1 = networkingv1beta1.New(c)
|
cs.networkingV1beta1 = networkingv1beta1.New(c)
|
||||||
cs.nodeV1 = nodev1.New(c)
|
cs.nodeV1 = nodev1.New(c)
|
||||||
cs.nodeV1alpha1 = nodev1alpha1.New(c)
|
cs.nodeV1alpha1 = nodev1alpha1.New(c)
|
||||||
|
@ -84,6 +84,8 @@ import (
|
|||||||
fakeflowcontrolv1beta2 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/fake"
|
fakeflowcontrolv1beta2 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/fake"
|
||||||
networkingv1 "k8s.io/client-go/kubernetes/typed/networking/v1"
|
networkingv1 "k8s.io/client-go/kubernetes/typed/networking/v1"
|
||||||
fakenetworkingv1 "k8s.io/client-go/kubernetes/typed/networking/v1/fake"
|
fakenetworkingv1 "k8s.io/client-go/kubernetes/typed/networking/v1/fake"
|
||||||
|
networkingv1alpha1 "k8s.io/client-go/kubernetes/typed/networking/v1alpha1"
|
||||||
|
fakenetworkingv1alpha1 "k8s.io/client-go/kubernetes/typed/networking/v1alpha1/fake"
|
||||||
networkingv1beta1 "k8s.io/client-go/kubernetes/typed/networking/v1beta1"
|
networkingv1beta1 "k8s.io/client-go/kubernetes/typed/networking/v1beta1"
|
||||||
fakenetworkingv1beta1 "k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake"
|
fakenetworkingv1beta1 "k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake"
|
||||||
nodev1 "k8s.io/client-go/kubernetes/typed/node/v1"
|
nodev1 "k8s.io/client-go/kubernetes/typed/node/v1"
|
||||||
@ -317,6 +319,11 @@ func (c *Clientset) NetworkingV1() networkingv1.NetworkingV1Interface {
|
|||||||
return &fakenetworkingv1.FakeNetworkingV1{Fake: &c.Fake}
|
return &fakenetworkingv1.FakeNetworkingV1{Fake: &c.Fake}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// NetworkingV1alpha1 retrieves the NetworkingV1alpha1Client
|
||||||
|
func (c *Clientset) NetworkingV1alpha1() networkingv1alpha1.NetworkingV1alpha1Interface {
|
||||||
|
return &fakenetworkingv1alpha1.FakeNetworkingV1alpha1{Fake: &c.Fake}
|
||||||
|
}
|
||||||
|
|
||||||
// NetworkingV1beta1 retrieves the NetworkingV1beta1Client
|
// NetworkingV1beta1 retrieves the NetworkingV1beta1Client
|
||||||
func (c *Clientset) NetworkingV1beta1() networkingv1beta1.NetworkingV1beta1Interface {
|
func (c *Clientset) NetworkingV1beta1() networkingv1beta1.NetworkingV1beta1Interface {
|
||||||
return &fakenetworkingv1beta1.FakeNetworkingV1beta1{Fake: &c.Fake}
|
return &fakenetworkingv1beta1.FakeNetworkingV1beta1{Fake: &c.Fake}
|
||||||
|
@ -49,6 +49,7 @@ import (
|
|||||||
flowcontrolv1beta1 "k8s.io/api/flowcontrol/v1beta1"
|
flowcontrolv1beta1 "k8s.io/api/flowcontrol/v1beta1"
|
||||||
flowcontrolv1beta2 "k8s.io/api/flowcontrol/v1beta2"
|
flowcontrolv1beta2 "k8s.io/api/flowcontrol/v1beta2"
|
||||||
networkingv1 "k8s.io/api/networking/v1"
|
networkingv1 "k8s.io/api/networking/v1"
|
||||||
|
networkingv1alpha1 "k8s.io/api/networking/v1alpha1"
|
||||||
networkingv1beta1 "k8s.io/api/networking/v1beta1"
|
networkingv1beta1 "k8s.io/api/networking/v1beta1"
|
||||||
nodev1 "k8s.io/api/node/v1"
|
nodev1 "k8s.io/api/node/v1"
|
||||||
nodev1alpha1 "k8s.io/api/node/v1alpha1"
|
nodev1alpha1 "k8s.io/api/node/v1alpha1"
|
||||||
@ -105,6 +106,7 @@ var localSchemeBuilder = runtime.SchemeBuilder{
|
|||||||
flowcontrolv1beta1.AddToScheme,
|
flowcontrolv1beta1.AddToScheme,
|
||||||
flowcontrolv1beta2.AddToScheme,
|
flowcontrolv1beta2.AddToScheme,
|
||||||
networkingv1.AddToScheme,
|
networkingv1.AddToScheme,
|
||||||
|
networkingv1alpha1.AddToScheme,
|
||||||
networkingv1beta1.AddToScheme,
|
networkingv1beta1.AddToScheme,
|
||||||
nodev1.AddToScheme,
|
nodev1.AddToScheme,
|
||||||
nodev1alpha1.AddToScheme,
|
nodev1alpha1.AddToScheme,
|
||||||
|
@ -49,6 +49,7 @@ import (
|
|||||||
flowcontrolv1beta1 "k8s.io/api/flowcontrol/v1beta1"
|
flowcontrolv1beta1 "k8s.io/api/flowcontrol/v1beta1"
|
||||||
flowcontrolv1beta2 "k8s.io/api/flowcontrol/v1beta2"
|
flowcontrolv1beta2 "k8s.io/api/flowcontrol/v1beta2"
|
||||||
networkingv1 "k8s.io/api/networking/v1"
|
networkingv1 "k8s.io/api/networking/v1"
|
||||||
|
networkingv1alpha1 "k8s.io/api/networking/v1alpha1"
|
||||||
networkingv1beta1 "k8s.io/api/networking/v1beta1"
|
networkingv1beta1 "k8s.io/api/networking/v1beta1"
|
||||||
nodev1 "k8s.io/api/node/v1"
|
nodev1 "k8s.io/api/node/v1"
|
||||||
nodev1alpha1 "k8s.io/api/node/v1alpha1"
|
nodev1alpha1 "k8s.io/api/node/v1alpha1"
|
||||||
@ -105,6 +106,7 @@ var localSchemeBuilder = runtime.SchemeBuilder{
|
|||||||
flowcontrolv1beta1.AddToScheme,
|
flowcontrolv1beta1.AddToScheme,
|
||||||
flowcontrolv1beta2.AddToScheme,
|
flowcontrolv1beta2.AddToScheme,
|
||||||
networkingv1.AddToScheme,
|
networkingv1.AddToScheme,
|
||||||
|
networkingv1alpha1.AddToScheme,
|
||||||
networkingv1beta1.AddToScheme,
|
networkingv1beta1.AddToScheme,
|
||||||
nodev1.AddToScheme,
|
nodev1.AddToScheme,
|
||||||
nodev1alpha1.AddToScheme,
|
nodev1alpha1.AddToScheme,
|
||||||
|
@ -0,0 +1,197 @@
|
|||||||
|
/*
|
||||||
|
Copyright The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
// Code generated by client-gen. DO NOT EDIT.
|
||||||
|
|
||||||
|
package v1alpha1
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
json "encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
v1alpha1 "k8s.io/api/networking/v1alpha1"
|
||||||
|
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
|
types "k8s.io/apimachinery/pkg/types"
|
||||||
|
watch "k8s.io/apimachinery/pkg/watch"
|
||||||
|
networkingv1alpha1 "k8s.io/client-go/applyconfigurations/networking/v1alpha1"
|
||||||
|
scheme "k8s.io/client-go/kubernetes/scheme"
|
||||||
|
rest "k8s.io/client-go/rest"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ClusterCIDRsGetter has a method to return a ClusterCIDRInterface.
|
||||||
|
// A group's client should implement this interface.
|
||||||
|
type ClusterCIDRsGetter interface {
|
||||||
|
ClusterCIDRs() ClusterCIDRInterface
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClusterCIDRInterface has methods to work with ClusterCIDR resources.
|
||||||
|
type ClusterCIDRInterface interface {
|
||||||
|
Create(ctx context.Context, clusterCIDR *v1alpha1.ClusterCIDR, opts v1.CreateOptions) (*v1alpha1.ClusterCIDR, error)
|
||||||
|
Update(ctx context.Context, clusterCIDR *v1alpha1.ClusterCIDR, opts v1.UpdateOptions) (*v1alpha1.ClusterCIDR, error)
|
||||||
|
Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
|
||||||
|
DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
|
||||||
|
Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.ClusterCIDR, error)
|
||||||
|
List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.ClusterCIDRList, error)
|
||||||
|
Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
|
||||||
|
Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClusterCIDR, err error)
|
||||||
|
Apply(ctx context.Context, clusterCIDR *networkingv1alpha1.ClusterCIDRApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ClusterCIDR, err error)
|
||||||
|
ClusterCIDRExpansion
|
||||||
|
}
|
||||||
|
|
||||||
|
// clusterCIDRs implements ClusterCIDRInterface
|
||||||
|
type clusterCIDRs struct {
|
||||||
|
client rest.Interface
|
||||||
|
}
|
||||||
|
|
||||||
|
// newClusterCIDRs returns a ClusterCIDRs
|
||||||
|
func newClusterCIDRs(c *NetworkingV1alpha1Client) *clusterCIDRs {
|
||||||
|
return &clusterCIDRs{
|
||||||
|
client: c.RESTClient(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get takes name of the clusterCIDR, and returns the corresponding clusterCIDR object, and an error if there is any.
|
||||||
|
func (c *clusterCIDRs) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ClusterCIDR, err error) {
|
||||||
|
result = &v1alpha1.ClusterCIDR{}
|
||||||
|
err = c.client.Get().
|
||||||
|
Resource("clustercidrs").
|
||||||
|
Name(name).
|
||||||
|
VersionedParams(&options, scheme.ParameterCodec).
|
||||||
|
Do(ctx).
|
||||||
|
Into(result)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// List takes label and field selectors, and returns the list of ClusterCIDRs that match those selectors.
|
||||||
|
func (c *clusterCIDRs) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ClusterCIDRList, err error) {
|
||||||
|
var timeout time.Duration
|
||||||
|
if opts.TimeoutSeconds != nil {
|
||||||
|
timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
|
||||||
|
}
|
||||||
|
result = &v1alpha1.ClusterCIDRList{}
|
||||||
|
err = c.client.Get().
|
||||||
|
Resource("clustercidrs").
|
||||||
|
VersionedParams(&opts, scheme.ParameterCodec).
|
||||||
|
Timeout(timeout).
|
||||||
|
Do(ctx).
|
||||||
|
Into(result)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Watch returns a watch.Interface that watches the requested clusterCIDRs.
|
||||||
|
func (c *clusterCIDRs) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
|
||||||
|
var timeout time.Duration
|
||||||
|
if opts.TimeoutSeconds != nil {
|
||||||
|
timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
|
||||||
|
}
|
||||||
|
opts.Watch = true
|
||||||
|
return c.client.Get().
|
||||||
|
Resource("clustercidrs").
|
||||||
|
VersionedParams(&opts, scheme.ParameterCodec).
|
||||||
|
Timeout(timeout).
|
||||||
|
Watch(ctx)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create takes the representation of a clusterCIDR and creates it. Returns the server's representation of the clusterCIDR, and an error, if there is any.
|
||||||
|
func (c *clusterCIDRs) Create(ctx context.Context, clusterCIDR *v1alpha1.ClusterCIDR, opts v1.CreateOptions) (result *v1alpha1.ClusterCIDR, err error) {
|
||||||
|
result = &v1alpha1.ClusterCIDR{}
|
||||||
|
err = c.client.Post().
|
||||||
|
Resource("clustercidrs").
|
||||||
|
VersionedParams(&opts, scheme.ParameterCodec).
|
||||||
|
Body(clusterCIDR).
|
||||||
|
Do(ctx).
|
||||||
|
Into(result)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update takes the representation of a clusterCIDR and updates it. Returns the server's representation of the clusterCIDR, and an error, if there is any.
func (c *clusterCIDRs) Update(ctx context.Context, clusterCIDR *v1alpha1.ClusterCIDR, opts v1.UpdateOptions) (result *v1alpha1.ClusterCIDR, err error) {
	result = &v1alpha1.ClusterCIDR{}
	// PUT to the named resource; the object's own Name selects the target.
	err = c.client.Put().
		Resource("clustercidrs").
		Name(clusterCIDR.Name).
		VersionedParams(&opts, scheme.ParameterCodec).
		Body(clusterCIDR).
		Do(ctx).
		Into(result)
	return
}
|
||||||
|
|
||||||
|
// Delete takes name of the clusterCIDR and deletes it. Returns an error if one occurs.
func (c *clusterCIDRs) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
	// DeleteOptions travel in the request body, not as query parameters.
	return c.client.Delete().
		Resource("clustercidrs").
		Name(name).
		Body(&opts).
		Do(ctx).
		Error()
}
|
||||||
|
|
||||||
|
// DeleteCollection deletes a collection of objects.
func (c *clusterCIDRs) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
	// Honor an explicit client-side timeout from the list options; zero means no timeout.
	var timeout time.Duration
	if listOpts.TimeoutSeconds != nil {
		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
	}
	// listOpts select which objects to delete (query params); opts control how (body).
	return c.client.Delete().
		Resource("clustercidrs").
		VersionedParams(&listOpts, scheme.ParameterCodec).
		Timeout(timeout).
		Body(&opts).
		Do(ctx).
		Error()
}
|
||||||
|
|
||||||
|
// Patch applies the patch and returns the patched clusterCIDR.
func (c *clusterCIDRs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClusterCIDR, err error) {
	result = &v1alpha1.ClusterCIDR{}
	err = c.client.Patch(pt).
		Resource("clustercidrs").
		Name(name).
		SubResource(subresources...).
		VersionedParams(&opts, scheme.ParameterCodec).
		Body(data).
		Do(ctx).
		Into(result)
	return
}
|
||||||
|
|
||||||
|
// Apply takes the given apply declarative configuration, applies it and returns the applied clusterCIDR.
func (c *clusterCIDRs) Apply(ctx context.Context, clusterCIDR *networkingv1alpha1.ClusterCIDRApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ClusterCIDR, err error) {
	if clusterCIDR == nil {
		return nil, fmt.Errorf("clusterCIDR provided to Apply must not be nil")
	}
	patchOpts := opts.ToPatchOptions()
	// The apply configuration is serialized to JSON and sent as the patch body.
	data, err := json.Marshal(clusterCIDR)
	if err != nil {
		return nil, err
	}
	// Name is a *string on the apply configuration; it must be set to address the object.
	name := clusterCIDR.Name
	if name == nil {
		return nil, fmt.Errorf("clusterCIDR.Name must be provided to Apply")
	}
	result = &v1alpha1.ClusterCIDR{}
	// Server-side apply is implemented as a PATCH with the ApplyPatchType content type.
	err = c.client.Patch(types.ApplyPatchType).
		Resource("clustercidrs").
		Name(*name).
		VersionedParams(&patchOpts, scheme.ParameterCodec).
		Body(data).
		Do(ctx).
		Into(result)
	return
}
|
@ -0,0 +1,20 @@
|
|||||||
|
/*
|
||||||
|
Copyright The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
// Code generated by client-gen. DO NOT EDIT.
|
||||||
|
|
||||||
|
// This package has the automatically generated typed clients.
|
||||||
|
package v1alpha1
|
@ -0,0 +1,20 @@
|
|||||||
|
/*
|
||||||
|
Copyright The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
// Code generated by client-gen. DO NOT EDIT.
|
||||||
|
|
||||||
|
// Package fake has the automatically generated clients.
|
||||||
|
package fake
|
@ -0,0 +1,146 @@
|
|||||||
|
/*
|
||||||
|
Copyright The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
// Code generated by client-gen. DO NOT EDIT.
|
||||||
|
|
||||||
|
package fake
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
json "encoding/json"
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
v1alpha1 "k8s.io/api/networking/v1alpha1"
|
||||||
|
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
|
labels "k8s.io/apimachinery/pkg/labels"
|
||||||
|
schema "k8s.io/apimachinery/pkg/runtime/schema"
|
||||||
|
types "k8s.io/apimachinery/pkg/types"
|
||||||
|
watch "k8s.io/apimachinery/pkg/watch"
|
||||||
|
networkingv1alpha1 "k8s.io/client-go/applyconfigurations/networking/v1alpha1"
|
||||||
|
testing "k8s.io/client-go/testing"
|
||||||
|
)
|
||||||
|
|
||||||
|
// FakeClusterCIDRs implements ClusterCIDRInterface.
// Calls are recorded as actions on the embedded Fake instead of hitting a server.
type FakeClusterCIDRs struct {
	Fake *FakeNetworkingV1alpha1
}
|
||||||
|
|
||||||
|
var clustercidrsResource = schema.GroupVersionResource{Group: "networking.k8s.io", Version: "v1alpha1", Resource: "clustercidrs"}
|
||||||
|
|
||||||
|
var clustercidrsKind = schema.GroupVersionKind{Group: "networking.k8s.io", Version: "v1alpha1", Kind: "ClusterCIDR"}
|
||||||
|
|
||||||
|
// Get takes name of the clusterCIDR, and returns the corresponding clusterCIDR object, and an error if there is any.
func (c *FakeClusterCIDRs) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ClusterCIDR, err error) {
	obj, err := c.Fake.
		Invokes(testing.NewRootGetAction(clustercidrsResource, name), &v1alpha1.ClusterCIDR{})
	// A nil object means the tracker produced no result; surface the error only.
	if obj == nil {
		return nil, err
	}
	return obj.(*v1alpha1.ClusterCIDR), err
}
|
||||||
|
|
||||||
|
// List takes label and field selectors, and returns the list of ClusterCIDRs that match those selectors.
func (c *FakeClusterCIDRs) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ClusterCIDRList, err error) {
	obj, err := c.Fake.
		Invokes(testing.NewRootListAction(clustercidrsResource, clustercidrsKind, opts), &v1alpha1.ClusterCIDRList{})
	if obj == nil {
		return nil, err
	}

	// Apply label selection locally; the field selector is discarded here
	// (second return of ExtractFromListOptions is ignored).
	label, _, _ := testing.ExtractFromListOptions(opts)
	if label == nil {
		label = labels.Everything()
	}
	list := &v1alpha1.ClusterCIDRList{ListMeta: obj.(*v1alpha1.ClusterCIDRList).ListMeta}
	for _, item := range obj.(*v1alpha1.ClusterCIDRList).Items {
		if label.Matches(labels.Set(item.Labels)) {
			list.Items = append(list.Items, item)
		}
	}
	return list, err
}
|
||||||
|
|
||||||
|
// Watch returns a watch.Interface that watches the requested clusterCIDRs.
func (c *FakeClusterCIDRs) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
	return c.Fake.
		InvokesWatch(testing.NewRootWatchAction(clustercidrsResource, opts))
}
|
||||||
|
|
||||||
|
// Create takes the representation of a clusterCIDR and creates it. Returns the server's representation of the clusterCIDR, and an error, if there is any.
func (c *FakeClusterCIDRs) Create(ctx context.Context, clusterCIDR *v1alpha1.ClusterCIDR, opts v1.CreateOptions) (result *v1alpha1.ClusterCIDR, err error) {
	obj, err := c.Fake.
		Invokes(testing.NewRootCreateAction(clustercidrsResource, clusterCIDR), &v1alpha1.ClusterCIDR{})
	if obj == nil {
		return nil, err
	}
	return obj.(*v1alpha1.ClusterCIDR), err
}
|
||||||
|
|
||||||
|
// Update takes the representation of a clusterCIDR and updates it. Returns the server's representation of the clusterCIDR, and an error, if there is any.
func (c *FakeClusterCIDRs) Update(ctx context.Context, clusterCIDR *v1alpha1.ClusterCIDR, opts v1.UpdateOptions) (result *v1alpha1.ClusterCIDR, err error) {
	obj, err := c.Fake.
		Invokes(testing.NewRootUpdateAction(clustercidrsResource, clusterCIDR), &v1alpha1.ClusterCIDR{})
	if obj == nil {
		return nil, err
	}
	return obj.(*v1alpha1.ClusterCIDR), err
}
|
||||||
|
|
||||||
|
// Delete takes name of the clusterCIDR and deletes it. Returns an error if one occurs.
func (c *FakeClusterCIDRs) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
	// The returned object is irrelevant for a delete; only the error matters.
	_, err := c.Fake.
		Invokes(testing.NewRootDeleteActionWithOptions(clustercidrsResource, name, opts), &v1alpha1.ClusterCIDR{})
	return err
}
|
||||||
|
|
||||||
|
// DeleteCollection deletes a collection of objects.
func (c *FakeClusterCIDRs) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
	action := testing.NewRootDeleteCollectionAction(clustercidrsResource, listOpts)

	// Note: the DeleteOptions (opts) are not recorded on the collection action here.
	_, err := c.Fake.Invokes(action, &v1alpha1.ClusterCIDRList{})
	return err
}
|
||||||
|
|
||||||
|
// Patch applies the patch and returns the patched clusterCIDR.
func (c *FakeClusterCIDRs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClusterCIDR, err error) {
	obj, err := c.Fake.
		Invokes(testing.NewRootPatchSubresourceAction(clustercidrsResource, name, pt, data, subresources...), &v1alpha1.ClusterCIDR{})
	if obj == nil {
		return nil, err
	}
	return obj.(*v1alpha1.ClusterCIDR), err
}
|
||||||
|
|
||||||
|
// Apply takes the given apply declarative configuration, applies it and returns the applied clusterCIDR.
func (c *FakeClusterCIDRs) Apply(ctx context.Context, clusterCIDR *networkingv1alpha1.ClusterCIDRApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ClusterCIDR, err error) {
	if clusterCIDR == nil {
		return nil, fmt.Errorf("clusterCIDR provided to Apply must not be nil")
	}
	// Serialize the apply configuration; it is recorded as an apply-typed patch action.
	data, err := json.Marshal(clusterCIDR)
	if err != nil {
		return nil, err
	}
	// Name is a *string on the apply configuration; it must be set to address the object.
	name := clusterCIDR.Name
	if name == nil {
		return nil, fmt.Errorf("clusterCIDR.Name must be provided to Apply")
	}
	obj, err := c.Fake.
		Invokes(testing.NewRootPatchSubresourceAction(clustercidrsResource, *name, types.ApplyPatchType, data), &v1alpha1.ClusterCIDR{})
	if obj == nil {
		return nil, err
	}
	return obj.(*v1alpha1.ClusterCIDR), err
}
|
@ -0,0 +1,40 @@
|
|||||||
|
/*
|
||||||
|
Copyright The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
// Code generated by client-gen. DO NOT EDIT.
|
||||||
|
|
||||||
|
package fake
|
||||||
|
|
||||||
|
import (
|
||||||
|
v1alpha1 "k8s.io/client-go/kubernetes/typed/networking/v1alpha1"
|
||||||
|
rest "k8s.io/client-go/rest"
|
||||||
|
testing "k8s.io/client-go/testing"
|
||||||
|
)
|
||||||
|
|
||||||
|
// FakeNetworkingV1alpha1 serves fake networking/v1alpha1 typed clients;
// all calls are recorded on the embedded testing.Fake.
type FakeNetworkingV1alpha1 struct {
	*testing.Fake
}
|
||||||
|
|
||||||
|
// ClusterCIDRs returns a fake ClusterCIDRInterface backed by this client's Fake.
func (c *FakeNetworkingV1alpha1) ClusterCIDRs() v1alpha1.ClusterCIDRInterface {
	return &FakeClusterCIDRs{c}
}
|
||||||
|
|
||||||
|
// RESTClient returns a RESTClient that is used to communicate
// with API server by this client implementation.
// The fake client performs no REST calls, so a nil *rest.RESTClient is returned.
func (c *FakeNetworkingV1alpha1) RESTClient() rest.Interface {
	var ret *rest.RESTClient
	return ret
}
|
@ -0,0 +1,21 @@
|
|||||||
|
/*
|
||||||
|
Copyright The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
// Code generated by client-gen. DO NOT EDIT.
|
||||||
|
|
||||||
|
package v1alpha1
|
||||||
|
|
||||||
|
// ClusterCIDRExpansion allows custom, hand-written methods to be added to
// the generated ClusterCIDRInterface; it is empty by default.
type ClusterCIDRExpansion interface{}
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue
Block a user