diff --git a/api/openapi-spec/swagger.json b/api/openapi-spec/swagger.json index 3acd35cb01f..58ec09203b2 100644 --- a/api/openapi-spec/swagger.json +++ b/api/openapi-spec/swagger.json @@ -13017,7 +13017,7 @@ "type": "object", "x-kubernetes-map-type": "atomic" }, - "io.k8s.api.resource.v1alpha1.AllocationResult": { + "io.k8s.api.resource.v1alpha2.AllocationResult": { "description": "AllocationResult contains attributed of an allocated resource.", "properties": { "availableOnNodes": { @@ -13035,7 +13035,7 @@ }, "type": "object" }, - "io.k8s.api.resource.v1alpha1.PodScheduling": { + "io.k8s.api.resource.v1alpha2.PodScheduling": { "description": "PodScheduling objects hold information that is needed to schedule a Pod with ResourceClaims that use \"WaitForFirstConsumer\" allocation mode.\n\nThis is an alpha type and requires enabling the DynamicResourceAllocation feature gate.", "properties": { "apiVersion": { @@ -13051,11 +13051,11 @@ "description": "Standard object metadata" }, "spec": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha1.PodSchedulingSpec", + "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.PodSchedulingSpec", "description": "Spec describes where resources for the Pod are needed." }, "status": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha1.PodSchedulingStatus", + "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.PodSchedulingStatus", "description": "Status describes where resources for the Pod can be allocated." } }, @@ -13067,11 +13067,11 @@ { "group": "resource.k8s.io", "kind": "PodScheduling", - "version": "v1alpha1" + "version": "v1alpha2" } ] }, - "io.k8s.api.resource.v1alpha1.PodSchedulingList": { + "io.k8s.api.resource.v1alpha2.PodSchedulingList": { "description": "PodSchedulingList is a collection of Pod scheduling objects.", "properties": { "apiVersion": { @@ -13081,7 +13081,7 @@ "items": { "description": "Items is the list of PodScheduling objects.", "items": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha1.PodScheduling" + "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.PodScheduling" }, "type": "array" }, @@ -13102,11 +13102,11 @@ { "group": "resource.k8s.io", "kind": "PodSchedulingList", - "version": "v1alpha1" + "version": "v1alpha2" } ] }, - "io.k8s.api.resource.v1alpha1.PodSchedulingSpec": { + "io.k8s.api.resource.v1alpha2.PodSchedulingSpec": { "description": "PodSchedulingSpec describes where resources for the Pod are needed.", "properties": { "potentialNodes": { @@ -13124,13 +13124,13 @@ }, "type": "object" }, - "io.k8s.api.resource.v1alpha1.PodSchedulingStatus": { + "io.k8s.api.resource.v1alpha2.PodSchedulingStatus": { "description": "PodSchedulingStatus describes where resources for the Pod can be allocated.", "properties": { "resourceClaims": { "description": "ResourceClaims describes resource availability for each pod.spec.resourceClaim entry where the corresponding ResourceClaim uses \"WaitForFirstConsumer\" allocation mode.", "items": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha1.ResourceClaimSchedulingStatus" + "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.ResourceClaimSchedulingStatus" }, "type": "array", "x-kubernetes-list-map-keys": [ @@ -13141,7 +13141,7 @@ }, "type": "object" }, - "io.k8s.api.resource.v1alpha1.ResourceClaim": { + "io.k8s.api.resource.v1alpha2.ResourceClaim": { "description": "ResourceClaim describes which resources are needed by a resource consumer. 
Its status tracks whether the resource has been allocated and what the resulting attributes are.\n\nThis is an alpha type and requires enabling the DynamicResourceAllocation feature gate.", "properties": { "apiVersion": { @@ -13157,11 +13157,11 @@ "description": "Standard object metadata" }, "spec": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha1.ResourceClaimSpec", + "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.ResourceClaimSpec", "description": "Spec describes the desired attributes of a resource that then needs to be allocated. It can only be set once when creating the ResourceClaim." }, "status": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha1.ResourceClaimStatus", + "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.ResourceClaimStatus", "description": "Status describes whether the resource is available and with which attributes." } }, @@ -13173,11 +13173,11 @@ { "group": "resource.k8s.io", "kind": "ResourceClaim", - "version": "v1alpha1" + "version": "v1alpha2" } ] }, - "io.k8s.api.resource.v1alpha1.ResourceClaimConsumerReference": { + "io.k8s.api.resource.v1alpha2.ResourceClaimConsumerReference": { "description": "ResourceClaimConsumerReference contains enough information to let you locate the consumer of a ResourceClaim. The user must be a resource in the same namespace as the ResourceClaim.", "properties": { "apiGroup": { @@ -13204,7 +13204,7 @@ ], "type": "object" }, - "io.k8s.api.resource.v1alpha1.ResourceClaimList": { + "io.k8s.api.resource.v1alpha2.ResourceClaimList": { "description": "ResourceClaimList is a collection of claims.", "properties": { "apiVersion": { @@ -13214,7 +13214,7 @@ "items": { "description": "Items is the list of resource claims.", "items": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha1.ResourceClaim" + "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.ResourceClaim" }, "type": "array" }, @@ -13235,11 +13235,11 @@ { "group": "resource.k8s.io", "kind": "ResourceClaimList", - "version": "v1alpha1" + "version": "v1alpha2" } ] }, - "io.k8s.api.resource.v1alpha1.ResourceClaimParametersReference": { + "io.k8s.api.resource.v1alpha2.ResourceClaimParametersReference": { "description": "ResourceClaimParametersReference contains enough information to let you locate the parameters for a ResourceClaim. The object must be in the same namespace as the ResourceClaim.", "properties": { "apiGroup": { @@ -13261,7 +13261,7 @@ ], "type": "object" }, - "io.k8s.api.resource.v1alpha1.ResourceClaimSchedulingStatus": { + "io.k8s.api.resource.v1alpha2.ResourceClaimSchedulingStatus": { "description": "ResourceClaimSchedulingStatus contains information about one particular ResourceClaim with \"WaitForFirstConsumer\" allocation mode.", "properties": { "name": { @@ -13279,7 +13279,7 @@ }, "type": "object" }, - "io.k8s.api.resource.v1alpha1.ResourceClaimSpec": { + "io.k8s.api.resource.v1alpha2.ResourceClaimSpec": { "description": "ResourceClaimSpec defines how a resource is to be allocated.", "properties": { "allocationMode": { @@ -13287,7 +13287,7 @@ "type": "string" }, "parametersRef": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha1.ResourceClaimParametersReference", + "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.ResourceClaimParametersReference", "description": "ParametersRef references a separate object with arbitrary parameters that will be used by the driver when allocating a resource for the claim.\n\nThe object must be in the same namespace as the ResourceClaim." 
}, "resourceClassName": { @@ -13300,11 +13300,11 @@ ], "type": "object" }, - "io.k8s.api.resource.v1alpha1.ResourceClaimStatus": { + "io.k8s.api.resource.v1alpha2.ResourceClaimStatus": { "description": "ResourceClaimStatus tracks whether the resource has been allocated and what the resulting attributes are.", "properties": { "allocation": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha1.AllocationResult", + "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.AllocationResult", "description": "Allocation is set by the resource driver once a resource has been allocated successfully. If this is not specified, the resource is not yet allocated." }, "deallocationRequested": { @@ -13318,7 +13318,7 @@ "reservedFor": { "description": "ReservedFor indicates which entities are currently allowed to use the claim. A Pod which references a ResourceClaim which is not reserved for that Pod will not be started.\n\nThere can be at most 32 such reservations. This may get increased in the future, but not reduced.", "items": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha1.ResourceClaimConsumerReference" + "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.ResourceClaimConsumerReference" }, "type": "array", "x-kubernetes-list-map-keys": [ @@ -13329,7 +13329,7 @@ }, "type": "object" }, - "io.k8s.api.resource.v1alpha1.ResourceClaimTemplate": { + "io.k8s.api.resource.v1alpha2.ResourceClaimTemplate": { "description": "ResourceClaimTemplate is used to produce ResourceClaim objects.", "properties": { "apiVersion": { @@ -13345,7 +13345,7 @@ "description": "Standard object metadata" }, "spec": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha1.ResourceClaimTemplateSpec", + "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.ResourceClaimTemplateSpec", "description": "Describes the ResourceClaim that is to be generated.\n\nThis field is immutable. A ResourceClaim will get created by the control plane for a Pod when needed and then not get updated anymore." } }, @@ -13357,11 +13357,11 @@ { "group": "resource.k8s.io", "kind": "ResourceClaimTemplate", - "version": "v1alpha1" + "version": "v1alpha2" } ] }, - "io.k8s.api.resource.v1alpha1.ResourceClaimTemplateList": { + "io.k8s.api.resource.v1alpha2.ResourceClaimTemplateList": { "description": "ResourceClaimTemplateList is a collection of claim templates.", "properties": { "apiVersion": { @@ -13371,7 +13371,7 @@ "items": { "description": "Items is the list of resource claim templates.", "items": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha1.ResourceClaimTemplate" + "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.ResourceClaimTemplate" }, "type": "array" }, @@ -13392,11 +13392,11 @@ { "group": "resource.k8s.io", "kind": "ResourceClaimTemplateList", - "version": "v1alpha1" + "version": "v1alpha2" } ] }, - "io.k8s.api.resource.v1alpha1.ResourceClaimTemplateSpec": { + "io.k8s.api.resource.v1alpha2.ResourceClaimTemplateSpec": { "description": "ResourceClaimTemplateSpec contains the metadata and fields for a ResourceClaim.", "properties": { "metadata": { @@ -13404,7 +13404,7 @@ "description": "ObjectMeta may contain labels and annotations that will be copied into the PVC when creating it. No other fields are allowed and will be rejected during validation." }, "spec": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha1.ResourceClaimSpec", + "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.ResourceClaimSpec", "description": "Spec for the ResourceClaim. 
The entire content is copied unchanged into the ResourceClaim that gets created from this template. The same fields as in a ResourceClaim are also valid here." } }, @@ -13413,7 +13413,7 @@ ], "type": "object" }, - "io.k8s.api.resource.v1alpha1.ResourceClass": { + "io.k8s.api.resource.v1alpha2.ResourceClass": { "description": "ResourceClass is used by administrators to influence how resources are allocated.\n\nThis is an alpha type and requires enabling the DynamicResourceAllocation feature gate.", "properties": { "apiVersion": { @@ -13433,7 +13433,7 @@ "description": "Standard object metadata" }, "parametersRef": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha1.ResourceClassParametersReference", + "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.ResourceClassParametersReference", "description": "ParametersRef references an arbitrary separate object that may hold parameters that will be used by the driver when allocating a resource that uses this class. A dynamic resource driver can distinguish between parameters stored here and and those stored in ResourceClaimSpec." }, "suitableNodes": { @@ -13449,11 +13449,11 @@ { "group": "resource.k8s.io", "kind": "ResourceClass", - "version": "v1alpha1" + "version": "v1alpha2" } ] }, - "io.k8s.api.resource.v1alpha1.ResourceClassList": { + "io.k8s.api.resource.v1alpha2.ResourceClassList": { "description": "ResourceClassList is a collection of classes.", "properties": { "apiVersion": { @@ -13463,7 +13463,7 @@ "items": { "description": "Items is the list of resource classes.", "items": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha1.ResourceClass" + "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.ResourceClass" }, "type": "array" }, @@ -13484,11 +13484,11 @@ { "group": "resource.k8s.io", "kind": "ResourceClassList", - "version": "v1alpha1" + "version": "v1alpha2" } ] }, - "io.k8s.api.resource.v1alpha1.ResourceClassParametersReference": { + "io.k8s.api.resource.v1alpha2.ResourceClassParametersReference": { "description": "ResourceClassParametersReference contains enough information to let you locate the parameters for a ResourceClass.", "properties": { "apiGroup": { @@ -15478,7 +15478,7 @@ { "group": "resource.k8s.io", "kind": "DeleteOptions", - "version": "v1alpha1" + "version": "v1alpha2" }, { "group": "scheduling.k8s.io", @@ -15850,7 +15850,7 @@ { "group": "resource.k8s.io", "kind": "Status", - "version": "v1alpha1" + "version": "v1alpha2" } ] }, @@ -16181,7 +16181,7 @@ { "group": "resource.k8s.io", "kind": "WatchEvent", - "version": "v1alpha1" + "version": "v1alpha2" }, { "group": "scheduling.k8s.io", @@ -78147,7 +78147,7 @@ ] } }, - "/apis/resource.k8s.io/v1alpha1/": { + "/apis/resource.k8s.io/v1alpha2/": { "get": { "consumes": [ "application/json", @@ -78155,7 +78155,7 @@ "application/vnd.kubernetes.protobuf" ], "description": "get available resources", - "operationId": "getResourceV1alpha1APIResources", + "operationId": "getResourceV1alpha2APIResources", "produces": [ "application/json", "application/yaml", @@ -78176,17 +78176,17 @@ "https" ], "tags": [ - "resource_v1alpha1" + "resource_v1alpha2" ] } }, - "/apis/resource.k8s.io/v1alpha1/namespaces/{namespace}/podschedulings": { + "/apis/resource.k8s.io/v1alpha2/namespaces/{namespace}/podschedulings": { "delete": { "consumes": [ "*/*" ], "description": "delete collection of PodScheduling", - "operationId": "deleteResourceV1alpha1CollectionNamespacedPodScheduling", + "operationId": "deleteResourceV1alpha2CollectionNamespacedPodScheduling", "parameters": [ { "in": "body", @@ 
-78300,13 +78300,13 @@ "https" ], "tags": [ - "resource_v1alpha1" + "resource_v1alpha2" ], "x-kubernetes-action": "deletecollection", "x-kubernetes-group-version-kind": { "group": "resource.k8s.io", "kind": "PodScheduling", - "version": "v1alpha1" + "version": "v1alpha2" } }, "get": { @@ -78314,7 +78314,7 @@ "*/*" ], "description": "list or watch objects of kind PodScheduling", - "operationId": "listResourceV1alpha1NamespacedPodScheduling", + "operationId": "listResourceV1alpha2NamespacedPodScheduling", "parameters": [ { "description": "allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.", @@ -78398,7 +78398,7 @@ "200": { "description": "OK", "schema": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha1.PodSchedulingList" + "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.PodSchedulingList" } }, "401": { @@ -78409,13 +78409,13 @@ "https" ], "tags": [ - "resource_v1alpha1" + "resource_v1alpha2" ], "x-kubernetes-action": "list", "x-kubernetes-group-version-kind": { "group": "resource.k8s.io", "kind": "PodScheduling", - "version": "v1alpha1" + "version": "v1alpha2" } }, "parameters": [ @@ -78440,14 +78440,14 @@ "*/*" ], "description": "create a PodScheduling", - "operationId": "createResourceV1alpha1NamespacedPodScheduling", + "operationId": "createResourceV1alpha2NamespacedPodScheduling", "parameters": [ { "in": "body", "name": "body", "required": true, "schema": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha1.PodScheduling" + "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.PodScheduling" } }, { @@ -78481,19 +78481,19 @@ "200": { "description": "OK", "schema": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha1.PodScheduling" + "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.PodScheduling" } }, "201": { "description": "Created", "schema": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha1.PodScheduling" + "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.PodScheduling" } }, "202": { "description": "Accepted", "schema": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha1.PodScheduling" + "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.PodScheduling" } }, "401": { @@ -78504,23 +78504,23 @@ "https" ], "tags": [ - "resource_v1alpha1" + "resource_v1alpha2" ], "x-kubernetes-action": "post", "x-kubernetes-group-version-kind": { "group": "resource.k8s.io", "kind": "PodScheduling", - "version": "v1alpha1" + "version": "v1alpha2" } } }, - "/apis/resource.k8s.io/v1alpha1/namespaces/{namespace}/podschedulings/{name}": { + "/apis/resource.k8s.io/v1alpha2/namespaces/{namespace}/podschedulings/{name}": { "delete": { "consumes": [ "*/*" ], "description": "delete a PodScheduling", - "operationId": "deleteResourceV1alpha1NamespacedPodScheduling", + "operationId": "deleteResourceV1alpha2NamespacedPodScheduling", "parameters": [ { "in": "body", @@ -78567,13 +78567,13 @@ "200": { "description": "OK", "schema": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha1.PodScheduling" + "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.PodScheduling" } }, "202": { "description": "Accepted", "schema": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha1.PodScheduling" + "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.PodScheduling" } }, "401": { @@ 
-78584,13 +78584,13 @@ "https" ], "tags": [ - "resource_v1alpha1" + "resource_v1alpha2" ], "x-kubernetes-action": "delete", "x-kubernetes-group-version-kind": { "group": "resource.k8s.io", "kind": "PodScheduling", - "version": "v1alpha1" + "version": "v1alpha2" } }, "get": { @@ -78598,7 +78598,7 @@ "*/*" ], "description": "read the specified PodScheduling", - "operationId": "readResourceV1alpha1NamespacedPodScheduling", + "operationId": "readResourceV1alpha2NamespacedPodScheduling", "produces": [ "application/json", "application/yaml", @@ -78608,7 +78608,7 @@ "200": { "description": "OK", "schema": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha1.PodScheduling" + "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.PodScheduling" } }, "401": { @@ -78619,13 +78619,13 @@ "https" ], "tags": [ - "resource_v1alpha1" + "resource_v1alpha2" ], "x-kubernetes-action": "get", "x-kubernetes-group-version-kind": { "group": "resource.k8s.io", "kind": "PodScheduling", - "version": "v1alpha1" + "version": "v1alpha2" } }, "parameters": [ @@ -78661,7 +78661,7 @@ "application/apply-patch+yaml" ], "description": "partially update the specified PodScheduling", - "operationId": "patchResourceV1alpha1NamespacedPodScheduling", + "operationId": "patchResourceV1alpha2NamespacedPodScheduling", "parameters": [ { "in": "body", @@ -78709,13 +78709,13 @@ "200": { "description": "OK", "schema": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha1.PodScheduling" + "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.PodScheduling" } }, "201": { "description": "Created", "schema": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha1.PodScheduling" + "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.PodScheduling" } }, "401": { @@ -78726,13 +78726,13 @@ "https" ], "tags": [ - "resource_v1alpha1" + "resource_v1alpha2" ], "x-kubernetes-action": "patch", "x-kubernetes-group-version-kind": { "group": "resource.k8s.io", "kind": "PodScheduling", - "version": "v1alpha1" + "version": "v1alpha2" } }, "put": { @@ -78740,14 +78740,14 @@ "*/*" ], "description": "replace the specified PodScheduling", - "operationId": "replaceResourceV1alpha1NamespacedPodScheduling", + "operationId": "replaceResourceV1alpha2NamespacedPodScheduling", "parameters": [ { "in": "body", "name": "body", "required": true, "schema": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha1.PodScheduling" + "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.PodScheduling" } }, { @@ -78781,13 +78781,13 @@ "200": { "description": "OK", "schema": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha1.PodScheduling" + "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.PodScheduling" } }, "201": { "description": "Created", "schema": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha1.PodScheduling" + "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.PodScheduling" } }, "401": { @@ -78798,23 +78798,23 @@ "https" ], "tags": [ - "resource_v1alpha1" + "resource_v1alpha2" ], "x-kubernetes-action": "put", "x-kubernetes-group-version-kind": { "group": "resource.k8s.io", "kind": "PodScheduling", - "version": "v1alpha1" + "version": "v1alpha2" } } }, - "/apis/resource.k8s.io/v1alpha1/namespaces/{namespace}/podschedulings/{name}/status": { + "/apis/resource.k8s.io/v1alpha2/namespaces/{namespace}/podschedulings/{name}/status": { "get": { "consumes": [ "*/*" ], "description": "read status of the specified PodScheduling", - "operationId": "readResourceV1alpha1NamespacedPodSchedulingStatus", + "operationId": "readResourceV1alpha2NamespacedPodSchedulingStatus", 
"produces": [ "application/json", "application/yaml", @@ -78824,7 +78824,7 @@ "200": { "description": "OK", "schema": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha1.PodScheduling" + "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.PodScheduling" } }, "401": { @@ -78835,13 +78835,13 @@ "https" ], "tags": [ - "resource_v1alpha1" + "resource_v1alpha2" ], "x-kubernetes-action": "get", "x-kubernetes-group-version-kind": { "group": "resource.k8s.io", "kind": "PodScheduling", - "version": "v1alpha1" + "version": "v1alpha2" } }, "parameters": [ @@ -78877,7 +78877,7 @@ "application/apply-patch+yaml" ], "description": "partially update status of the specified PodScheduling", - "operationId": "patchResourceV1alpha1NamespacedPodSchedulingStatus", + "operationId": "patchResourceV1alpha2NamespacedPodSchedulingStatus", "parameters": [ { "in": "body", @@ -78925,13 +78925,13 @@ "200": { "description": "OK", "schema": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha1.PodScheduling" + "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.PodScheduling" } }, "201": { "description": "Created", "schema": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha1.PodScheduling" + "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.PodScheduling" } }, "401": { @@ -78942,13 +78942,13 @@ "https" ], "tags": [ - "resource_v1alpha1" + "resource_v1alpha2" ], "x-kubernetes-action": "patch", "x-kubernetes-group-version-kind": { "group": "resource.k8s.io", "kind": "PodScheduling", - "version": "v1alpha1" + "version": "v1alpha2" } }, "put": { @@ -78956,14 +78956,14 @@ "*/*" ], "description": "replace status of the specified PodScheduling", - "operationId": "replaceResourceV1alpha1NamespacedPodSchedulingStatus", + "operationId": "replaceResourceV1alpha2NamespacedPodSchedulingStatus", "parameters": [ { "in": "body", "name": "body", "required": true, "schema": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha1.PodScheduling" + "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.PodScheduling" } }, { @@ -78997,13 +78997,13 @@ "200": { "description": "OK", "schema": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha1.PodScheduling" + "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.PodScheduling" } }, "201": { "description": "Created", "schema": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha1.PodScheduling" + "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.PodScheduling" } }, "401": { @@ -79014,23 +79014,23 @@ "https" ], "tags": [ - "resource_v1alpha1" + "resource_v1alpha2" ], "x-kubernetes-action": "put", "x-kubernetes-group-version-kind": { "group": "resource.k8s.io", "kind": "PodScheduling", - "version": "v1alpha1" + "version": "v1alpha2" } } }, - "/apis/resource.k8s.io/v1alpha1/namespaces/{namespace}/resourceclaims": { + "/apis/resource.k8s.io/v1alpha2/namespaces/{namespace}/resourceclaims": { "delete": { "consumes": [ "*/*" ], "description": "delete collection of ResourceClaim", - "operationId": "deleteResourceV1alpha1CollectionNamespacedResourceClaim", + "operationId": "deleteResourceV1alpha2CollectionNamespacedResourceClaim", "parameters": [ { "in": "body", @@ -79144,13 +79144,13 @@ "https" ], "tags": [ - "resource_v1alpha1" + "resource_v1alpha2" ], "x-kubernetes-action": "deletecollection", "x-kubernetes-group-version-kind": { "group": "resource.k8s.io", "kind": "ResourceClaim", - "version": "v1alpha1" + "version": "v1alpha2" } }, "get": { @@ -79158,7 +79158,7 @@ "*/*" ], "description": "list or watch objects of kind ResourceClaim", - "operationId": 
"listResourceV1alpha1NamespacedResourceClaim", + "operationId": "listResourceV1alpha2NamespacedResourceClaim", "parameters": [ { "description": "allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.", @@ -79242,7 +79242,7 @@ "200": { "description": "OK", "schema": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha1.ResourceClaimList" + "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.ResourceClaimList" } }, "401": { @@ -79253,13 +79253,13 @@ "https" ], "tags": [ - "resource_v1alpha1" + "resource_v1alpha2" ], "x-kubernetes-action": "list", "x-kubernetes-group-version-kind": { "group": "resource.k8s.io", "kind": "ResourceClaim", - "version": "v1alpha1" + "version": "v1alpha2" } }, "parameters": [ @@ -79284,14 +79284,14 @@ "*/*" ], "description": "create a ResourceClaim", - "operationId": "createResourceV1alpha1NamespacedResourceClaim", + "operationId": "createResourceV1alpha2NamespacedResourceClaim", "parameters": [ { "in": "body", "name": "body", "required": true, "schema": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha1.ResourceClaim" + "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.ResourceClaim" } }, { @@ -79325,19 +79325,19 @@ "200": { "description": "OK", "schema": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha1.ResourceClaim" + "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.ResourceClaim" } }, "201": { "description": "Created", "schema": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha1.ResourceClaim" + "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.ResourceClaim" } }, "202": { "description": "Accepted", "schema": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha1.ResourceClaim" + "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.ResourceClaim" } }, "401": { @@ -79348,23 +79348,23 @@ "https" ], "tags": [ - "resource_v1alpha1" + "resource_v1alpha2" ], "x-kubernetes-action": "post", "x-kubernetes-group-version-kind": { "group": "resource.k8s.io", "kind": "ResourceClaim", - "version": "v1alpha1" + "version": "v1alpha2" } } }, - "/apis/resource.k8s.io/v1alpha1/namespaces/{namespace}/resourceclaims/{name}": { + "/apis/resource.k8s.io/v1alpha2/namespaces/{namespace}/resourceclaims/{name}": { "delete": { "consumes": [ "*/*" ], "description": "delete a ResourceClaim", - "operationId": "deleteResourceV1alpha1NamespacedResourceClaim", + "operationId": "deleteResourceV1alpha2NamespacedResourceClaim", "parameters": [ { "in": "body", @@ -79411,13 +79411,13 @@ "200": { "description": "OK", "schema": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha1.ResourceClaim" + "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.ResourceClaim" } }, "202": { "description": "Accepted", "schema": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha1.ResourceClaim" + "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.ResourceClaim" } }, "401": { @@ -79428,13 +79428,13 @@ "https" ], "tags": [ - "resource_v1alpha1" + "resource_v1alpha2" ], "x-kubernetes-action": "delete", "x-kubernetes-group-version-kind": { "group": "resource.k8s.io", "kind": "ResourceClaim", - "version": "v1alpha1" + "version": "v1alpha2" } }, "get": { @@ -79442,7 +79442,7 @@ "*/*" ], "description": "read the specified ResourceClaim", - "operationId": 
"readResourceV1alpha1NamespacedResourceClaim", + "operationId": "readResourceV1alpha2NamespacedResourceClaim", "produces": [ "application/json", "application/yaml", @@ -79452,7 +79452,7 @@ "200": { "description": "OK", "schema": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha1.ResourceClaim" + "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.ResourceClaim" } }, "401": { @@ -79463,13 +79463,13 @@ "https" ], "tags": [ - "resource_v1alpha1" + "resource_v1alpha2" ], "x-kubernetes-action": "get", "x-kubernetes-group-version-kind": { "group": "resource.k8s.io", "kind": "ResourceClaim", - "version": "v1alpha1" + "version": "v1alpha2" } }, "parameters": [ @@ -79505,7 +79505,7 @@ "application/apply-patch+yaml" ], "description": "partially update the specified ResourceClaim", - "operationId": "patchResourceV1alpha1NamespacedResourceClaim", + "operationId": "patchResourceV1alpha2NamespacedResourceClaim", "parameters": [ { "in": "body", @@ -79553,13 +79553,13 @@ "200": { "description": "OK", "schema": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha1.ResourceClaim" + "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.ResourceClaim" } }, "201": { "description": "Created", "schema": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha1.ResourceClaim" + "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.ResourceClaim" } }, "401": { @@ -79570,13 +79570,13 @@ "https" ], "tags": [ - "resource_v1alpha1" + "resource_v1alpha2" ], "x-kubernetes-action": "patch", "x-kubernetes-group-version-kind": { "group": "resource.k8s.io", "kind": "ResourceClaim", - "version": "v1alpha1" + "version": "v1alpha2" } }, "put": { @@ -79584,14 +79584,14 @@ "*/*" ], "description": "replace the specified ResourceClaim", - "operationId": "replaceResourceV1alpha1NamespacedResourceClaim", + "operationId": "replaceResourceV1alpha2NamespacedResourceClaim", "parameters": [ { "in": "body", "name": "body", "required": true, "schema": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha1.ResourceClaim" + "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.ResourceClaim" } }, { @@ -79625,13 +79625,13 @@ "200": { "description": "OK", "schema": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha1.ResourceClaim" + "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.ResourceClaim" } }, "201": { "description": "Created", "schema": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha1.ResourceClaim" + "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.ResourceClaim" } }, "401": { @@ -79642,23 +79642,23 @@ "https" ], "tags": [ - "resource_v1alpha1" + "resource_v1alpha2" ], "x-kubernetes-action": "put", "x-kubernetes-group-version-kind": { "group": "resource.k8s.io", "kind": "ResourceClaim", - "version": "v1alpha1" + "version": "v1alpha2" } } }, - "/apis/resource.k8s.io/v1alpha1/namespaces/{namespace}/resourceclaims/{name}/status": { + "/apis/resource.k8s.io/v1alpha2/namespaces/{namespace}/resourceclaims/{name}/status": { "get": { "consumes": [ "*/*" ], "description": "read status of the specified ResourceClaim", - "operationId": "readResourceV1alpha1NamespacedResourceClaimStatus", + "operationId": "readResourceV1alpha2NamespacedResourceClaimStatus", "produces": [ "application/json", "application/yaml", @@ -79668,7 +79668,7 @@ "200": { "description": "OK", "schema": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha1.ResourceClaim" + "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.ResourceClaim" } }, "401": { @@ -79679,13 +79679,13 @@ "https" ], "tags": [ - "resource_v1alpha1" + "resource_v1alpha2" ], 
"x-kubernetes-action": "get", "x-kubernetes-group-version-kind": { "group": "resource.k8s.io", "kind": "ResourceClaim", - "version": "v1alpha1" + "version": "v1alpha2" } }, "parameters": [ @@ -79721,7 +79721,7 @@ "application/apply-patch+yaml" ], "description": "partially update status of the specified ResourceClaim", - "operationId": "patchResourceV1alpha1NamespacedResourceClaimStatus", + "operationId": "patchResourceV1alpha2NamespacedResourceClaimStatus", "parameters": [ { "in": "body", @@ -79769,13 +79769,13 @@ "200": { "description": "OK", "schema": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha1.ResourceClaim" + "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.ResourceClaim" } }, "201": { "description": "Created", "schema": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha1.ResourceClaim" + "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.ResourceClaim" } }, "401": { @@ -79786,13 +79786,13 @@ "https" ], "tags": [ - "resource_v1alpha1" + "resource_v1alpha2" ], "x-kubernetes-action": "patch", "x-kubernetes-group-version-kind": { "group": "resource.k8s.io", "kind": "ResourceClaim", - "version": "v1alpha1" + "version": "v1alpha2" } }, "put": { @@ -79800,14 +79800,14 @@ "*/*" ], "description": "replace status of the specified ResourceClaim", - "operationId": "replaceResourceV1alpha1NamespacedResourceClaimStatus", + "operationId": "replaceResourceV1alpha2NamespacedResourceClaimStatus", "parameters": [ { "in": "body", "name": "body", "required": true, "schema": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha1.ResourceClaim" + "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.ResourceClaim" } }, { @@ -79841,13 +79841,13 @@ "200": { "description": "OK", "schema": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha1.ResourceClaim" + "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.ResourceClaim" } }, "201": { "description": "Created", "schema": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha1.ResourceClaim" + "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.ResourceClaim" } }, "401": { @@ -79858,23 +79858,23 @@ "https" ], "tags": [ - "resource_v1alpha1" + "resource_v1alpha2" ], "x-kubernetes-action": "put", "x-kubernetes-group-version-kind": { "group": "resource.k8s.io", "kind": "ResourceClaim", - "version": "v1alpha1" + "version": "v1alpha2" } } }, - "/apis/resource.k8s.io/v1alpha1/namespaces/{namespace}/resourceclaimtemplates": { + "/apis/resource.k8s.io/v1alpha2/namespaces/{namespace}/resourceclaimtemplates": { "delete": { "consumes": [ "*/*" ], "description": "delete collection of ResourceClaimTemplate", - "operationId": "deleteResourceV1alpha1CollectionNamespacedResourceClaimTemplate", + "operationId": "deleteResourceV1alpha2CollectionNamespacedResourceClaimTemplate", "parameters": [ { "in": "body", @@ -79988,13 +79988,13 @@ "https" ], "tags": [ - "resource_v1alpha1" + "resource_v1alpha2" ], "x-kubernetes-action": "deletecollection", "x-kubernetes-group-version-kind": { "group": "resource.k8s.io", "kind": "ResourceClaimTemplate", - "version": "v1alpha1" + "version": "v1alpha2" } }, "get": { @@ -80002,7 +80002,7 @@ "*/*" ], "description": "list or watch objects of kind ResourceClaimTemplate", - "operationId": "listResourceV1alpha1NamespacedResourceClaimTemplate", + "operationId": "listResourceV1alpha2NamespacedResourceClaimTemplate", "parameters": [ { "description": "allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. 
Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.", @@ -80086,7 +80086,7 @@ "200": { "description": "OK", "schema": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha1.ResourceClaimTemplateList" + "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.ResourceClaimTemplateList" } }, "401": { @@ -80097,13 +80097,13 @@ "https" ], "tags": [ - "resource_v1alpha1" + "resource_v1alpha2" ], "x-kubernetes-action": "list", "x-kubernetes-group-version-kind": { "group": "resource.k8s.io", "kind": "ResourceClaimTemplate", - "version": "v1alpha1" + "version": "v1alpha2" } }, "parameters": [ @@ -80128,14 +80128,14 @@ "*/*" ], "description": "create a ResourceClaimTemplate", - "operationId": "createResourceV1alpha1NamespacedResourceClaimTemplate", + "operationId": "createResourceV1alpha2NamespacedResourceClaimTemplate", "parameters": [ { "in": "body", "name": "body", "required": true, "schema": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha1.ResourceClaimTemplate" + "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.ResourceClaimTemplate" } }, { @@ -80169,19 +80169,19 @@ "200": { "description": "OK", "schema": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha1.ResourceClaimTemplate" + "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.ResourceClaimTemplate" } }, "201": { "description": "Created", "schema": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha1.ResourceClaimTemplate" + "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.ResourceClaimTemplate" } }, "202": { "description": "Accepted", "schema": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha1.ResourceClaimTemplate" + "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.ResourceClaimTemplate" } }, "401": { @@ -80192,23 +80192,23 @@ "https" ], "tags": [ - "resource_v1alpha1" + "resource_v1alpha2" ], "x-kubernetes-action": "post", "x-kubernetes-group-version-kind": { "group": "resource.k8s.io", "kind": "ResourceClaimTemplate", - "version": "v1alpha1" + "version": "v1alpha2" } } }, - "/apis/resource.k8s.io/v1alpha1/namespaces/{namespace}/resourceclaimtemplates/{name}": { + "/apis/resource.k8s.io/v1alpha2/namespaces/{namespace}/resourceclaimtemplates/{name}": { "delete": { "consumes": [ "*/*" ], "description": "delete a ResourceClaimTemplate", - "operationId": "deleteResourceV1alpha1NamespacedResourceClaimTemplate", + "operationId": "deleteResourceV1alpha2NamespacedResourceClaimTemplate", "parameters": [ { "in": "body", @@ -80255,13 +80255,13 @@ "200": { "description": "OK", "schema": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha1.ResourceClaimTemplate" + "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.ResourceClaimTemplate" } }, "202": { "description": "Accepted", "schema": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha1.ResourceClaimTemplate" + "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.ResourceClaimTemplate" } }, "401": { @@ -80272,13 +80272,13 @@ "https" ], "tags": [ - "resource_v1alpha1" + "resource_v1alpha2" ], "x-kubernetes-action": "delete", "x-kubernetes-group-version-kind": { "group": "resource.k8s.io", "kind": "ResourceClaimTemplate", - "version": "v1alpha1" + "version": "v1alpha2" } }, "get": { @@ -80286,7 +80286,7 @@ "*/*" ], "description": "read the specified ResourceClaimTemplate", - "operationId": "readResourceV1alpha1NamespacedResourceClaimTemplate", + "operationId": "readResourceV1alpha2NamespacedResourceClaimTemplate", 
"produces": [ "application/json", "application/yaml", @@ -80296,7 +80296,7 @@ "200": { "description": "OK", "schema": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha1.ResourceClaimTemplate" + "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.ResourceClaimTemplate" } }, "401": { @@ -80307,13 +80307,13 @@ "https" ], "tags": [ - "resource_v1alpha1" + "resource_v1alpha2" ], "x-kubernetes-action": "get", "x-kubernetes-group-version-kind": { "group": "resource.k8s.io", "kind": "ResourceClaimTemplate", - "version": "v1alpha1" + "version": "v1alpha2" } }, "parameters": [ @@ -80349,7 +80349,7 @@ "application/apply-patch+yaml" ], "description": "partially update the specified ResourceClaimTemplate", - "operationId": "patchResourceV1alpha1NamespacedResourceClaimTemplate", + "operationId": "patchResourceV1alpha2NamespacedResourceClaimTemplate", "parameters": [ { "in": "body", @@ -80397,13 +80397,13 @@ "200": { "description": "OK", "schema": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha1.ResourceClaimTemplate" + "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.ResourceClaimTemplate" } }, "201": { "description": "Created", "schema": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha1.ResourceClaimTemplate" + "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.ResourceClaimTemplate" } }, "401": { @@ -80414,13 +80414,13 @@ "https" ], "tags": [ - "resource_v1alpha1" + "resource_v1alpha2" ], "x-kubernetes-action": "patch", "x-kubernetes-group-version-kind": { "group": "resource.k8s.io", "kind": "ResourceClaimTemplate", - "version": "v1alpha1" + "version": "v1alpha2" } }, "put": { @@ -80428,14 +80428,14 @@ "*/*" ], "description": "replace the specified ResourceClaimTemplate", - "operationId": "replaceResourceV1alpha1NamespacedResourceClaimTemplate", + "operationId": "replaceResourceV1alpha2NamespacedResourceClaimTemplate", "parameters": [ { "in": "body", "name": "body", "required": true, "schema": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha1.ResourceClaimTemplate" + "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.ResourceClaimTemplate" } }, { @@ -80469,13 +80469,13 @@ "200": { "description": "OK", "schema": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha1.ResourceClaimTemplate" + "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.ResourceClaimTemplate" } }, "201": { "description": "Created", "schema": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha1.ResourceClaimTemplate" + "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.ResourceClaimTemplate" } }, "401": { @@ -80486,23 +80486,23 @@ "https" ], "tags": [ - "resource_v1alpha1" + "resource_v1alpha2" ], "x-kubernetes-action": "put", "x-kubernetes-group-version-kind": { "group": "resource.k8s.io", "kind": "ResourceClaimTemplate", - "version": "v1alpha1" + "version": "v1alpha2" } } }, - "/apis/resource.k8s.io/v1alpha1/podschedulings": { + "/apis/resource.k8s.io/v1alpha2/podschedulings": { "get": { "consumes": [ "*/*" ], "description": "list or watch objects of kind PodScheduling", - "operationId": "listResourceV1alpha1PodSchedulingForAllNamespaces", + "operationId": "listResourceV1alpha2PodSchedulingForAllNamespaces", "produces": [ "application/json", "application/yaml", @@ -80514,7 +80514,7 @@ "200": { "description": "OK", "schema": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha1.PodSchedulingList" + "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.PodSchedulingList" } }, "401": { @@ -80525,13 +80525,13 @@ "https" ], "tags": [ - "resource_v1alpha1" + "resource_v1alpha2" ], 
"x-kubernetes-action": "list", "x-kubernetes-group-version-kind": { "group": "resource.k8s.io", "kind": "PodScheduling", - "version": "v1alpha1" + "version": "v1alpha2" } }, "parameters": [ @@ -80614,13 +80614,13 @@ } ] }, - "/apis/resource.k8s.io/v1alpha1/resourceclaims": { + "/apis/resource.k8s.io/v1alpha2/resourceclaims": { "get": { "consumes": [ "*/*" ], "description": "list or watch objects of kind ResourceClaim", - "operationId": "listResourceV1alpha1ResourceClaimForAllNamespaces", + "operationId": "listResourceV1alpha2ResourceClaimForAllNamespaces", "produces": [ "application/json", "application/yaml", @@ -80632,7 +80632,7 @@ "200": { "description": "OK", "schema": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha1.ResourceClaimList" + "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.ResourceClaimList" } }, "401": { @@ -80643,13 +80643,13 @@ "https" ], "tags": [ - "resource_v1alpha1" + "resource_v1alpha2" ], "x-kubernetes-action": "list", "x-kubernetes-group-version-kind": { "group": "resource.k8s.io", "kind": "ResourceClaim", - "version": "v1alpha1" + "version": "v1alpha2" } }, "parameters": [ @@ -80732,13 +80732,13 @@ } ] }, - "/apis/resource.k8s.io/v1alpha1/resourceclaimtemplates": { + "/apis/resource.k8s.io/v1alpha2/resourceclaimtemplates": { "get": { "consumes": [ "*/*" ], "description": "list or watch objects of kind ResourceClaimTemplate", - "operationId": "listResourceV1alpha1ResourceClaimTemplateForAllNamespaces", + "operationId": "listResourceV1alpha2ResourceClaimTemplateForAllNamespaces", "produces": [ "application/json", "application/yaml", @@ -80750,7 +80750,7 @@ "200": { "description": "OK", "schema": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha1.ResourceClaimTemplateList" + "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.ResourceClaimTemplateList" } }, "401": { @@ -80761,13 +80761,13 @@ "https" ], "tags": [ - "resource_v1alpha1" + "resource_v1alpha2" ], "x-kubernetes-action": "list", "x-kubernetes-group-version-kind": { "group": "resource.k8s.io", "kind": "ResourceClaimTemplate", - "version": "v1alpha1" + "version": "v1alpha2" } }, "parameters": [ @@ -80850,13 +80850,13 @@ } ] }, - "/apis/resource.k8s.io/v1alpha1/resourceclasses": { + "/apis/resource.k8s.io/v1alpha2/resourceclasses": { "delete": { "consumes": [ "*/*" ], "description": "delete collection of ResourceClass", - "operationId": "deleteResourceV1alpha1CollectionResourceClass", + "operationId": "deleteResourceV1alpha2CollectionResourceClass", "parameters": [ { "in": "body", @@ -80970,13 +80970,13 @@ "https" ], "tags": [ - "resource_v1alpha1" + "resource_v1alpha2" ], "x-kubernetes-action": "deletecollection", "x-kubernetes-group-version-kind": { "group": "resource.k8s.io", "kind": "ResourceClass", - "version": "v1alpha1" + "version": "v1alpha2" } }, "get": { @@ -80984,7 +80984,7 @@ "*/*" ], "description": "list or watch objects of kind ResourceClass", - "operationId": "listResourceV1alpha1ResourceClass", + "operationId": "listResourceV1alpha2ResourceClass", "parameters": [ { "description": "allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. 
If this is not a watch, this field is ignored.", @@ -81068,7 +81068,7 @@ "200": { "description": "OK", "schema": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha1.ResourceClassList" + "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.ResourceClassList" } }, "401": { @@ -81079,13 +81079,13 @@ "https" ], "tags": [ - "resource_v1alpha1" + "resource_v1alpha2" ], "x-kubernetes-action": "list", "x-kubernetes-group-version-kind": { "group": "resource.k8s.io", "kind": "ResourceClass", - "version": "v1alpha1" + "version": "v1alpha2" } }, "parameters": [ @@ -81102,14 +81102,14 @@ "*/*" ], "description": "create a ResourceClass", - "operationId": "createResourceV1alpha1ResourceClass", + "operationId": "createResourceV1alpha2ResourceClass", "parameters": [ { "in": "body", "name": "body", "required": true, "schema": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha1.ResourceClass" + "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.ResourceClass" } }, { @@ -81143,19 +81143,19 @@ "200": { "description": "OK", "schema": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha1.ResourceClass" + "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.ResourceClass" } }, "201": { "description": "Created", "schema": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha1.ResourceClass" + "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.ResourceClass" } }, "202": { "description": "Accepted", "schema": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha1.ResourceClass" + "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.ResourceClass" } }, "401": { @@ -81166,23 +81166,23 @@ "https" ], "tags": [ - "resource_v1alpha1" + "resource_v1alpha2" ], "x-kubernetes-action": "post", "x-kubernetes-group-version-kind": { "group": "resource.k8s.io", "kind": "ResourceClass", - "version": "v1alpha1" + "version": "v1alpha2" } } }, - "/apis/resource.k8s.io/v1alpha1/resourceclasses/{name}": { + "/apis/resource.k8s.io/v1alpha2/resourceclasses/{name}": { "delete": { "consumes": [ "*/*" ], "description": "delete a ResourceClass", - "operationId": "deleteResourceV1alpha1ResourceClass", + "operationId": "deleteResourceV1alpha2ResourceClass", "parameters": [ { "in": "body", @@ -81229,13 +81229,13 @@ "200": { "description": "OK", "schema": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha1.ResourceClass" + "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.ResourceClass" } }, "202": { "description": "Accepted", "schema": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha1.ResourceClass" + "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.ResourceClass" } }, "401": { @@ -81246,13 +81246,13 @@ "https" ], "tags": [ - "resource_v1alpha1" + "resource_v1alpha2" ], "x-kubernetes-action": "delete", "x-kubernetes-group-version-kind": { "group": "resource.k8s.io", "kind": "ResourceClass", - "version": "v1alpha1" + "version": "v1alpha2" } }, "get": { @@ -81260,7 +81260,7 @@ "*/*" ], "description": "read the specified ResourceClass", - "operationId": "readResourceV1alpha1ResourceClass", + "operationId": "readResourceV1alpha2ResourceClass", "produces": [ "application/json", "application/yaml", @@ -81270,7 +81270,7 @@ "200": { "description": "OK", "schema": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha1.ResourceClass" + "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.ResourceClass" } }, "401": { @@ -81281,13 +81281,13 @@ "https" ], "tags": [ - "resource_v1alpha1" + "resource_v1alpha2" ], "x-kubernetes-action": "get", "x-kubernetes-group-version-kind": { "group": "resource.k8s.io", "kind": "ResourceClass", - 
"version": "v1alpha1" + "version": "v1alpha2" } }, "parameters": [ @@ -81315,7 +81315,7 @@ "application/apply-patch+yaml" ], "description": "partially update the specified ResourceClass", - "operationId": "patchResourceV1alpha1ResourceClass", + "operationId": "patchResourceV1alpha2ResourceClass", "parameters": [ { "in": "body", @@ -81363,13 +81363,13 @@ "200": { "description": "OK", "schema": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha1.ResourceClass" + "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.ResourceClass" } }, "201": { "description": "Created", "schema": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha1.ResourceClass" + "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.ResourceClass" } }, "401": { @@ -81380,13 +81380,13 @@ "https" ], "tags": [ - "resource_v1alpha1" + "resource_v1alpha2" ], "x-kubernetes-action": "patch", "x-kubernetes-group-version-kind": { "group": "resource.k8s.io", "kind": "ResourceClass", - "version": "v1alpha1" + "version": "v1alpha2" } }, "put": { @@ -81394,14 +81394,14 @@ "*/*" ], "description": "replace the specified ResourceClass", - "operationId": "replaceResourceV1alpha1ResourceClass", + "operationId": "replaceResourceV1alpha2ResourceClass", "parameters": [ { "in": "body", "name": "body", "required": true, "schema": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha1.ResourceClass" + "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.ResourceClass" } }, { @@ -81435,13 +81435,13 @@ "200": { "description": "OK", "schema": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha1.ResourceClass" + "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.ResourceClass" } }, "201": { "description": "Created", "schema": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha1.ResourceClass" + "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.ResourceClass" } }, "401": { @@ -81452,23 +81452,23 @@ "https" ], "tags": [ - "resource_v1alpha1" + "resource_v1alpha2" ], "x-kubernetes-action": "put", "x-kubernetes-group-version-kind": { "group": "resource.k8s.io", "kind": "ResourceClass", - "version": "v1alpha1" + "version": "v1alpha2" } } }, - "/apis/resource.k8s.io/v1alpha1/watch/namespaces/{namespace}/podschedulings": { + "/apis/resource.k8s.io/v1alpha2/watch/namespaces/{namespace}/podschedulings": { "get": { "consumes": [ "*/*" ], "description": "watch individual changes to a list of PodScheduling. deprecated: use the 'watch' parameter with a list operation instead.", - "operationId": "watchResourceV1alpha1NamespacedPodSchedulingList", + "operationId": "watchResourceV1alpha2NamespacedPodSchedulingList", "produces": [ "application/json", "application/yaml", @@ -81491,13 +81491,13 @@ "https" ], "tags": [ - "resource_v1alpha1" + "resource_v1alpha2" ], "x-kubernetes-action": "watchlist", "x-kubernetes-group-version-kind": { "group": "resource.k8s.io", "kind": "PodScheduling", - "version": "v1alpha1" + "version": "v1alpha2" } }, "parameters": [ @@ -81588,13 +81588,13 @@ } ] }, - "/apis/resource.k8s.io/v1alpha1/watch/namespaces/{namespace}/podschedulings/{name}": { + "/apis/resource.k8s.io/v1alpha2/watch/namespaces/{namespace}/podschedulings/{name}": { "get": { "consumes": [ "*/*" ], "description": "watch changes to an object of kind PodScheduling. 
deprecated: use the 'watch' parameter with a list operation instead, filtered to a single item with the 'fieldSelector' parameter.", - "operationId": "watchResourceV1alpha1NamespacedPodScheduling", + "operationId": "watchResourceV1alpha2NamespacedPodScheduling", "produces": [ "application/json", "application/yaml", @@ -81617,13 +81617,13 @@ "https" ], "tags": [ - "resource_v1alpha1" + "resource_v1alpha2" ], "x-kubernetes-action": "watch", "x-kubernetes-group-version-kind": { "group": "resource.k8s.io", "kind": "PodScheduling", - "version": "v1alpha1" + "version": "v1alpha2" } }, "parameters": [ @@ -81722,13 +81722,13 @@ } ] }, - "/apis/resource.k8s.io/v1alpha1/watch/namespaces/{namespace}/resourceclaims": { + "/apis/resource.k8s.io/v1alpha2/watch/namespaces/{namespace}/resourceclaims": { "get": { "consumes": [ "*/*" ], "description": "watch individual changes to a list of ResourceClaim. deprecated: use the 'watch' parameter with a list operation instead.", - "operationId": "watchResourceV1alpha1NamespacedResourceClaimList", + "operationId": "watchResourceV1alpha2NamespacedResourceClaimList", "produces": [ "application/json", "application/yaml", @@ -81751,13 +81751,13 @@ "https" ], "tags": [ - "resource_v1alpha1" + "resource_v1alpha2" ], "x-kubernetes-action": "watchlist", "x-kubernetes-group-version-kind": { "group": "resource.k8s.io", "kind": "ResourceClaim", - "version": "v1alpha1" + "version": "v1alpha2" } }, "parameters": [ @@ -81848,13 +81848,13 @@ } ] }, - "/apis/resource.k8s.io/v1alpha1/watch/namespaces/{namespace}/resourceclaims/{name}": { + "/apis/resource.k8s.io/v1alpha2/watch/namespaces/{namespace}/resourceclaims/{name}": { "get": { "consumes": [ "*/*" ], "description": "watch changes to an object of kind ResourceClaim. deprecated: use the 'watch' parameter with a list operation instead, filtered to a single item with the 'fieldSelector' parameter.", - "operationId": "watchResourceV1alpha1NamespacedResourceClaim", + "operationId": "watchResourceV1alpha2NamespacedResourceClaim", "produces": [ "application/json", "application/yaml", @@ -81877,13 +81877,13 @@ "https" ], "tags": [ - "resource_v1alpha1" + "resource_v1alpha2" ], "x-kubernetes-action": "watch", "x-kubernetes-group-version-kind": { "group": "resource.k8s.io", "kind": "ResourceClaim", - "version": "v1alpha1" + "version": "v1alpha2" } }, "parameters": [ @@ -81982,13 +81982,13 @@ } ] }, - "/apis/resource.k8s.io/v1alpha1/watch/namespaces/{namespace}/resourceclaimtemplates": { + "/apis/resource.k8s.io/v1alpha2/watch/namespaces/{namespace}/resourceclaimtemplates": { "get": { "consumes": [ "*/*" ], "description": "watch individual changes to a list of ResourceClaimTemplate. 
deprecated: use the 'watch' parameter with a list operation instead.", - "operationId": "watchResourceV1alpha1NamespacedResourceClaimTemplateList", + "operationId": "watchResourceV1alpha2NamespacedResourceClaimTemplateList", "produces": [ "application/json", "application/yaml", @@ -82011,13 +82011,13 @@ "https" ], "tags": [ - "resource_v1alpha1" + "resource_v1alpha2" ], "x-kubernetes-action": "watchlist", "x-kubernetes-group-version-kind": { "group": "resource.k8s.io", "kind": "ResourceClaimTemplate", - "version": "v1alpha1" + "version": "v1alpha2" } }, "parameters": [ @@ -82108,13 +82108,13 @@ } ] }, - "/apis/resource.k8s.io/v1alpha1/watch/namespaces/{namespace}/resourceclaimtemplates/{name}": { + "/apis/resource.k8s.io/v1alpha2/watch/namespaces/{namespace}/resourceclaimtemplates/{name}": { "get": { "consumes": [ "*/*" ], "description": "watch changes to an object of kind ResourceClaimTemplate. deprecated: use the 'watch' parameter with a list operation instead, filtered to a single item with the 'fieldSelector' parameter.", - "operationId": "watchResourceV1alpha1NamespacedResourceClaimTemplate", + "operationId": "watchResourceV1alpha2NamespacedResourceClaimTemplate", "produces": [ "application/json", "application/yaml", @@ -82137,13 +82137,13 @@ "https" ], "tags": [ - "resource_v1alpha1" + "resource_v1alpha2" ], "x-kubernetes-action": "watch", "x-kubernetes-group-version-kind": { "group": "resource.k8s.io", "kind": "ResourceClaimTemplate", - "version": "v1alpha1" + "version": "v1alpha2" } }, "parameters": [ @@ -82242,13 +82242,13 @@ } ] }, - "/apis/resource.k8s.io/v1alpha1/watch/podschedulings": { + "/apis/resource.k8s.io/v1alpha2/watch/podschedulings": { "get": { "consumes": [ "*/*" ], "description": "watch individual changes to a list of PodScheduling. deprecated: use the 'watch' parameter with a list operation instead.", - "operationId": "watchResourceV1alpha1PodSchedulingListForAllNamespaces", + "operationId": "watchResourceV1alpha2PodSchedulingListForAllNamespaces", "produces": [ "application/json", "application/yaml", @@ -82271,13 +82271,13 @@ "https" ], "tags": [ - "resource_v1alpha1" + "resource_v1alpha2" ], "x-kubernetes-action": "watchlist", "x-kubernetes-group-version-kind": { "group": "resource.k8s.io", "kind": "PodScheduling", - "version": "v1alpha1" + "version": "v1alpha2" } }, "parameters": [ @@ -82360,13 +82360,13 @@ } ] }, - "/apis/resource.k8s.io/v1alpha1/watch/resourceclaims": { + "/apis/resource.k8s.io/v1alpha2/watch/resourceclaims": { "get": { "consumes": [ "*/*" ], "description": "watch individual changes to a list of ResourceClaim. deprecated: use the 'watch' parameter with a list operation instead.", - "operationId": "watchResourceV1alpha1ResourceClaimListForAllNamespaces", + "operationId": "watchResourceV1alpha2ResourceClaimListForAllNamespaces", "produces": [ "application/json", "application/yaml", @@ -82389,13 +82389,13 @@ "https" ], "tags": [ - "resource_v1alpha1" + "resource_v1alpha2" ], "x-kubernetes-action": "watchlist", "x-kubernetes-group-version-kind": { "group": "resource.k8s.io", "kind": "ResourceClaim", - "version": "v1alpha1" + "version": "v1alpha2" } }, "parameters": [ @@ -82478,13 +82478,13 @@ } ] }, - "/apis/resource.k8s.io/v1alpha1/watch/resourceclaimtemplates": { + "/apis/resource.k8s.io/v1alpha2/watch/resourceclaimtemplates": { "get": { "consumes": [ "*/*" ], "description": "watch individual changes to a list of ResourceClaimTemplate. 
deprecated: use the 'watch' parameter with a list operation instead.", - "operationId": "watchResourceV1alpha1ResourceClaimTemplateListForAllNamespaces", + "operationId": "watchResourceV1alpha2ResourceClaimTemplateListForAllNamespaces", "produces": [ "application/json", "application/yaml", @@ -82507,13 +82507,13 @@ "https" ], "tags": [ - "resource_v1alpha1" + "resource_v1alpha2" ], "x-kubernetes-action": "watchlist", "x-kubernetes-group-version-kind": { "group": "resource.k8s.io", "kind": "ResourceClaimTemplate", - "version": "v1alpha1" + "version": "v1alpha2" } }, "parameters": [ @@ -82596,13 +82596,13 @@ } ] }, - "/apis/resource.k8s.io/v1alpha1/watch/resourceclasses": { + "/apis/resource.k8s.io/v1alpha2/watch/resourceclasses": { "get": { "consumes": [ "*/*" ], "description": "watch individual changes to a list of ResourceClass. deprecated: use the 'watch' parameter with a list operation instead.", - "operationId": "watchResourceV1alpha1ResourceClassList", + "operationId": "watchResourceV1alpha2ResourceClassList", "produces": [ "application/json", "application/yaml", @@ -82625,13 +82625,13 @@ "https" ], "tags": [ - "resource_v1alpha1" + "resource_v1alpha2" ], "x-kubernetes-action": "watchlist", "x-kubernetes-group-version-kind": { "group": "resource.k8s.io", "kind": "ResourceClass", - "version": "v1alpha1" + "version": "v1alpha2" } }, "parameters": [ @@ -82714,13 +82714,13 @@ } ] }, - "/apis/resource.k8s.io/v1alpha1/watch/resourceclasses/{name}": { + "/apis/resource.k8s.io/v1alpha2/watch/resourceclasses/{name}": { "get": { "consumes": [ "*/*" ], "description": "watch changes to an object of kind ResourceClass. deprecated: use the 'watch' parameter with a list operation instead, filtered to a single item with the 'fieldSelector' parameter.", - "operationId": "watchResourceV1alpha1ResourceClass", + "operationId": "watchResourceV1alpha2ResourceClass", "produces": [ "application/json", "application/yaml", @@ -82743,13 +82743,13 @@ "https" ], "tags": [ - "resource_v1alpha1" + "resource_v1alpha2" ], "x-kubernetes-action": "watch", "x-kubernetes-group-version-kind": { "group": "resource.k8s.io", "kind": "ResourceClass", - "version": "v1alpha1" + "version": "v1alpha2" } }, "parameters": [ diff --git a/api/openapi-spec/v3/api__v1_openapi.json b/api/openapi-spec/v3/api__v1_openapi.json index 495640f1c01..6459e51793b 100644 --- a/api/openapi-spec/v3/api__v1_openapi.json +++ b/api/openapi-spec/v3/api__v1_openapi.json @@ -8471,7 +8471,7 @@ { "group": "resource.k8s.io", "kind": "DeleteOptions", - "version": "v1alpha1" + "version": "v1alpha2" }, { "group": "scheduling.k8s.io", @@ -8859,7 +8859,7 @@ { "group": "resource.k8s.io", "kind": "Status", - "version": "v1alpha1" + "version": "v1alpha2" } ] }, @@ -9201,7 +9201,7 @@ { "group": "resource.k8s.io", "kind": "WatchEvent", - "version": "v1alpha1" + "version": "v1alpha2" }, { "group": "scheduling.k8s.io", diff --git a/api/openapi-spec/v3/apis__admissionregistration.k8s.io__v1_openapi.json b/api/openapi-spec/v3/apis__admissionregistration.k8s.io__v1_openapi.json index fbfcb7e5bc2..1afcc986a60 100644 --- a/api/openapi-spec/v3/apis__admissionregistration.k8s.io__v1_openapi.json +++ b/api/openapi-spec/v3/apis__admissionregistration.k8s.io__v1_openapi.json @@ -846,7 +846,7 @@ { "group": "resource.k8s.io", "kind": "DeleteOptions", - "version": "v1alpha1" + "version": "v1alpha2" }, { "group": "scheduling.k8s.io", @@ -1229,7 +1229,7 @@ { "group": "resource.k8s.io", "kind": "Status", - "version": "v1alpha1" + "version": "v1alpha2" } ] }, @@ -1571,7 +1571,7 @@ { 
"group": "resource.k8s.io", "kind": "WatchEvent", - "version": "v1alpha1" + "version": "v1alpha2" }, { "group": "scheduling.k8s.io", diff --git a/api/openapi-spec/v3/apis__admissionregistration.k8s.io__v1alpha1_openapi.json b/api/openapi-spec/v3/apis__admissionregistration.k8s.io__v1alpha1_openapi.json index d22f4b77398..9d743fe78f9 100644 --- a/api/openapi-spec/v3/apis__admissionregistration.k8s.io__v1alpha1_openapi.json +++ b/api/openapi-spec/v3/apis__admissionregistration.k8s.io__v1alpha1_openapi.json @@ -972,7 +972,7 @@ { "group": "resource.k8s.io", "kind": "DeleteOptions", - "version": "v1alpha1" + "version": "v1alpha2" }, { "group": "scheduling.k8s.io", @@ -1355,7 +1355,7 @@ { "group": "resource.k8s.io", "kind": "Status", - "version": "v1alpha1" + "version": "v1alpha2" } ] }, @@ -1697,7 +1697,7 @@ { "group": "resource.k8s.io", "kind": "WatchEvent", - "version": "v1alpha1" + "version": "v1alpha2" }, { "group": "scheduling.k8s.io", diff --git a/api/openapi-spec/v3/apis__apiextensions.k8s.io__v1_openapi.json b/api/openapi-spec/v3/apis__apiextensions.k8s.io__v1_openapi.json index f3a13487e25..67a0cd6c805 100644 --- a/api/openapi-spec/v3/apis__apiextensions.k8s.io__v1_openapi.json +++ b/api/openapi-spec/v3/apis__apiextensions.k8s.io__v1_openapi.json @@ -1235,7 +1235,7 @@ { "group": "resource.k8s.io", "kind": "DeleteOptions", - "version": "v1alpha1" + "version": "v1alpha2" }, { "group": "scheduling.k8s.io", @@ -1561,7 +1561,7 @@ { "group": "resource.k8s.io", "kind": "Status", - "version": "v1alpha1" + "version": "v1alpha2" } ] }, @@ -1903,7 +1903,7 @@ { "group": "resource.k8s.io", "kind": "WatchEvent", - "version": "v1alpha1" + "version": "v1alpha2" }, { "group": "scheduling.k8s.io", diff --git a/api/openapi-spec/v3/apis__apps__v1_openapi.json b/api/openapi-spec/v3/apis__apps__v1_openapi.json index 59ffd40ab56..09e89455c09 100644 --- a/api/openapi-spec/v3/apis__apps__v1_openapi.json +++ b/api/openapi-spec/v3/apis__apps__v1_openapi.json @@ -5464,7 +5464,7 @@ { "group": "resource.k8s.io", "kind": "DeleteOptions", - "version": "v1alpha1" + "version": "v1alpha2" }, { "group": "scheduling.k8s.io", @@ -5847,7 +5847,7 @@ { "group": "resource.k8s.io", "kind": "Status", - "version": "v1alpha1" + "version": "v1alpha2" } ] }, @@ -6189,7 +6189,7 @@ { "group": "resource.k8s.io", "kind": "WatchEvent", - "version": "v1alpha1" + "version": "v1alpha2" }, { "group": "scheduling.k8s.io", diff --git a/api/openapi-spec/v3/apis__autoscaling__v1_openapi.json b/api/openapi-spec/v3/apis__autoscaling__v1_openapi.json index e3d793decaa..d95aedbfb6c 100644 --- a/api/openapi-spec/v3/apis__autoscaling__v1_openapi.json +++ b/api/openapi-spec/v3/apis__autoscaling__v1_openapi.json @@ -601,7 +601,7 @@ { "group": "resource.k8s.io", "kind": "DeleteOptions", - "version": "v1alpha1" + "version": "v1alpha2" }, { "group": "scheduling.k8s.io", @@ -927,7 +927,7 @@ { "group": "resource.k8s.io", "kind": "Status", - "version": "v1alpha1" + "version": "v1alpha2" } ] }, @@ -1269,7 +1269,7 @@ { "group": "resource.k8s.io", "kind": "WatchEvent", - "version": "v1alpha1" + "version": "v1alpha2" }, { "group": "scheduling.k8s.io", diff --git a/api/openapi-spec/v3/apis__autoscaling__v2_openapi.json b/api/openapi-spec/v3/apis__autoscaling__v2_openapi.json index f2ec34c4a29..ed1a84487b6 100644 --- a/api/openapi-spec/v3/apis__autoscaling__v2_openapi.json +++ b/api/openapi-spec/v3/apis__autoscaling__v2_openapi.json @@ -1254,7 +1254,7 @@ { "group": "resource.k8s.io", "kind": "DeleteOptions", - "version": "v1alpha1" + "version": "v1alpha2" }, { 
"group": "scheduling.k8s.io", @@ -1637,7 +1637,7 @@ { "group": "resource.k8s.io", "kind": "Status", - "version": "v1alpha1" + "version": "v1alpha2" } ] }, @@ -1979,7 +1979,7 @@ { "group": "resource.k8s.io", "kind": "WatchEvent", - "version": "v1alpha1" + "version": "v1alpha2" }, { "group": "scheduling.k8s.io", diff --git a/api/openapi-spec/v3/apis__batch__v1_openapi.json b/api/openapi-spec/v3/apis__batch__v1_openapi.json index 70e2916df57..0912e8a6c34 100644 --- a/api/openapi-spec/v3/apis__batch__v1_openapi.json +++ b/api/openapi-spec/v3/apis__batch__v1_openapi.json @@ -4638,7 +4638,7 @@ { "group": "resource.k8s.io", "kind": "DeleteOptions", - "version": "v1alpha1" + "version": "v1alpha2" }, { "group": "scheduling.k8s.io", @@ -5021,7 +5021,7 @@ { "group": "resource.k8s.io", "kind": "Status", - "version": "v1alpha1" + "version": "v1alpha2" } ] }, @@ -5363,7 +5363,7 @@ { "group": "resource.k8s.io", "kind": "WatchEvent", - "version": "v1alpha1" + "version": "v1alpha2" }, { "group": "scheduling.k8s.io", diff --git a/api/openapi-spec/v3/apis__certificates.k8s.io__v1_openapi.json b/api/openapi-spec/v3/apis__certificates.k8s.io__v1_openapi.json index bdd621654e5..2495e526248 100644 --- a/api/openapi-spec/v3/apis__certificates.k8s.io__v1_openapi.json +++ b/api/openapi-spec/v3/apis__certificates.k8s.io__v1_openapi.json @@ -639,7 +639,7 @@ { "group": "resource.k8s.io", "kind": "DeleteOptions", - "version": "v1alpha1" + "version": "v1alpha2" }, { "group": "scheduling.k8s.io", @@ -965,7 +965,7 @@ { "group": "resource.k8s.io", "kind": "Status", - "version": "v1alpha1" + "version": "v1alpha2" } ] }, @@ -1307,7 +1307,7 @@ { "group": "resource.k8s.io", "kind": "WatchEvent", - "version": "v1alpha1" + "version": "v1alpha2" }, { "group": "scheduling.k8s.io", diff --git a/api/openapi-spec/v3/apis__coordination.k8s.io__v1_openapi.json b/api/openapi-spec/v3/apis__coordination.k8s.io__v1_openapi.json index a5bffc416f1..b2290e53a14 100644 --- a/api/openapi-spec/v3/apis__coordination.k8s.io__v1_openapi.json +++ b/api/openapi-spec/v3/apis__coordination.k8s.io__v1_openapi.json @@ -528,7 +528,7 @@ { "group": "resource.k8s.io", "kind": "DeleteOptions", - "version": "v1alpha1" + "version": "v1alpha2" }, { "group": "scheduling.k8s.io", @@ -859,7 +859,7 @@ { "group": "resource.k8s.io", "kind": "Status", - "version": "v1alpha1" + "version": "v1alpha2" } ] }, @@ -1201,7 +1201,7 @@ { "group": "resource.k8s.io", "kind": "WatchEvent", - "version": "v1alpha1" + "version": "v1alpha2" }, { "group": "scheduling.k8s.io", diff --git a/api/openapi-spec/v3/apis__discovery.k8s.io__v1_openapi.json b/api/openapi-spec/v3/apis__discovery.k8s.io__v1_openapi.json index 37a36dbe530..ead32305d33 100644 --- a/api/openapi-spec/v3/apis__discovery.k8s.io__v1_openapi.json +++ b/api/openapi-spec/v3/apis__discovery.k8s.io__v1_openapi.json @@ -691,7 +691,7 @@ { "group": "resource.k8s.io", "kind": "DeleteOptions", - "version": "v1alpha1" + "version": "v1alpha2" }, { "group": "scheduling.k8s.io", @@ -1017,7 +1017,7 @@ { "group": "resource.k8s.io", "kind": "Status", - "version": "v1alpha1" + "version": "v1alpha2" } ] }, @@ -1359,7 +1359,7 @@ { "group": "resource.k8s.io", "kind": "WatchEvent", - "version": "v1alpha1" + "version": "v1alpha2" }, { "group": "scheduling.k8s.io", diff --git a/api/openapi-spec/v3/apis__events.k8s.io__v1_openapi.json b/api/openapi-spec/v3/apis__events.k8s.io__v1_openapi.json index 99d63700d11..1096fd44af6 100644 --- a/api/openapi-spec/v3/apis__events.k8s.io__v1_openapi.json +++ 
b/api/openapi-spec/v3/apis__events.k8s.io__v1_openapi.json @@ -650,7 +650,7 @@ { "group": "resource.k8s.io", "kind": "DeleteOptions", - "version": "v1alpha1" + "version": "v1alpha2" }, { "group": "scheduling.k8s.io", @@ -981,7 +981,7 @@ { "group": "resource.k8s.io", "kind": "Status", - "version": "v1alpha1" + "version": "v1alpha2" } ] }, @@ -1323,7 +1323,7 @@ { "group": "resource.k8s.io", "kind": "WatchEvent", - "version": "v1alpha1" + "version": "v1alpha2" }, { "group": "scheduling.k8s.io", diff --git a/api/openapi-spec/v3/apis__flowcontrol.apiserver.k8s.io__v1beta2_openapi.json b/api/openapi-spec/v3/apis__flowcontrol.apiserver.k8s.io__v1beta2_openapi.json index 3e6de39db6b..aa475ebebb6 100644 --- a/api/openapi-spec/v3/apis__flowcontrol.apiserver.k8s.io__v1beta2_openapi.json +++ b/api/openapi-spec/v3/apis__flowcontrol.apiserver.k8s.io__v1beta2_openapi.json @@ -1111,7 +1111,7 @@ { "group": "resource.k8s.io", "kind": "DeleteOptions", - "version": "v1alpha1" + "version": "v1alpha2" }, { "group": "scheduling.k8s.io", @@ -1437,7 +1437,7 @@ { "group": "resource.k8s.io", "kind": "Status", - "version": "v1alpha1" + "version": "v1alpha2" } ] }, @@ -1779,7 +1779,7 @@ { "group": "resource.k8s.io", "kind": "WatchEvent", - "version": "v1alpha1" + "version": "v1alpha2" }, { "group": "scheduling.k8s.io", diff --git a/api/openapi-spec/v3/apis__flowcontrol.apiserver.k8s.io__v1beta3_openapi.json b/api/openapi-spec/v3/apis__flowcontrol.apiserver.k8s.io__v1beta3_openapi.json index 0dff1b4d1e2..7585c5ca36f 100644 --- a/api/openapi-spec/v3/apis__flowcontrol.apiserver.k8s.io__v1beta3_openapi.json +++ b/api/openapi-spec/v3/apis__flowcontrol.apiserver.k8s.io__v1beta3_openapi.json @@ -1115,7 +1115,7 @@ { "group": "resource.k8s.io", "kind": "DeleteOptions", - "version": "v1alpha1" + "version": "v1alpha2" }, { "group": "scheduling.k8s.io", @@ -1441,7 +1441,7 @@ { "group": "resource.k8s.io", "kind": "Status", - "version": "v1alpha1" + "version": "v1alpha2" } ] }, @@ -1783,7 +1783,7 @@ { "group": "resource.k8s.io", "kind": "WatchEvent", - "version": "v1alpha1" + "version": "v1alpha2" }, { "group": "scheduling.k8s.io", diff --git a/api/openapi-spec/v3/apis__internal.apiserver.k8s.io__v1alpha1_openapi.json b/api/openapi-spec/v3/apis__internal.apiserver.k8s.io__v1alpha1_openapi.json index c6f21b38c07..ebebf6badb5 100644 --- a/api/openapi-spec/v3/apis__internal.apiserver.k8s.io__v1alpha1_openapi.json +++ b/api/openapi-spec/v3/apis__internal.apiserver.k8s.io__v1alpha1_openapi.json @@ -618,7 +618,7 @@ { "group": "resource.k8s.io", "kind": "DeleteOptions", - "version": "v1alpha1" + "version": "v1alpha2" }, { "group": "scheduling.k8s.io", @@ -944,7 +944,7 @@ { "group": "resource.k8s.io", "kind": "Status", - "version": "v1alpha1" + "version": "v1alpha2" } ] }, @@ -1286,7 +1286,7 @@ { "group": "resource.k8s.io", "kind": "WatchEvent", - "version": "v1alpha1" + "version": "v1alpha2" }, { "group": "scheduling.k8s.io", diff --git a/api/openapi-spec/v3/apis__networking.k8s.io__v1_openapi.json b/api/openapi-spec/v3/apis__networking.k8s.io__v1_openapi.json index 135a6aa47ad..6925d45c4a5 100644 --- a/api/openapi-spec/v3/apis__networking.k8s.io__v1_openapi.json +++ b/api/openapi-spec/v3/apis__networking.k8s.io__v1_openapi.json @@ -1279,7 +1279,7 @@ { "group": "resource.k8s.io", "kind": "DeleteOptions", - "version": "v1alpha1" + "version": "v1alpha2" }, { "group": "scheduling.k8s.io", @@ -1662,7 +1662,7 @@ { "group": "resource.k8s.io", "kind": "Status", - "version": "v1alpha1" + "version": "v1alpha2" } ] }, @@ -2004,7 +2004,7 @@ { 
"group": "resource.k8s.io", "kind": "WatchEvent", - "version": "v1alpha1" + "version": "v1alpha2" }, { "group": "scheduling.k8s.io", diff --git a/api/openapi-spec/v3/apis__networking.k8s.io__v1alpha1_openapi.json b/api/openapi-spec/v3/apis__networking.k8s.io__v1alpha1_openapi.json index 197aa01a52e..8798328f7ac 100644 --- a/api/openapi-spec/v3/apis__networking.k8s.io__v1alpha1_openapi.json +++ b/api/openapi-spec/v3/apis__networking.k8s.io__v1alpha1_openapi.json @@ -606,7 +606,7 @@ { "group": "resource.k8s.io", "kind": "DeleteOptions", - "version": "v1alpha1" + "version": "v1alpha2" }, { "group": "scheduling.k8s.io", @@ -932,7 +932,7 @@ { "group": "resource.k8s.io", "kind": "Status", - "version": "v1alpha1" + "version": "v1alpha2" } ] }, @@ -1274,7 +1274,7 @@ { "group": "resource.k8s.io", "kind": "WatchEvent", - "version": "v1alpha1" + "version": "v1alpha2" }, { "group": "scheduling.k8s.io", diff --git a/api/openapi-spec/v3/apis__node.k8s.io__v1_openapi.json b/api/openapi-spec/v3/apis__node.k8s.io__v1_openapi.json index b7d1b3b598b..e11a9815cdf 100644 --- a/api/openapi-spec/v3/apis__node.k8s.io__v1_openapi.json +++ b/api/openapi-spec/v3/apis__node.k8s.io__v1_openapi.json @@ -591,7 +591,7 @@ { "group": "resource.k8s.io", "kind": "DeleteOptions", - "version": "v1alpha1" + "version": "v1alpha2" }, { "group": "scheduling.k8s.io", @@ -917,7 +917,7 @@ { "group": "resource.k8s.io", "kind": "Status", - "version": "v1alpha1" + "version": "v1alpha2" } ] }, @@ -1259,7 +1259,7 @@ { "group": "resource.k8s.io", "kind": "WatchEvent", - "version": "v1alpha1" + "version": "v1alpha2" }, { "group": "scheduling.k8s.io", diff --git a/api/openapi-spec/v3/apis__policy__v1_openapi.json b/api/openapi-spec/v3/apis__policy__v1_openapi.json index aafb06b9c65..4b87432d102 100644 --- a/api/openapi-spec/v3/apis__policy__v1_openapi.json +++ b/api/openapi-spec/v3/apis__policy__v1_openapi.json @@ -654,7 +654,7 @@ { "group": "resource.k8s.io", "kind": "DeleteOptions", - "version": "v1alpha1" + "version": "v1alpha2" }, { "group": "scheduling.k8s.io", @@ -1037,7 +1037,7 @@ { "group": "resource.k8s.io", "kind": "Status", - "version": "v1alpha1" + "version": "v1alpha2" } ] }, @@ -1379,7 +1379,7 @@ { "group": "resource.k8s.io", "kind": "WatchEvent", - "version": "v1alpha1" + "version": "v1alpha2" }, { "group": "scheduling.k8s.io", diff --git a/api/openapi-spec/v3/apis__rbac.authorization.k8s.io__v1_openapi.json b/api/openapi-spec/v3/apis__rbac.authorization.k8s.io__v1_openapi.json index 22652311888..1e8f9c30e20 100644 --- a/api/openapi-spec/v3/apis__rbac.authorization.k8s.io__v1_openapi.json +++ b/api/openapi-spec/v3/apis__rbac.authorization.k8s.io__v1_openapi.json @@ -911,7 +911,7 @@ { "group": "resource.k8s.io", "kind": "DeleteOptions", - "version": "v1alpha1" + "version": "v1alpha2" }, { "group": "scheduling.k8s.io", @@ -1294,7 +1294,7 @@ { "group": "resource.k8s.io", "kind": "Status", - "version": "v1alpha1" + "version": "v1alpha2" } ] }, @@ -1636,7 +1636,7 @@ { "group": "resource.k8s.io", "kind": "WatchEvent", - "version": "v1alpha1" + "version": "v1alpha2" }, { "group": "scheduling.k8s.io", diff --git a/api/openapi-spec/v3/apis__resource.k8s.io__v1alpha1_openapi.json b/api/openapi-spec/v3/apis__resource.k8s.io__v1alpha2_openapi.json similarity index 96% rename from api/openapi-spec/v3/apis__resource.k8s.io__v1alpha1_openapi.json rename to api/openapi-spec/v3/apis__resource.k8s.io__v1alpha2_openapi.json index 70e1b0b0615..d9ffe00c527 100644 --- a/api/openapi-spec/v3/apis__resource.k8s.io__v1alpha1_openapi.json +++ 
b/api/openapi-spec/v3/apis__resource.k8s.io__v1alpha2_openapi.json @@ -82,7 +82,7 @@ "type": "object", "x-kubernetes-map-type": "atomic" }, - "io.k8s.api.resource.v1alpha1.AllocationResult": { + "io.k8s.api.resource.v1alpha2.AllocationResult": { "description": "AllocationResult contains attributed of an allocated resource.", "properties": { "availableOnNodes": { @@ -104,7 +104,7 @@ }, "type": "object" }, - "io.k8s.api.resource.v1alpha1.PodScheduling": { + "io.k8s.api.resource.v1alpha2.PodScheduling": { "description": "PodScheduling objects hold information that is needed to schedule a Pod with ResourceClaims that use \"WaitForFirstConsumer\" allocation mode.\n\nThis is an alpha type and requires enabling the DynamicResourceAllocation feature gate.", "properties": { "apiVersion": { @@ -127,7 +127,7 @@ "spec": { "allOf": [ { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.PodSchedulingSpec" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodSchedulingSpec" } ], "default": {}, @@ -136,7 +136,7 @@ "status": { "allOf": [ { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.PodSchedulingStatus" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodSchedulingStatus" } ], "default": {}, @@ -151,11 +151,11 @@ { "group": "resource.k8s.io", "kind": "PodScheduling", - "version": "v1alpha1" + "version": "v1alpha2" } ] }, - "io.k8s.api.resource.v1alpha1.PodSchedulingList": { + "io.k8s.api.resource.v1alpha2.PodSchedulingList": { "description": "PodSchedulingList is a collection of Pod scheduling objects.", "properties": { "apiVersion": { @@ -167,7 +167,7 @@ "items": { "allOf": [ { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.PodScheduling" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodScheduling" } ], "default": {} @@ -196,11 +196,11 @@ { "group": "resource.k8s.io", "kind": "PodSchedulingList", - "version": "v1alpha1" + "version": "v1alpha2" } ] }, - "io.k8s.api.resource.v1alpha1.PodSchedulingSpec": { + "io.k8s.api.resource.v1alpha2.PodSchedulingSpec": { "description": "PodSchedulingSpec describes where resources for the Pod are needed.", "properties": { "potentialNodes": { @@ -219,7 +219,7 @@ }, "type": "object" }, - "io.k8s.api.resource.v1alpha1.PodSchedulingStatus": { + "io.k8s.api.resource.v1alpha2.PodSchedulingStatus": { "description": "PodSchedulingStatus describes where resources for the Pod can be allocated.", "properties": { "resourceClaims": { @@ -227,7 +227,7 @@ "items": { "allOf": [ { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClaimSchedulingStatus" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClaimSchedulingStatus" } ], "default": {} @@ -241,7 +241,7 @@ }, "type": "object" }, - "io.k8s.api.resource.v1alpha1.ResourceClaim": { + "io.k8s.api.resource.v1alpha2.ResourceClaim": { "description": "ResourceClaim describes which resources are needed by a resource consumer. 
Its status tracks whether the resource has been allocated and what the resulting attributes are.\n\nThis is an alpha type and requires enabling the DynamicResourceAllocation feature gate.", "properties": { "apiVersion": { @@ -264,7 +264,7 @@ "spec": { "allOf": [ { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClaimSpec" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClaimSpec" } ], "default": {}, @@ -273,7 +273,7 @@ "status": { "allOf": [ { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClaimStatus" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClaimStatus" } ], "default": {}, @@ -288,11 +288,11 @@ { "group": "resource.k8s.io", "kind": "ResourceClaim", - "version": "v1alpha1" + "version": "v1alpha2" } ] }, - "io.k8s.api.resource.v1alpha1.ResourceClaimConsumerReference": { + "io.k8s.api.resource.v1alpha2.ResourceClaimConsumerReference": { "description": "ResourceClaimConsumerReference contains enough information to let you locate the consumer of a ResourceClaim. The user must be a resource in the same namespace as the ResourceClaim.", "properties": { "apiGroup": { @@ -322,7 +322,7 @@ ], "type": "object" }, - "io.k8s.api.resource.v1alpha1.ResourceClaimList": { + "io.k8s.api.resource.v1alpha2.ResourceClaimList": { "description": "ResourceClaimList is a collection of claims.", "properties": { "apiVersion": { @@ -334,7 +334,7 @@ "items": { "allOf": [ { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClaim" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClaim" } ], "default": {} @@ -363,11 +363,11 @@ { "group": "resource.k8s.io", "kind": "ResourceClaimList", - "version": "v1alpha1" + "version": "v1alpha2" } ] }, - "io.k8s.api.resource.v1alpha1.ResourceClaimParametersReference": { + "io.k8s.api.resource.v1alpha2.ResourceClaimParametersReference": { "description": "ResourceClaimParametersReference contains enough information to let you locate the parameters for a ResourceClaim. The object must be in the same namespace as the ResourceClaim.", "properties": { "apiGroup": { @@ -391,7 +391,7 @@ ], "type": "object" }, - "io.k8s.api.resource.v1alpha1.ResourceClaimSchedulingStatus": { + "io.k8s.api.resource.v1alpha2.ResourceClaimSchedulingStatus": { "description": "ResourceClaimSchedulingStatus contains information about one particular ResourceClaim with \"WaitForFirstConsumer\" allocation mode.", "properties": { "name": { @@ -410,7 +410,7 @@ }, "type": "object" }, - "io.k8s.api.resource.v1alpha1.ResourceClaimSpec": { + "io.k8s.api.resource.v1alpha2.ResourceClaimSpec": { "description": "ResourceClaimSpec defines how a resource is to be allocated.", "properties": { "allocationMode": { @@ -420,7 +420,7 @@ "parametersRef": { "allOf": [ { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClaimParametersReference" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClaimParametersReference" } ], "description": "ParametersRef references a separate object with arbitrary parameters that will be used by the driver when allocating a resource for the claim.\n\nThe object must be in the same namespace as the ResourceClaim." 
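Illustrative note (not part of this change): the renamed ResourceClaimSpec above keeps the same shape, an allocationMode plus an optional parametersRef pointing at a driver-specific object in the claim's namespace; only the group version moves to resource.k8s.io/v1alpha2. The Go sketch below is a minimal, hypothetical exercise of that renamed schema. It assumes a local kubectl proxy on 127.0.0.1:8001, a ResourceClass named "example-class", an example.com/ClaimParameters object, and the spec.resourceClassName field from the wider v1alpha2 schema that is outside the hunks shown here; the request path and apiVersion string are the ones introduced by this rename.

package main

import (
	"bytes"
	"fmt"
	"io"
	"log"
	"net/http"
)

func main() {
	// Hypothetical ResourceClaim body; the class name, parameters kind, and
	// object names are placeholders, not defined anywhere in this diff.
	claim := []byte(`{
  "apiVersion": "resource.k8s.io/v1alpha2",
  "kind": "ResourceClaim",
  "metadata": {"name": "example-claim", "namespace": "default"},
  "spec": {
    "resourceClassName": "example-class",
    "allocationMode": "WaitForFirstConsumer",
    "parametersRef": {
      "apiGroup": "example.com",
      "kind": "ClaimParameters",
      "name": "example-claim-parameters"
    }
  }
}`)

	// Create the claim through the renamed collection path
	// (assumes `kubectl proxy` is serving the API on this address).
	url := "http://127.0.0.1:8001/apis/resource.k8s.io/v1alpha2/namespaces/default/resourceclaims"
	resp, err := http.Post(url, "application/json", bytes.NewReader(claim))
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status, string(body))
}
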
@@ -436,13 +436,13 @@ ], "type": "object" }, - "io.k8s.api.resource.v1alpha1.ResourceClaimStatus": { + "io.k8s.api.resource.v1alpha2.ResourceClaimStatus": { "description": "ResourceClaimStatus tracks whether the resource has been allocated and what the resulting attributes are.", "properties": { "allocation": { "allOf": [ { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.AllocationResult" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.AllocationResult" } ], "description": "Allocation is set by the resource driver once a resource has been allocated successfully. If this is not specified, the resource is not yet allocated." @@ -460,7 +460,7 @@ "items": { "allOf": [ { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClaimConsumerReference" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClaimConsumerReference" } ], "default": {} @@ -474,7 +474,7 @@ }, "type": "object" }, - "io.k8s.api.resource.v1alpha1.ResourceClaimTemplate": { + "io.k8s.api.resource.v1alpha2.ResourceClaimTemplate": { "description": "ResourceClaimTemplate is used to produce ResourceClaim objects.", "properties": { "apiVersion": { @@ -497,7 +497,7 @@ "spec": { "allOf": [ { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClaimTemplateSpec" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClaimTemplateSpec" } ], "default": {}, @@ -512,11 +512,11 @@ { "group": "resource.k8s.io", "kind": "ResourceClaimTemplate", - "version": "v1alpha1" + "version": "v1alpha2" } ] }, - "io.k8s.api.resource.v1alpha1.ResourceClaimTemplateList": { + "io.k8s.api.resource.v1alpha2.ResourceClaimTemplateList": { "description": "ResourceClaimTemplateList is a collection of claim templates.", "properties": { "apiVersion": { @@ -528,7 +528,7 @@ "items": { "allOf": [ { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClaimTemplate" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClaimTemplate" } ], "default": {} @@ -557,11 +557,11 @@ { "group": "resource.k8s.io", "kind": "ResourceClaimTemplateList", - "version": "v1alpha1" + "version": "v1alpha2" } ] }, - "io.k8s.api.resource.v1alpha1.ResourceClaimTemplateSpec": { + "io.k8s.api.resource.v1alpha2.ResourceClaimTemplateSpec": { "description": "ResourceClaimTemplateSpec contains the metadata and fields for a ResourceClaim.", "properties": { "metadata": { @@ -576,7 +576,7 @@ "spec": { "allOf": [ { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClaimSpec" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClaimSpec" } ], "default": {}, @@ -588,7 +588,7 @@ ], "type": "object" }, - "io.k8s.api.resource.v1alpha1.ResourceClass": { + "io.k8s.api.resource.v1alpha2.ResourceClass": { "description": "ResourceClass is used by administrators to influence how resources are allocated.\n\nThis is an alpha type and requires enabling the DynamicResourceAllocation feature gate.", "properties": { "apiVersion": { @@ -616,7 +616,7 @@ "parametersRef": { "allOf": [ { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClassParametersReference" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClassParametersReference" } ], "description": "ParametersRef references an arbitrary separate object that may hold parameters that will be used by the driver when allocating a resource that uses this class. A dynamic resource driver can distinguish between parameters stored here and and those stored in ResourceClaimSpec." 
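Illustrative note (not part of this change): the deprecated /apis/resource.k8s.io/v1alpha2/watch/... paths renamed earlier in this diff tell clients to use the list endpoints with the 'watch' parameter, narrowed to a single object with 'fieldSelector'. The sketch below shows that recommended pattern against the renamed resourceclaims list path; it again assumes a local kubectl proxy on 127.0.0.1:8001 and a claim named "example-claim" in the default namespace, both hypothetical.

package main

import (
	"bufio"
	"fmt"
	"log"
	"net/http"
	"net/url"
)

func main() {
	// Watch a single ResourceClaim via the list endpoint instead of the
	// deprecated /watch/ path: ?watch=true plus a fieldSelector on metadata.name.
	q := url.Values{}
	q.Set("watch", "true")
	q.Set("fieldSelector", "metadata.name=example-claim") // hypothetical claim name

	endpoint := "http://127.0.0.1:8001/apis/resource.k8s.io/v1alpha2/namespaces/default/resourceclaims?" + q.Encode()
	resp, err := http.Get(endpoint)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	// The server streams one JSON-encoded WatchEvent (ADDED/MODIFIED/DELETED) per line.
	scanner := bufio.NewScanner(resp.Body)
	for scanner.Scan() {
		fmt.Println(scanner.Text())
	}
	if err := scanner.Err(); err != nil {
		log.Fatal(err)
	}
}
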
@@ -638,11 +638,11 @@ { "group": "resource.k8s.io", "kind": "ResourceClass", - "version": "v1alpha1" + "version": "v1alpha2" } ] }, - "io.k8s.api.resource.v1alpha1.ResourceClassList": { + "io.k8s.api.resource.v1alpha2.ResourceClassList": { "description": "ResourceClassList is a collection of classes.", "properties": { "apiVersion": { @@ -654,7 +654,7 @@ "items": { "allOf": [ { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClass" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClass" } ], "default": {} @@ -683,11 +683,11 @@ { "group": "resource.k8s.io", "kind": "ResourceClassList", - "version": "v1alpha1" + "version": "v1alpha2" } ] }, - "io.k8s.api.resource.v1alpha1.ResourceClassParametersReference": { + "io.k8s.api.resource.v1alpha2.ResourceClassParametersReference": { "description": "ResourceClassParametersReference contains enough information to let you locate the parameters for a ResourceClass.", "properties": { "apiGroup": { @@ -1122,7 +1122,7 @@ { "group": "resource.k8s.io", "kind": "DeleteOptions", - "version": "v1alpha1" + "version": "v1alpha2" }, { "group": "scheduling.k8s.io", @@ -1448,7 +1448,7 @@ { "group": "resource.k8s.io", "kind": "Status", - "version": "v1alpha1" + "version": "v1alpha2" } ] }, @@ -1790,7 +1790,7 @@ { "group": "resource.k8s.io", "kind": "WatchEvent", - "version": "v1alpha1" + "version": "v1alpha2" }, { "group": "scheduling.k8s.io", @@ -1844,10 +1844,10 @@ }, "openapi": "3.0.0", "paths": { - "/apis/resource.k8s.io/v1alpha1/": { + "/apis/resource.k8s.io/v1alpha2/": { "get": { "description": "get available resources", - "operationId": "getResourceV1alpha1APIResources", + "operationId": "getResourceV1alpha2APIResources", "responses": { "200": { "content": { @@ -1874,14 +1874,14 @@ } }, "tags": [ - "resource_v1alpha1" + "resource_v1alpha2" ] } }, - "/apis/resource.k8s.io/v1alpha1/namespaces/{namespace}/podschedulings": { + "/apis/resource.k8s.io/v1alpha2/namespaces/{namespace}/podschedulings": { "delete": { "description": "delete collection of PodScheduling", - "operationId": "deleteResourceV1alpha1CollectionNamespacedPodScheduling", + "operationId": "deleteResourceV1alpha2CollectionNamespacedPodScheduling", "parameters": [ { "description": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. 
Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.", @@ -2027,18 +2027,18 @@ } }, "tags": [ - "resource_v1alpha1" + "resource_v1alpha2" ], "x-kubernetes-action": "deletecollection", "x-kubernetes-group-version-kind": { "group": "resource.k8s.io", "kind": "PodScheduling", - "version": "v1alpha1" + "version": "v1alpha2" } }, "get": { "description": "list or watch objects of kind PodScheduling", - "operationId": "listResourceV1alpha1NamespacedPodScheduling", + "operationId": "listResourceV1alpha2NamespacedPodScheduling", "parameters": [ { "description": "allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.", @@ -2136,27 +2136,27 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.PodSchedulingList" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodSchedulingList" } }, "application/json;stream=watch": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.PodSchedulingList" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodSchedulingList" } }, "application/vnd.kubernetes.protobuf": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.PodSchedulingList" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodSchedulingList" } }, "application/vnd.kubernetes.protobuf;stream=watch": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.PodSchedulingList" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodSchedulingList" } }, "application/yaml": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.PodSchedulingList" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodSchedulingList" } } }, @@ -2167,13 +2167,13 @@ } }, "tags": [ - "resource_v1alpha1" + "resource_v1alpha2" ], "x-kubernetes-action": "list", "x-kubernetes-group-version-kind": { "group": "resource.k8s.io", "kind": "PodScheduling", - "version": "v1alpha1" + "version": "v1alpha2" } }, "parameters": [ @@ -2199,7 +2199,7 @@ ], "post": { "description": "create a PodScheduling", - "operationId": "createResourceV1alpha1NamespacedPodScheduling", + "operationId": "createResourceV1alpha2NamespacedPodScheduling", "parameters": [ { "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. 
Valid values are: - All: all dry run stages will be processed", @@ -2233,7 +2233,7 @@ "content": { "*/*": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.PodScheduling" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodScheduling" } } } @@ -2243,17 +2243,17 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.PodScheduling" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodScheduling" } }, "application/vnd.kubernetes.protobuf": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.PodScheduling" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodScheduling" } }, "application/yaml": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.PodScheduling" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodScheduling" } } }, @@ -2263,17 +2263,17 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.PodScheduling" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodScheduling" } }, "application/vnd.kubernetes.protobuf": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.PodScheduling" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodScheduling" } }, "application/yaml": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.PodScheduling" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodScheduling" } } }, @@ -2283,17 +2283,17 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.PodScheduling" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodScheduling" } }, "application/vnd.kubernetes.protobuf": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.PodScheduling" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodScheduling" } }, "application/yaml": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.PodScheduling" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodScheduling" } } }, @@ -2304,20 +2304,20 @@ } }, "tags": [ - "resource_v1alpha1" + "resource_v1alpha2" ], "x-kubernetes-action": "post", "x-kubernetes-group-version-kind": { "group": "resource.k8s.io", "kind": "PodScheduling", - "version": "v1alpha1" + "version": "v1alpha2" } } }, - "/apis/resource.k8s.io/v1alpha1/namespaces/{namespace}/podschedulings/{name}": { + "/apis/resource.k8s.io/v1alpha2/namespaces/{namespace}/podschedulings/{name}": { "delete": { "description": "delete a PodScheduling", - "operationId": "deleteResourceV1alpha1NamespacedPodScheduling", + "operationId": "deleteResourceV1alpha2NamespacedPodScheduling", "parameters": [ { "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. 
Valid values are: - All: all dry run stages will be processed", @@ -2370,17 +2370,17 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.PodScheduling" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodScheduling" } }, "application/vnd.kubernetes.protobuf": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.PodScheduling" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodScheduling" } }, "application/yaml": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.PodScheduling" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodScheduling" } } }, @@ -2390,17 +2390,17 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.PodScheduling" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodScheduling" } }, "application/vnd.kubernetes.protobuf": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.PodScheduling" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodScheduling" } }, "application/yaml": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.PodScheduling" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodScheduling" } } }, @@ -2411,34 +2411,34 @@ } }, "tags": [ - "resource_v1alpha1" + "resource_v1alpha2" ], "x-kubernetes-action": "delete", "x-kubernetes-group-version-kind": { "group": "resource.k8s.io", "kind": "PodScheduling", - "version": "v1alpha1" + "version": "v1alpha2" } }, "get": { "description": "read the specified PodScheduling", - "operationId": "readResourceV1alpha1NamespacedPodScheduling", + "operationId": "readResourceV1alpha2NamespacedPodScheduling", "responses": { "200": { "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.PodScheduling" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodScheduling" } }, "application/vnd.kubernetes.protobuf": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.PodScheduling" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodScheduling" } }, "application/yaml": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.PodScheduling" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodScheduling" } } }, @@ -2449,13 +2449,13 @@ } }, "tags": [ - "resource_v1alpha1" + "resource_v1alpha2" ], "x-kubernetes-action": "get", "x-kubernetes-group-version-kind": { "group": "resource.k8s.io", "kind": "PodScheduling", - "version": "v1alpha1" + "version": "v1alpha2" } }, "parameters": [ @@ -2491,7 +2491,7 @@ ], "patch": { "description": "partially update the specified PodScheduling", - "operationId": "patchResourceV1alpha1NamespacedPodScheduling", + "operationId": "patchResourceV1alpha2NamespacedPodScheduling", "parameters": [ { "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. 
Valid values are: - All: all dry run stages will be processed", @@ -2559,17 +2559,17 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.PodScheduling" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodScheduling" } }, "application/vnd.kubernetes.protobuf": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.PodScheduling" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodScheduling" } }, "application/yaml": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.PodScheduling" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodScheduling" } } }, @@ -2579,17 +2579,17 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.PodScheduling" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodScheduling" } }, "application/vnd.kubernetes.protobuf": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.PodScheduling" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodScheduling" } }, "application/yaml": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.PodScheduling" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodScheduling" } } }, @@ -2600,18 +2600,18 @@ } }, "tags": [ - "resource_v1alpha1" + "resource_v1alpha2" ], "x-kubernetes-action": "patch", "x-kubernetes-group-version-kind": { "group": "resource.k8s.io", "kind": "PodScheduling", - "version": "v1alpha1" + "version": "v1alpha2" } }, "put": { "description": "replace the specified PodScheduling", - "operationId": "replaceResourceV1alpha1NamespacedPodScheduling", + "operationId": "replaceResourceV1alpha2NamespacedPodScheduling", "parameters": [ { "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. 
Valid values are: - All: all dry run stages will be processed", @@ -2645,7 +2645,7 @@ "content": { "*/*": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.PodScheduling" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodScheduling" } } } @@ -2655,17 +2655,17 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.PodScheduling" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodScheduling" } }, "application/vnd.kubernetes.protobuf": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.PodScheduling" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodScheduling" } }, "application/yaml": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.PodScheduling" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodScheduling" } } }, @@ -2675,17 +2675,17 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.PodScheduling" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodScheduling" } }, "application/vnd.kubernetes.protobuf": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.PodScheduling" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodScheduling" } }, "application/yaml": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.PodScheduling" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodScheduling" } } }, @@ -2696,36 +2696,36 @@ } }, "tags": [ - "resource_v1alpha1" + "resource_v1alpha2" ], "x-kubernetes-action": "put", "x-kubernetes-group-version-kind": { "group": "resource.k8s.io", "kind": "PodScheduling", - "version": "v1alpha1" + "version": "v1alpha2" } } }, - "/apis/resource.k8s.io/v1alpha1/namespaces/{namespace}/podschedulings/{name}/status": { + "/apis/resource.k8s.io/v1alpha2/namespaces/{namespace}/podschedulings/{name}/status": { "get": { "description": "read status of the specified PodScheduling", - "operationId": "readResourceV1alpha1NamespacedPodSchedulingStatus", + "operationId": "readResourceV1alpha2NamespacedPodSchedulingStatus", "responses": { "200": { "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.PodScheduling" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodScheduling" } }, "application/vnd.kubernetes.protobuf": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.PodScheduling" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodScheduling" } }, "application/yaml": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.PodScheduling" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodScheduling" } } }, @@ -2736,13 +2736,13 @@ } }, "tags": [ - "resource_v1alpha1" + "resource_v1alpha2" ], "x-kubernetes-action": "get", "x-kubernetes-group-version-kind": { "group": "resource.k8s.io", "kind": "PodScheduling", - "version": "v1alpha1" + "version": "v1alpha2" } }, "parameters": [ @@ -2778,7 +2778,7 @@ ], "patch": { "description": "partially update status of the specified PodScheduling", - "operationId": "patchResourceV1alpha1NamespacedPodSchedulingStatus", + "operationId": "patchResourceV1alpha2NamespacedPodSchedulingStatus", "parameters": [ { "description": "When present, indicates that modifications should not be persisted. 
An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", @@ -2846,17 +2846,17 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.PodScheduling" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodScheduling" } }, "application/vnd.kubernetes.protobuf": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.PodScheduling" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodScheduling" } }, "application/yaml": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.PodScheduling" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodScheduling" } } }, @@ -2866,17 +2866,17 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.PodScheduling" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodScheduling" } }, "application/vnd.kubernetes.protobuf": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.PodScheduling" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodScheduling" } }, "application/yaml": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.PodScheduling" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodScheduling" } } }, @@ -2887,18 +2887,18 @@ } }, "tags": [ - "resource_v1alpha1" + "resource_v1alpha2" ], "x-kubernetes-action": "patch", "x-kubernetes-group-version-kind": { "group": "resource.k8s.io", "kind": "PodScheduling", - "version": "v1alpha1" + "version": "v1alpha2" } }, "put": { "description": "replace status of the specified PodScheduling", - "operationId": "replaceResourceV1alpha1NamespacedPodSchedulingStatus", + "operationId": "replaceResourceV1alpha2NamespacedPodSchedulingStatus", "parameters": [ { "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. 
Valid values are: - All: all dry run stages will be processed", @@ -2932,7 +2932,7 @@ "content": { "*/*": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.PodScheduling" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodScheduling" } } } @@ -2942,17 +2942,17 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.PodScheduling" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodScheduling" } }, "application/vnd.kubernetes.protobuf": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.PodScheduling" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodScheduling" } }, "application/yaml": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.PodScheduling" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodScheduling" } } }, @@ -2962,17 +2962,17 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.PodScheduling" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodScheduling" } }, "application/vnd.kubernetes.protobuf": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.PodScheduling" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodScheduling" } }, "application/yaml": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.PodScheduling" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodScheduling" } } }, @@ -2983,20 +2983,20 @@ } }, "tags": [ - "resource_v1alpha1" + "resource_v1alpha2" ], "x-kubernetes-action": "put", "x-kubernetes-group-version-kind": { "group": "resource.k8s.io", "kind": "PodScheduling", - "version": "v1alpha1" + "version": "v1alpha2" } } }, - "/apis/resource.k8s.io/v1alpha1/namespaces/{namespace}/resourceclaims": { + "/apis/resource.k8s.io/v1alpha2/namespaces/{namespace}/resourceclaims": { "delete": { "description": "delete collection of ResourceClaim", - "operationId": "deleteResourceV1alpha1CollectionNamespacedResourceClaim", + "operationId": "deleteResourceV1alpha2CollectionNamespacedResourceClaim", "parameters": [ { "description": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. 
Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.", @@ -3142,18 +3142,18 @@ } }, "tags": [ - "resource_v1alpha1" + "resource_v1alpha2" ], "x-kubernetes-action": "deletecollection", "x-kubernetes-group-version-kind": { "group": "resource.k8s.io", "kind": "ResourceClaim", - "version": "v1alpha1" + "version": "v1alpha2" } }, "get": { "description": "list or watch objects of kind ResourceClaim", - "operationId": "listResourceV1alpha1NamespacedResourceClaim", + "operationId": "listResourceV1alpha2NamespacedResourceClaim", "parameters": [ { "description": "allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.", @@ -3251,27 +3251,27 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClaimList" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClaimList" } }, "application/json;stream=watch": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClaimList" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClaimList" } }, "application/vnd.kubernetes.protobuf": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClaimList" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClaimList" } }, "application/vnd.kubernetes.protobuf;stream=watch": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClaimList" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClaimList" } }, "application/yaml": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClaimList" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClaimList" } } }, @@ -3282,13 +3282,13 @@ } }, "tags": [ - "resource_v1alpha1" + "resource_v1alpha2" ], "x-kubernetes-action": "list", "x-kubernetes-group-version-kind": { "group": "resource.k8s.io", "kind": "ResourceClaim", - "version": "v1alpha1" + "version": "v1alpha2" } }, "parameters": [ @@ -3314,7 +3314,7 @@ ], "post": { "description": "create a ResourceClaim", - "operationId": "createResourceV1alpha1NamespacedResourceClaim", + "operationId": "createResourceV1alpha2NamespacedResourceClaim", "parameters": [ { "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. 
Valid values are: - All: all dry run stages will be processed", @@ -3348,7 +3348,7 @@ "content": { "*/*": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClaim" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClaim" } } } @@ -3358,17 +3358,17 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClaim" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClaim" } }, "application/vnd.kubernetes.protobuf": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClaim" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClaim" } }, "application/yaml": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClaim" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClaim" } } }, @@ -3378,17 +3378,17 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClaim" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClaim" } }, "application/vnd.kubernetes.protobuf": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClaim" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClaim" } }, "application/yaml": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClaim" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClaim" } } }, @@ -3398,17 +3398,17 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClaim" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClaim" } }, "application/vnd.kubernetes.protobuf": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClaim" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClaim" } }, "application/yaml": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClaim" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClaim" } } }, @@ -3419,20 +3419,20 @@ } }, "tags": [ - "resource_v1alpha1" + "resource_v1alpha2" ], "x-kubernetes-action": "post", "x-kubernetes-group-version-kind": { "group": "resource.k8s.io", "kind": "ResourceClaim", - "version": "v1alpha1" + "version": "v1alpha2" } } }, - "/apis/resource.k8s.io/v1alpha1/namespaces/{namespace}/resourceclaims/{name}": { + "/apis/resource.k8s.io/v1alpha2/namespaces/{namespace}/resourceclaims/{name}": { "delete": { "description": "delete a ResourceClaim", - "operationId": "deleteResourceV1alpha1NamespacedResourceClaim", + "operationId": "deleteResourceV1alpha2NamespacedResourceClaim", "parameters": [ { "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. 
Valid values are: - All: all dry run stages will be processed", @@ -3485,17 +3485,17 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClaim" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClaim" } }, "application/vnd.kubernetes.protobuf": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClaim" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClaim" } }, "application/yaml": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClaim" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClaim" } } }, @@ -3505,17 +3505,17 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClaim" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClaim" } }, "application/vnd.kubernetes.protobuf": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClaim" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClaim" } }, "application/yaml": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClaim" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClaim" } } }, @@ -3526,34 +3526,34 @@ } }, "tags": [ - "resource_v1alpha1" + "resource_v1alpha2" ], "x-kubernetes-action": "delete", "x-kubernetes-group-version-kind": { "group": "resource.k8s.io", "kind": "ResourceClaim", - "version": "v1alpha1" + "version": "v1alpha2" } }, "get": { "description": "read the specified ResourceClaim", - "operationId": "readResourceV1alpha1NamespacedResourceClaim", + "operationId": "readResourceV1alpha2NamespacedResourceClaim", "responses": { "200": { "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClaim" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClaim" } }, "application/vnd.kubernetes.protobuf": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClaim" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClaim" } }, "application/yaml": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClaim" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClaim" } } }, @@ -3564,13 +3564,13 @@ } }, "tags": [ - "resource_v1alpha1" + "resource_v1alpha2" ], "x-kubernetes-action": "get", "x-kubernetes-group-version-kind": { "group": "resource.k8s.io", "kind": "ResourceClaim", - "version": "v1alpha1" + "version": "v1alpha2" } }, "parameters": [ @@ -3606,7 +3606,7 @@ ], "patch": { "description": "partially update the specified ResourceClaim", - "operationId": "patchResourceV1alpha1NamespacedResourceClaim", + "operationId": "patchResourceV1alpha2NamespacedResourceClaim", "parameters": [ { "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. 
Valid values are: - All: all dry run stages will be processed", @@ -3674,17 +3674,17 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClaim" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClaim" } }, "application/vnd.kubernetes.protobuf": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClaim" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClaim" } }, "application/yaml": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClaim" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClaim" } } }, @@ -3694,17 +3694,17 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClaim" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClaim" } }, "application/vnd.kubernetes.protobuf": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClaim" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClaim" } }, "application/yaml": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClaim" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClaim" } } }, @@ -3715,18 +3715,18 @@ } }, "tags": [ - "resource_v1alpha1" + "resource_v1alpha2" ], "x-kubernetes-action": "patch", "x-kubernetes-group-version-kind": { "group": "resource.k8s.io", "kind": "ResourceClaim", - "version": "v1alpha1" + "version": "v1alpha2" } }, "put": { "description": "replace the specified ResourceClaim", - "operationId": "replaceResourceV1alpha1NamespacedResourceClaim", + "operationId": "replaceResourceV1alpha2NamespacedResourceClaim", "parameters": [ { "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. 
Valid values are: - All: all dry run stages will be processed", @@ -3760,7 +3760,7 @@ "content": { "*/*": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClaim" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClaim" } } } @@ -3770,17 +3770,17 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClaim" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClaim" } }, "application/vnd.kubernetes.protobuf": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClaim" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClaim" } }, "application/yaml": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClaim" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClaim" } } }, @@ -3790,17 +3790,17 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClaim" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClaim" } }, "application/vnd.kubernetes.protobuf": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClaim" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClaim" } }, "application/yaml": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClaim" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClaim" } } }, @@ -3811,36 +3811,36 @@ } }, "tags": [ - "resource_v1alpha1" + "resource_v1alpha2" ], "x-kubernetes-action": "put", "x-kubernetes-group-version-kind": { "group": "resource.k8s.io", "kind": "ResourceClaim", - "version": "v1alpha1" + "version": "v1alpha2" } } }, - "/apis/resource.k8s.io/v1alpha1/namespaces/{namespace}/resourceclaims/{name}/status": { + "/apis/resource.k8s.io/v1alpha2/namespaces/{namespace}/resourceclaims/{name}/status": { "get": { "description": "read status of the specified ResourceClaim", - "operationId": "readResourceV1alpha1NamespacedResourceClaimStatus", + "operationId": "readResourceV1alpha2NamespacedResourceClaimStatus", "responses": { "200": { "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClaim" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClaim" } }, "application/vnd.kubernetes.protobuf": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClaim" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClaim" } }, "application/yaml": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClaim" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClaim" } } }, @@ -3851,13 +3851,13 @@ } }, "tags": [ - "resource_v1alpha1" + "resource_v1alpha2" ], "x-kubernetes-action": "get", "x-kubernetes-group-version-kind": { "group": "resource.k8s.io", "kind": "ResourceClaim", - "version": "v1alpha1" + "version": "v1alpha2" } }, "parameters": [ @@ -3893,7 +3893,7 @@ ], "patch": { "description": "partially update status of the specified ResourceClaim", - "operationId": "patchResourceV1alpha1NamespacedResourceClaimStatus", + "operationId": "patchResourceV1alpha2NamespacedResourceClaimStatus", "parameters": [ { "description": "When present, indicates that modifications should not be persisted. 
An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", @@ -3961,17 +3961,17 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClaim" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClaim" } }, "application/vnd.kubernetes.protobuf": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClaim" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClaim" } }, "application/yaml": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClaim" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClaim" } } }, @@ -3981,17 +3981,17 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClaim" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClaim" } }, "application/vnd.kubernetes.protobuf": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClaim" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClaim" } }, "application/yaml": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClaim" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClaim" } } }, @@ -4002,18 +4002,18 @@ } }, "tags": [ - "resource_v1alpha1" + "resource_v1alpha2" ], "x-kubernetes-action": "patch", "x-kubernetes-group-version-kind": { "group": "resource.k8s.io", "kind": "ResourceClaim", - "version": "v1alpha1" + "version": "v1alpha2" } }, "put": { "description": "replace status of the specified ResourceClaim", - "operationId": "replaceResourceV1alpha1NamespacedResourceClaimStatus", + "operationId": "replaceResourceV1alpha2NamespacedResourceClaimStatus", "parameters": [ { "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. 
Valid values are: - All: all dry run stages will be processed", @@ -4047,7 +4047,7 @@ "content": { "*/*": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClaim" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClaim" } } } @@ -4057,17 +4057,17 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClaim" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClaim" } }, "application/vnd.kubernetes.protobuf": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClaim" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClaim" } }, "application/yaml": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClaim" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClaim" } } }, @@ -4077,17 +4077,17 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClaim" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClaim" } }, "application/vnd.kubernetes.protobuf": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClaim" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClaim" } }, "application/yaml": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClaim" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClaim" } } }, @@ -4098,20 +4098,20 @@ } }, "tags": [ - "resource_v1alpha1" + "resource_v1alpha2" ], "x-kubernetes-action": "put", "x-kubernetes-group-version-kind": { "group": "resource.k8s.io", "kind": "ResourceClaim", - "version": "v1alpha1" + "version": "v1alpha2" } } }, - "/apis/resource.k8s.io/v1alpha1/namespaces/{namespace}/resourceclaimtemplates": { + "/apis/resource.k8s.io/v1alpha2/namespaces/{namespace}/resourceclaimtemplates": { "delete": { "description": "delete collection of ResourceClaimTemplate", - "operationId": "deleteResourceV1alpha1CollectionNamespacedResourceClaimTemplate", + "operationId": "deleteResourceV1alpha2CollectionNamespacedResourceClaimTemplate", "parameters": [ { "description": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. 
Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.", @@ -4257,18 +4257,18 @@ } }, "tags": [ - "resource_v1alpha1" + "resource_v1alpha2" ], "x-kubernetes-action": "deletecollection", "x-kubernetes-group-version-kind": { "group": "resource.k8s.io", "kind": "ResourceClaimTemplate", - "version": "v1alpha1" + "version": "v1alpha2" } }, "get": { "description": "list or watch objects of kind ResourceClaimTemplate", - "operationId": "listResourceV1alpha1NamespacedResourceClaimTemplate", + "operationId": "listResourceV1alpha2NamespacedResourceClaimTemplate", "parameters": [ { "description": "allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.", @@ -4366,27 +4366,27 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClaimTemplateList" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClaimTemplateList" } }, "application/json;stream=watch": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClaimTemplateList" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClaimTemplateList" } }, "application/vnd.kubernetes.protobuf": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClaimTemplateList" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClaimTemplateList" } }, "application/vnd.kubernetes.protobuf;stream=watch": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClaimTemplateList" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClaimTemplateList" } }, "application/yaml": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClaimTemplateList" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClaimTemplateList" } } }, @@ -4397,13 +4397,13 @@ } }, "tags": [ - "resource_v1alpha1" + "resource_v1alpha2" ], "x-kubernetes-action": "list", "x-kubernetes-group-version-kind": { "group": "resource.k8s.io", "kind": "ResourceClaimTemplate", - "version": "v1alpha1" + "version": "v1alpha2" } }, "parameters": [ @@ -4429,7 +4429,7 @@ ], "post": { "description": "create a ResourceClaimTemplate", - "operationId": "createResourceV1alpha1NamespacedResourceClaimTemplate", + "operationId": "createResourceV1alpha2NamespacedResourceClaimTemplate", "parameters": [ { "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. 
Valid values are: - All: all dry run stages will be processed", @@ -4463,7 +4463,7 @@ "content": { "*/*": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClaimTemplate" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClaimTemplate" } } } @@ -4473,17 +4473,17 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClaimTemplate" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClaimTemplate" } }, "application/vnd.kubernetes.protobuf": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClaimTemplate" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClaimTemplate" } }, "application/yaml": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClaimTemplate" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClaimTemplate" } } }, @@ -4493,17 +4493,17 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClaimTemplate" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClaimTemplate" } }, "application/vnd.kubernetes.protobuf": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClaimTemplate" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClaimTemplate" } }, "application/yaml": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClaimTemplate" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClaimTemplate" } } }, @@ -4513,17 +4513,17 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClaimTemplate" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClaimTemplate" } }, "application/vnd.kubernetes.protobuf": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClaimTemplate" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClaimTemplate" } }, "application/yaml": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClaimTemplate" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClaimTemplate" } } }, @@ -4534,20 +4534,20 @@ } }, "tags": [ - "resource_v1alpha1" + "resource_v1alpha2" ], "x-kubernetes-action": "post", "x-kubernetes-group-version-kind": { "group": "resource.k8s.io", "kind": "ResourceClaimTemplate", - "version": "v1alpha1" + "version": "v1alpha2" } } }, - "/apis/resource.k8s.io/v1alpha1/namespaces/{namespace}/resourceclaimtemplates/{name}": { + "/apis/resource.k8s.io/v1alpha2/namespaces/{namespace}/resourceclaimtemplates/{name}": { "delete": { "description": "delete a ResourceClaimTemplate", - "operationId": "deleteResourceV1alpha1NamespacedResourceClaimTemplate", + "operationId": "deleteResourceV1alpha2NamespacedResourceClaimTemplate", "parameters": [ { "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. 
Valid values are: - All: all dry run stages will be processed", @@ -4600,17 +4600,17 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClaimTemplate" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClaimTemplate" } }, "application/vnd.kubernetes.protobuf": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClaimTemplate" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClaimTemplate" } }, "application/yaml": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClaimTemplate" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClaimTemplate" } } }, @@ -4620,17 +4620,17 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClaimTemplate" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClaimTemplate" } }, "application/vnd.kubernetes.protobuf": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClaimTemplate" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClaimTemplate" } }, "application/yaml": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClaimTemplate" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClaimTemplate" } } }, @@ -4641,34 +4641,34 @@ } }, "tags": [ - "resource_v1alpha1" + "resource_v1alpha2" ], "x-kubernetes-action": "delete", "x-kubernetes-group-version-kind": { "group": "resource.k8s.io", "kind": "ResourceClaimTemplate", - "version": "v1alpha1" + "version": "v1alpha2" } }, "get": { "description": "read the specified ResourceClaimTemplate", - "operationId": "readResourceV1alpha1NamespacedResourceClaimTemplate", + "operationId": "readResourceV1alpha2NamespacedResourceClaimTemplate", "responses": { "200": { "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClaimTemplate" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClaimTemplate" } }, "application/vnd.kubernetes.protobuf": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClaimTemplate" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClaimTemplate" } }, "application/yaml": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClaimTemplate" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClaimTemplate" } } }, @@ -4679,13 +4679,13 @@ } }, "tags": [ - "resource_v1alpha1" + "resource_v1alpha2" ], "x-kubernetes-action": "get", "x-kubernetes-group-version-kind": { "group": "resource.k8s.io", "kind": "ResourceClaimTemplate", - "version": "v1alpha1" + "version": "v1alpha2" } }, "parameters": [ @@ -4721,7 +4721,7 @@ ], "patch": { "description": "partially update the specified ResourceClaimTemplate", - "operationId": "patchResourceV1alpha1NamespacedResourceClaimTemplate", + "operationId": "patchResourceV1alpha2NamespacedResourceClaimTemplate", "parameters": [ { "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. 
Valid values are: - All: all dry run stages will be processed", @@ -4789,17 +4789,17 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClaimTemplate" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClaimTemplate" } }, "application/vnd.kubernetes.protobuf": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClaimTemplate" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClaimTemplate" } }, "application/yaml": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClaimTemplate" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClaimTemplate" } } }, @@ -4809,17 +4809,17 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClaimTemplate" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClaimTemplate" } }, "application/vnd.kubernetes.protobuf": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClaimTemplate" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClaimTemplate" } }, "application/yaml": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClaimTemplate" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClaimTemplate" } } }, @@ -4830,18 +4830,18 @@ } }, "tags": [ - "resource_v1alpha1" + "resource_v1alpha2" ], "x-kubernetes-action": "patch", "x-kubernetes-group-version-kind": { "group": "resource.k8s.io", "kind": "ResourceClaimTemplate", - "version": "v1alpha1" + "version": "v1alpha2" } }, "put": { "description": "replace the specified ResourceClaimTemplate", - "operationId": "replaceResourceV1alpha1NamespacedResourceClaimTemplate", + "operationId": "replaceResourceV1alpha2NamespacedResourceClaimTemplate", "parameters": [ { "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. 
Valid values are: - All: all dry run stages will be processed", @@ -4875,7 +4875,7 @@ "content": { "*/*": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClaimTemplate" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClaimTemplate" } } } @@ -4885,17 +4885,17 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClaimTemplate" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClaimTemplate" } }, "application/vnd.kubernetes.protobuf": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClaimTemplate" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClaimTemplate" } }, "application/yaml": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClaimTemplate" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClaimTemplate" } } }, @@ -4905,17 +4905,17 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClaimTemplate" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClaimTemplate" } }, "application/vnd.kubernetes.protobuf": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClaimTemplate" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClaimTemplate" } }, "application/yaml": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClaimTemplate" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClaimTemplate" } } }, @@ -4926,46 +4926,46 @@ } }, "tags": [ - "resource_v1alpha1" + "resource_v1alpha2" ], "x-kubernetes-action": "put", "x-kubernetes-group-version-kind": { "group": "resource.k8s.io", "kind": "ResourceClaimTemplate", - "version": "v1alpha1" + "version": "v1alpha2" } } }, - "/apis/resource.k8s.io/v1alpha1/podschedulings": { + "/apis/resource.k8s.io/v1alpha2/podschedulings": { "get": { "description": "list or watch objects of kind PodScheduling", - "operationId": "listResourceV1alpha1PodSchedulingForAllNamespaces", + "operationId": "listResourceV1alpha2PodSchedulingForAllNamespaces", "responses": { "200": { "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.PodSchedulingList" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodSchedulingList" } }, "application/json;stream=watch": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.PodSchedulingList" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodSchedulingList" } }, "application/vnd.kubernetes.protobuf": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.PodSchedulingList" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodSchedulingList" } }, "application/vnd.kubernetes.protobuf;stream=watch": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.PodSchedulingList" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodSchedulingList" } }, "application/yaml": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.PodSchedulingList" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodSchedulingList" } } }, @@ -4976,13 +4976,13 @@ } }, "tags": [ - "resource_v1alpha1" + "resource_v1alpha2" ], "x-kubernetes-action": "list", "x-kubernetes-group-version-kind": { "group": "resource.k8s.io", "kind": "PodScheduling", - 
"version": "v1alpha1" + "version": "v1alpha2" } }, "parameters": [ @@ -5087,36 +5087,36 @@ } ] }, - "/apis/resource.k8s.io/v1alpha1/resourceclaims": { + "/apis/resource.k8s.io/v1alpha2/resourceclaims": { "get": { "description": "list or watch objects of kind ResourceClaim", - "operationId": "listResourceV1alpha1ResourceClaimForAllNamespaces", + "operationId": "listResourceV1alpha2ResourceClaimForAllNamespaces", "responses": { "200": { "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClaimList" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClaimList" } }, "application/json;stream=watch": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClaimList" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClaimList" } }, "application/vnd.kubernetes.protobuf": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClaimList" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClaimList" } }, "application/vnd.kubernetes.protobuf;stream=watch": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClaimList" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClaimList" } }, "application/yaml": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClaimList" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClaimList" } } }, @@ -5127,13 +5127,13 @@ } }, "tags": [ - "resource_v1alpha1" + "resource_v1alpha2" ], "x-kubernetes-action": "list", "x-kubernetes-group-version-kind": { "group": "resource.k8s.io", "kind": "ResourceClaim", - "version": "v1alpha1" + "version": "v1alpha2" } }, "parameters": [ @@ -5238,36 +5238,36 @@ } ] }, - "/apis/resource.k8s.io/v1alpha1/resourceclaimtemplates": { + "/apis/resource.k8s.io/v1alpha2/resourceclaimtemplates": { "get": { "description": "list or watch objects of kind ResourceClaimTemplate", - "operationId": "listResourceV1alpha1ResourceClaimTemplateForAllNamespaces", + "operationId": "listResourceV1alpha2ResourceClaimTemplateForAllNamespaces", "responses": { "200": { "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClaimTemplateList" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClaimTemplateList" } }, "application/json;stream=watch": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClaimTemplateList" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClaimTemplateList" } }, "application/vnd.kubernetes.protobuf": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClaimTemplateList" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClaimTemplateList" } }, "application/vnd.kubernetes.protobuf;stream=watch": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClaimTemplateList" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClaimTemplateList" } }, "application/yaml": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClaimTemplateList" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClaimTemplateList" } } }, @@ -5278,13 +5278,13 @@ } }, "tags": [ - "resource_v1alpha1" + "resource_v1alpha2" ], "x-kubernetes-action": "list", "x-kubernetes-group-version-kind": { "group": "resource.k8s.io", "kind": 
"ResourceClaimTemplate", - "version": "v1alpha1" + "version": "v1alpha2" } }, "parameters": [ @@ -5389,10 +5389,10 @@ } ] }, - "/apis/resource.k8s.io/v1alpha1/resourceclasses": { + "/apis/resource.k8s.io/v1alpha2/resourceclasses": { "delete": { "description": "delete collection of ResourceClass", - "operationId": "deleteResourceV1alpha1CollectionResourceClass", + "operationId": "deleteResourceV1alpha2CollectionResourceClass", "parameters": [ { "description": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.", @@ -5538,18 +5538,18 @@ } }, "tags": [ - "resource_v1alpha1" + "resource_v1alpha2" ], "x-kubernetes-action": "deletecollection", "x-kubernetes-group-version-kind": { "group": "resource.k8s.io", "kind": "ResourceClass", - "version": "v1alpha1" + "version": "v1alpha2" } }, "get": { "description": "list or watch objects of kind ResourceClass", - "operationId": "listResourceV1alpha1ResourceClass", + "operationId": "listResourceV1alpha2ResourceClass", "parameters": [ { "description": "allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. 
If this is not a watch, this field is ignored.", @@ -5647,27 +5647,27 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClassList" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClassList" } }, "application/json;stream=watch": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClassList" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClassList" } }, "application/vnd.kubernetes.protobuf": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClassList" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClassList" } }, "application/vnd.kubernetes.protobuf;stream=watch": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClassList" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClassList" } }, "application/yaml": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClassList" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClassList" } } }, @@ -5678,13 +5678,13 @@ } }, "tags": [ - "resource_v1alpha1" + "resource_v1alpha2" ], "x-kubernetes-action": "list", "x-kubernetes-group-version-kind": { "group": "resource.k8s.io", "kind": "ResourceClass", - "version": "v1alpha1" + "version": "v1alpha2" } }, "parameters": [ @@ -5700,7 +5700,7 @@ ], "post": { "description": "create a ResourceClass", - "operationId": "createResourceV1alpha1ResourceClass", + "operationId": "createResourceV1alpha2ResourceClass", "parameters": [ { "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. 
Valid values are: - All: all dry run stages will be processed", @@ -5734,7 +5734,7 @@ "content": { "*/*": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClass" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClass" } } } @@ -5744,17 +5744,17 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClass" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClass" } }, "application/vnd.kubernetes.protobuf": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClass" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClass" } }, "application/yaml": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClass" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClass" } } }, @@ -5764,17 +5764,17 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClass" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClass" } }, "application/vnd.kubernetes.protobuf": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClass" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClass" } }, "application/yaml": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClass" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClass" } } }, @@ -5784,17 +5784,17 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClass" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClass" } }, "application/vnd.kubernetes.protobuf": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClass" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClass" } }, "application/yaml": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClass" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClass" } } }, @@ -5805,20 +5805,20 @@ } }, "tags": [ - "resource_v1alpha1" + "resource_v1alpha2" ], "x-kubernetes-action": "post", "x-kubernetes-group-version-kind": { "group": "resource.k8s.io", "kind": "ResourceClass", - "version": "v1alpha1" + "version": "v1alpha2" } } }, - "/apis/resource.k8s.io/v1alpha1/resourceclasses/{name}": { + "/apis/resource.k8s.io/v1alpha2/resourceclasses/{name}": { "delete": { "description": "delete a ResourceClass", - "operationId": "deleteResourceV1alpha1ResourceClass", + "operationId": "deleteResourceV1alpha2ResourceClass", "parameters": [ { "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. 
Valid values are: - All: all dry run stages will be processed", @@ -5871,17 +5871,17 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClass" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClass" } }, "application/vnd.kubernetes.protobuf": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClass" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClass" } }, "application/yaml": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClass" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClass" } } }, @@ -5891,17 +5891,17 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClass" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClass" } }, "application/vnd.kubernetes.protobuf": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClass" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClass" } }, "application/yaml": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClass" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClass" } } }, @@ -5912,34 +5912,34 @@ } }, "tags": [ - "resource_v1alpha1" + "resource_v1alpha2" ], "x-kubernetes-action": "delete", "x-kubernetes-group-version-kind": { "group": "resource.k8s.io", "kind": "ResourceClass", - "version": "v1alpha1" + "version": "v1alpha2" } }, "get": { "description": "read the specified ResourceClass", - "operationId": "readResourceV1alpha1ResourceClass", + "operationId": "readResourceV1alpha2ResourceClass", "responses": { "200": { "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClass" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClass" } }, "application/vnd.kubernetes.protobuf": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClass" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClass" } }, "application/yaml": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClass" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClass" } } }, @@ -5950,13 +5950,13 @@ } }, "tags": [ - "resource_v1alpha1" + "resource_v1alpha2" ], "x-kubernetes-action": "get", "x-kubernetes-group-version-kind": { "group": "resource.k8s.io", "kind": "ResourceClass", - "version": "v1alpha1" + "version": "v1alpha2" } }, "parameters": [ @@ -5982,7 +5982,7 @@ ], "patch": { "description": "partially update the specified ResourceClass", - "operationId": "patchResourceV1alpha1ResourceClass", + "operationId": "patchResourceV1alpha2ResourceClass", "parameters": [ { "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. 
Valid values are: - All: all dry run stages will be processed", @@ -6050,17 +6050,17 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClass" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClass" } }, "application/vnd.kubernetes.protobuf": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClass" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClass" } }, "application/yaml": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClass" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClass" } } }, @@ -6070,17 +6070,17 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClass" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClass" } }, "application/vnd.kubernetes.protobuf": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClass" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClass" } }, "application/yaml": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClass" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClass" } } }, @@ -6091,18 +6091,18 @@ } }, "tags": [ - "resource_v1alpha1" + "resource_v1alpha2" ], "x-kubernetes-action": "patch", "x-kubernetes-group-version-kind": { "group": "resource.k8s.io", "kind": "ResourceClass", - "version": "v1alpha1" + "version": "v1alpha2" } }, "put": { "description": "replace the specified ResourceClass", - "operationId": "replaceResourceV1alpha1ResourceClass", + "operationId": "replaceResourceV1alpha2ResourceClass", "parameters": [ { "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. 
Valid values are: - All: all dry run stages will be processed", @@ -6136,7 +6136,7 @@ "content": { "*/*": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClass" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClass" } } } @@ -6146,17 +6146,17 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClass" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClass" } }, "application/vnd.kubernetes.protobuf": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClass" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClass" } }, "application/yaml": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClass" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClass" } } }, @@ -6166,17 +6166,17 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClass" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClass" } }, "application/vnd.kubernetes.protobuf": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClass" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClass" } }, "application/yaml": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha1.ResourceClass" + "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClass" } } }, @@ -6187,20 +6187,20 @@ } }, "tags": [ - "resource_v1alpha1" + "resource_v1alpha2" ], "x-kubernetes-action": "put", "x-kubernetes-group-version-kind": { "group": "resource.k8s.io", "kind": "ResourceClass", - "version": "v1alpha1" + "version": "v1alpha2" } } }, - "/apis/resource.k8s.io/v1alpha1/watch/namespaces/{namespace}/podschedulings": { + "/apis/resource.k8s.io/v1alpha2/watch/namespaces/{namespace}/podschedulings": { "get": { "description": "watch individual changes to a list of PodScheduling. deprecated: use the 'watch' parameter with a list operation instead.", - "operationId": "watchResourceV1alpha1NamespacedPodSchedulingList", + "operationId": "watchResourceV1alpha2NamespacedPodSchedulingList", "responses": { "200": { "content": { @@ -6237,13 +6237,13 @@ } }, "tags": [ - "resource_v1alpha1" + "resource_v1alpha2" ], "x-kubernetes-action": "watchlist", "x-kubernetes-group-version-kind": { "group": "resource.k8s.io", "kind": "PodScheduling", - "version": "v1alpha1" + "version": "v1alpha2" } }, "parameters": [ @@ -6358,10 +6358,10 @@ } ] }, - "/apis/resource.k8s.io/v1alpha1/watch/namespaces/{namespace}/podschedulings/{name}": { + "/apis/resource.k8s.io/v1alpha2/watch/namespaces/{namespace}/podschedulings/{name}": { "get": { "description": "watch changes to an object of kind PodScheduling. 
deprecated: use the 'watch' parameter with a list operation instead, filtered to a single item with the 'fieldSelector' parameter.", - "operationId": "watchResourceV1alpha1NamespacedPodScheduling", + "operationId": "watchResourceV1alpha2NamespacedPodScheduling", "responses": { "200": { "content": { @@ -6398,13 +6398,13 @@ } }, "tags": [ - "resource_v1alpha1" + "resource_v1alpha2" ], "x-kubernetes-action": "watch", "x-kubernetes-group-version-kind": { "group": "resource.k8s.io", "kind": "PodScheduling", - "version": "v1alpha1" + "version": "v1alpha2" } }, "parameters": [ @@ -6529,10 +6529,10 @@ } ] }, - "/apis/resource.k8s.io/v1alpha1/watch/namespaces/{namespace}/resourceclaims": { + "/apis/resource.k8s.io/v1alpha2/watch/namespaces/{namespace}/resourceclaims": { "get": { "description": "watch individual changes to a list of ResourceClaim. deprecated: use the 'watch' parameter with a list operation instead.", - "operationId": "watchResourceV1alpha1NamespacedResourceClaimList", + "operationId": "watchResourceV1alpha2NamespacedResourceClaimList", "responses": { "200": { "content": { @@ -6569,13 +6569,13 @@ } }, "tags": [ - "resource_v1alpha1" + "resource_v1alpha2" ], "x-kubernetes-action": "watchlist", "x-kubernetes-group-version-kind": { "group": "resource.k8s.io", "kind": "ResourceClaim", - "version": "v1alpha1" + "version": "v1alpha2" } }, "parameters": [ @@ -6690,10 +6690,10 @@ } ] }, - "/apis/resource.k8s.io/v1alpha1/watch/namespaces/{namespace}/resourceclaims/{name}": { + "/apis/resource.k8s.io/v1alpha2/watch/namespaces/{namespace}/resourceclaims/{name}": { "get": { "description": "watch changes to an object of kind ResourceClaim. deprecated: use the 'watch' parameter with a list operation instead, filtered to a single item with the 'fieldSelector' parameter.", - "operationId": "watchResourceV1alpha1NamespacedResourceClaim", + "operationId": "watchResourceV1alpha2NamespacedResourceClaim", "responses": { "200": { "content": { @@ -6730,13 +6730,13 @@ } }, "tags": [ - "resource_v1alpha1" + "resource_v1alpha2" ], "x-kubernetes-action": "watch", "x-kubernetes-group-version-kind": { "group": "resource.k8s.io", "kind": "ResourceClaim", - "version": "v1alpha1" + "version": "v1alpha2" } }, "parameters": [ @@ -6861,10 +6861,10 @@ } ] }, - "/apis/resource.k8s.io/v1alpha1/watch/namespaces/{namespace}/resourceclaimtemplates": { + "/apis/resource.k8s.io/v1alpha2/watch/namespaces/{namespace}/resourceclaimtemplates": { "get": { "description": "watch individual changes to a list of ResourceClaimTemplate. deprecated: use the 'watch' parameter with a list operation instead.", - "operationId": "watchResourceV1alpha1NamespacedResourceClaimTemplateList", + "operationId": "watchResourceV1alpha2NamespacedResourceClaimTemplateList", "responses": { "200": { "content": { @@ -6901,13 +6901,13 @@ } }, "tags": [ - "resource_v1alpha1" + "resource_v1alpha2" ], "x-kubernetes-action": "watchlist", "x-kubernetes-group-version-kind": { "group": "resource.k8s.io", "kind": "ResourceClaimTemplate", - "version": "v1alpha1" + "version": "v1alpha2" } }, "parameters": [ @@ -7022,10 +7022,10 @@ } ] }, - "/apis/resource.k8s.io/v1alpha1/watch/namespaces/{namespace}/resourceclaimtemplates/{name}": { + "/apis/resource.k8s.io/v1alpha2/watch/namespaces/{namespace}/resourceclaimtemplates/{name}": { "get": { "description": "watch changes to an object of kind ResourceClaimTemplate. 
deprecated: use the 'watch' parameter with a list operation instead, filtered to a single item with the 'fieldSelector' parameter.", - "operationId": "watchResourceV1alpha1NamespacedResourceClaimTemplate", + "operationId": "watchResourceV1alpha2NamespacedResourceClaimTemplate", "responses": { "200": { "content": { @@ -7062,13 +7062,13 @@ } }, "tags": [ - "resource_v1alpha1" + "resource_v1alpha2" ], "x-kubernetes-action": "watch", "x-kubernetes-group-version-kind": { "group": "resource.k8s.io", "kind": "ResourceClaimTemplate", - "version": "v1alpha1" + "version": "v1alpha2" } }, "parameters": [ @@ -7193,10 +7193,10 @@ } ] }, - "/apis/resource.k8s.io/v1alpha1/watch/podschedulings": { + "/apis/resource.k8s.io/v1alpha2/watch/podschedulings": { "get": { "description": "watch individual changes to a list of PodScheduling. deprecated: use the 'watch' parameter with a list operation instead.", - "operationId": "watchResourceV1alpha1PodSchedulingListForAllNamespaces", + "operationId": "watchResourceV1alpha2PodSchedulingListForAllNamespaces", "responses": { "200": { "content": { @@ -7233,13 +7233,13 @@ } }, "tags": [ - "resource_v1alpha1" + "resource_v1alpha2" ], "x-kubernetes-action": "watchlist", "x-kubernetes-group-version-kind": { "group": "resource.k8s.io", "kind": "PodScheduling", - "version": "v1alpha1" + "version": "v1alpha2" } }, "parameters": [ @@ -7344,10 +7344,10 @@ } ] }, - "/apis/resource.k8s.io/v1alpha1/watch/resourceclaims": { + "/apis/resource.k8s.io/v1alpha2/watch/resourceclaims": { "get": { "description": "watch individual changes to a list of ResourceClaim. deprecated: use the 'watch' parameter with a list operation instead.", - "operationId": "watchResourceV1alpha1ResourceClaimListForAllNamespaces", + "operationId": "watchResourceV1alpha2ResourceClaimListForAllNamespaces", "responses": { "200": { "content": { @@ -7384,13 +7384,13 @@ } }, "tags": [ - "resource_v1alpha1" + "resource_v1alpha2" ], "x-kubernetes-action": "watchlist", "x-kubernetes-group-version-kind": { "group": "resource.k8s.io", "kind": "ResourceClaim", - "version": "v1alpha1" + "version": "v1alpha2" } }, "parameters": [ @@ -7495,10 +7495,10 @@ } ] }, - "/apis/resource.k8s.io/v1alpha1/watch/resourceclaimtemplates": { + "/apis/resource.k8s.io/v1alpha2/watch/resourceclaimtemplates": { "get": { "description": "watch individual changes to a list of ResourceClaimTemplate. deprecated: use the 'watch' parameter with a list operation instead.", - "operationId": "watchResourceV1alpha1ResourceClaimTemplateListForAllNamespaces", + "operationId": "watchResourceV1alpha2ResourceClaimTemplateListForAllNamespaces", "responses": { "200": { "content": { @@ -7535,13 +7535,13 @@ } }, "tags": [ - "resource_v1alpha1" + "resource_v1alpha2" ], "x-kubernetes-action": "watchlist", "x-kubernetes-group-version-kind": { "group": "resource.k8s.io", "kind": "ResourceClaimTemplate", - "version": "v1alpha1" + "version": "v1alpha2" } }, "parameters": [ @@ -7646,10 +7646,10 @@ } ] }, - "/apis/resource.k8s.io/v1alpha1/watch/resourceclasses": { + "/apis/resource.k8s.io/v1alpha2/watch/resourceclasses": { "get": { "description": "watch individual changes to a list of ResourceClass. 
deprecated: use the 'watch' parameter with a list operation instead.", - "operationId": "watchResourceV1alpha1ResourceClassList", + "operationId": "watchResourceV1alpha2ResourceClassList", "responses": { "200": { "content": { @@ -7686,13 +7686,13 @@ } }, "tags": [ - "resource_v1alpha1" + "resource_v1alpha2" ], "x-kubernetes-action": "watchlist", "x-kubernetes-group-version-kind": { "group": "resource.k8s.io", "kind": "ResourceClass", - "version": "v1alpha1" + "version": "v1alpha2" } }, "parameters": [ @@ -7797,10 +7797,10 @@ } ] }, - "/apis/resource.k8s.io/v1alpha1/watch/resourceclasses/{name}": { + "/apis/resource.k8s.io/v1alpha2/watch/resourceclasses/{name}": { "get": { "description": "watch changes to an object of kind ResourceClass. deprecated: use the 'watch' parameter with a list operation instead, filtered to a single item with the 'fieldSelector' parameter.", - "operationId": "watchResourceV1alpha1ResourceClass", + "operationId": "watchResourceV1alpha2ResourceClass", "responses": { "200": { "content": { @@ -7837,13 +7837,13 @@ } }, "tags": [ - "resource_v1alpha1" + "resource_v1alpha2" ], "x-kubernetes-action": "watch", "x-kubernetes-group-version-kind": { "group": "resource.k8s.io", "kind": "ResourceClass", - "version": "v1alpha1" + "version": "v1alpha2" } }, "parameters": [ diff --git a/api/openapi-spec/v3/apis__scheduling.k8s.io__v1_openapi.json b/api/openapi-spec/v3/apis__scheduling.k8s.io__v1_openapi.json index bee2beb0ec5..b4196b63111 100644 --- a/api/openapi-spec/v3/apis__scheduling.k8s.io__v1_openapi.json +++ b/api/openapi-spec/v3/apis__scheduling.k8s.io__v1_openapi.json @@ -504,7 +504,7 @@ { "group": "resource.k8s.io", "kind": "DeleteOptions", - "version": "v1alpha1" + "version": "v1alpha2" }, { "group": "scheduling.k8s.io", @@ -830,7 +830,7 @@ { "group": "resource.k8s.io", "kind": "Status", - "version": "v1alpha1" + "version": "v1alpha2" } ] }, @@ -1172,7 +1172,7 @@ { "group": "resource.k8s.io", "kind": "WatchEvent", - "version": "v1alpha1" + "version": "v1alpha2" }, { "group": "scheduling.k8s.io", diff --git a/api/openapi-spec/v3/apis__storage.k8s.io__v1_openapi.json b/api/openapi-spec/v3/apis__storage.k8s.io__v1_openapi.json index 4e9c9dc97d1..77cbf934d80 100644 --- a/api/openapi-spec/v3/apis__storage.k8s.io__v1_openapi.json +++ b/api/openapi-spec/v3/apis__storage.k8s.io__v1_openapi.json @@ -2343,7 +2343,7 @@ { "group": "resource.k8s.io", "kind": "DeleteOptions", - "version": "v1alpha1" + "version": "v1alpha2" }, { "group": "scheduling.k8s.io", @@ -2726,7 +2726,7 @@ { "group": "resource.k8s.io", "kind": "Status", - "version": "v1alpha1" + "version": "v1alpha2" } ] }, @@ -3068,7 +3068,7 @@ { "group": "resource.k8s.io", "kind": "WatchEvent", - "version": "v1alpha1" + "version": "v1alpha2" }, { "group": "scheduling.k8s.io", diff --git a/api/openapi-spec/v3/apis__storage.k8s.io__v1beta1_openapi.json b/api/openapi-spec/v3/apis__storage.k8s.io__v1beta1_openapi.json index 32de31359dc..4e1b7b194db 100644 --- a/api/openapi-spec/v3/apis__storage.k8s.io__v1beta1_openapi.json +++ b/api/openapi-spec/v3/apis__storage.k8s.io__v1beta1_openapi.json @@ -530,7 +530,7 @@ { "group": "resource.k8s.io", "kind": "DeleteOptions", - "version": "v1alpha1" + "version": "v1alpha2" }, { "group": "scheduling.k8s.io", @@ -913,7 +913,7 @@ { "group": "resource.k8s.io", "kind": "Status", - "version": "v1alpha1" + "version": "v1alpha2" } ] }, @@ -1255,7 +1255,7 @@ { "group": "resource.k8s.io", "kind": "WatchEvent", - "version": "v1alpha1" + "version": "v1alpha2" }, { "group": "scheduling.k8s.io", diff 
--git a/cmd/kube-apiserver/app/aggregator.go b/cmd/kube-apiserver/app/aggregator.go index 6a7bd31d8a5..943012a4d9e 100644 --- a/cmd/kube-apiserver/app/aggregator.go +++ b/cmd/kube-apiserver/app/aggregator.go @@ -284,7 +284,7 @@ var apiVersionPriorities = map[schema.GroupVersion]priority{ {Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta1"}: {group: 16100, version: 12}, {Group: "flowcontrol.apiserver.k8s.io", Version: "v1alpha1"}: {group: 16100, version: 9}, {Group: "internal.apiserver.k8s.io", Version: "v1alpha1"}: {group: 16000, version: 9}, - {Group: "resource.k8s.io", Version: "v1alpha1"}: {group: 15900, version: 9}, + {Group: "resource.k8s.io", Version: "v1alpha2"}: {group: 15900, version: 9}, // Append a new group to the end of the list if unsure. // You can use min(existing group)-100 as the initial value for a group. // Version can be set to 9 (to have space around) for a new group. diff --git a/cmd/kube-controller-manager/app/core.go b/cmd/kube-controller-manager/app/core.go index 2dfed3e8860..59cc5035431 100644 --- a/cmd/kube-controller-manager/app/core.go +++ b/cmd/kube-controller-manager/app/core.go @@ -369,8 +369,8 @@ func startResourceClaimController(ctx context.Context, controllerContext Control ephemeralController, err := resourceclaim.NewController( controllerContext.ClientBuilder.ClientOrDie("resource-claim-controller"), controllerContext.InformerFactory.Core().V1().Pods(), - controllerContext.InformerFactory.Resource().V1alpha1().ResourceClaims(), - controllerContext.InformerFactory.Resource().V1alpha1().ResourceClaimTemplates()) + controllerContext.InformerFactory.Resource().V1alpha2().ResourceClaims(), + controllerContext.InformerFactory.Resource().V1alpha2().ResourceClaimTemplates()) if err != nil { return nil, true, fmt.Errorf("failed to start ephemeral volume controller: %v", err) } diff --git a/hack/lib/init.sh b/hack/lib/init.sh index 90086ad830a..6497292e013 100755 --- a/hack/lib/init.sh +++ b/hack/lib/init.sh @@ -92,7 +92,7 @@ coordination.k8s.io/v1beta1 \ coordination.k8s.io/v1 \ discovery.k8s.io/v1 \ discovery.k8s.io/v1beta1 \ -resource.k8s.io/v1alpha1 \ +resource.k8s.io/v1alpha2 \ extensions/v1beta1 \ events.k8s.io/v1 \ events.k8s.io/v1beta1 \ diff --git a/pkg/api/testing/defaulting_test.go b/pkg/api/testing/defaulting_test.go index 7b4d76d07c3..9444ad7dc34 100644 --- a/pkg/api/testing/defaulting_test.go +++ b/pkg/api/testing/defaulting_test.go @@ -137,10 +137,10 @@ func TestDefaulting(t *testing.T) { {Group: "rbac.authorization.k8s.io", Version: "v1", Kind: "ClusterRoleBindingList"}: {}, {Group: "rbac.authorization.k8s.io", Version: "v1", Kind: "RoleBinding"}: {}, {Group: "rbac.authorization.k8s.io", Version: "v1", Kind: "RoleBindingList"}: {}, - {Group: "resource.k8s.io", Version: "v1alpha1", Kind: "ResourceClaim"}: {}, - {Group: "resource.k8s.io", Version: "v1alpha1", Kind: "ResourceClaimList"}: {}, - {Group: "resource.k8s.io", Version: "v1alpha1", Kind: "ResourceClaimTemplate"}: {}, - {Group: "resource.k8s.io", Version: "v1alpha1", Kind: "ResourceClaimTemplateList"}: {}, + {Group: "resource.k8s.io", Version: "v1alpha2", Kind: "ResourceClaim"}: {}, + {Group: "resource.k8s.io", Version: "v1alpha2", Kind: "ResourceClaimList"}: {}, + {Group: "resource.k8s.io", Version: "v1alpha2", Kind: "ResourceClaimTemplate"}: {}, + {Group: "resource.k8s.io", Version: "v1alpha2", Kind: "ResourceClaimTemplateList"}: {}, {Group: "admissionregistration.k8s.io", Version: "v1alpha1", Kind: "ValidatingAdmissionPolicy"}: {}, {Group: "admissionregistration.k8s.io", Version: 
"v1alpha1", Kind: "ValidatingAdmissionPolicyList"}: {}, {Group: "admissionregistration.k8s.io", Version: "v1alpha1", Kind: "ValidatingAdmissionPolicyBinding"}: {}, diff --git a/pkg/apis/resource/install/install.go b/pkg/apis/resource/install/install.go index f0495e7468a..578797a1b5f 100644 --- a/pkg/apis/resource/install/install.go +++ b/pkg/apis/resource/install/install.go @@ -23,7 +23,7 @@ import ( utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/apis/resource" - "k8s.io/kubernetes/pkg/apis/resource/v1alpha1" + "k8s.io/kubernetes/pkg/apis/resource/v1alpha2" ) func init() { @@ -33,6 +33,6 @@ func init() { // Install registers the API group and adds types to a scheme func Install(scheme *runtime.Scheme) { utilruntime.Must(resource.AddToScheme(scheme)) - utilruntime.Must(v1alpha1.AddToScheme(scheme)) - utilruntime.Must(scheme.SetVersionPriority(v1alpha1.SchemeGroupVersion)) + utilruntime.Must(v1alpha2.AddToScheme(scheme)) + utilruntime.Must(scheme.SetVersionPriority(v1alpha2.SchemeGroupVersion)) } diff --git a/pkg/apis/resource/install/install_test.go b/pkg/apis/resource/install/install_test.go index ed034a71d77..ca726e95038 100644 --- a/pkg/apis/resource/install/install_test.go +++ b/pkg/apis/resource/install/install_test.go @@ -51,7 +51,7 @@ func TestResourceVersioner(t *testing.T) { func TestCodec(t *testing.T) { claim := internal.ResourceClaim{} - data, err := runtime.Encode(legacyscheme.Codecs.LegacyCodec(schema.GroupVersion{Group: "resource.k8s.io", Version: "v1alpha1"}), &claim) + data, err := runtime.Encode(legacyscheme.Codecs.LegacyCodec(schema.GroupVersion{Group: "resource.k8s.io", Version: "v1alpha2"}), &claim) if err != nil { t.Fatalf("unexpected error: %v", err) } @@ -59,7 +59,7 @@ func TestCodec(t *testing.T) { if err := json.Unmarshal(data, &other); err != nil { t.Fatalf("unexpected error: %v", err) } - if other.APIVersion != "resource.k8s.io/v1alpha1" || other.Kind != "ResourceClaim" { + if other.APIVersion != "resource.k8s.io/v1alpha2" || other.Kind != "ResourceClaim" { t.Errorf("unexpected unmarshalled object %#v", other) } } diff --git a/pkg/apis/resource/v1alpha1/zz_generated.conversion.go b/pkg/apis/resource/v1alpha1/zz_generated.conversion.go deleted file mode 100644 index da556bcae16..00000000000 --- a/pkg/apis/resource/v1alpha1/zz_generated.conversion.go +++ /dev/null @@ -1,668 +0,0 @@ -//go:build !ignore_autogenerated -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by conversion-gen. DO NOT EDIT. 
- -package v1alpha1 - -import ( - unsafe "unsafe" - - v1 "k8s.io/api/core/v1" - v1alpha1 "k8s.io/api/resource/v1alpha1" - conversion "k8s.io/apimachinery/pkg/conversion" - runtime "k8s.io/apimachinery/pkg/runtime" - types "k8s.io/apimachinery/pkg/types" - core "k8s.io/kubernetes/pkg/apis/core" - resource "k8s.io/kubernetes/pkg/apis/resource" -) - -func init() { - localSchemeBuilder.Register(RegisterConversions) -} - -// RegisterConversions adds conversion functions to the given scheme. -// Public to allow building arbitrary schemes. -func RegisterConversions(s *runtime.Scheme) error { - if err := s.AddGeneratedConversionFunc((*v1alpha1.AllocationResult)(nil), (*resource.AllocationResult)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_AllocationResult_To_resource_AllocationResult(a.(*v1alpha1.AllocationResult), b.(*resource.AllocationResult), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*resource.AllocationResult)(nil), (*v1alpha1.AllocationResult)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_resource_AllocationResult_To_v1alpha1_AllocationResult(a.(*resource.AllocationResult), b.(*v1alpha1.AllocationResult), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1alpha1.PodScheduling)(nil), (*resource.PodScheduling)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_PodScheduling_To_resource_PodScheduling(a.(*v1alpha1.PodScheduling), b.(*resource.PodScheduling), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*resource.PodScheduling)(nil), (*v1alpha1.PodScheduling)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_resource_PodScheduling_To_v1alpha1_PodScheduling(a.(*resource.PodScheduling), b.(*v1alpha1.PodScheduling), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1alpha1.PodSchedulingList)(nil), (*resource.PodSchedulingList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_PodSchedulingList_To_resource_PodSchedulingList(a.(*v1alpha1.PodSchedulingList), b.(*resource.PodSchedulingList), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*resource.PodSchedulingList)(nil), (*v1alpha1.PodSchedulingList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_resource_PodSchedulingList_To_v1alpha1_PodSchedulingList(a.(*resource.PodSchedulingList), b.(*v1alpha1.PodSchedulingList), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1alpha1.PodSchedulingSpec)(nil), (*resource.PodSchedulingSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_PodSchedulingSpec_To_resource_PodSchedulingSpec(a.(*v1alpha1.PodSchedulingSpec), b.(*resource.PodSchedulingSpec), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*resource.PodSchedulingSpec)(nil), (*v1alpha1.PodSchedulingSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_resource_PodSchedulingSpec_To_v1alpha1_PodSchedulingSpec(a.(*resource.PodSchedulingSpec), b.(*v1alpha1.PodSchedulingSpec), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1alpha1.PodSchedulingStatus)(nil), (*resource.PodSchedulingStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return 
Convert_v1alpha1_PodSchedulingStatus_To_resource_PodSchedulingStatus(a.(*v1alpha1.PodSchedulingStatus), b.(*resource.PodSchedulingStatus), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*resource.PodSchedulingStatus)(nil), (*v1alpha1.PodSchedulingStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_resource_PodSchedulingStatus_To_v1alpha1_PodSchedulingStatus(a.(*resource.PodSchedulingStatus), b.(*v1alpha1.PodSchedulingStatus), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1alpha1.ResourceClaim)(nil), (*resource.ResourceClaim)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_ResourceClaim_To_resource_ResourceClaim(a.(*v1alpha1.ResourceClaim), b.(*resource.ResourceClaim), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*resource.ResourceClaim)(nil), (*v1alpha1.ResourceClaim)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_resource_ResourceClaim_To_v1alpha1_ResourceClaim(a.(*resource.ResourceClaim), b.(*v1alpha1.ResourceClaim), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1alpha1.ResourceClaimConsumerReference)(nil), (*resource.ResourceClaimConsumerReference)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_ResourceClaimConsumerReference_To_resource_ResourceClaimConsumerReference(a.(*v1alpha1.ResourceClaimConsumerReference), b.(*resource.ResourceClaimConsumerReference), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*resource.ResourceClaimConsumerReference)(nil), (*v1alpha1.ResourceClaimConsumerReference)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_resource_ResourceClaimConsumerReference_To_v1alpha1_ResourceClaimConsumerReference(a.(*resource.ResourceClaimConsumerReference), b.(*v1alpha1.ResourceClaimConsumerReference), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1alpha1.ResourceClaimList)(nil), (*resource.ResourceClaimList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_ResourceClaimList_To_resource_ResourceClaimList(a.(*v1alpha1.ResourceClaimList), b.(*resource.ResourceClaimList), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*resource.ResourceClaimList)(nil), (*v1alpha1.ResourceClaimList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_resource_ResourceClaimList_To_v1alpha1_ResourceClaimList(a.(*resource.ResourceClaimList), b.(*v1alpha1.ResourceClaimList), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1alpha1.ResourceClaimParametersReference)(nil), (*resource.ResourceClaimParametersReference)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_ResourceClaimParametersReference_To_resource_ResourceClaimParametersReference(a.(*v1alpha1.ResourceClaimParametersReference), b.(*resource.ResourceClaimParametersReference), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*resource.ResourceClaimParametersReference)(nil), (*v1alpha1.ResourceClaimParametersReference)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_resource_ResourceClaimParametersReference_To_v1alpha1_ResourceClaimParametersReference(a.(*resource.ResourceClaimParametersReference), 
b.(*v1alpha1.ResourceClaimParametersReference), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1alpha1.ResourceClaimSchedulingStatus)(nil), (*resource.ResourceClaimSchedulingStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_ResourceClaimSchedulingStatus_To_resource_ResourceClaimSchedulingStatus(a.(*v1alpha1.ResourceClaimSchedulingStatus), b.(*resource.ResourceClaimSchedulingStatus), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*resource.ResourceClaimSchedulingStatus)(nil), (*v1alpha1.ResourceClaimSchedulingStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_resource_ResourceClaimSchedulingStatus_To_v1alpha1_ResourceClaimSchedulingStatus(a.(*resource.ResourceClaimSchedulingStatus), b.(*v1alpha1.ResourceClaimSchedulingStatus), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1alpha1.ResourceClaimSpec)(nil), (*resource.ResourceClaimSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_ResourceClaimSpec_To_resource_ResourceClaimSpec(a.(*v1alpha1.ResourceClaimSpec), b.(*resource.ResourceClaimSpec), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*resource.ResourceClaimSpec)(nil), (*v1alpha1.ResourceClaimSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_resource_ResourceClaimSpec_To_v1alpha1_ResourceClaimSpec(a.(*resource.ResourceClaimSpec), b.(*v1alpha1.ResourceClaimSpec), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1alpha1.ResourceClaimStatus)(nil), (*resource.ResourceClaimStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_ResourceClaimStatus_To_resource_ResourceClaimStatus(a.(*v1alpha1.ResourceClaimStatus), b.(*resource.ResourceClaimStatus), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*resource.ResourceClaimStatus)(nil), (*v1alpha1.ResourceClaimStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_resource_ResourceClaimStatus_To_v1alpha1_ResourceClaimStatus(a.(*resource.ResourceClaimStatus), b.(*v1alpha1.ResourceClaimStatus), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1alpha1.ResourceClaimTemplate)(nil), (*resource.ResourceClaimTemplate)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_ResourceClaimTemplate_To_resource_ResourceClaimTemplate(a.(*v1alpha1.ResourceClaimTemplate), b.(*resource.ResourceClaimTemplate), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*resource.ResourceClaimTemplate)(nil), (*v1alpha1.ResourceClaimTemplate)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_resource_ResourceClaimTemplate_To_v1alpha1_ResourceClaimTemplate(a.(*resource.ResourceClaimTemplate), b.(*v1alpha1.ResourceClaimTemplate), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1alpha1.ResourceClaimTemplateList)(nil), (*resource.ResourceClaimTemplateList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_ResourceClaimTemplateList_To_resource_ResourceClaimTemplateList(a.(*v1alpha1.ResourceClaimTemplateList), b.(*resource.ResourceClaimTemplateList), scope) - }); err != nil { - return err - } - if err := 
s.AddGeneratedConversionFunc((*resource.ResourceClaimTemplateList)(nil), (*v1alpha1.ResourceClaimTemplateList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_resource_ResourceClaimTemplateList_To_v1alpha1_ResourceClaimTemplateList(a.(*resource.ResourceClaimTemplateList), b.(*v1alpha1.ResourceClaimTemplateList), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1alpha1.ResourceClaimTemplateSpec)(nil), (*resource.ResourceClaimTemplateSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_ResourceClaimTemplateSpec_To_resource_ResourceClaimTemplateSpec(a.(*v1alpha1.ResourceClaimTemplateSpec), b.(*resource.ResourceClaimTemplateSpec), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*resource.ResourceClaimTemplateSpec)(nil), (*v1alpha1.ResourceClaimTemplateSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_resource_ResourceClaimTemplateSpec_To_v1alpha1_ResourceClaimTemplateSpec(a.(*resource.ResourceClaimTemplateSpec), b.(*v1alpha1.ResourceClaimTemplateSpec), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1alpha1.ResourceClass)(nil), (*resource.ResourceClass)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_ResourceClass_To_resource_ResourceClass(a.(*v1alpha1.ResourceClass), b.(*resource.ResourceClass), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*resource.ResourceClass)(nil), (*v1alpha1.ResourceClass)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_resource_ResourceClass_To_v1alpha1_ResourceClass(a.(*resource.ResourceClass), b.(*v1alpha1.ResourceClass), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1alpha1.ResourceClassList)(nil), (*resource.ResourceClassList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_ResourceClassList_To_resource_ResourceClassList(a.(*v1alpha1.ResourceClassList), b.(*resource.ResourceClassList), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*resource.ResourceClassList)(nil), (*v1alpha1.ResourceClassList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_resource_ResourceClassList_To_v1alpha1_ResourceClassList(a.(*resource.ResourceClassList), b.(*v1alpha1.ResourceClassList), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1alpha1.ResourceClassParametersReference)(nil), (*resource.ResourceClassParametersReference)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_ResourceClassParametersReference_To_resource_ResourceClassParametersReference(a.(*v1alpha1.ResourceClassParametersReference), b.(*resource.ResourceClassParametersReference), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*resource.ResourceClassParametersReference)(nil), (*v1alpha1.ResourceClassParametersReference)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_resource_ResourceClassParametersReference_To_v1alpha1_ResourceClassParametersReference(a.(*resource.ResourceClassParametersReference), b.(*v1alpha1.ResourceClassParametersReference), scope) - }); err != nil { - return err - } - return nil -} - -func autoConvert_v1alpha1_AllocationResult_To_resource_AllocationResult(in *v1alpha1.AllocationResult, out 
*resource.AllocationResult, s conversion.Scope) error { - out.ResourceHandle = in.ResourceHandle - out.AvailableOnNodes = (*core.NodeSelector)(unsafe.Pointer(in.AvailableOnNodes)) - out.Shareable = in.Shareable - return nil -} - -// Convert_v1alpha1_AllocationResult_To_resource_AllocationResult is an autogenerated conversion function. -func Convert_v1alpha1_AllocationResult_To_resource_AllocationResult(in *v1alpha1.AllocationResult, out *resource.AllocationResult, s conversion.Scope) error { - return autoConvert_v1alpha1_AllocationResult_To_resource_AllocationResult(in, out, s) -} - -func autoConvert_resource_AllocationResult_To_v1alpha1_AllocationResult(in *resource.AllocationResult, out *v1alpha1.AllocationResult, s conversion.Scope) error { - out.ResourceHandle = in.ResourceHandle - out.AvailableOnNodes = (*v1.NodeSelector)(unsafe.Pointer(in.AvailableOnNodes)) - out.Shareable = in.Shareable - return nil -} - -// Convert_resource_AllocationResult_To_v1alpha1_AllocationResult is an autogenerated conversion function. -func Convert_resource_AllocationResult_To_v1alpha1_AllocationResult(in *resource.AllocationResult, out *v1alpha1.AllocationResult, s conversion.Scope) error { - return autoConvert_resource_AllocationResult_To_v1alpha1_AllocationResult(in, out, s) -} - -func autoConvert_v1alpha1_PodScheduling_To_resource_PodScheduling(in *v1alpha1.PodScheduling, out *resource.PodScheduling, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1alpha1_PodSchedulingSpec_To_resource_PodSchedulingSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1alpha1_PodSchedulingStatus_To_resource_PodSchedulingStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -// Convert_v1alpha1_PodScheduling_To_resource_PodScheduling is an autogenerated conversion function. -func Convert_v1alpha1_PodScheduling_To_resource_PodScheduling(in *v1alpha1.PodScheduling, out *resource.PodScheduling, s conversion.Scope) error { - return autoConvert_v1alpha1_PodScheduling_To_resource_PodScheduling(in, out, s) -} - -func autoConvert_resource_PodScheduling_To_v1alpha1_PodScheduling(in *resource.PodScheduling, out *v1alpha1.PodScheduling, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_resource_PodSchedulingSpec_To_v1alpha1_PodSchedulingSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_resource_PodSchedulingStatus_To_v1alpha1_PodSchedulingStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -// Convert_resource_PodScheduling_To_v1alpha1_PodScheduling is an autogenerated conversion function. -func Convert_resource_PodScheduling_To_v1alpha1_PodScheduling(in *resource.PodScheduling, out *v1alpha1.PodScheduling, s conversion.Scope) error { - return autoConvert_resource_PodScheduling_To_v1alpha1_PodScheduling(in, out, s) -} - -func autoConvert_v1alpha1_PodSchedulingList_To_resource_PodSchedulingList(in *v1alpha1.PodSchedulingList, out *resource.PodSchedulingList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]resource.PodScheduling)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_v1alpha1_PodSchedulingList_To_resource_PodSchedulingList is an autogenerated conversion function. 
-func Convert_v1alpha1_PodSchedulingList_To_resource_PodSchedulingList(in *v1alpha1.PodSchedulingList, out *resource.PodSchedulingList, s conversion.Scope) error { - return autoConvert_v1alpha1_PodSchedulingList_To_resource_PodSchedulingList(in, out, s) -} - -func autoConvert_resource_PodSchedulingList_To_v1alpha1_PodSchedulingList(in *resource.PodSchedulingList, out *v1alpha1.PodSchedulingList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]v1alpha1.PodScheduling)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_resource_PodSchedulingList_To_v1alpha1_PodSchedulingList is an autogenerated conversion function. -func Convert_resource_PodSchedulingList_To_v1alpha1_PodSchedulingList(in *resource.PodSchedulingList, out *v1alpha1.PodSchedulingList, s conversion.Scope) error { - return autoConvert_resource_PodSchedulingList_To_v1alpha1_PodSchedulingList(in, out, s) -} - -func autoConvert_v1alpha1_PodSchedulingSpec_To_resource_PodSchedulingSpec(in *v1alpha1.PodSchedulingSpec, out *resource.PodSchedulingSpec, s conversion.Scope) error { - out.SelectedNode = in.SelectedNode - out.PotentialNodes = *(*[]string)(unsafe.Pointer(&in.PotentialNodes)) - return nil -} - -// Convert_v1alpha1_PodSchedulingSpec_To_resource_PodSchedulingSpec is an autogenerated conversion function. -func Convert_v1alpha1_PodSchedulingSpec_To_resource_PodSchedulingSpec(in *v1alpha1.PodSchedulingSpec, out *resource.PodSchedulingSpec, s conversion.Scope) error { - return autoConvert_v1alpha1_PodSchedulingSpec_To_resource_PodSchedulingSpec(in, out, s) -} - -func autoConvert_resource_PodSchedulingSpec_To_v1alpha1_PodSchedulingSpec(in *resource.PodSchedulingSpec, out *v1alpha1.PodSchedulingSpec, s conversion.Scope) error { - out.SelectedNode = in.SelectedNode - out.PotentialNodes = *(*[]string)(unsafe.Pointer(&in.PotentialNodes)) - return nil -} - -// Convert_resource_PodSchedulingSpec_To_v1alpha1_PodSchedulingSpec is an autogenerated conversion function. -func Convert_resource_PodSchedulingSpec_To_v1alpha1_PodSchedulingSpec(in *resource.PodSchedulingSpec, out *v1alpha1.PodSchedulingSpec, s conversion.Scope) error { - return autoConvert_resource_PodSchedulingSpec_To_v1alpha1_PodSchedulingSpec(in, out, s) -} - -func autoConvert_v1alpha1_PodSchedulingStatus_To_resource_PodSchedulingStatus(in *v1alpha1.PodSchedulingStatus, out *resource.PodSchedulingStatus, s conversion.Scope) error { - out.ResourceClaims = *(*[]resource.ResourceClaimSchedulingStatus)(unsafe.Pointer(&in.ResourceClaims)) - return nil -} - -// Convert_v1alpha1_PodSchedulingStatus_To_resource_PodSchedulingStatus is an autogenerated conversion function. -func Convert_v1alpha1_PodSchedulingStatus_To_resource_PodSchedulingStatus(in *v1alpha1.PodSchedulingStatus, out *resource.PodSchedulingStatus, s conversion.Scope) error { - return autoConvert_v1alpha1_PodSchedulingStatus_To_resource_PodSchedulingStatus(in, out, s) -} - -func autoConvert_resource_PodSchedulingStatus_To_v1alpha1_PodSchedulingStatus(in *resource.PodSchedulingStatus, out *v1alpha1.PodSchedulingStatus, s conversion.Scope) error { - out.ResourceClaims = *(*[]v1alpha1.ResourceClaimSchedulingStatus)(unsafe.Pointer(&in.ResourceClaims)) - return nil -} - -// Convert_resource_PodSchedulingStatus_To_v1alpha1_PodSchedulingStatus is an autogenerated conversion function. 
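The unsafe.Pointer casts above are how conversion-gen handles fields whose memory layout is identical in both packages: the slice is aliased rather than copied. A hand-written sketch of the same PodSchedulingSpec conversion, using the type names as introduced by this patch and an explicit copy, for comparison:

package sketch

import (
	v1alpha2 "k8s.io/api/resource/v1alpha2"
	"k8s.io/kubernetes/pkg/apis/resource"
)

// convertPodSchedulingSpecByHand copies the two spec fields one by one;
// the generated code above gets the same result with a single unsafe cast.
func convertPodSchedulingSpecByHand(in *v1alpha2.PodSchedulingSpec, out *resource.PodSchedulingSpec) {
	out.SelectedNode = in.SelectedNode
	out.PotentialNodes = append([]string(nil), in.PotentialNodes...)
}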
-func Convert_resource_PodSchedulingStatus_To_v1alpha1_PodSchedulingStatus(in *resource.PodSchedulingStatus, out *v1alpha1.PodSchedulingStatus, s conversion.Scope) error { - return autoConvert_resource_PodSchedulingStatus_To_v1alpha1_PodSchedulingStatus(in, out, s) -} - -func autoConvert_v1alpha1_ResourceClaim_To_resource_ResourceClaim(in *v1alpha1.ResourceClaim, out *resource.ResourceClaim, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1alpha1_ResourceClaimSpec_To_resource_ResourceClaimSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1alpha1_ResourceClaimStatus_To_resource_ResourceClaimStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -// Convert_v1alpha1_ResourceClaim_To_resource_ResourceClaim is an autogenerated conversion function. -func Convert_v1alpha1_ResourceClaim_To_resource_ResourceClaim(in *v1alpha1.ResourceClaim, out *resource.ResourceClaim, s conversion.Scope) error { - return autoConvert_v1alpha1_ResourceClaim_To_resource_ResourceClaim(in, out, s) -} - -func autoConvert_resource_ResourceClaim_To_v1alpha1_ResourceClaim(in *resource.ResourceClaim, out *v1alpha1.ResourceClaim, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_resource_ResourceClaimSpec_To_v1alpha1_ResourceClaimSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_resource_ResourceClaimStatus_To_v1alpha1_ResourceClaimStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -// Convert_resource_ResourceClaim_To_v1alpha1_ResourceClaim is an autogenerated conversion function. -func Convert_resource_ResourceClaim_To_v1alpha1_ResourceClaim(in *resource.ResourceClaim, out *v1alpha1.ResourceClaim, s conversion.Scope) error { - return autoConvert_resource_ResourceClaim_To_v1alpha1_ResourceClaim(in, out, s) -} - -func autoConvert_v1alpha1_ResourceClaimConsumerReference_To_resource_ResourceClaimConsumerReference(in *v1alpha1.ResourceClaimConsumerReference, out *resource.ResourceClaimConsumerReference, s conversion.Scope) error { - out.APIGroup = in.APIGroup - out.Resource = in.Resource - out.Name = in.Name - out.UID = types.UID(in.UID) - return nil -} - -// Convert_v1alpha1_ResourceClaimConsumerReference_To_resource_ResourceClaimConsumerReference is an autogenerated conversion function. -func Convert_v1alpha1_ResourceClaimConsumerReference_To_resource_ResourceClaimConsumerReference(in *v1alpha1.ResourceClaimConsumerReference, out *resource.ResourceClaimConsumerReference, s conversion.Scope) error { - return autoConvert_v1alpha1_ResourceClaimConsumerReference_To_resource_ResourceClaimConsumerReference(in, out, s) -} - -func autoConvert_resource_ResourceClaimConsumerReference_To_v1alpha1_ResourceClaimConsumerReference(in *resource.ResourceClaimConsumerReference, out *v1alpha1.ResourceClaimConsumerReference, s conversion.Scope) error { - out.APIGroup = in.APIGroup - out.Resource = in.Resource - out.Name = in.Name - out.UID = types.UID(in.UID) - return nil -} - -// Convert_resource_ResourceClaimConsumerReference_To_v1alpha1_ResourceClaimConsumerReference is an autogenerated conversion function. 
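The consumer reference converted above identifies who is using a claim by plural resource name, object name, and UID (typically a Pod). A sketch with placeholder values:

package sketch

import (
	v1alpha2 "k8s.io/api/resource/v1alpha2"
	"k8s.io/apimachinery/pkg/types"
)

// exampleConsumer records a Pod as the consumer of a claim; name and UID are placeholders.
var exampleConsumer = v1alpha2.ResourceClaimConsumerReference{
	Resource: "pods", // plural resource name; APIGroup stays empty for the core group
	Name:     "my-pod",
	UID:      types.UID("11111111-2222-3333-4444-555555555555"),
}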
-func Convert_resource_ResourceClaimConsumerReference_To_v1alpha1_ResourceClaimConsumerReference(in *resource.ResourceClaimConsumerReference, out *v1alpha1.ResourceClaimConsumerReference, s conversion.Scope) error { - return autoConvert_resource_ResourceClaimConsumerReference_To_v1alpha1_ResourceClaimConsumerReference(in, out, s) -} - -func autoConvert_v1alpha1_ResourceClaimList_To_resource_ResourceClaimList(in *v1alpha1.ResourceClaimList, out *resource.ResourceClaimList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]resource.ResourceClaim)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_v1alpha1_ResourceClaimList_To_resource_ResourceClaimList is an autogenerated conversion function. -func Convert_v1alpha1_ResourceClaimList_To_resource_ResourceClaimList(in *v1alpha1.ResourceClaimList, out *resource.ResourceClaimList, s conversion.Scope) error { - return autoConvert_v1alpha1_ResourceClaimList_To_resource_ResourceClaimList(in, out, s) -} - -func autoConvert_resource_ResourceClaimList_To_v1alpha1_ResourceClaimList(in *resource.ResourceClaimList, out *v1alpha1.ResourceClaimList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]v1alpha1.ResourceClaim)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_resource_ResourceClaimList_To_v1alpha1_ResourceClaimList is an autogenerated conversion function. -func Convert_resource_ResourceClaimList_To_v1alpha1_ResourceClaimList(in *resource.ResourceClaimList, out *v1alpha1.ResourceClaimList, s conversion.Scope) error { - return autoConvert_resource_ResourceClaimList_To_v1alpha1_ResourceClaimList(in, out, s) -} - -func autoConvert_v1alpha1_ResourceClaimParametersReference_To_resource_ResourceClaimParametersReference(in *v1alpha1.ResourceClaimParametersReference, out *resource.ResourceClaimParametersReference, s conversion.Scope) error { - out.APIGroup = in.APIGroup - out.Kind = in.Kind - out.Name = in.Name - return nil -} - -// Convert_v1alpha1_ResourceClaimParametersReference_To_resource_ResourceClaimParametersReference is an autogenerated conversion function. -func Convert_v1alpha1_ResourceClaimParametersReference_To_resource_ResourceClaimParametersReference(in *v1alpha1.ResourceClaimParametersReference, out *resource.ResourceClaimParametersReference, s conversion.Scope) error { - return autoConvert_v1alpha1_ResourceClaimParametersReference_To_resource_ResourceClaimParametersReference(in, out, s) -} - -func autoConvert_resource_ResourceClaimParametersReference_To_v1alpha1_ResourceClaimParametersReference(in *resource.ResourceClaimParametersReference, out *v1alpha1.ResourceClaimParametersReference, s conversion.Scope) error { - out.APIGroup = in.APIGroup - out.Kind = in.Kind - out.Name = in.Name - return nil -} - -// Convert_resource_ResourceClaimParametersReference_To_v1alpha1_ResourceClaimParametersReference is an autogenerated conversion function. 
-func Convert_resource_ResourceClaimParametersReference_To_v1alpha1_ResourceClaimParametersReference(in *resource.ResourceClaimParametersReference, out *v1alpha1.ResourceClaimParametersReference, s conversion.Scope) error { - return autoConvert_resource_ResourceClaimParametersReference_To_v1alpha1_ResourceClaimParametersReference(in, out, s) -} - -func autoConvert_v1alpha1_ResourceClaimSchedulingStatus_To_resource_ResourceClaimSchedulingStatus(in *v1alpha1.ResourceClaimSchedulingStatus, out *resource.ResourceClaimSchedulingStatus, s conversion.Scope) error { - out.Name = in.Name - out.UnsuitableNodes = *(*[]string)(unsafe.Pointer(&in.UnsuitableNodes)) - return nil -} - -// Convert_v1alpha1_ResourceClaimSchedulingStatus_To_resource_ResourceClaimSchedulingStatus is an autogenerated conversion function. -func Convert_v1alpha1_ResourceClaimSchedulingStatus_To_resource_ResourceClaimSchedulingStatus(in *v1alpha1.ResourceClaimSchedulingStatus, out *resource.ResourceClaimSchedulingStatus, s conversion.Scope) error { - return autoConvert_v1alpha1_ResourceClaimSchedulingStatus_To_resource_ResourceClaimSchedulingStatus(in, out, s) -} - -func autoConvert_resource_ResourceClaimSchedulingStatus_To_v1alpha1_ResourceClaimSchedulingStatus(in *resource.ResourceClaimSchedulingStatus, out *v1alpha1.ResourceClaimSchedulingStatus, s conversion.Scope) error { - out.Name = in.Name - out.UnsuitableNodes = *(*[]string)(unsafe.Pointer(&in.UnsuitableNodes)) - return nil -} - -// Convert_resource_ResourceClaimSchedulingStatus_To_v1alpha1_ResourceClaimSchedulingStatus is an autogenerated conversion function. -func Convert_resource_ResourceClaimSchedulingStatus_To_v1alpha1_ResourceClaimSchedulingStatus(in *resource.ResourceClaimSchedulingStatus, out *v1alpha1.ResourceClaimSchedulingStatus, s conversion.Scope) error { - return autoConvert_resource_ResourceClaimSchedulingStatus_To_v1alpha1_ResourceClaimSchedulingStatus(in, out, s) -} - -func autoConvert_v1alpha1_ResourceClaimSpec_To_resource_ResourceClaimSpec(in *v1alpha1.ResourceClaimSpec, out *resource.ResourceClaimSpec, s conversion.Scope) error { - out.ResourceClassName = in.ResourceClassName - out.ParametersRef = (*resource.ResourceClaimParametersReference)(unsafe.Pointer(in.ParametersRef)) - out.AllocationMode = resource.AllocationMode(in.AllocationMode) - return nil -} - -// Convert_v1alpha1_ResourceClaimSpec_To_resource_ResourceClaimSpec is an autogenerated conversion function. -func Convert_v1alpha1_ResourceClaimSpec_To_resource_ResourceClaimSpec(in *v1alpha1.ResourceClaimSpec, out *resource.ResourceClaimSpec, s conversion.Scope) error { - return autoConvert_v1alpha1_ResourceClaimSpec_To_resource_ResourceClaimSpec(in, out, s) -} - -func autoConvert_resource_ResourceClaimSpec_To_v1alpha1_ResourceClaimSpec(in *resource.ResourceClaimSpec, out *v1alpha1.ResourceClaimSpec, s conversion.Scope) error { - out.ResourceClassName = in.ResourceClassName - out.ParametersRef = (*v1alpha1.ResourceClaimParametersReference)(unsafe.Pointer(in.ParametersRef)) - out.AllocationMode = v1alpha1.AllocationMode(in.AllocationMode) - return nil -} - -// Convert_resource_ResourceClaimSpec_To_v1alpha1_ResourceClaimSpec is an autogenerated conversion function. 
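For reference, the three fields handled by the ResourceClaimSpec conversion above, populated in a sketch with placeholder names (the parameters object kind is driver-defined; a ConfigMap is only an illustration):

package sketch

import v1alpha2 "k8s.io/api/resource/v1alpha2"

// exampleSpec shows the fields copied by the spec conversion; names are placeholders.
var exampleSpec = v1alpha2.ResourceClaimSpec{
	ResourceClassName: "example-gpu-class",
	ParametersRef: &v1alpha2.ResourceClaimParametersReference{
		Kind: "ConfigMap", // empty APIGroup selects the core group
		Name: "gpu-params",
	},
	AllocationMode: v1alpha2.AllocationModeWaitForFirstConsumer,
}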
-func Convert_resource_ResourceClaimSpec_To_v1alpha1_ResourceClaimSpec(in *resource.ResourceClaimSpec, out *v1alpha1.ResourceClaimSpec, s conversion.Scope) error { - return autoConvert_resource_ResourceClaimSpec_To_v1alpha1_ResourceClaimSpec(in, out, s) -} - -func autoConvert_v1alpha1_ResourceClaimStatus_To_resource_ResourceClaimStatus(in *v1alpha1.ResourceClaimStatus, out *resource.ResourceClaimStatus, s conversion.Scope) error { - out.DriverName = in.DriverName - out.Allocation = (*resource.AllocationResult)(unsafe.Pointer(in.Allocation)) - out.ReservedFor = *(*[]resource.ResourceClaimConsumerReference)(unsafe.Pointer(&in.ReservedFor)) - out.DeallocationRequested = in.DeallocationRequested - return nil -} - -// Convert_v1alpha1_ResourceClaimStatus_To_resource_ResourceClaimStatus is an autogenerated conversion function. -func Convert_v1alpha1_ResourceClaimStatus_To_resource_ResourceClaimStatus(in *v1alpha1.ResourceClaimStatus, out *resource.ResourceClaimStatus, s conversion.Scope) error { - return autoConvert_v1alpha1_ResourceClaimStatus_To_resource_ResourceClaimStatus(in, out, s) -} - -func autoConvert_resource_ResourceClaimStatus_To_v1alpha1_ResourceClaimStatus(in *resource.ResourceClaimStatus, out *v1alpha1.ResourceClaimStatus, s conversion.Scope) error { - out.DriverName = in.DriverName - out.Allocation = (*v1alpha1.AllocationResult)(unsafe.Pointer(in.Allocation)) - out.ReservedFor = *(*[]v1alpha1.ResourceClaimConsumerReference)(unsafe.Pointer(&in.ReservedFor)) - out.DeallocationRequested = in.DeallocationRequested - return nil -} - -// Convert_resource_ResourceClaimStatus_To_v1alpha1_ResourceClaimStatus is an autogenerated conversion function. -func Convert_resource_ResourceClaimStatus_To_v1alpha1_ResourceClaimStatus(in *resource.ResourceClaimStatus, out *v1alpha1.ResourceClaimStatus, s conversion.Scope) error { - return autoConvert_resource_ResourceClaimStatus_To_v1alpha1_ResourceClaimStatus(in, out, s) -} - -func autoConvert_v1alpha1_ResourceClaimTemplate_To_resource_ResourceClaimTemplate(in *v1alpha1.ResourceClaimTemplate, out *resource.ResourceClaimTemplate, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1alpha1_ResourceClaimTemplateSpec_To_resource_ResourceClaimTemplateSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - return nil -} - -// Convert_v1alpha1_ResourceClaimTemplate_To_resource_ResourceClaimTemplate is an autogenerated conversion function. -func Convert_v1alpha1_ResourceClaimTemplate_To_resource_ResourceClaimTemplate(in *v1alpha1.ResourceClaimTemplate, out *resource.ResourceClaimTemplate, s conversion.Scope) error { - return autoConvert_v1alpha1_ResourceClaimTemplate_To_resource_ResourceClaimTemplate(in, out, s) -} - -func autoConvert_resource_ResourceClaimTemplate_To_v1alpha1_ResourceClaimTemplate(in *resource.ResourceClaimTemplate, out *v1alpha1.ResourceClaimTemplate, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_resource_ResourceClaimTemplateSpec_To_v1alpha1_ResourceClaimTemplateSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - return nil -} - -// Convert_resource_ResourceClaimTemplate_To_v1alpha1_ResourceClaimTemplate is an autogenerated conversion function. 
-func Convert_resource_ResourceClaimTemplate_To_v1alpha1_ResourceClaimTemplate(in *resource.ResourceClaimTemplate, out *v1alpha1.ResourceClaimTemplate, s conversion.Scope) error { - return autoConvert_resource_ResourceClaimTemplate_To_v1alpha1_ResourceClaimTemplate(in, out, s) -} - -func autoConvert_v1alpha1_ResourceClaimTemplateList_To_resource_ResourceClaimTemplateList(in *v1alpha1.ResourceClaimTemplateList, out *resource.ResourceClaimTemplateList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]resource.ResourceClaimTemplate)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_v1alpha1_ResourceClaimTemplateList_To_resource_ResourceClaimTemplateList is an autogenerated conversion function. -func Convert_v1alpha1_ResourceClaimTemplateList_To_resource_ResourceClaimTemplateList(in *v1alpha1.ResourceClaimTemplateList, out *resource.ResourceClaimTemplateList, s conversion.Scope) error { - return autoConvert_v1alpha1_ResourceClaimTemplateList_To_resource_ResourceClaimTemplateList(in, out, s) -} - -func autoConvert_resource_ResourceClaimTemplateList_To_v1alpha1_ResourceClaimTemplateList(in *resource.ResourceClaimTemplateList, out *v1alpha1.ResourceClaimTemplateList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]v1alpha1.ResourceClaimTemplate)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_resource_ResourceClaimTemplateList_To_v1alpha1_ResourceClaimTemplateList is an autogenerated conversion function. -func Convert_resource_ResourceClaimTemplateList_To_v1alpha1_ResourceClaimTemplateList(in *resource.ResourceClaimTemplateList, out *v1alpha1.ResourceClaimTemplateList, s conversion.Scope) error { - return autoConvert_resource_ResourceClaimTemplateList_To_v1alpha1_ResourceClaimTemplateList(in, out, s) -} - -func autoConvert_v1alpha1_ResourceClaimTemplateSpec_To_resource_ResourceClaimTemplateSpec(in *v1alpha1.ResourceClaimTemplateSpec, out *resource.ResourceClaimTemplateSpec, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1alpha1_ResourceClaimSpec_To_resource_ResourceClaimSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - return nil -} - -// Convert_v1alpha1_ResourceClaimTemplateSpec_To_resource_ResourceClaimTemplateSpec is an autogenerated conversion function. -func Convert_v1alpha1_ResourceClaimTemplateSpec_To_resource_ResourceClaimTemplateSpec(in *v1alpha1.ResourceClaimTemplateSpec, out *resource.ResourceClaimTemplateSpec, s conversion.Scope) error { - return autoConvert_v1alpha1_ResourceClaimTemplateSpec_To_resource_ResourceClaimTemplateSpec(in, out, s) -} - -func autoConvert_resource_ResourceClaimTemplateSpec_To_v1alpha1_ResourceClaimTemplateSpec(in *resource.ResourceClaimTemplateSpec, out *v1alpha1.ResourceClaimTemplateSpec, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_resource_ResourceClaimSpec_To_v1alpha1_ResourceClaimSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - return nil -} - -// Convert_resource_ResourceClaimTemplateSpec_To_v1alpha1_ResourceClaimTemplateSpec is an autogenerated conversion function. 
-func Convert_resource_ResourceClaimTemplateSpec_To_v1alpha1_ResourceClaimTemplateSpec(in *resource.ResourceClaimTemplateSpec, out *v1alpha1.ResourceClaimTemplateSpec, s conversion.Scope) error { - return autoConvert_resource_ResourceClaimTemplateSpec_To_v1alpha1_ResourceClaimTemplateSpec(in, out, s) -} - -func autoConvert_v1alpha1_ResourceClass_To_resource_ResourceClass(in *v1alpha1.ResourceClass, out *resource.ResourceClass, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - out.DriverName = in.DriverName - out.ParametersRef = (*resource.ResourceClassParametersReference)(unsafe.Pointer(in.ParametersRef)) - out.SuitableNodes = (*core.NodeSelector)(unsafe.Pointer(in.SuitableNodes)) - return nil -} - -// Convert_v1alpha1_ResourceClass_To_resource_ResourceClass is an autogenerated conversion function. -func Convert_v1alpha1_ResourceClass_To_resource_ResourceClass(in *v1alpha1.ResourceClass, out *resource.ResourceClass, s conversion.Scope) error { - return autoConvert_v1alpha1_ResourceClass_To_resource_ResourceClass(in, out, s) -} - -func autoConvert_resource_ResourceClass_To_v1alpha1_ResourceClass(in *resource.ResourceClass, out *v1alpha1.ResourceClass, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - out.DriverName = in.DriverName - out.ParametersRef = (*v1alpha1.ResourceClassParametersReference)(unsafe.Pointer(in.ParametersRef)) - out.SuitableNodes = (*v1.NodeSelector)(unsafe.Pointer(in.SuitableNodes)) - return nil -} - -// Convert_resource_ResourceClass_To_v1alpha1_ResourceClass is an autogenerated conversion function. -func Convert_resource_ResourceClass_To_v1alpha1_ResourceClass(in *resource.ResourceClass, out *v1alpha1.ResourceClass, s conversion.Scope) error { - return autoConvert_resource_ResourceClass_To_v1alpha1_ResourceClass(in, out, s) -} - -func autoConvert_v1alpha1_ResourceClassList_To_resource_ResourceClassList(in *v1alpha1.ResourceClassList, out *resource.ResourceClassList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]resource.ResourceClass)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_v1alpha1_ResourceClassList_To_resource_ResourceClassList is an autogenerated conversion function. -func Convert_v1alpha1_ResourceClassList_To_resource_ResourceClassList(in *v1alpha1.ResourceClassList, out *resource.ResourceClassList, s conversion.Scope) error { - return autoConvert_v1alpha1_ResourceClassList_To_resource_ResourceClassList(in, out, s) -} - -func autoConvert_resource_ResourceClassList_To_v1alpha1_ResourceClassList(in *resource.ResourceClassList, out *v1alpha1.ResourceClassList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]v1alpha1.ResourceClass)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_resource_ResourceClassList_To_v1alpha1_ResourceClassList is an autogenerated conversion function. 
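A ResourceClass ties a class name to a driver plus optional class-level parameters and a node filter, which is exactly the field set the conversions above copy. A sketch with placeholder names:

package sketch

import (
	v1alpha2 "k8s.io/api/resource/v1alpha2"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// exampleClass names the driver that handles claims of this class;
// ParametersRef and SuitableNodes are optional and left unset here.
var exampleClass = v1alpha2.ResourceClass{
	ObjectMeta: metav1.ObjectMeta{Name: "example-gpu-class"},
	DriverName: "gpu.example.com",
}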
-func Convert_resource_ResourceClassList_To_v1alpha1_ResourceClassList(in *resource.ResourceClassList, out *v1alpha1.ResourceClassList, s conversion.Scope) error { - return autoConvert_resource_ResourceClassList_To_v1alpha1_ResourceClassList(in, out, s) -} - -func autoConvert_v1alpha1_ResourceClassParametersReference_To_resource_ResourceClassParametersReference(in *v1alpha1.ResourceClassParametersReference, out *resource.ResourceClassParametersReference, s conversion.Scope) error { - out.APIGroup = in.APIGroup - out.Kind = in.Kind - out.Name = in.Name - out.Namespace = in.Namespace - return nil -} - -// Convert_v1alpha1_ResourceClassParametersReference_To_resource_ResourceClassParametersReference is an autogenerated conversion function. -func Convert_v1alpha1_ResourceClassParametersReference_To_resource_ResourceClassParametersReference(in *v1alpha1.ResourceClassParametersReference, out *resource.ResourceClassParametersReference, s conversion.Scope) error { - return autoConvert_v1alpha1_ResourceClassParametersReference_To_resource_ResourceClassParametersReference(in, out, s) -} - -func autoConvert_resource_ResourceClassParametersReference_To_v1alpha1_ResourceClassParametersReference(in *resource.ResourceClassParametersReference, out *v1alpha1.ResourceClassParametersReference, s conversion.Scope) error { - out.APIGroup = in.APIGroup - out.Kind = in.Kind - out.Name = in.Name - out.Namespace = in.Namespace - return nil -} - -// Convert_resource_ResourceClassParametersReference_To_v1alpha1_ResourceClassParametersReference is an autogenerated conversion function. -func Convert_resource_ResourceClassParametersReference_To_v1alpha1_ResourceClassParametersReference(in *resource.ResourceClassParametersReference, out *v1alpha1.ResourceClassParametersReference, s conversion.Scope) error { - return autoConvert_resource_ResourceClassParametersReference_To_v1alpha1_ResourceClassParametersReference(in, out, s) -} diff --git a/pkg/apis/resource/v1alpha1/conversion.go b/pkg/apis/resource/v1alpha2/conversion.go similarity index 97% rename from pkg/apis/resource/v1alpha1/conversion.go rename to pkg/apis/resource/v1alpha2/conversion.go index 25db58993fd..397d2a1fb08 100644 --- a/pkg/apis/resource/v1alpha1/conversion.go +++ b/pkg/apis/resource/v1alpha2/conversion.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package v1alpha1 +package v1alpha2 import ( "k8s.io/apimachinery/pkg/runtime" diff --git a/pkg/apis/resource/v1alpha1/defaults.go b/pkg/apis/resource/v1alpha2/defaults.go similarity index 81% rename from pkg/apis/resource/v1alpha1/defaults.go rename to pkg/apis/resource/v1alpha2/defaults.go index 8bc728f389c..a6c681f8bf5 100644 --- a/pkg/apis/resource/v1alpha1/defaults.go +++ b/pkg/apis/resource/v1alpha2/defaults.go @@ -14,10 +14,10 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package v1alpha1 +package v1alpha2 import ( - "k8s.io/api/resource/v1alpha1" + "k8s.io/api/resource/v1alpha2" "k8s.io/apimachinery/pkg/runtime" ) @@ -25,8 +25,8 @@ func addDefaultingFuncs(scheme *runtime.Scheme) error { return RegisterDefaults(scheme) } -func SetDefaults_ResourceClaimSpec(obj *v1alpha1.ResourceClaimSpec) { +func SetDefaults_ResourceClaimSpec(obj *v1alpha2.ResourceClaimSpec) { if obj.AllocationMode == "" { - obj.AllocationMode = v1alpha1.AllocationModeWaitForFirstConsumer + obj.AllocationMode = v1alpha2.AllocationModeWaitForFirstConsumer } } diff --git a/pkg/apis/resource/v1alpha1/defaults_test.go b/pkg/apis/resource/v1alpha2/defaults_test.go similarity index 77% rename from pkg/apis/resource/v1alpha1/defaults_test.go rename to pkg/apis/resource/v1alpha2/defaults_test.go index 01dafd4fda4..7f5b0d01046 100644 --- a/pkg/apis/resource/v1alpha1/defaults_test.go +++ b/pkg/apis/resource/v1alpha2/defaults_test.go @@ -14,13 +14,13 @@ See the License for the specific language governing permissions and limitations under the License. */ -package v1alpha1_test +package v1alpha2_test import ( "reflect" "testing" - v1alpha1 "k8s.io/api/resource/v1alpha1" + v1alpha2 "k8s.io/api/resource/v1alpha2" "k8s.io/apimachinery/pkg/runtime" // ensure types are installed @@ -29,32 +29,32 @@ import ( ) func TestSetDefaultAllocationMode(t *testing.T) { - claim := &v1alpha1.ResourceClaim{} + claim := &v1alpha2.ResourceClaim{} // field should be defaulted - defaultMode := v1alpha1.AllocationModeWaitForFirstConsumer - output := roundTrip(t, runtime.Object(claim)).(*v1alpha1.ResourceClaim) + defaultMode := v1alpha2.AllocationModeWaitForFirstConsumer + output := roundTrip(t, runtime.Object(claim)).(*v1alpha2.ResourceClaim) outMode := output.Spec.AllocationMode if outMode != defaultMode { t.Errorf("Expected AllocationMode to be defaulted to: %+v, got: %+v", defaultMode, outMode) } // field should not change - nonDefaultMode := v1alpha1.AllocationModeImmediate - claim = &v1alpha1.ResourceClaim{ - Spec: v1alpha1.ResourceClaimSpec{ + nonDefaultMode := v1alpha2.AllocationModeImmediate + claim = &v1alpha2.ResourceClaim{ + Spec: v1alpha2.ResourceClaimSpec{ AllocationMode: nonDefaultMode, }, } - output = roundTrip(t, runtime.Object(claim)).(*v1alpha1.ResourceClaim) + output = roundTrip(t, runtime.Object(claim)).(*v1alpha2.ResourceClaim) outMode = output.Spec.AllocationMode - if outMode != v1alpha1.AllocationModeImmediate { + if outMode != v1alpha2.AllocationModeImmediate { t.Errorf("Expected AllocationMode to remain %+v, got: %+v", nonDefaultMode, outMode) } } func roundTrip(t *testing.T, obj runtime.Object) runtime.Object { - codec := legacyscheme.Codecs.LegacyCodec(v1alpha1.SchemeGroupVersion) + codec := legacyscheme.Codecs.LegacyCodec(v1alpha2.SchemeGroupVersion) data, err := runtime.Encode(codec, obj) if err != nil { t.Errorf("%v\n %#v", err, obj) diff --git a/pkg/apis/resource/v1alpha1/doc.go b/pkg/apis/resource/v1alpha2/doc.go similarity index 78% rename from pkg/apis/resource/v1alpha1/doc.go rename to pkg/apis/resource/v1alpha2/doc.go index 77afe07224b..442bddf8e0f 100644 --- a/pkg/apis/resource/v1alpha1/doc.go +++ b/pkg/apis/resource/v1alpha2/doc.go @@ -15,9 +15,9 @@ limitations under the License. 
*/ // +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/resource -// +k8s:conversion-gen-external-types=k8s.io/api/resource/v1alpha1 +// +k8s:conversion-gen-external-types=k8s.io/api/resource/v1alpha2 // +k8s:defaulter-gen=TypeMeta -// +k8s:defaulter-gen-input=k8s.io/api/resource/v1alpha1 +// +k8s:defaulter-gen-input=k8s.io/api/resource/v1alpha2 -// Package v1alpha1 is the v1alpha1 version of the resource API. -package v1alpha1 // import "k8s.io/kubernetes/pkg/apis/resource/v1alpha1" +// Package v1alpha2 is the v1alpha2 version of the resource API. +package v1alpha2 // import "k8s.io/kubernetes/pkg/apis/resource/v1alpha2" diff --git a/pkg/apis/resource/v1alpha1/register.go b/pkg/apis/resource/v1alpha2/register.go similarity index 92% rename from pkg/apis/resource/v1alpha1/register.go rename to pkg/apis/resource/v1alpha2/register.go index cd225b259dc..403cb75aad7 100644 --- a/pkg/apis/resource/v1alpha1/register.go +++ b/pkg/apis/resource/v1alpha2/register.go @@ -14,15 +14,15 @@ See the License for the specific language governing permissions and limitations under the License. */ -package v1alpha1 +package v1alpha2 import ( - "k8s.io/api/resource/v1alpha1" + "k8s.io/api/resource/v1alpha2" "k8s.io/apimachinery/pkg/runtime/schema" ) var ( - localSchemeBuilder = &v1alpha1.SchemeBuilder + localSchemeBuilder = &v1alpha2.SchemeBuilder AddToScheme = localSchemeBuilder.AddToScheme ) @@ -38,7 +38,7 @@ func init() { const GroupName = "resource.k8s.io" // SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha2"} // Resource takes an unqualified resource and returns a Group qualified GroupResource func Resource(resource string) schema.GroupResource { diff --git a/pkg/apis/resource/v1alpha2/zz_generated.conversion.go b/pkg/apis/resource/v1alpha2/zz_generated.conversion.go new file mode 100644 index 00000000000..27dc11235a9 --- /dev/null +++ b/pkg/apis/resource/v1alpha2/zz_generated.conversion.go @@ -0,0 +1,668 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by conversion-gen. DO NOT EDIT. + +package v1alpha2 + +import ( + unsafe "unsafe" + + v1 "k8s.io/api/core/v1" + v1alpha2 "k8s.io/api/resource/v1alpha2" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + types "k8s.io/apimachinery/pkg/types" + core "k8s.io/kubernetes/pkg/apis/core" + resource "k8s.io/kubernetes/pkg/apis/resource" +) + +func init() { + localSchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. 
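With register.go now pinning SchemeGroupVersion to v1alpha2, codecs built for this group pick up the new version automatically; the updated install test above does exactly this. A condensed sketch of the same round trip:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/kubernetes/pkg/api/legacyscheme"
	"k8s.io/kubernetes/pkg/apis/resource"
	_ "k8s.io/kubernetes/pkg/apis/resource/install"
)

func main() {
	gv := schema.GroupVersion{Group: "resource.k8s.io", Version: "v1alpha2"}
	codec := legacyscheme.Codecs.LegacyCodec(gv)
	data, err := runtime.Encode(codec, &resource.ResourceClaim{})
	if err != nil {
		fmt.Println("encode failed:", err)
		return
	}
	fmt.Println(string(data)) // apiVersion: resource.k8s.io/v1alpha2, kind: ResourceClaim
}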
+func RegisterConversions(s *runtime.Scheme) error { + if err := s.AddGeneratedConversionFunc((*v1alpha2.AllocationResult)(nil), (*resource.AllocationResult)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_AllocationResult_To_resource_AllocationResult(a.(*v1alpha2.AllocationResult), b.(*resource.AllocationResult), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*resource.AllocationResult)(nil), (*v1alpha2.AllocationResult)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_resource_AllocationResult_To_v1alpha2_AllocationResult(a.(*resource.AllocationResult), b.(*v1alpha2.AllocationResult), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha2.PodScheduling)(nil), (*resource.PodScheduling)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_PodScheduling_To_resource_PodScheduling(a.(*v1alpha2.PodScheduling), b.(*resource.PodScheduling), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*resource.PodScheduling)(nil), (*v1alpha2.PodScheduling)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_resource_PodScheduling_To_v1alpha2_PodScheduling(a.(*resource.PodScheduling), b.(*v1alpha2.PodScheduling), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha2.PodSchedulingList)(nil), (*resource.PodSchedulingList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_PodSchedulingList_To_resource_PodSchedulingList(a.(*v1alpha2.PodSchedulingList), b.(*resource.PodSchedulingList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*resource.PodSchedulingList)(nil), (*v1alpha2.PodSchedulingList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_resource_PodSchedulingList_To_v1alpha2_PodSchedulingList(a.(*resource.PodSchedulingList), b.(*v1alpha2.PodSchedulingList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha2.PodSchedulingSpec)(nil), (*resource.PodSchedulingSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_PodSchedulingSpec_To_resource_PodSchedulingSpec(a.(*v1alpha2.PodSchedulingSpec), b.(*resource.PodSchedulingSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*resource.PodSchedulingSpec)(nil), (*v1alpha2.PodSchedulingSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_resource_PodSchedulingSpec_To_v1alpha2_PodSchedulingSpec(a.(*resource.PodSchedulingSpec), b.(*v1alpha2.PodSchedulingSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha2.PodSchedulingStatus)(nil), (*resource.PodSchedulingStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_PodSchedulingStatus_To_resource_PodSchedulingStatus(a.(*v1alpha2.PodSchedulingStatus), b.(*resource.PodSchedulingStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*resource.PodSchedulingStatus)(nil), (*v1alpha2.PodSchedulingStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_resource_PodSchedulingStatus_To_v1alpha2_PodSchedulingStatus(a.(*resource.PodSchedulingStatus), b.(*v1alpha2.PodSchedulingStatus), scope) + }); err != nil { + return err + } + if err := 
s.AddGeneratedConversionFunc((*v1alpha2.ResourceClaim)(nil), (*resource.ResourceClaim)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_ResourceClaim_To_resource_ResourceClaim(a.(*v1alpha2.ResourceClaim), b.(*resource.ResourceClaim), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*resource.ResourceClaim)(nil), (*v1alpha2.ResourceClaim)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_resource_ResourceClaim_To_v1alpha2_ResourceClaim(a.(*resource.ResourceClaim), b.(*v1alpha2.ResourceClaim), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha2.ResourceClaimConsumerReference)(nil), (*resource.ResourceClaimConsumerReference)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_ResourceClaimConsumerReference_To_resource_ResourceClaimConsumerReference(a.(*v1alpha2.ResourceClaimConsumerReference), b.(*resource.ResourceClaimConsumerReference), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*resource.ResourceClaimConsumerReference)(nil), (*v1alpha2.ResourceClaimConsumerReference)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_resource_ResourceClaimConsumerReference_To_v1alpha2_ResourceClaimConsumerReference(a.(*resource.ResourceClaimConsumerReference), b.(*v1alpha2.ResourceClaimConsumerReference), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha2.ResourceClaimList)(nil), (*resource.ResourceClaimList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_ResourceClaimList_To_resource_ResourceClaimList(a.(*v1alpha2.ResourceClaimList), b.(*resource.ResourceClaimList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*resource.ResourceClaimList)(nil), (*v1alpha2.ResourceClaimList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_resource_ResourceClaimList_To_v1alpha2_ResourceClaimList(a.(*resource.ResourceClaimList), b.(*v1alpha2.ResourceClaimList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha2.ResourceClaimParametersReference)(nil), (*resource.ResourceClaimParametersReference)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_ResourceClaimParametersReference_To_resource_ResourceClaimParametersReference(a.(*v1alpha2.ResourceClaimParametersReference), b.(*resource.ResourceClaimParametersReference), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*resource.ResourceClaimParametersReference)(nil), (*v1alpha2.ResourceClaimParametersReference)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_resource_ResourceClaimParametersReference_To_v1alpha2_ResourceClaimParametersReference(a.(*resource.ResourceClaimParametersReference), b.(*v1alpha2.ResourceClaimParametersReference), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha2.ResourceClaimSchedulingStatus)(nil), (*resource.ResourceClaimSchedulingStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_ResourceClaimSchedulingStatus_To_resource_ResourceClaimSchedulingStatus(a.(*v1alpha2.ResourceClaimSchedulingStatus), b.(*resource.ResourceClaimSchedulingStatus), scope) + }); err != nil { + return err + } + if err := 
s.AddGeneratedConversionFunc((*resource.ResourceClaimSchedulingStatus)(nil), (*v1alpha2.ResourceClaimSchedulingStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_resource_ResourceClaimSchedulingStatus_To_v1alpha2_ResourceClaimSchedulingStatus(a.(*resource.ResourceClaimSchedulingStatus), b.(*v1alpha2.ResourceClaimSchedulingStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha2.ResourceClaimSpec)(nil), (*resource.ResourceClaimSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_ResourceClaimSpec_To_resource_ResourceClaimSpec(a.(*v1alpha2.ResourceClaimSpec), b.(*resource.ResourceClaimSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*resource.ResourceClaimSpec)(nil), (*v1alpha2.ResourceClaimSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_resource_ResourceClaimSpec_To_v1alpha2_ResourceClaimSpec(a.(*resource.ResourceClaimSpec), b.(*v1alpha2.ResourceClaimSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha2.ResourceClaimStatus)(nil), (*resource.ResourceClaimStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_ResourceClaimStatus_To_resource_ResourceClaimStatus(a.(*v1alpha2.ResourceClaimStatus), b.(*resource.ResourceClaimStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*resource.ResourceClaimStatus)(nil), (*v1alpha2.ResourceClaimStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_resource_ResourceClaimStatus_To_v1alpha2_ResourceClaimStatus(a.(*resource.ResourceClaimStatus), b.(*v1alpha2.ResourceClaimStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha2.ResourceClaimTemplate)(nil), (*resource.ResourceClaimTemplate)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_ResourceClaimTemplate_To_resource_ResourceClaimTemplate(a.(*v1alpha2.ResourceClaimTemplate), b.(*resource.ResourceClaimTemplate), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*resource.ResourceClaimTemplate)(nil), (*v1alpha2.ResourceClaimTemplate)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_resource_ResourceClaimTemplate_To_v1alpha2_ResourceClaimTemplate(a.(*resource.ResourceClaimTemplate), b.(*v1alpha2.ResourceClaimTemplate), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha2.ResourceClaimTemplateList)(nil), (*resource.ResourceClaimTemplateList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_ResourceClaimTemplateList_To_resource_ResourceClaimTemplateList(a.(*v1alpha2.ResourceClaimTemplateList), b.(*resource.ResourceClaimTemplateList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*resource.ResourceClaimTemplateList)(nil), (*v1alpha2.ResourceClaimTemplateList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_resource_ResourceClaimTemplateList_To_v1alpha2_ResourceClaimTemplateList(a.(*resource.ResourceClaimTemplateList), b.(*v1alpha2.ResourceClaimTemplateList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha2.ResourceClaimTemplateSpec)(nil), (*resource.ResourceClaimTemplateSpec)(nil), func(a, b interface{}, scope 
conversion.Scope) error { + return Convert_v1alpha2_ResourceClaimTemplateSpec_To_resource_ResourceClaimTemplateSpec(a.(*v1alpha2.ResourceClaimTemplateSpec), b.(*resource.ResourceClaimTemplateSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*resource.ResourceClaimTemplateSpec)(nil), (*v1alpha2.ResourceClaimTemplateSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_resource_ResourceClaimTemplateSpec_To_v1alpha2_ResourceClaimTemplateSpec(a.(*resource.ResourceClaimTemplateSpec), b.(*v1alpha2.ResourceClaimTemplateSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha2.ResourceClass)(nil), (*resource.ResourceClass)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_ResourceClass_To_resource_ResourceClass(a.(*v1alpha2.ResourceClass), b.(*resource.ResourceClass), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*resource.ResourceClass)(nil), (*v1alpha2.ResourceClass)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_resource_ResourceClass_To_v1alpha2_ResourceClass(a.(*resource.ResourceClass), b.(*v1alpha2.ResourceClass), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha2.ResourceClassList)(nil), (*resource.ResourceClassList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_ResourceClassList_To_resource_ResourceClassList(a.(*v1alpha2.ResourceClassList), b.(*resource.ResourceClassList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*resource.ResourceClassList)(nil), (*v1alpha2.ResourceClassList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_resource_ResourceClassList_To_v1alpha2_ResourceClassList(a.(*resource.ResourceClassList), b.(*v1alpha2.ResourceClassList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha2.ResourceClassParametersReference)(nil), (*resource.ResourceClassParametersReference)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_ResourceClassParametersReference_To_resource_ResourceClassParametersReference(a.(*v1alpha2.ResourceClassParametersReference), b.(*resource.ResourceClassParametersReference), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*resource.ResourceClassParametersReference)(nil), (*v1alpha2.ResourceClassParametersReference)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_resource_ResourceClassParametersReference_To_v1alpha2_ResourceClassParametersReference(a.(*resource.ResourceClassParametersReference), b.(*v1alpha2.ResourceClassParametersReference), scope) + }); err != nil { + return err + } + return nil +} + +func autoConvert_v1alpha2_AllocationResult_To_resource_AllocationResult(in *v1alpha2.AllocationResult, out *resource.AllocationResult, s conversion.Scope) error { + out.ResourceHandle = in.ResourceHandle + out.AvailableOnNodes = (*core.NodeSelector)(unsafe.Pointer(in.AvailableOnNodes)) + out.Shareable = in.Shareable + return nil +} + +// Convert_v1alpha2_AllocationResult_To_resource_AllocationResult is an autogenerated conversion function. 
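// Illustrative usage sketch -- not part of this patch. It shows how the
// generated RegisterConversions above is normally consumed: the versioned
// package's AddToScheme (assumed to follow the usual conversion-gen layout,
// where an init() hooks RegisterConversions into the local scheme builder)
// registers these functions on a runtime.Scheme, after which Scheme.Convert
// can translate between the external v1alpha2 types and the internal ones.
package main

import (
	resourcev1alpha2api "k8s.io/api/resource/v1alpha2"
	"k8s.io/apimachinery/pkg/runtime"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"

	resourceinternal "k8s.io/kubernetes/pkg/apis/resource"
	resourcev1alpha2 "k8s.io/kubernetes/pkg/apis/resource/v1alpha2"
)

func main() {
	scheme := runtime.NewScheme()
	utilruntime.Must(resourceinternal.AddToScheme(scheme)) // internal types
	utilruntime.Must(resourcev1alpha2.AddToScheme(scheme)) // versioned types, conversions, defaults

	in := &resourcev1alpha2api.ResourceClaim{}
	in.Name = "example-claim"

	out := &resourceinternal.ResourceClaim{}
	// Convert dispatches to the registered Convert_v1alpha2_ResourceClaim_To_resource_ResourceClaim.
	if err := scheme.Convert(in, out, nil); err != nil {
		panic(err)
	}
}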
+func Convert_v1alpha2_AllocationResult_To_resource_AllocationResult(in *v1alpha2.AllocationResult, out *resource.AllocationResult, s conversion.Scope) error { + return autoConvert_v1alpha2_AllocationResult_To_resource_AllocationResult(in, out, s) +} + +func autoConvert_resource_AllocationResult_To_v1alpha2_AllocationResult(in *resource.AllocationResult, out *v1alpha2.AllocationResult, s conversion.Scope) error { + out.ResourceHandle = in.ResourceHandle + out.AvailableOnNodes = (*v1.NodeSelector)(unsafe.Pointer(in.AvailableOnNodes)) + out.Shareable = in.Shareable + return nil +} + +// Convert_resource_AllocationResult_To_v1alpha2_AllocationResult is an autogenerated conversion function. +func Convert_resource_AllocationResult_To_v1alpha2_AllocationResult(in *resource.AllocationResult, out *v1alpha2.AllocationResult, s conversion.Scope) error { + return autoConvert_resource_AllocationResult_To_v1alpha2_AllocationResult(in, out, s) +} + +func autoConvert_v1alpha2_PodScheduling_To_resource_PodScheduling(in *v1alpha2.PodScheduling, out *resource.PodScheduling, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1alpha2_PodSchedulingSpec_To_resource_PodSchedulingSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1alpha2_PodSchedulingStatus_To_resource_PodSchedulingStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1alpha2_PodScheduling_To_resource_PodScheduling is an autogenerated conversion function. +func Convert_v1alpha2_PodScheduling_To_resource_PodScheduling(in *v1alpha2.PodScheduling, out *resource.PodScheduling, s conversion.Scope) error { + return autoConvert_v1alpha2_PodScheduling_To_resource_PodScheduling(in, out, s) +} + +func autoConvert_resource_PodScheduling_To_v1alpha2_PodScheduling(in *resource.PodScheduling, out *v1alpha2.PodScheduling, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_resource_PodSchedulingSpec_To_v1alpha2_PodSchedulingSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_resource_PodSchedulingStatus_To_v1alpha2_PodSchedulingStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_resource_PodScheduling_To_v1alpha2_PodScheduling is an autogenerated conversion function. +func Convert_resource_PodScheduling_To_v1alpha2_PodScheduling(in *resource.PodScheduling, out *v1alpha2.PodScheduling, s conversion.Scope) error { + return autoConvert_resource_PodScheduling_To_v1alpha2_PodScheduling(in, out, s) +} + +func autoConvert_v1alpha2_PodSchedulingList_To_resource_PodSchedulingList(in *v1alpha2.PodSchedulingList, out *resource.PodSchedulingList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]resource.PodScheduling)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1alpha2_PodSchedulingList_To_resource_PodSchedulingList is an autogenerated conversion function. 
+func Convert_v1alpha2_PodSchedulingList_To_resource_PodSchedulingList(in *v1alpha2.PodSchedulingList, out *resource.PodSchedulingList, s conversion.Scope) error { + return autoConvert_v1alpha2_PodSchedulingList_To_resource_PodSchedulingList(in, out, s) +} + +func autoConvert_resource_PodSchedulingList_To_v1alpha2_PodSchedulingList(in *resource.PodSchedulingList, out *v1alpha2.PodSchedulingList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]v1alpha2.PodScheduling)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_resource_PodSchedulingList_To_v1alpha2_PodSchedulingList is an autogenerated conversion function. +func Convert_resource_PodSchedulingList_To_v1alpha2_PodSchedulingList(in *resource.PodSchedulingList, out *v1alpha2.PodSchedulingList, s conversion.Scope) error { + return autoConvert_resource_PodSchedulingList_To_v1alpha2_PodSchedulingList(in, out, s) +} + +func autoConvert_v1alpha2_PodSchedulingSpec_To_resource_PodSchedulingSpec(in *v1alpha2.PodSchedulingSpec, out *resource.PodSchedulingSpec, s conversion.Scope) error { + out.SelectedNode = in.SelectedNode + out.PotentialNodes = *(*[]string)(unsafe.Pointer(&in.PotentialNodes)) + return nil +} + +// Convert_v1alpha2_PodSchedulingSpec_To_resource_PodSchedulingSpec is an autogenerated conversion function. +func Convert_v1alpha2_PodSchedulingSpec_To_resource_PodSchedulingSpec(in *v1alpha2.PodSchedulingSpec, out *resource.PodSchedulingSpec, s conversion.Scope) error { + return autoConvert_v1alpha2_PodSchedulingSpec_To_resource_PodSchedulingSpec(in, out, s) +} + +func autoConvert_resource_PodSchedulingSpec_To_v1alpha2_PodSchedulingSpec(in *resource.PodSchedulingSpec, out *v1alpha2.PodSchedulingSpec, s conversion.Scope) error { + out.SelectedNode = in.SelectedNode + out.PotentialNodes = *(*[]string)(unsafe.Pointer(&in.PotentialNodes)) + return nil +} + +// Convert_resource_PodSchedulingSpec_To_v1alpha2_PodSchedulingSpec is an autogenerated conversion function. +func Convert_resource_PodSchedulingSpec_To_v1alpha2_PodSchedulingSpec(in *resource.PodSchedulingSpec, out *v1alpha2.PodSchedulingSpec, s conversion.Scope) error { + return autoConvert_resource_PodSchedulingSpec_To_v1alpha2_PodSchedulingSpec(in, out, s) +} + +func autoConvert_v1alpha2_PodSchedulingStatus_To_resource_PodSchedulingStatus(in *v1alpha2.PodSchedulingStatus, out *resource.PodSchedulingStatus, s conversion.Scope) error { + out.ResourceClaims = *(*[]resource.ResourceClaimSchedulingStatus)(unsafe.Pointer(&in.ResourceClaims)) + return nil +} + +// Convert_v1alpha2_PodSchedulingStatus_To_resource_PodSchedulingStatus is an autogenerated conversion function. +func Convert_v1alpha2_PodSchedulingStatus_To_resource_PodSchedulingStatus(in *v1alpha2.PodSchedulingStatus, out *resource.PodSchedulingStatus, s conversion.Scope) error { + return autoConvert_v1alpha2_PodSchedulingStatus_To_resource_PodSchedulingStatus(in, out, s) +} + +func autoConvert_resource_PodSchedulingStatus_To_v1alpha2_PodSchedulingStatus(in *resource.PodSchedulingStatus, out *v1alpha2.PodSchedulingStatus, s conversion.Scope) error { + out.ResourceClaims = *(*[]v1alpha2.ResourceClaimSchedulingStatus)(unsafe.Pointer(&in.ResourceClaims)) + return nil +} + +// Convert_resource_PodSchedulingStatus_To_v1alpha2_PodSchedulingStatus is an autogenerated conversion function. 
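// Aside, not part of this patch: conversion-gen emits the
// *(*[]resource.T)(unsafe.Pointer(&in.Items)) casts seen above only when it
// has proven that the source and destination element types share an identical
// memory layout, so the whole slice can be reinterpreted without copying.
// The toy types below are stand-ins that demonstrate the mechanism.
package main

import (
	"fmt"
	"unsafe"
)

type externalItem struct {
	Name            string
	UnsuitableNodes []string
}

type internalItem struct {
	Name            string
	UnsuitableNodes []string
}

func main() {
	in := []externalItem{{Name: "claim-0", UnsuitableNodes: []string{"node-1"}}}
	// Reinterpret the backing array in place; no per-element copy happens.
	out := *(*[]internalItem)(unsafe.Pointer(&in))
	fmt.Println(out[0].Name, out[0].UnsuitableNodes)
}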
+func Convert_resource_PodSchedulingStatus_To_v1alpha2_PodSchedulingStatus(in *resource.PodSchedulingStatus, out *v1alpha2.PodSchedulingStatus, s conversion.Scope) error { + return autoConvert_resource_PodSchedulingStatus_To_v1alpha2_PodSchedulingStatus(in, out, s) +} + +func autoConvert_v1alpha2_ResourceClaim_To_resource_ResourceClaim(in *v1alpha2.ResourceClaim, out *resource.ResourceClaim, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1alpha2_ResourceClaimSpec_To_resource_ResourceClaimSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1alpha2_ResourceClaimStatus_To_resource_ResourceClaimStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1alpha2_ResourceClaim_To_resource_ResourceClaim is an autogenerated conversion function. +func Convert_v1alpha2_ResourceClaim_To_resource_ResourceClaim(in *v1alpha2.ResourceClaim, out *resource.ResourceClaim, s conversion.Scope) error { + return autoConvert_v1alpha2_ResourceClaim_To_resource_ResourceClaim(in, out, s) +} + +func autoConvert_resource_ResourceClaim_To_v1alpha2_ResourceClaim(in *resource.ResourceClaim, out *v1alpha2.ResourceClaim, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_resource_ResourceClaimSpec_To_v1alpha2_ResourceClaimSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_resource_ResourceClaimStatus_To_v1alpha2_ResourceClaimStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_resource_ResourceClaim_To_v1alpha2_ResourceClaim is an autogenerated conversion function. +func Convert_resource_ResourceClaim_To_v1alpha2_ResourceClaim(in *resource.ResourceClaim, out *v1alpha2.ResourceClaim, s conversion.Scope) error { + return autoConvert_resource_ResourceClaim_To_v1alpha2_ResourceClaim(in, out, s) +} + +func autoConvert_v1alpha2_ResourceClaimConsumerReference_To_resource_ResourceClaimConsumerReference(in *v1alpha2.ResourceClaimConsumerReference, out *resource.ResourceClaimConsumerReference, s conversion.Scope) error { + out.APIGroup = in.APIGroup + out.Resource = in.Resource + out.Name = in.Name + out.UID = types.UID(in.UID) + return nil +} + +// Convert_v1alpha2_ResourceClaimConsumerReference_To_resource_ResourceClaimConsumerReference is an autogenerated conversion function. +func Convert_v1alpha2_ResourceClaimConsumerReference_To_resource_ResourceClaimConsumerReference(in *v1alpha2.ResourceClaimConsumerReference, out *resource.ResourceClaimConsumerReference, s conversion.Scope) error { + return autoConvert_v1alpha2_ResourceClaimConsumerReference_To_resource_ResourceClaimConsumerReference(in, out, s) +} + +func autoConvert_resource_ResourceClaimConsumerReference_To_v1alpha2_ResourceClaimConsumerReference(in *resource.ResourceClaimConsumerReference, out *v1alpha2.ResourceClaimConsumerReference, s conversion.Scope) error { + out.APIGroup = in.APIGroup + out.Resource = in.Resource + out.Name = in.Name + out.UID = types.UID(in.UID) + return nil +} + +// Convert_resource_ResourceClaimConsumerReference_To_v1alpha2_ResourceClaimConsumerReference is an autogenerated conversion function. 
+func Convert_resource_ResourceClaimConsumerReference_To_v1alpha2_ResourceClaimConsumerReference(in *resource.ResourceClaimConsumerReference, out *v1alpha2.ResourceClaimConsumerReference, s conversion.Scope) error { + return autoConvert_resource_ResourceClaimConsumerReference_To_v1alpha2_ResourceClaimConsumerReference(in, out, s) +} + +func autoConvert_v1alpha2_ResourceClaimList_To_resource_ResourceClaimList(in *v1alpha2.ResourceClaimList, out *resource.ResourceClaimList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]resource.ResourceClaim)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1alpha2_ResourceClaimList_To_resource_ResourceClaimList is an autogenerated conversion function. +func Convert_v1alpha2_ResourceClaimList_To_resource_ResourceClaimList(in *v1alpha2.ResourceClaimList, out *resource.ResourceClaimList, s conversion.Scope) error { + return autoConvert_v1alpha2_ResourceClaimList_To_resource_ResourceClaimList(in, out, s) +} + +func autoConvert_resource_ResourceClaimList_To_v1alpha2_ResourceClaimList(in *resource.ResourceClaimList, out *v1alpha2.ResourceClaimList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]v1alpha2.ResourceClaim)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_resource_ResourceClaimList_To_v1alpha2_ResourceClaimList is an autogenerated conversion function. +func Convert_resource_ResourceClaimList_To_v1alpha2_ResourceClaimList(in *resource.ResourceClaimList, out *v1alpha2.ResourceClaimList, s conversion.Scope) error { + return autoConvert_resource_ResourceClaimList_To_v1alpha2_ResourceClaimList(in, out, s) +} + +func autoConvert_v1alpha2_ResourceClaimParametersReference_To_resource_ResourceClaimParametersReference(in *v1alpha2.ResourceClaimParametersReference, out *resource.ResourceClaimParametersReference, s conversion.Scope) error { + out.APIGroup = in.APIGroup + out.Kind = in.Kind + out.Name = in.Name + return nil +} + +// Convert_v1alpha2_ResourceClaimParametersReference_To_resource_ResourceClaimParametersReference is an autogenerated conversion function. +func Convert_v1alpha2_ResourceClaimParametersReference_To_resource_ResourceClaimParametersReference(in *v1alpha2.ResourceClaimParametersReference, out *resource.ResourceClaimParametersReference, s conversion.Scope) error { + return autoConvert_v1alpha2_ResourceClaimParametersReference_To_resource_ResourceClaimParametersReference(in, out, s) +} + +func autoConvert_resource_ResourceClaimParametersReference_To_v1alpha2_ResourceClaimParametersReference(in *resource.ResourceClaimParametersReference, out *v1alpha2.ResourceClaimParametersReference, s conversion.Scope) error { + out.APIGroup = in.APIGroup + out.Kind = in.Kind + out.Name = in.Name + return nil +} + +// Convert_resource_ResourceClaimParametersReference_To_v1alpha2_ResourceClaimParametersReference is an autogenerated conversion function. 
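// Aside, not part of this patch: each exported Convert_* function above is a
// thin wrapper over its autoConvert_* counterpart. The usual conversion-gen
// convention when a field needs manual handling is to hand-write the exported
// wrapper while still reusing the generated body. Self-contained toy sketch
// (the Widget types and the lowercasing fix-up are invented for illustration):
package main

import (
	"fmt"
	"strings"

	"k8s.io/apimachinery/pkg/conversion"
)

type externalWidget struct{ Mode string }
type internalWidget struct{ Mode string }

// What a generator would emit: copy every field it can map directly.
func autoConvert_external_Widget_To_internal_Widget(in *externalWidget, out *internalWidget, s conversion.Scope) error {
	out.Mode = in.Mode
	return nil
}

// Hand-written wrapper layering a fix-up on top of the generated body.
func Convert_external_Widget_To_internal_Widget(in *externalWidget, out *internalWidget, s conversion.Scope) error {
	if err := autoConvert_external_Widget_To_internal_Widget(in, out, s); err != nil {
		return err
	}
	out.Mode = strings.ToLower(out.Mode) // normalization the generator cannot infer
	return nil
}

func main() {
	out := internalWidget{}
	_ = Convert_external_Widget_To_internal_Widget(&externalWidget{Mode: "WaitForFirstConsumer"}, &out, nil)
	fmt.Println(out.Mode)
}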
+func Convert_resource_ResourceClaimParametersReference_To_v1alpha2_ResourceClaimParametersReference(in *resource.ResourceClaimParametersReference, out *v1alpha2.ResourceClaimParametersReference, s conversion.Scope) error { + return autoConvert_resource_ResourceClaimParametersReference_To_v1alpha2_ResourceClaimParametersReference(in, out, s) +} + +func autoConvert_v1alpha2_ResourceClaimSchedulingStatus_To_resource_ResourceClaimSchedulingStatus(in *v1alpha2.ResourceClaimSchedulingStatus, out *resource.ResourceClaimSchedulingStatus, s conversion.Scope) error { + out.Name = in.Name + out.UnsuitableNodes = *(*[]string)(unsafe.Pointer(&in.UnsuitableNodes)) + return nil +} + +// Convert_v1alpha2_ResourceClaimSchedulingStatus_To_resource_ResourceClaimSchedulingStatus is an autogenerated conversion function. +func Convert_v1alpha2_ResourceClaimSchedulingStatus_To_resource_ResourceClaimSchedulingStatus(in *v1alpha2.ResourceClaimSchedulingStatus, out *resource.ResourceClaimSchedulingStatus, s conversion.Scope) error { + return autoConvert_v1alpha2_ResourceClaimSchedulingStatus_To_resource_ResourceClaimSchedulingStatus(in, out, s) +} + +func autoConvert_resource_ResourceClaimSchedulingStatus_To_v1alpha2_ResourceClaimSchedulingStatus(in *resource.ResourceClaimSchedulingStatus, out *v1alpha2.ResourceClaimSchedulingStatus, s conversion.Scope) error { + out.Name = in.Name + out.UnsuitableNodes = *(*[]string)(unsafe.Pointer(&in.UnsuitableNodes)) + return nil +} + +// Convert_resource_ResourceClaimSchedulingStatus_To_v1alpha2_ResourceClaimSchedulingStatus is an autogenerated conversion function. +func Convert_resource_ResourceClaimSchedulingStatus_To_v1alpha2_ResourceClaimSchedulingStatus(in *resource.ResourceClaimSchedulingStatus, out *v1alpha2.ResourceClaimSchedulingStatus, s conversion.Scope) error { + return autoConvert_resource_ResourceClaimSchedulingStatus_To_v1alpha2_ResourceClaimSchedulingStatus(in, out, s) +} + +func autoConvert_v1alpha2_ResourceClaimSpec_To_resource_ResourceClaimSpec(in *v1alpha2.ResourceClaimSpec, out *resource.ResourceClaimSpec, s conversion.Scope) error { + out.ResourceClassName = in.ResourceClassName + out.ParametersRef = (*resource.ResourceClaimParametersReference)(unsafe.Pointer(in.ParametersRef)) + out.AllocationMode = resource.AllocationMode(in.AllocationMode) + return nil +} + +// Convert_v1alpha2_ResourceClaimSpec_To_resource_ResourceClaimSpec is an autogenerated conversion function. +func Convert_v1alpha2_ResourceClaimSpec_To_resource_ResourceClaimSpec(in *v1alpha2.ResourceClaimSpec, out *resource.ResourceClaimSpec, s conversion.Scope) error { + return autoConvert_v1alpha2_ResourceClaimSpec_To_resource_ResourceClaimSpec(in, out, s) +} + +func autoConvert_resource_ResourceClaimSpec_To_v1alpha2_ResourceClaimSpec(in *resource.ResourceClaimSpec, out *v1alpha2.ResourceClaimSpec, s conversion.Scope) error { + out.ResourceClassName = in.ResourceClassName + out.ParametersRef = (*v1alpha2.ResourceClaimParametersReference)(unsafe.Pointer(in.ParametersRef)) + out.AllocationMode = v1alpha2.AllocationMode(in.AllocationMode) + return nil +} + +// Convert_resource_ResourceClaimSpec_To_v1alpha2_ResourceClaimSpec is an autogenerated conversion function. 
+func Convert_resource_ResourceClaimSpec_To_v1alpha2_ResourceClaimSpec(in *resource.ResourceClaimSpec, out *v1alpha2.ResourceClaimSpec, s conversion.Scope) error { + return autoConvert_resource_ResourceClaimSpec_To_v1alpha2_ResourceClaimSpec(in, out, s) +} + +func autoConvert_v1alpha2_ResourceClaimStatus_To_resource_ResourceClaimStatus(in *v1alpha2.ResourceClaimStatus, out *resource.ResourceClaimStatus, s conversion.Scope) error { + out.DriverName = in.DriverName + out.Allocation = (*resource.AllocationResult)(unsafe.Pointer(in.Allocation)) + out.ReservedFor = *(*[]resource.ResourceClaimConsumerReference)(unsafe.Pointer(&in.ReservedFor)) + out.DeallocationRequested = in.DeallocationRequested + return nil +} + +// Convert_v1alpha2_ResourceClaimStatus_To_resource_ResourceClaimStatus is an autogenerated conversion function. +func Convert_v1alpha2_ResourceClaimStatus_To_resource_ResourceClaimStatus(in *v1alpha2.ResourceClaimStatus, out *resource.ResourceClaimStatus, s conversion.Scope) error { + return autoConvert_v1alpha2_ResourceClaimStatus_To_resource_ResourceClaimStatus(in, out, s) +} + +func autoConvert_resource_ResourceClaimStatus_To_v1alpha2_ResourceClaimStatus(in *resource.ResourceClaimStatus, out *v1alpha2.ResourceClaimStatus, s conversion.Scope) error { + out.DriverName = in.DriverName + out.Allocation = (*v1alpha2.AllocationResult)(unsafe.Pointer(in.Allocation)) + out.ReservedFor = *(*[]v1alpha2.ResourceClaimConsumerReference)(unsafe.Pointer(&in.ReservedFor)) + out.DeallocationRequested = in.DeallocationRequested + return nil +} + +// Convert_resource_ResourceClaimStatus_To_v1alpha2_ResourceClaimStatus is an autogenerated conversion function. +func Convert_resource_ResourceClaimStatus_To_v1alpha2_ResourceClaimStatus(in *resource.ResourceClaimStatus, out *v1alpha2.ResourceClaimStatus, s conversion.Scope) error { + return autoConvert_resource_ResourceClaimStatus_To_v1alpha2_ResourceClaimStatus(in, out, s) +} + +func autoConvert_v1alpha2_ResourceClaimTemplate_To_resource_ResourceClaimTemplate(in *v1alpha2.ResourceClaimTemplate, out *resource.ResourceClaimTemplate, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1alpha2_ResourceClaimTemplateSpec_To_resource_ResourceClaimTemplateSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + return nil +} + +// Convert_v1alpha2_ResourceClaimTemplate_To_resource_ResourceClaimTemplate is an autogenerated conversion function. +func Convert_v1alpha2_ResourceClaimTemplate_To_resource_ResourceClaimTemplate(in *v1alpha2.ResourceClaimTemplate, out *resource.ResourceClaimTemplate, s conversion.Scope) error { + return autoConvert_v1alpha2_ResourceClaimTemplate_To_resource_ResourceClaimTemplate(in, out, s) +} + +func autoConvert_resource_ResourceClaimTemplate_To_v1alpha2_ResourceClaimTemplate(in *resource.ResourceClaimTemplate, out *v1alpha2.ResourceClaimTemplate, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_resource_ResourceClaimTemplateSpec_To_v1alpha2_ResourceClaimTemplateSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + return nil +} + +// Convert_resource_ResourceClaimTemplate_To_v1alpha2_ResourceClaimTemplate is an autogenerated conversion function. 
+func Convert_resource_ResourceClaimTemplate_To_v1alpha2_ResourceClaimTemplate(in *resource.ResourceClaimTemplate, out *v1alpha2.ResourceClaimTemplate, s conversion.Scope) error { + return autoConvert_resource_ResourceClaimTemplate_To_v1alpha2_ResourceClaimTemplate(in, out, s) +} + +func autoConvert_v1alpha2_ResourceClaimTemplateList_To_resource_ResourceClaimTemplateList(in *v1alpha2.ResourceClaimTemplateList, out *resource.ResourceClaimTemplateList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]resource.ResourceClaimTemplate)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1alpha2_ResourceClaimTemplateList_To_resource_ResourceClaimTemplateList is an autogenerated conversion function. +func Convert_v1alpha2_ResourceClaimTemplateList_To_resource_ResourceClaimTemplateList(in *v1alpha2.ResourceClaimTemplateList, out *resource.ResourceClaimTemplateList, s conversion.Scope) error { + return autoConvert_v1alpha2_ResourceClaimTemplateList_To_resource_ResourceClaimTemplateList(in, out, s) +} + +func autoConvert_resource_ResourceClaimTemplateList_To_v1alpha2_ResourceClaimTemplateList(in *resource.ResourceClaimTemplateList, out *v1alpha2.ResourceClaimTemplateList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]v1alpha2.ResourceClaimTemplate)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_resource_ResourceClaimTemplateList_To_v1alpha2_ResourceClaimTemplateList is an autogenerated conversion function. +func Convert_resource_ResourceClaimTemplateList_To_v1alpha2_ResourceClaimTemplateList(in *resource.ResourceClaimTemplateList, out *v1alpha2.ResourceClaimTemplateList, s conversion.Scope) error { + return autoConvert_resource_ResourceClaimTemplateList_To_v1alpha2_ResourceClaimTemplateList(in, out, s) +} + +func autoConvert_v1alpha2_ResourceClaimTemplateSpec_To_resource_ResourceClaimTemplateSpec(in *v1alpha2.ResourceClaimTemplateSpec, out *resource.ResourceClaimTemplateSpec, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1alpha2_ResourceClaimSpec_To_resource_ResourceClaimSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + return nil +} + +// Convert_v1alpha2_ResourceClaimTemplateSpec_To_resource_ResourceClaimTemplateSpec is an autogenerated conversion function. +func Convert_v1alpha2_ResourceClaimTemplateSpec_To_resource_ResourceClaimTemplateSpec(in *v1alpha2.ResourceClaimTemplateSpec, out *resource.ResourceClaimTemplateSpec, s conversion.Scope) error { + return autoConvert_v1alpha2_ResourceClaimTemplateSpec_To_resource_ResourceClaimTemplateSpec(in, out, s) +} + +func autoConvert_resource_ResourceClaimTemplateSpec_To_v1alpha2_ResourceClaimTemplateSpec(in *resource.ResourceClaimTemplateSpec, out *v1alpha2.ResourceClaimTemplateSpec, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_resource_ResourceClaimSpec_To_v1alpha2_ResourceClaimSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + return nil +} + +// Convert_resource_ResourceClaimTemplateSpec_To_v1alpha2_ResourceClaimTemplateSpec is an autogenerated conversion function. 
+func Convert_resource_ResourceClaimTemplateSpec_To_v1alpha2_ResourceClaimTemplateSpec(in *resource.ResourceClaimTemplateSpec, out *v1alpha2.ResourceClaimTemplateSpec, s conversion.Scope) error { + return autoConvert_resource_ResourceClaimTemplateSpec_To_v1alpha2_ResourceClaimTemplateSpec(in, out, s) +} + +func autoConvert_v1alpha2_ResourceClass_To_resource_ResourceClass(in *v1alpha2.ResourceClass, out *resource.ResourceClass, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.DriverName = in.DriverName + out.ParametersRef = (*resource.ResourceClassParametersReference)(unsafe.Pointer(in.ParametersRef)) + out.SuitableNodes = (*core.NodeSelector)(unsafe.Pointer(in.SuitableNodes)) + return nil +} + +// Convert_v1alpha2_ResourceClass_To_resource_ResourceClass is an autogenerated conversion function. +func Convert_v1alpha2_ResourceClass_To_resource_ResourceClass(in *v1alpha2.ResourceClass, out *resource.ResourceClass, s conversion.Scope) error { + return autoConvert_v1alpha2_ResourceClass_To_resource_ResourceClass(in, out, s) +} + +func autoConvert_resource_ResourceClass_To_v1alpha2_ResourceClass(in *resource.ResourceClass, out *v1alpha2.ResourceClass, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.DriverName = in.DriverName + out.ParametersRef = (*v1alpha2.ResourceClassParametersReference)(unsafe.Pointer(in.ParametersRef)) + out.SuitableNodes = (*v1.NodeSelector)(unsafe.Pointer(in.SuitableNodes)) + return nil +} + +// Convert_resource_ResourceClass_To_v1alpha2_ResourceClass is an autogenerated conversion function. +func Convert_resource_ResourceClass_To_v1alpha2_ResourceClass(in *resource.ResourceClass, out *v1alpha2.ResourceClass, s conversion.Scope) error { + return autoConvert_resource_ResourceClass_To_v1alpha2_ResourceClass(in, out, s) +} + +func autoConvert_v1alpha2_ResourceClassList_To_resource_ResourceClassList(in *v1alpha2.ResourceClassList, out *resource.ResourceClassList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]resource.ResourceClass)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1alpha2_ResourceClassList_To_resource_ResourceClassList is an autogenerated conversion function. +func Convert_v1alpha2_ResourceClassList_To_resource_ResourceClassList(in *v1alpha2.ResourceClassList, out *resource.ResourceClassList, s conversion.Scope) error { + return autoConvert_v1alpha2_ResourceClassList_To_resource_ResourceClassList(in, out, s) +} + +func autoConvert_resource_ResourceClassList_To_v1alpha2_ResourceClassList(in *resource.ResourceClassList, out *v1alpha2.ResourceClassList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]v1alpha2.ResourceClass)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_resource_ResourceClassList_To_v1alpha2_ResourceClassList is an autogenerated conversion function. 
+func Convert_resource_ResourceClassList_To_v1alpha2_ResourceClassList(in *resource.ResourceClassList, out *v1alpha2.ResourceClassList, s conversion.Scope) error { + return autoConvert_resource_ResourceClassList_To_v1alpha2_ResourceClassList(in, out, s) +} + +func autoConvert_v1alpha2_ResourceClassParametersReference_To_resource_ResourceClassParametersReference(in *v1alpha2.ResourceClassParametersReference, out *resource.ResourceClassParametersReference, s conversion.Scope) error { + out.APIGroup = in.APIGroup + out.Kind = in.Kind + out.Name = in.Name + out.Namespace = in.Namespace + return nil +} + +// Convert_v1alpha2_ResourceClassParametersReference_To_resource_ResourceClassParametersReference is an autogenerated conversion function. +func Convert_v1alpha2_ResourceClassParametersReference_To_resource_ResourceClassParametersReference(in *v1alpha2.ResourceClassParametersReference, out *resource.ResourceClassParametersReference, s conversion.Scope) error { + return autoConvert_v1alpha2_ResourceClassParametersReference_To_resource_ResourceClassParametersReference(in, out, s) +} + +func autoConvert_resource_ResourceClassParametersReference_To_v1alpha2_ResourceClassParametersReference(in *resource.ResourceClassParametersReference, out *v1alpha2.ResourceClassParametersReference, s conversion.Scope) error { + out.APIGroup = in.APIGroup + out.Kind = in.Kind + out.Name = in.Name + out.Namespace = in.Namespace + return nil +} + +// Convert_resource_ResourceClassParametersReference_To_v1alpha2_ResourceClassParametersReference is an autogenerated conversion function. +func Convert_resource_ResourceClassParametersReference_To_v1alpha2_ResourceClassParametersReference(in *resource.ResourceClassParametersReference, out *v1alpha2.ResourceClassParametersReference, s conversion.Scope) error { + return autoConvert_resource_ResourceClassParametersReference_To_v1alpha2_ResourceClassParametersReference(in, out, s) +} diff --git a/pkg/apis/resource/v1alpha1/zz_generated.defaults.go b/pkg/apis/resource/v1alpha2/zz_generated.defaults.go similarity index 66% rename from pkg/apis/resource/v1alpha1/zz_generated.defaults.go rename to pkg/apis/resource/v1alpha2/zz_generated.defaults.go index e93e4a988a9..bcf5e1aa5bb 100644 --- a/pkg/apis/resource/v1alpha1/zz_generated.defaults.go +++ b/pkg/apis/resource/v1alpha2/zz_generated.defaults.go @@ -19,10 +19,10 @@ limitations under the License. // Code generated by defaulter-gen. DO NOT EDIT. -package v1alpha1 +package v1alpha2 import ( - v1alpha1 "k8s.io/api/resource/v1alpha1" + v1alpha2 "k8s.io/api/resource/v1alpha2" runtime "k8s.io/apimachinery/pkg/runtime" ) @@ -30,31 +30,31 @@ import ( // Public to allow building arbitrary schemes. // All generated defaulters are covering - they call all nested defaulters. 
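// Illustrative usage sketch -- not part of this patch -- for the
// RegisterDefaults and SetObjectDefaults_* functions that follow. Defaulting
// is applied either directly or through a runtime.Scheme; the comment about
// AllocationMode reflects what SetDefaults_ResourceClaimSpec is expected to
// fill in and is an assumption, not something shown in this diff.
package main

import (
	"fmt"

	v1alpha2 "k8s.io/api/resource/v1alpha2"
	"k8s.io/apimachinery/pkg/runtime"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"

	resourcev1alpha2 "k8s.io/kubernetes/pkg/apis/resource/v1alpha2"
)

func main() {
	scheme := runtime.NewScheme()
	// AddToScheme is assumed to wire in RegisterDefaults (and RegisterConversions) for the group.
	utilruntime.Must(resourcev1alpha2.AddToScheme(scheme))

	claim := &v1alpha2.ResourceClaim{}
	claim.Spec.ResourceClassName = "example-class"

	scheme.Default(claim)                   // calls SetObjectDefaults_ResourceClaim
	fmt.Println(claim.Spec.AllocationMode)  // e.g. WaitForFirstConsumer (assumed default)
}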
func RegisterDefaults(scheme *runtime.Scheme) error { - scheme.AddTypeDefaultingFunc(&v1alpha1.ResourceClaim{}, func(obj interface{}) { SetObjectDefaults_ResourceClaim(obj.(*v1alpha1.ResourceClaim)) }) - scheme.AddTypeDefaultingFunc(&v1alpha1.ResourceClaimList{}, func(obj interface{}) { SetObjectDefaults_ResourceClaimList(obj.(*v1alpha1.ResourceClaimList)) }) - scheme.AddTypeDefaultingFunc(&v1alpha1.ResourceClaimTemplate{}, func(obj interface{}) { SetObjectDefaults_ResourceClaimTemplate(obj.(*v1alpha1.ResourceClaimTemplate)) }) - scheme.AddTypeDefaultingFunc(&v1alpha1.ResourceClaimTemplateList{}, func(obj interface{}) { - SetObjectDefaults_ResourceClaimTemplateList(obj.(*v1alpha1.ResourceClaimTemplateList)) + scheme.AddTypeDefaultingFunc(&v1alpha2.ResourceClaim{}, func(obj interface{}) { SetObjectDefaults_ResourceClaim(obj.(*v1alpha2.ResourceClaim)) }) + scheme.AddTypeDefaultingFunc(&v1alpha2.ResourceClaimList{}, func(obj interface{}) { SetObjectDefaults_ResourceClaimList(obj.(*v1alpha2.ResourceClaimList)) }) + scheme.AddTypeDefaultingFunc(&v1alpha2.ResourceClaimTemplate{}, func(obj interface{}) { SetObjectDefaults_ResourceClaimTemplate(obj.(*v1alpha2.ResourceClaimTemplate)) }) + scheme.AddTypeDefaultingFunc(&v1alpha2.ResourceClaimTemplateList{}, func(obj interface{}) { + SetObjectDefaults_ResourceClaimTemplateList(obj.(*v1alpha2.ResourceClaimTemplateList)) }) return nil } -func SetObjectDefaults_ResourceClaim(in *v1alpha1.ResourceClaim) { +func SetObjectDefaults_ResourceClaim(in *v1alpha2.ResourceClaim) { SetDefaults_ResourceClaimSpec(&in.Spec) } -func SetObjectDefaults_ResourceClaimList(in *v1alpha1.ResourceClaimList) { +func SetObjectDefaults_ResourceClaimList(in *v1alpha2.ResourceClaimList) { for i := range in.Items { a := &in.Items[i] SetObjectDefaults_ResourceClaim(a) } } -func SetObjectDefaults_ResourceClaimTemplate(in *v1alpha1.ResourceClaimTemplate) { +func SetObjectDefaults_ResourceClaimTemplate(in *v1alpha2.ResourceClaimTemplate) { SetDefaults_ResourceClaimSpec(&in.Spec.Spec) } -func SetObjectDefaults_ResourceClaimTemplateList(in *v1alpha1.ResourceClaimTemplateList) { +func SetObjectDefaults_ResourceClaimTemplateList(in *v1alpha2.ResourceClaimTemplateList) { for i := range in.Items { a := &in.Items[i] SetObjectDefaults_ResourceClaimTemplate(a) diff --git a/pkg/controller/resourceclaim/controller.go b/pkg/controller/resourceclaim/controller.go index ee2321051b0..811e2dfa63e 100644 --- a/pkg/controller/resourceclaim/controller.go +++ b/pkg/controller/resourceclaim/controller.go @@ -23,18 +23,18 @@ import ( "time" v1 "k8s.io/api/core/v1" - resourcev1alpha1 "k8s.io/api/resource/v1alpha1" + resourcev1alpha2 "k8s.io/api/resource/v1alpha2" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" v1informers "k8s.io/client-go/informers/core/v1" - resourcev1alpha1informers "k8s.io/client-go/informers/resource/v1alpha1" + resourcev1alpha2informers "k8s.io/client-go/informers/resource/v1alpha2" clientset "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/scheme" v1core "k8s.io/client-go/kubernetes/typed/core/v1" v1listers "k8s.io/client-go/listers/core/v1" - resourcev1alpha1listers "k8s.io/client-go/listers/resource/v1alpha1" + resourcev1alpha2listers "k8s.io/client-go/listers/resource/v1alpha2" "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/record" "k8s.io/client-go/util/workqueue" @@ -60,7 +60,7 @@ type Controller struct { // claimLister is the shared ResourceClaim lister 
used to fetch and store ResourceClaim // objects from the API server. It is shared with other controllers and // therefore the ResourceClaim objects in its store should be treated as immutable. - claimLister resourcev1alpha1listers.ResourceClaimLister + claimLister resourcev1alpha2listers.ResourceClaimLister claimsSynced cache.InformerSynced // podLister is the shared Pod lister used to fetch Pod @@ -73,7 +73,7 @@ type Controller struct { // fetch template objects from the API server. It is shared with other // controllers and therefore the objects in its store should be treated // as immutable. - templateLister resourcev1alpha1listers.ResourceClaimTemplateLister + templateLister resourcev1alpha2listers.ResourceClaimTemplateLister templatesSynced cache.InformerSynced // podIndexer has the common PodResourceClaim indexer indexer installed To @@ -100,8 +100,8 @@ const ( func NewController( kubeClient clientset.Interface, podInformer v1informers.PodInformer, - claimInformer resourcev1alpha1informers.ResourceClaimInformer, - templateInformer resourcev1alpha1informers.ResourceClaimTemplateInformer) (*Controller, error) { + claimInformer resourcev1alpha2informers.ResourceClaimInformer, + templateInformer resourcev1alpha2informers.ResourceClaimTemplateInformer) (*Controller, error) { ec := &Controller{ kubeClient: kubeClient, @@ -190,7 +190,7 @@ func (ec *Controller) enqueuePod(obj interface{}, deleted bool) { } func (ec *Controller) onResourceClaimAddOrUpdate(obj interface{}) { - claim, ok := obj.(*resourcev1alpha1.ResourceClaim) + claim, ok := obj.(*resourcev1alpha2.ResourceClaim) if !ok { return } @@ -202,7 +202,7 @@ func (ec *Controller) onResourceClaimAddOrUpdate(obj interface{}) { } func (ec *Controller) onResourceClaimDelete(obj interface{}) { - claim, ok := obj.(*resourcev1alpha1.ResourceClaim) + claim, ok := obj.(*resourcev1alpha2.ResourceClaim) if !ok { return } @@ -356,7 +356,7 @@ func (ec *Controller) handleClaim(ctx context.Context, pod *v1.Pod, podClaim v1. // Create the ResourceClaim with pod as owner. isTrue := true - claim = &resourcev1alpha1.ResourceClaim{ + claim = &resourcev1alpha2.ResourceClaim{ ObjectMeta: metav1.ObjectMeta{ Name: claimName, OwnerReferences: []metav1.OwnerReference{ @@ -375,7 +375,7 @@ func (ec *Controller) handleClaim(ctx context.Context, pod *v1.Pod, podClaim v1. Spec: template.Spec.Spec, } metrics.ResourceClaimCreateAttempts.Inc() - _, err = ec.kubeClient.ResourceV1alpha1().ResourceClaims(pod.Namespace).Create(ctx, claim, metav1.CreateOptions{}) + _, err = ec.kubeClient.ResourceV1alpha2().ResourceClaims(pod.Namespace).Create(ctx, claim, metav1.CreateOptions{}) if err != nil { metrics.ResourceClaimCreateFailures.Inc() return fmt.Errorf("create ResourceClaim %s: %v", claimName, err) @@ -396,7 +396,7 @@ func (ec *Controller) syncClaim(ctx context.Context, namespace, name string) err } // Check if the ReservedFor entries are all still valid. 
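// Aside, not part of this patch: a ReservedFor entry that records a pod as the
// consumer of a claim has the shape below (an empty APIGroup means the core
// API group, Resource is the plural name), which is what the surrounding
// validity check in syncClaim relies on. Name and UID values are placeholders.
package main

import (
	"fmt"

	resourcev1alpha2 "k8s.io/api/resource/v1alpha2"
	"k8s.io/apimachinery/pkg/types"
)

func main() {
	ref := resourcev1alpha2.ResourceClaimConsumerReference{
		Resource: "pods",
		Name:     "my-pod",
		UID:      types.UID("3f9a0000-0000-0000-0000-000000000000"), // the pod's metadata.uid
	}
	fmt.Println(ref.Resource, ref.Name, ref.UID)
}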
- valid := make([]resourcev1alpha1.ResourceClaimConsumerReference, 0, len(claim.Status.ReservedFor)) + valid := make([]resourcev1alpha2.ResourceClaimConsumerReference, 0, len(claim.Status.ReservedFor)) for _, reservedFor := range claim.Status.ReservedFor { if reservedFor.APIGroup == "" && reservedFor.Resource == "pods" { @@ -455,7 +455,7 @@ func (ec *Controller) syncClaim(ctx context.Context, namespace, name string) err // TODO (#113700): patch claim := claim.DeepCopy() claim.Status.ReservedFor = valid - _, err := ec.kubeClient.ResourceV1alpha1().ResourceClaims(claim.Namespace).UpdateStatus(ctx, claim, metav1.UpdateOptions{}) + _, err := ec.kubeClient.ResourceV1alpha2().ResourceClaims(claim.Namespace).UpdateStatus(ctx, claim, metav1.UpdateOptions{}) if err != nil { return err } diff --git a/pkg/controller/resourceclaim/controller_test.go b/pkg/controller/resourceclaim/controller_test.go index 08adf0c8e5b..ee937fe5f07 100644 --- a/pkg/controller/resourceclaim/controller_test.go +++ b/pkg/controller/resourceclaim/controller_test.go @@ -25,7 +25,7 @@ import ( "github.com/stretchr/testify/assert" v1 "k8s.io/api/core/v1" - resourcev1alpha1 "k8s.io/api/resource/v1alpha1" + resourcev1alpha2 "k8s.io/api/resource/v1alpha2" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ -53,10 +53,10 @@ var ( testPodWithResource = makePod(testPodName, testNamespace, testPodUID, *makePodResourceClaim(podResourceClaimName, templateName)) otherTestPod = makePod(testPodName+"-II", testNamespace, testPodUID+"-II") testClaim = makeClaim(testPodName+"-"+podResourceClaimName, testNamespace, className, makeOwnerReference(testPodWithResource, true)) - testClaimReserved = func() *resourcev1alpha1.ResourceClaim { + testClaimReserved = func() *resourcev1alpha2.ResourceClaim { claim := testClaim.DeepCopy() claim.Status.ReservedFor = append(claim.Status.ReservedFor, - resourcev1alpha1.ResourceClaimConsumerReference{ + resourcev1alpha2.ResourceClaimConsumerReference{ Resource: "pods", Name: testPodWithResource.Name, UID: testPodWithResource.UID, @@ -64,10 +64,10 @@ var ( ) return claim }() - testClaimReservedTwice = func() *resourcev1alpha1.ResourceClaim { + testClaimReservedTwice = func() *resourcev1alpha2.ResourceClaim { claim := testClaimReserved.DeepCopy() claim.Status.ReservedFor = append(claim.Status.ReservedFor, - resourcev1alpha1.ResourceClaimConsumerReference{ + resourcev1alpha2.ResourceClaimConsumerReference{ Resource: "pods", Name: otherTestPod.Name, UID: otherTestPod.UID, @@ -88,20 +88,20 @@ func TestSyncHandler(t *testing.T) { tests := []struct { name string key string - claims []*resourcev1alpha1.ResourceClaim + claims []*resourcev1alpha2.ResourceClaim pods []*v1.Pod podsLater []*v1.Pod - templates []*resourcev1alpha1.ResourceClaimTemplate - expectedClaims []resourcev1alpha1.ResourceClaim + templates []*resourcev1alpha2.ResourceClaimTemplate + expectedClaims []resourcev1alpha2.ResourceClaim expectedError bool expectedMetrics expectedMetrics }{ { name: "create", pods: []*v1.Pod{testPodWithResource}, - templates: []*resourcev1alpha1.ResourceClaimTemplate{template}, + templates: []*resourcev1alpha2.ResourceClaimTemplate{template}, key: podKey(testPodWithResource), - expectedClaims: []resourcev1alpha1.ResourceClaim{*testClaim}, + expectedClaims: []resourcev1alpha2.ResourceClaim{*testClaim}, expectedMetrics: expectedMetrics{1, 0}, }, { @@ -115,8 +115,8 @@ func TestSyncHandler(t *testing.T) { name: "nop", pods: []*v1.Pod{testPodWithResource}, 
key: podKey(testPodWithResource), - claims: []*resourcev1alpha1.ResourceClaim{testClaim}, - expectedClaims: []resourcev1alpha1.ResourceClaim{*testClaim}, + claims: []*resourcev1alpha2.ResourceClaim{testClaim}, + expectedClaims: []resourcev1alpha2.ResourceClaim{*testClaim}, expectedMetrics: expectedMetrics{0, 0}, }, { @@ -141,24 +141,24 @@ func TestSyncHandler(t *testing.T) { { name: "create-with-other-claim", pods: []*v1.Pod{testPodWithResource}, - templates: []*resourcev1alpha1.ResourceClaimTemplate{template}, + templates: []*resourcev1alpha2.ResourceClaimTemplate{template}, key: podKey(testPodWithResource), - claims: []*resourcev1alpha1.ResourceClaim{otherNamespaceClaim}, - expectedClaims: []resourcev1alpha1.ResourceClaim{*otherNamespaceClaim, *testClaim}, + claims: []*resourcev1alpha2.ResourceClaim{otherNamespaceClaim}, + expectedClaims: []resourcev1alpha2.ResourceClaim{*otherNamespaceClaim, *testClaim}, expectedMetrics: expectedMetrics{1, 0}, }, { name: "wrong-claim-owner", pods: []*v1.Pod{testPodWithResource}, key: podKey(testPodWithResource), - claims: []*resourcev1alpha1.ResourceClaim{conflictingClaim}, - expectedClaims: []resourcev1alpha1.ResourceClaim{*conflictingClaim}, + claims: []*resourcev1alpha2.ResourceClaim{conflictingClaim}, + expectedClaims: []resourcev1alpha2.ResourceClaim{*conflictingClaim}, expectedError: true, }, { name: "create-conflict", pods: []*v1.Pod{testPodWithResource}, - templates: []*resourcev1alpha1.ResourceClaimTemplate{template}, + templates: []*resourcev1alpha2.ResourceClaimTemplate{template}, key: podKey(testPodWithResource), expectedMetrics: expectedMetrics{1, 1}, expectedError: true, @@ -167,32 +167,32 @@ func TestSyncHandler(t *testing.T) { name: "stay-reserved-seen", pods: []*v1.Pod{testPodWithResource}, key: claimKey(testClaimReserved), - claims: []*resourcev1alpha1.ResourceClaim{testClaimReserved}, - expectedClaims: []resourcev1alpha1.ResourceClaim{*testClaimReserved}, + claims: []*resourcev1alpha2.ResourceClaim{testClaimReserved}, + expectedClaims: []resourcev1alpha2.ResourceClaim{*testClaimReserved}, expectedMetrics: expectedMetrics{0, 0}, }, { name: "stay-reserved-not-seen", podsLater: []*v1.Pod{testPodWithResource}, key: claimKey(testClaimReserved), - claims: []*resourcev1alpha1.ResourceClaim{testClaimReserved}, - expectedClaims: []resourcev1alpha1.ResourceClaim{*testClaimReserved}, + claims: []*resourcev1alpha2.ResourceClaim{testClaimReserved}, + expectedClaims: []resourcev1alpha2.ResourceClaim{*testClaimReserved}, expectedMetrics: expectedMetrics{0, 0}, }, { name: "clear-reserved", pods: []*v1.Pod{}, key: claimKey(testClaimReserved), - claims: []*resourcev1alpha1.ResourceClaim{testClaimReserved}, - expectedClaims: []resourcev1alpha1.ResourceClaim{*testClaim}, + claims: []*resourcev1alpha2.ResourceClaim{testClaimReserved}, + expectedClaims: []resourcev1alpha2.ResourceClaim{*testClaim}, expectedMetrics: expectedMetrics{0, 0}, }, { name: "remove-reserved", pods: []*v1.Pod{testPod}, key: claimKey(testClaimReservedTwice), - claims: []*resourcev1alpha1.ResourceClaim{testClaimReservedTwice}, - expectedClaims: []resourcev1alpha1.ResourceClaim{*testClaimReserved}, + claims: []*resourcev1alpha2.ResourceClaim{testClaimReservedTwice}, + expectedClaims: []resourcev1alpha2.ResourceClaim{*testClaimReserved}, expectedMetrics: expectedMetrics{0, 0}, }, } @@ -223,8 +223,8 @@ func TestSyncHandler(t *testing.T) { setupMetrics() informerFactory := informers.NewSharedInformerFactory(fakeKubeClient, controller.NoResyncPeriodFunc()) podInformer := 
informerFactory.Core().V1().Pods() - claimInformer := informerFactory.Resource().V1alpha1().ResourceClaims() - templateInformer := informerFactory.Resource().V1alpha1().ResourceClaimTemplates() + claimInformer := informerFactory.Resource().V1alpha2().ResourceClaims() + templateInformer := informerFactory.Resource().V1alpha2().ResourceClaimTemplates() ec, err := NewController(fakeKubeClient, podInformer, claimInformer, templateInformer) if err != nil { @@ -258,7 +258,7 @@ func TestSyncHandler(t *testing.T) { t.Fatalf("unexpected success") } - claims, err := fakeKubeClient.ResourceV1alpha1().ResourceClaims("").List(ctx, metav1.ListOptions{}) + claims, err := fakeKubeClient.ResourceV1alpha2().ResourceClaims("").List(ctx, metav1.ListOptions{}) if err != nil { t.Fatalf("unexpected error while listing claims: %v", err) } @@ -268,10 +268,10 @@ func TestSyncHandler(t *testing.T) { } } -func makeClaim(name, namespace, classname string, owner *metav1.OwnerReference) *resourcev1alpha1.ResourceClaim { - claim := &resourcev1alpha1.ResourceClaim{ +func makeClaim(name, namespace, classname string, owner *metav1.OwnerReference) *resourcev1alpha2.ResourceClaim { + claim := &resourcev1alpha2.ResourceClaim{ ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace}, - Spec: resourcev1alpha1.ResourceClaimSpec{ + Spec: resourcev1alpha2.ResourceClaimSpec{ ResourceClassName: classname, }, } @@ -302,11 +302,11 @@ func makePod(name, namespace string, uid types.UID, podClaims ...v1.PodResourceC return pod } -func makeTemplate(name, namespace, classname string) *resourcev1alpha1.ResourceClaimTemplate { - template := &resourcev1alpha1.ResourceClaimTemplate{ +func makeTemplate(name, namespace, classname string) *resourcev1alpha2.ResourceClaimTemplate { + template := &resourcev1alpha2.ResourceClaimTemplate{ ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace}, - Spec: resourcev1alpha1.ResourceClaimTemplateSpec{ - Spec: resourcev1alpha1.ResourceClaimSpec{ + Spec: resourcev1alpha2.ResourceClaimTemplateSpec{ + Spec: resourcev1alpha2.ResourceClaimSpec{ ResourceClassName: classname, }, }, @@ -318,7 +318,7 @@ func podKey(pod *v1.Pod) string { return podKeyPrefix + pod.Namespace + "/" + pod.Name } -func claimKey(claim *resourcev1alpha1.ResourceClaim) string { +func claimKey(claim *resourcev1alpha2.ResourceClaim) string { return claimKeyPrefix + claim.Namespace + "/" + claim.Name } @@ -334,7 +334,7 @@ func makeOwnerReference(pod *v1.Pod, isController bool) *metav1.OwnerReference { } } -func normalizeClaims(claims []resourcev1alpha1.ResourceClaim) []resourcev1alpha1.ResourceClaim { +func normalizeClaims(claims []resourcev1alpha2.ResourceClaim) []resourcev1alpha2.ResourceClaim { sort.Slice(claims, func(i, j int) bool { return claims[i].Namespace < claims[j].Namespace || claims[i].Name < claims[j].Name diff --git a/pkg/controlplane/instance.go b/pkg/controlplane/instance.go index b2e6751ecc1..bfe7a58b79f 100644 --- a/pkg/controlplane/instance.go +++ b/pkg/controlplane/instance.go @@ -54,7 +54,7 @@ import ( policyapiv1 "k8s.io/api/policy/v1" policyapiv1beta1 "k8s.io/api/policy/v1beta1" rbacv1 "k8s.io/api/rbac/v1" - resourcev1alpha1 "k8s.io/api/resource/v1alpha1" + resourcev1alpha2 "k8s.io/api/resource/v1alpha2" schedulingapiv1 "k8s.io/api/scheduling/v1" storageapiv1 "k8s.io/api/storage/v1" storageapiv1alpha1 "k8s.io/api/storage/v1alpha1" @@ -732,7 +732,7 @@ var ( admissionregistrationv1alpha1.SchemeGroupVersion, apiserverinternalv1alpha1.SchemeGroupVersion, authenticationv1alpha1.SchemeGroupVersion, - 
resourcev1alpha1.SchemeGroupVersion, + resourcev1alpha2.SchemeGroupVersion, networkingapiv1alpha1.SchemeGroupVersion, storageapiv1alpha1.SchemeGroupVersion, flowcontrolv1alpha1.SchemeGroupVersion, diff --git a/pkg/generated/openapi/zz_generated.openapi.go b/pkg/generated/openapi/zz_generated.openapi.go index 8f5db0679c4..9df7d19d0ed 100644 --- a/pkg/generated/openapi/zz_generated.openapi.go +++ b/pkg/generated/openapi/zz_generated.openapi.go @@ -824,24 +824,24 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "k8s.io/api/rbac/v1beta1.RoleList": schema_k8sio_api_rbac_v1beta1_RoleList(ref), "k8s.io/api/rbac/v1beta1.RoleRef": schema_k8sio_api_rbac_v1beta1_RoleRef(ref), "k8s.io/api/rbac/v1beta1.Subject": schema_k8sio_api_rbac_v1beta1_Subject(ref), - "k8s.io/api/resource/v1alpha1.AllocationResult": schema_k8sio_api_resource_v1alpha1_AllocationResult(ref), - "k8s.io/api/resource/v1alpha1.PodScheduling": schema_k8sio_api_resource_v1alpha1_PodScheduling(ref), - "k8s.io/api/resource/v1alpha1.PodSchedulingList": schema_k8sio_api_resource_v1alpha1_PodSchedulingList(ref), - "k8s.io/api/resource/v1alpha1.PodSchedulingSpec": schema_k8sio_api_resource_v1alpha1_PodSchedulingSpec(ref), - "k8s.io/api/resource/v1alpha1.PodSchedulingStatus": schema_k8sio_api_resource_v1alpha1_PodSchedulingStatus(ref), - "k8s.io/api/resource/v1alpha1.ResourceClaim": schema_k8sio_api_resource_v1alpha1_ResourceClaim(ref), - "k8s.io/api/resource/v1alpha1.ResourceClaimConsumerReference": schema_k8sio_api_resource_v1alpha1_ResourceClaimConsumerReference(ref), - "k8s.io/api/resource/v1alpha1.ResourceClaimList": schema_k8sio_api_resource_v1alpha1_ResourceClaimList(ref), - "k8s.io/api/resource/v1alpha1.ResourceClaimParametersReference": schema_k8sio_api_resource_v1alpha1_ResourceClaimParametersReference(ref), - "k8s.io/api/resource/v1alpha1.ResourceClaimSchedulingStatus": schema_k8sio_api_resource_v1alpha1_ResourceClaimSchedulingStatus(ref), - "k8s.io/api/resource/v1alpha1.ResourceClaimSpec": schema_k8sio_api_resource_v1alpha1_ResourceClaimSpec(ref), - "k8s.io/api/resource/v1alpha1.ResourceClaimStatus": schema_k8sio_api_resource_v1alpha1_ResourceClaimStatus(ref), - "k8s.io/api/resource/v1alpha1.ResourceClaimTemplate": schema_k8sio_api_resource_v1alpha1_ResourceClaimTemplate(ref), - "k8s.io/api/resource/v1alpha1.ResourceClaimTemplateList": schema_k8sio_api_resource_v1alpha1_ResourceClaimTemplateList(ref), - "k8s.io/api/resource/v1alpha1.ResourceClaimTemplateSpec": schema_k8sio_api_resource_v1alpha1_ResourceClaimTemplateSpec(ref), - "k8s.io/api/resource/v1alpha1.ResourceClass": schema_k8sio_api_resource_v1alpha1_ResourceClass(ref), - "k8s.io/api/resource/v1alpha1.ResourceClassList": schema_k8sio_api_resource_v1alpha1_ResourceClassList(ref), - "k8s.io/api/resource/v1alpha1.ResourceClassParametersReference": schema_k8sio_api_resource_v1alpha1_ResourceClassParametersReference(ref), + "k8s.io/api/resource/v1alpha2.AllocationResult": schema_k8sio_api_resource_v1alpha2_AllocationResult(ref), + "k8s.io/api/resource/v1alpha2.PodScheduling": schema_k8sio_api_resource_v1alpha2_PodScheduling(ref), + "k8s.io/api/resource/v1alpha2.PodSchedulingList": schema_k8sio_api_resource_v1alpha2_PodSchedulingList(ref), + "k8s.io/api/resource/v1alpha2.PodSchedulingSpec": schema_k8sio_api_resource_v1alpha2_PodSchedulingSpec(ref), + "k8s.io/api/resource/v1alpha2.PodSchedulingStatus": schema_k8sio_api_resource_v1alpha2_PodSchedulingStatus(ref), + "k8s.io/api/resource/v1alpha2.ResourceClaim": 
schema_k8sio_api_resource_v1alpha2_ResourceClaim(ref), + "k8s.io/api/resource/v1alpha2.ResourceClaimConsumerReference": schema_k8sio_api_resource_v1alpha2_ResourceClaimConsumerReference(ref), + "k8s.io/api/resource/v1alpha2.ResourceClaimList": schema_k8sio_api_resource_v1alpha2_ResourceClaimList(ref), + "k8s.io/api/resource/v1alpha2.ResourceClaimParametersReference": schema_k8sio_api_resource_v1alpha2_ResourceClaimParametersReference(ref), + "k8s.io/api/resource/v1alpha2.ResourceClaimSchedulingStatus": schema_k8sio_api_resource_v1alpha2_ResourceClaimSchedulingStatus(ref), + "k8s.io/api/resource/v1alpha2.ResourceClaimSpec": schema_k8sio_api_resource_v1alpha2_ResourceClaimSpec(ref), + "k8s.io/api/resource/v1alpha2.ResourceClaimStatus": schema_k8sio_api_resource_v1alpha2_ResourceClaimStatus(ref), + "k8s.io/api/resource/v1alpha2.ResourceClaimTemplate": schema_k8sio_api_resource_v1alpha2_ResourceClaimTemplate(ref), + "k8s.io/api/resource/v1alpha2.ResourceClaimTemplateList": schema_k8sio_api_resource_v1alpha2_ResourceClaimTemplateList(ref), + "k8s.io/api/resource/v1alpha2.ResourceClaimTemplateSpec": schema_k8sio_api_resource_v1alpha2_ResourceClaimTemplateSpec(ref), + "k8s.io/api/resource/v1alpha2.ResourceClass": schema_k8sio_api_resource_v1alpha2_ResourceClass(ref), + "k8s.io/api/resource/v1alpha2.ResourceClassList": schema_k8sio_api_resource_v1alpha2_ResourceClassList(ref), + "k8s.io/api/resource/v1alpha2.ResourceClassParametersReference": schema_k8sio_api_resource_v1alpha2_ResourceClassParametersReference(ref), "k8s.io/api/scheduling/v1.PriorityClass": schema_k8sio_api_scheduling_v1_PriorityClass(ref), "k8s.io/api/scheduling/v1.PriorityClassList": schema_k8sio_api_scheduling_v1_PriorityClassList(ref), "k8s.io/api/scheduling/v1alpha1.PriorityClass": schema_k8sio_api_scheduling_v1alpha1_PriorityClass(ref), @@ -41121,7 +41121,7 @@ func schema_k8sio_api_rbac_v1beta1_Subject(ref common.ReferenceCallback) common. 
} } -func schema_k8sio_api_resource_v1alpha1_AllocationResult(ref common.ReferenceCallback) common.OpenAPIDefinition { +func schema_k8sio_api_resource_v1alpha2_AllocationResult(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ @@ -41156,7 +41156,7 @@ func schema_k8sio_api_resource_v1alpha1_AllocationResult(ref common.ReferenceCal } } -func schema_k8sio_api_resource_v1alpha1_PodScheduling(ref common.ReferenceCallback) common.OpenAPIDefinition { +func schema_k8sio_api_resource_v1alpha2_PodScheduling(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ @@ -41188,14 +41188,14 @@ func schema_k8sio_api_resource_v1alpha1_PodScheduling(ref common.ReferenceCallba SchemaProps: spec.SchemaProps{ Description: "Spec describes where resources for the Pod are needed.", Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/resource/v1alpha1.PodSchedulingSpec"), + Ref: ref("k8s.io/api/resource/v1alpha2.PodSchedulingSpec"), }, }, "status": { SchemaProps: spec.SchemaProps{ Description: "Status describes where resources for the Pod can be allocated.", Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/resource/v1alpha1.PodSchedulingStatus"), + Ref: ref("k8s.io/api/resource/v1alpha2.PodSchedulingStatus"), }, }, }, @@ -41203,11 +41203,11 @@ func schema_k8sio_api_resource_v1alpha1_PodScheduling(ref common.ReferenceCallba }, }, Dependencies: []string{ - "k8s.io/api/resource/v1alpha1.PodSchedulingSpec", "k8s.io/api/resource/v1alpha1.PodSchedulingStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, + "k8s.io/api/resource/v1alpha2.PodSchedulingSpec", "k8s.io/api/resource/v1alpha2.PodSchedulingStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, } } -func schema_k8sio_api_resource_v1alpha1_PodSchedulingList(ref common.ReferenceCallback) common.OpenAPIDefinition { +func schema_k8sio_api_resource_v1alpha2_PodSchedulingList(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ @@ -41243,7 +41243,7 @@ func schema_k8sio_api_resource_v1alpha1_PodSchedulingList(ref common.ReferenceCa Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/resource/v1alpha1.PodScheduling"), + Ref: ref("k8s.io/api/resource/v1alpha2.PodScheduling"), }, }, }, @@ -41254,11 +41254,11 @@ func schema_k8sio_api_resource_v1alpha1_PodSchedulingList(ref common.ReferenceCa }, }, Dependencies: []string{ - "k8s.io/api/resource/v1alpha1.PodScheduling", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, + "k8s.io/api/resource/v1alpha2.PodScheduling", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, } } -func schema_k8sio_api_resource_v1alpha1_PodSchedulingSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { +func schema_k8sio_api_resource_v1alpha2_PodSchedulingSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ @@ -41298,7 +41298,7 @@ func schema_k8sio_api_resource_v1alpha1_PodSchedulingSpec(ref common.ReferenceCa } } -func schema_k8sio_api_resource_v1alpha1_PodSchedulingStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { +func schema_k8sio_api_resource_v1alpha2_PodSchedulingStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ 
SchemaProps: spec.SchemaProps{ @@ -41321,7 +41321,7 @@ func schema_k8sio_api_resource_v1alpha1_PodSchedulingStatus(ref common.Reference Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/resource/v1alpha1.ResourceClaimSchedulingStatus"), + Ref: ref("k8s.io/api/resource/v1alpha2.ResourceClaimSchedulingStatus"), }, }, }, @@ -41331,11 +41331,11 @@ func schema_k8sio_api_resource_v1alpha1_PodSchedulingStatus(ref common.Reference }, }, Dependencies: []string{ - "k8s.io/api/resource/v1alpha1.ResourceClaimSchedulingStatus"}, + "k8s.io/api/resource/v1alpha2.ResourceClaimSchedulingStatus"}, } } -func schema_k8sio_api_resource_v1alpha1_ResourceClaim(ref common.ReferenceCallback) common.OpenAPIDefinition { +func schema_k8sio_api_resource_v1alpha2_ResourceClaim(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ @@ -41367,14 +41367,14 @@ func schema_k8sio_api_resource_v1alpha1_ResourceClaim(ref common.ReferenceCallba SchemaProps: spec.SchemaProps{ Description: "Spec describes the desired attributes of a resource that then needs to be allocated. It can only be set once when creating the ResourceClaim.", Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/resource/v1alpha1.ResourceClaimSpec"), + Ref: ref("k8s.io/api/resource/v1alpha2.ResourceClaimSpec"), }, }, "status": { SchemaProps: spec.SchemaProps{ Description: "Status describes whether the resource is available and with which attributes.", Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/resource/v1alpha1.ResourceClaimStatus"), + Ref: ref("k8s.io/api/resource/v1alpha2.ResourceClaimStatus"), }, }, }, @@ -41382,11 +41382,11 @@ func schema_k8sio_api_resource_v1alpha1_ResourceClaim(ref common.ReferenceCallba }, }, Dependencies: []string{ - "k8s.io/api/resource/v1alpha1.ResourceClaimSpec", "k8s.io/api/resource/v1alpha1.ResourceClaimStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, + "k8s.io/api/resource/v1alpha2.ResourceClaimSpec", "k8s.io/api/resource/v1alpha2.ResourceClaimStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, } } -func schema_k8sio_api_resource_v1alpha1_ResourceClaimConsumerReference(ref common.ReferenceCallback) common.OpenAPIDefinition { +func schema_k8sio_api_resource_v1alpha2_ResourceClaimConsumerReference(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ @@ -41431,7 +41431,7 @@ func schema_k8sio_api_resource_v1alpha1_ResourceClaimConsumerReference(ref commo } } -func schema_k8sio_api_resource_v1alpha1_ResourceClaimList(ref common.ReferenceCallback) common.OpenAPIDefinition { +func schema_k8sio_api_resource_v1alpha2_ResourceClaimList(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ @@ -41467,7 +41467,7 @@ func schema_k8sio_api_resource_v1alpha1_ResourceClaimList(ref common.ReferenceCa Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/resource/v1alpha1.ResourceClaim"), + Ref: ref("k8s.io/api/resource/v1alpha2.ResourceClaim"), }, }, }, @@ -41478,11 +41478,11 @@ func schema_k8sio_api_resource_v1alpha1_ResourceClaimList(ref common.ReferenceCa }, }, Dependencies: []string{ - "k8s.io/api/resource/v1alpha1.ResourceClaim", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, + "k8s.io/api/resource/v1alpha2.ResourceClaim", 
"k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, } } -func schema_k8sio_api_resource_v1alpha1_ResourceClaimParametersReference(ref common.ReferenceCallback) common.OpenAPIDefinition { +func schema_k8sio_api_resource_v1alpha2_ResourceClaimParametersReference(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ @@ -41519,7 +41519,7 @@ func schema_k8sio_api_resource_v1alpha1_ResourceClaimParametersReference(ref com } } -func schema_k8sio_api_resource_v1alpha1_ResourceClaimSchedulingStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { +func schema_k8sio_api_resource_v1alpha2_ResourceClaimSchedulingStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ @@ -41559,7 +41559,7 @@ func schema_k8sio_api_resource_v1alpha1_ResourceClaimSchedulingStatus(ref common } } -func schema_k8sio_api_resource_v1alpha1_ResourceClaimSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { +func schema_k8sio_api_resource_v1alpha2_ResourceClaimSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ @@ -41577,7 +41577,7 @@ func schema_k8sio_api_resource_v1alpha1_ResourceClaimSpec(ref common.ReferenceCa "parametersRef": { SchemaProps: spec.SchemaProps{ Description: "ParametersRef references a separate object with arbitrary parameters that will be used by the driver when allocating a resource for the claim.\n\nThe object must be in the same namespace as the ResourceClaim.", - Ref: ref("k8s.io/api/resource/v1alpha1.ResourceClaimParametersReference"), + Ref: ref("k8s.io/api/resource/v1alpha2.ResourceClaimParametersReference"), }, }, "allocationMode": { @@ -41592,11 +41592,11 @@ func schema_k8sio_api_resource_v1alpha1_ResourceClaimSpec(ref common.ReferenceCa }, }, Dependencies: []string{ - "k8s.io/api/resource/v1alpha1.ResourceClaimParametersReference"}, + "k8s.io/api/resource/v1alpha2.ResourceClaimParametersReference"}, } } -func schema_k8sio_api_resource_v1alpha1_ResourceClaimStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { +func schema_k8sio_api_resource_v1alpha2_ResourceClaimStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ @@ -41613,7 +41613,7 @@ func schema_k8sio_api_resource_v1alpha1_ResourceClaimStatus(ref common.Reference "allocation": { SchemaProps: spec.SchemaProps{ Description: "Allocation is set by the resource driver once a resource has been allocated successfully. 
If this is not specified, the resource is not yet allocated.", - Ref: ref("k8s.io/api/resource/v1alpha1.AllocationResult"), + Ref: ref("k8s.io/api/resource/v1alpha2.AllocationResult"), }, }, "reservedFor": { @@ -41632,7 +41632,7 @@ func schema_k8sio_api_resource_v1alpha1_ResourceClaimStatus(ref common.Reference Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/resource/v1alpha1.ResourceClaimConsumerReference"), + Ref: ref("k8s.io/api/resource/v1alpha2.ResourceClaimConsumerReference"), }, }, }, @@ -41649,11 +41649,11 @@ func schema_k8sio_api_resource_v1alpha1_ResourceClaimStatus(ref common.Reference }, }, Dependencies: []string{ - "k8s.io/api/resource/v1alpha1.AllocationResult", "k8s.io/api/resource/v1alpha1.ResourceClaimConsumerReference"}, + "k8s.io/api/resource/v1alpha2.AllocationResult", "k8s.io/api/resource/v1alpha2.ResourceClaimConsumerReference"}, } } -func schema_k8sio_api_resource_v1alpha1_ResourceClaimTemplate(ref common.ReferenceCallback) common.OpenAPIDefinition { +func schema_k8sio_api_resource_v1alpha2_ResourceClaimTemplate(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ @@ -41685,7 +41685,7 @@ func schema_k8sio_api_resource_v1alpha1_ResourceClaimTemplate(ref common.Referen SchemaProps: spec.SchemaProps{ Description: "Describes the ResourceClaim that is to be generated.\n\nThis field is immutable. A ResourceClaim will get created by the control plane for a Pod when needed and then not get updated anymore.", Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/resource/v1alpha1.ResourceClaimTemplateSpec"), + Ref: ref("k8s.io/api/resource/v1alpha2.ResourceClaimTemplateSpec"), }, }, }, @@ -41693,11 +41693,11 @@ func schema_k8sio_api_resource_v1alpha1_ResourceClaimTemplate(ref common.Referen }, }, Dependencies: []string{ - "k8s.io/api/resource/v1alpha1.ResourceClaimTemplateSpec", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, + "k8s.io/api/resource/v1alpha2.ResourceClaimTemplateSpec", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, } } -func schema_k8sio_api_resource_v1alpha1_ResourceClaimTemplateList(ref common.ReferenceCallback) common.OpenAPIDefinition { +func schema_k8sio_api_resource_v1alpha2_ResourceClaimTemplateList(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ @@ -41733,7 +41733,7 @@ func schema_k8sio_api_resource_v1alpha1_ResourceClaimTemplateList(ref common.Ref Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/resource/v1alpha1.ResourceClaimTemplate"), + Ref: ref("k8s.io/api/resource/v1alpha2.ResourceClaimTemplate"), }, }, }, @@ -41744,11 +41744,11 @@ func schema_k8sio_api_resource_v1alpha1_ResourceClaimTemplateList(ref common.Ref }, }, Dependencies: []string{ - "k8s.io/api/resource/v1alpha1.ResourceClaimTemplate", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, + "k8s.io/api/resource/v1alpha2.ResourceClaimTemplate", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, } } -func schema_k8sio_api_resource_v1alpha1_ResourceClaimTemplateSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { +func schema_k8sio_api_resource_v1alpha2_ResourceClaimTemplateSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ @@ -41766,7 +41766,7 @@ func 
schema_k8sio_api_resource_v1alpha1_ResourceClaimTemplateSpec(ref common.Ref SchemaProps: spec.SchemaProps{ Description: "Spec for the ResourceClaim. The entire content is copied unchanged into the ResourceClaim that gets created from this template. The same fields as in a ResourceClaim are also valid here.", Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/resource/v1alpha1.ResourceClaimSpec"), + Ref: ref("k8s.io/api/resource/v1alpha2.ResourceClaimSpec"), }, }, }, @@ -41774,11 +41774,11 @@ func schema_k8sio_api_resource_v1alpha1_ResourceClaimTemplateSpec(ref common.Ref }, }, Dependencies: []string{ - "k8s.io/api/resource/v1alpha1.ResourceClaimSpec", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, + "k8s.io/api/resource/v1alpha2.ResourceClaimSpec", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, } } -func schema_k8sio_api_resource_v1alpha1_ResourceClass(ref common.ReferenceCallback) common.OpenAPIDefinition { +func schema_k8sio_api_resource_v1alpha2_ResourceClass(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ @@ -41817,7 +41817,7 @@ func schema_k8sio_api_resource_v1alpha1_ResourceClass(ref common.ReferenceCallba "parametersRef": { SchemaProps: spec.SchemaProps{ Description: "ParametersRef references an arbitrary separate object that may hold parameters that will be used by the driver when allocating a resource that uses this class. A dynamic resource driver can distinguish between parameters stored here and and those stored in ResourceClaimSpec.", - Ref: ref("k8s.io/api/resource/v1alpha1.ResourceClassParametersReference"), + Ref: ref("k8s.io/api/resource/v1alpha2.ResourceClassParametersReference"), }, }, "suitableNodes": { @@ -41831,11 +41831,11 @@ func schema_k8sio_api_resource_v1alpha1_ResourceClass(ref common.ReferenceCallba }, }, Dependencies: []string{ - "k8s.io/api/core/v1.NodeSelector", "k8s.io/api/resource/v1alpha1.ResourceClassParametersReference", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, + "k8s.io/api/core/v1.NodeSelector", "k8s.io/api/resource/v1alpha2.ResourceClassParametersReference", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, } } -func schema_k8sio_api_resource_v1alpha1_ResourceClassList(ref common.ReferenceCallback) common.OpenAPIDefinition { +func schema_k8sio_api_resource_v1alpha2_ResourceClassList(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ @@ -41871,7 +41871,7 @@ func schema_k8sio_api_resource_v1alpha1_ResourceClassList(ref common.ReferenceCa Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/resource/v1alpha1.ResourceClass"), + Ref: ref("k8s.io/api/resource/v1alpha2.ResourceClass"), }, }, }, @@ -41882,11 +41882,11 @@ func schema_k8sio_api_resource_v1alpha1_ResourceClassList(ref common.ReferenceCa }, }, Dependencies: []string{ - "k8s.io/api/resource/v1alpha1.ResourceClass", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, + "k8s.io/api/resource/v1alpha2.ResourceClass", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, } } -func schema_k8sio_api_resource_v1alpha1_ResourceClassParametersReference(ref common.ReferenceCallback) common.OpenAPIDefinition { +func schema_k8sio_api_resource_v1alpha2_ResourceClassParametersReference(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ diff --git 
a/pkg/kubectl/.import-restrictions b/pkg/kubectl/.import-restrictions index 064a6497011..ec6d8e80176 100644 --- a/pkg/kubectl/.import-restrictions +++ b/pkg/kubectl/.import-restrictions @@ -80,7 +80,7 @@ rules: - k8s.io/kubernetes/pkg/apis/rbac/v1beta1 - k8s.io/kubernetes/pkg/apis/resource - k8s.io/kubernetes/pkg/apis/resource/install - - k8s.io/kubernetes/pkg/apis/resource/v1alpha1 + - k8s.io/kubernetes/pkg/apis/resource/v1alpha2 - k8s.io/kubernetes/pkg/apis/scheduling - k8s.io/kubernetes/pkg/apis/scheduling/install - k8s.io/kubernetes/pkg/apis/scheduling/v1alpha1 diff --git a/pkg/kubelet/cm/dra/manager.go b/pkg/kubelet/cm/dra/manager.go index 69dfa6eac24..3dfdb9cff75 100644 --- a/pkg/kubelet/cm/dra/manager.go +++ b/pkg/kubelet/cm/dra/manager.go @@ -87,7 +87,7 @@ func (m *ManagerImpl) PrepareResources(pod *v1.Pod) error { } // Query claim object from the API server - resourceClaim, err := m.kubeClient.ResourceV1alpha1().ResourceClaims(pod.Namespace).Get( + resourceClaim, err := m.kubeClient.ResourceV1alpha2().ResourceClaims(pod.Namespace).Get( context.TODO(), claimName, metav1.GetOptions{}) diff --git a/pkg/printers/internalversion/printers.go b/pkg/printers/internalversion/printers.go index 6c092819b3b..b1e970babbf 100644 --- a/pkg/printers/internalversion/printers.go +++ b/pkg/printers/internalversion/printers.go @@ -39,7 +39,7 @@ import ( flowcontrolv1beta3 "k8s.io/api/flowcontrol/v1beta3" networkingv1alpha1 "k8s.io/api/networking/v1alpha1" rbacv1beta1 "k8s.io/api/rbac/v1beta1" - resourcev1alpha1 "k8s.io/api/resource/v1alpha1" + resourcev1alpha2 "k8s.io/api/resource/v1alpha2" schedulingv1 "k8s.io/api/scheduling/v1" storagev1 "k8s.io/api/storage/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -613,7 +613,7 @@ func AddHandlers(h printers.PrintHandler) { resourceClassColumnDefinitions := []metav1.TableColumnDefinition{ {Name: "Name", Type: "string", Format: "name", Description: metav1.ObjectMeta{}.SwaggerDoc()["name"]}, - {Name: "DriverName", Type: "string", Description: resourcev1alpha1.ResourceClass{}.SwaggerDoc()["driverName"]}, + {Name: "DriverName", Type: "string", Description: resourcev1alpha2.ResourceClass{}.SwaggerDoc()["driverName"]}, {Name: "Age", Type: "string", Description: metav1.ObjectMeta{}.SwaggerDoc()["creationTimestamp"]}, } _ = h.TableHandler(resourceClassColumnDefinitions, printResourceClass) @@ -621,8 +621,8 @@ func AddHandlers(h printers.PrintHandler) { resourceClaimColumnDefinitions := []metav1.TableColumnDefinition{ {Name: "Name", Type: "string", Format: "name", Description: metav1.ObjectMeta{}.SwaggerDoc()["name"]}, - {Name: "ResourceClassName", Type: "string", Description: resourcev1alpha1.ResourceClaimSpec{}.SwaggerDoc()["resourceClassName"]}, - {Name: "AllocationMode", Type: "string", Description: resourcev1alpha1.ResourceClaimSpec{}.SwaggerDoc()["allocationMode"]}, + {Name: "ResourceClassName", Type: "string", Description: resourcev1alpha2.ResourceClaimSpec{}.SwaggerDoc()["resourceClassName"]}, + {Name: "AllocationMode", Type: "string", Description: resourcev1alpha2.ResourceClaimSpec{}.SwaggerDoc()["allocationMode"]}, {Name: "State", Type: "string", Description: "A summary of the current state (allocated, pending, reserved, etc.)."}, {Name: "Age", Type: "string", Description: metav1.ObjectMeta{}.SwaggerDoc()["creationTimestamp"]}, } @@ -631,8 +631,8 @@ func AddHandlers(h printers.PrintHandler) { resourceClaimTemplateColumnDefinitions := []metav1.TableColumnDefinition{ {Name: "Name", Type: "string", Format: "name", Description: 
metav1.ObjectMeta{}.SwaggerDoc()["name"]}, - {Name: "ResourceClassName", Type: "string", Description: resourcev1alpha1.ResourceClaimSpec{}.SwaggerDoc()["resourceClassName"]}, - {Name: "AllocationMode", Type: "string", Description: resourcev1alpha1.ResourceClaimSpec{}.SwaggerDoc()["allocationMode"]}, + {Name: "ResourceClassName", Type: "string", Description: resourcev1alpha2.ResourceClaimSpec{}.SwaggerDoc()["resourceClassName"]}, + {Name: "AllocationMode", Type: "string", Description: resourcev1alpha2.ResourceClaimSpec{}.SwaggerDoc()["allocationMode"]}, {Name: "Age", Type: "string", Description: metav1.ObjectMeta{}.SwaggerDoc()["creationTimestamp"]}, } _ = h.TableHandler(resourceClaimTemplateColumnDefinitions, printResourceClaimTemplate) @@ -640,7 +640,7 @@ func AddHandlers(h printers.PrintHandler) { podSchedulingColumnDefinitions := []metav1.TableColumnDefinition{ {Name: "Name", Type: "string", Format: "name", Description: metav1.ObjectMeta{}.SwaggerDoc()["name"]}, - {Name: "SelectedNode", Type: "string", Description: resourcev1alpha1.PodSchedulingSpec{}.SwaggerDoc()["selectedNode"]}, + {Name: "SelectedNode", Type: "string", Description: resourcev1alpha2.PodSchedulingSpec{}.SwaggerDoc()["selectedNode"]}, {Name: "Age", Type: "string", Description: metav1.ObjectMeta{}.SwaggerDoc()["creationTimestamp"]}, } _ = h.TableHandler(podSchedulingColumnDefinitions, printPodScheduling) diff --git a/pkg/registry/resource/podscheduling/strategy.go b/pkg/registry/resource/podscheduling/strategy.go index 99421213f4c..6f158bac6de 100644 --- a/pkg/registry/resource/podscheduling/strategy.go +++ b/pkg/registry/resource/podscheduling/strategy.go @@ -52,7 +52,7 @@ func (podSchedulingStrategy) NamespaceScoped() bool { // status. func (podSchedulingStrategy) GetResetFields() map[fieldpath.APIVersion]*fieldpath.Set { fields := map[fieldpath.APIVersion]*fieldpath.Set{ - "resource.k8s.io/v1alpha1": fieldpath.NewSet( + "resource.k8s.io/v1alpha2": fieldpath.NewSet( fieldpath.MakePathOrDie("status"), ), } @@ -113,7 +113,7 @@ var StatusStrategy = podSchedulingStatusStrategy{Strategy} // should not be modified by the user. For a status update that is the spec. func (podSchedulingStatusStrategy) GetResetFields() map[fieldpath.APIVersion]*fieldpath.Set { fields := map[fieldpath.APIVersion]*fieldpath.Set{ - "resource.k8s.io/v1alpha1": fieldpath.NewSet( + "resource.k8s.io/v1alpha2": fieldpath.NewSet( fieldpath.MakePathOrDie("spec"), ), } diff --git a/pkg/registry/resource/resourceclaim/strategy.go b/pkg/registry/resource/resourceclaim/strategy.go index 8b71300de85..1b65e87ade6 100644 --- a/pkg/registry/resource/resourceclaim/strategy.go +++ b/pkg/registry/resource/resourceclaim/strategy.go @@ -52,7 +52,7 @@ func (resourceclaimStrategy) NamespaceScoped() bool { // status. func (resourceclaimStrategy) GetResetFields() map[fieldpath.APIVersion]*fieldpath.Set { fields := map[fieldpath.APIVersion]*fieldpath.Set{ - "resource.k8s.io/v1alpha1": fieldpath.NewSet( + "resource.k8s.io/v1alpha2": fieldpath.NewSet( fieldpath.MakePathOrDie("status"), ), } @@ -113,7 +113,7 @@ var StatusStrategy = resourceclaimStatusStrategy{Strategy} // should not be modified by the user. For a status update that is the spec. 
func (resourceclaimStatusStrategy) GetResetFields() map[fieldpath.APIVersion]*fieldpath.Set { fields := map[fieldpath.APIVersion]*fieldpath.Set{ - "resource.k8s.io/v1alpha1": fieldpath.NewSet( + "resource.k8s.io/v1alpha2": fieldpath.NewSet( fieldpath.MakePathOrDie("spec"), ), } diff --git a/pkg/registry/resource/rest/storage_resource.go b/pkg/registry/resource/rest/storage_resource.go index 93d793e466a..6df06e5c8d2 100644 --- a/pkg/registry/resource/rest/storage_resource.go +++ b/pkg/registry/resource/rest/storage_resource.go @@ -17,7 +17,7 @@ limitations under the License. package rest import ( - resourcev1alpha1 "k8s.io/api/resource/v1alpha1" + resourcev1alpha2 "k8s.io/api/resource/v1alpha2" "k8s.io/apiserver/pkg/registry/generic" "k8s.io/apiserver/pkg/registry/rest" genericapiserver "k8s.io/apiserver/pkg/server" @@ -37,19 +37,19 @@ func (p RESTStorageProvider) NewRESTStorage(apiResourceConfigSource serverstorag // If you add a version here, be sure to add an entry in `k8s.io/kubernetes/cmd/kube-apiserver/app/aggregator.go with specific priorities. // TODO refactor the plumbing to provide the information in the APIGroupInfo - if storageMap, err := p.v1alpha1Storage(apiResourceConfigSource, restOptionsGetter); err != nil { + if storageMap, err := p.v1alpha2Storage(apiResourceConfigSource, restOptionsGetter); err != nil { return genericapiserver.APIGroupInfo{}, err } else if len(storageMap) > 0 { - apiGroupInfo.VersionedResourcesStorageMap[resourcev1alpha1.SchemeGroupVersion.Version] = storageMap + apiGroupInfo.VersionedResourcesStorageMap[resourcev1alpha2.SchemeGroupVersion.Version] = storageMap } return apiGroupInfo, nil } -func (p RESTStorageProvider) v1alpha1Storage(apiResourceConfigSource serverstorage.APIResourceConfigSource, restOptionsGetter generic.RESTOptionsGetter) (map[string]rest.Storage, error) { +func (p RESTStorageProvider) v1alpha2Storage(apiResourceConfigSource serverstorage.APIResourceConfigSource, restOptionsGetter generic.RESTOptionsGetter) (map[string]rest.Storage, error) { storage := map[string]rest.Storage{} - if resource := "resourceclasses"; apiResourceConfigSource.ResourceEnabled(resourcev1alpha1.SchemeGroupVersion.WithResource(resource)) { + if resource := "resourceclasses"; apiResourceConfigSource.ResourceEnabled(resourcev1alpha2.SchemeGroupVersion.WithResource(resource)) { resourceClassStorage, err := resourceclassstore.NewREST(restOptionsGetter) if err != nil { return nil, err @@ -57,7 +57,7 @@ func (p RESTStorageProvider) v1alpha1Storage(apiResourceConfigSource serverstora storage[resource] = resourceClassStorage } - if resource := "resourceclaims"; apiResourceConfigSource.ResourceEnabled(resourcev1alpha1.SchemeGroupVersion.WithResource(resource)) { + if resource := "resourceclaims"; apiResourceConfigSource.ResourceEnabled(resourcev1alpha2.SchemeGroupVersion.WithResource(resource)) { resourceClaimStorage, resourceClaimStatusStorage, err := resourceclaimstore.NewREST(restOptionsGetter) if err != nil { return nil, err @@ -66,7 +66,7 @@ func (p RESTStorageProvider) v1alpha1Storage(apiResourceConfigSource serverstora storage[resource+"/status"] = resourceClaimStatusStorage } - if resource := "resourceclaimtemplates"; apiResourceConfigSource.ResourceEnabled(resourcev1alpha1.SchemeGroupVersion.WithResource(resource)) { + if resource := "resourceclaimtemplates"; apiResourceConfigSource.ResourceEnabled(resourcev1alpha2.SchemeGroupVersion.WithResource(resource)) { resourceClaimTemplateStorage, err := resourceclaimtemplatestore.NewREST(restOptionsGetter) if err != nil 
{ return nil, err @@ -74,7 +74,7 @@ func (p RESTStorageProvider) v1alpha1Storage(apiResourceConfigSource serverstora storage[resource] = resourceClaimTemplateStorage } - if resource := "podschedulings"; apiResourceConfigSource.ResourceEnabled(resourcev1alpha1.SchemeGroupVersion.WithResource(resource)) { + if resource := "podschedulings"; apiResourceConfigSource.ResourceEnabled(resourcev1alpha2.SchemeGroupVersion.WithResource(resource)) { podSchedulingStorage, podSchedulingStatusStorage, err := podschedulingstore.NewREST(restOptionsGetter) if err != nil { return nil, err diff --git a/pkg/scheduler/eventhandlers.go b/pkg/scheduler/eventhandlers.go index 9f7b2b53d7d..da2dab1c85a 100644 --- a/pkg/scheduler/eventhandlers.go +++ b/pkg/scheduler/eventhandlers.go @@ -380,13 +380,13 @@ func addAllEventHandlers( ) case framework.PodScheduling: if utilfeature.DefaultFeatureGate.Enabled(features.DynamicResourceAllocation) { - _, _ = informerFactory.Resource().V1alpha1().PodSchedulings().Informer().AddEventHandler( + _, _ = informerFactory.Resource().V1alpha2().PodSchedulings().Informer().AddEventHandler( buildEvtResHandler(at, framework.PodScheduling, "PodScheduling"), ) } case framework.ResourceClaim: if utilfeature.DefaultFeatureGate.Enabled(features.DynamicResourceAllocation) { - _, _ = informerFactory.Resource().V1alpha1().ResourceClaims().Informer().AddEventHandler( + _, _ = informerFactory.Resource().V1alpha2().ResourceClaims().Informer().AddEventHandler( buildEvtResHandler(at, framework.ResourceClaim, "ResourceClaim"), ) } diff --git a/pkg/scheduler/framework/plugins/dynamicresources/dynamicresources.go b/pkg/scheduler/framework/plugins/dynamicresources/dynamicresources.go index 09b5e1af06f..433068836f2 100644 --- a/pkg/scheduler/framework/plugins/dynamicresources/dynamicresources.go +++ b/pkg/scheduler/framework/plugins/dynamicresources/dynamicresources.go @@ -24,13 +24,13 @@ import ( "sync" v1 "k8s.io/api/core/v1" - resourcev1alpha1 "k8s.io/api/resource/v1alpha1" + resourcev1alpha2 "k8s.io/api/resource/v1alpha2" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/client-go/kubernetes" - resourcev1alpha1listers "k8s.io/client-go/listers/resource/v1alpha1" + resourcev1alpha2listers "k8s.io/client-go/listers/resource/v1alpha2" corev1helpers "k8s.io/component-helpers/scheduling/corev1" "k8s.io/component-helpers/scheduling/corev1/nodeaffinity" "k8s.io/dynamic-resource-allocation/resourceclaim" @@ -58,7 +58,7 @@ type stateData struct { // the plugin itself successfully does an Update. // // Empty if the Pod has no claims. - claims []*resourcev1alpha1.ResourceClaim + claims []*resourcev1alpha2.ResourceClaim // The AvailableOnNodes node filters of the claims converted from the // v1 API to nodeaffinity.NodeSelector by PreFilter for repeated @@ -81,7 +81,7 @@ type stateData struct { // where it might get shared by different plugins. But in practice, // it is currently only used by dynamic provisioning and thus // managed entirely here. - podScheduling *resourcev1alpha1.PodScheduling + podScheduling *resourcev1alpha2.PodScheduling // podSchedulingDirty is true if the current copy was locally modified. 
podSchedulingDirty bool @@ -93,10 +93,10 @@ func (d *stateData) Clone() framework.StateData { return d } -func (d *stateData) updateClaimStatus(ctx context.Context, clientset kubernetes.Interface, index int, claim *resourcev1alpha1.ResourceClaim) error { +func (d *stateData) updateClaimStatus(ctx context.Context, clientset kubernetes.Interface, index int, claim *resourcev1alpha2.ResourceClaim) error { // TODO (#113700): replace with patch operation. Beware that patching must only succeed if the // object has not been modified in parallel by someone else. - claim, err := clientset.ResourceV1alpha1().ResourceClaims(claim.Namespace).UpdateStatus(ctx, claim, metav1.UpdateOptions{}) + claim, err := clientset.ResourceV1alpha2().ResourceClaims(claim.Namespace).UpdateStatus(ctx, claim, metav1.UpdateOptions{}) // TODO: metric for update results, with the operation ("set selected // node", "set PotentialNodes", etc.) as one dimension. if err != nil { @@ -115,7 +115,7 @@ func (d *stateData) updateClaimStatus(ctx context.Context, clientset kubernetes. // initializePodScheduling can be called concurrently. It returns an existing PodScheduling // object if there is one already, retrieves one if not, or as a last resort creates // one from scratch. -func (d *stateData) initializePodScheduling(ctx context.Context, pod *v1.Pod, podSchedulingLister resourcev1alpha1listers.PodSchedulingLister) (*resourcev1alpha1.PodScheduling, error) { +func (d *stateData) initializePodScheduling(ctx context.Context, pod *v1.Pod, podSchedulingLister resourcev1alpha2listers.PodSchedulingLister) (*resourcev1alpha2.PodScheduling, error) { // TODO (#113701): check if this mutex locking can be avoided by calling initializePodScheduling during PreFilter. d.mutex.Lock() defer d.mutex.Unlock() @@ -128,7 +128,7 @@ func (d *stateData) initializePodScheduling(ctx context.Context, pod *v1.Pod, po switch { case apierrors.IsNotFound(err): controller := true - podScheduling = &resourcev1alpha1.PodScheduling{ + podScheduling = &resourcev1alpha2.PodScheduling{ ObjectMeta: metav1.ObjectMeta{ Name: pod.Name, Namespace: pod.Namespace, @@ -157,7 +157,7 @@ func (d *stateData) initializePodScheduling(ctx context.Context, pod *v1.Pod, po } // publishPodScheduling creates or updates the PodScheduling object. -func (d *stateData) publishPodScheduling(ctx context.Context, clientset kubernetes.Interface, podScheduling *resourcev1alpha1.PodScheduling) error { +func (d *stateData) publishPodScheduling(ctx context.Context, clientset kubernetes.Interface, podScheduling *resourcev1alpha2.PodScheduling) error { d.mutex.Lock() defer d.mutex.Unlock() @@ -174,10 +174,10 @@ func (d *stateData) publishPodScheduling(ctx context.Context, clientset kubernet logger.V(5).Info(msg, "podscheduling", klog.KObj(podScheduling)) } if podScheduling.UID == "" { - podScheduling, err = clientset.ResourceV1alpha1().PodSchedulings(podScheduling.Namespace).Create(ctx, podScheduling, metav1.CreateOptions{}) + podScheduling, err = clientset.ResourceV1alpha2().PodSchedulings(podScheduling.Namespace).Create(ctx, podScheduling, metav1.CreateOptions{}) } else { // TODO (#113700): patch here to avoid racing with drivers which update the status. 
- podScheduling, err = clientset.ResourceV1alpha1().PodSchedulings(podScheduling.Namespace).Update(ctx, podScheduling, metav1.UpdateOptions{}) + podScheduling, err = clientset.ResourceV1alpha2().PodSchedulings(podScheduling.Namespace).Update(ctx, podScheduling, metav1.UpdateOptions{}) } if err != nil { return err @@ -188,7 +188,7 @@ func (d *stateData) publishPodScheduling(ctx context.Context, clientset kubernet } // storePodScheduling replaces the pod scheduling object in the state. -func (d *stateData) storePodScheduling(podScheduling *resourcev1alpha1.PodScheduling) { +func (d *stateData) storePodScheduling(podScheduling *resourcev1alpha2.PodScheduling) { d.mutex.Lock() defer d.mutex.Unlock() @@ -196,7 +196,7 @@ func (d *stateData) storePodScheduling(podScheduling *resourcev1alpha1.PodSchedu d.podSchedulingDirty = true } -func statusForClaim(podScheduling *resourcev1alpha1.PodScheduling, podClaimName string) *resourcev1alpha1.ResourceClaimSchedulingStatus { +func statusForClaim(podScheduling *resourcev1alpha2.PodScheduling, podClaimName string) *resourcev1alpha2.ResourceClaimSchedulingStatus { for _, status := range podScheduling.Status.ResourceClaims { if status.Name == podClaimName { return &status @@ -209,9 +209,9 @@ func statusForClaim(podScheduling *resourcev1alpha1.PodScheduling, podClaimName type dynamicResources struct { enabled bool clientset kubernetes.Interface - claimLister resourcev1alpha1listers.ResourceClaimLister - classLister resourcev1alpha1listers.ResourceClassLister - podSchedulingLister resourcev1alpha1listers.PodSchedulingLister + claimLister resourcev1alpha2listers.ResourceClaimLister + classLister resourcev1alpha2listers.ResourceClassLister + podSchedulingLister resourcev1alpha2listers.PodSchedulingLister } // New initializes a new plugin and returns it. @@ -224,9 +224,9 @@ func New(plArgs runtime.Object, fh framework.Handle, fts feature.Features) (fram return &dynamicResources{ enabled: true, clientset: fh.ClientSet(), - claimLister: fh.SharedInformerFactory().Resource().V1alpha1().ResourceClaims().Lister(), - classLister: fh.SharedInformerFactory().Resource().V1alpha1().ResourceClasses().Lister(), - podSchedulingLister: fh.SharedInformerFactory().Resource().V1alpha1().PodSchedulings().Lister(), + claimLister: fh.SharedInformerFactory().Resource().V1alpha2().ResourceClaims().Lister(), + classLister: fh.SharedInformerFactory().Resource().V1alpha2().ResourceClasses().Lister(), + podSchedulingLister: fh.SharedInformerFactory().Resource().V1alpha2().PodSchedulings().Lister(), }, nil } @@ -266,8 +266,8 @@ func (pl *dynamicResources) EventsToRegister() []framework.ClusterEvent { } // podResourceClaims returns the ResourceClaims for all pod.Spec.PodResourceClaims. 
-func (pl *dynamicResources) podResourceClaims(pod *v1.Pod) ([]*resourcev1alpha1.ResourceClaim, error) { - claims := make([]*resourcev1alpha1.ResourceClaim, 0, len(pod.Spec.ResourceClaims)) +func (pl *dynamicResources) podResourceClaims(pod *v1.Pod) ([]*resourcev1alpha2.ResourceClaim, error) { + claims := make([]*resourcev1alpha2.ResourceClaim, 0, len(pod.Spec.ResourceClaims)) for _, resource := range pod.Spec.ResourceClaims { claimName := resourceclaim.Name(pod, &resource) isEphemeral := resource.Source.ResourceClaimTemplateName != nil @@ -329,7 +329,7 @@ func (pl *dynamicResources) PreFilter(ctx context.Context, state *framework.Cycl s.availableOnNodes = make([]*nodeaffinity.NodeSelector, len(claims)) for index, claim := range claims { - if claim.Spec.AllocationMode == resourcev1alpha1.AllocationModeImmediate && + if claim.Spec.AllocationMode == resourcev1alpha2.AllocationModeImmediate && claim.Status.Allocation == nil { // This will get resolved by the resource driver. return nil, statusUnschedulable(logger, "unallocated immediate resourceclaim", "pod", klog.KObj(pod), "resourceclaim", klog.KObj(claim)) @@ -414,7 +414,7 @@ func (pl *dynamicResources) Filter(ctx context.Context, cs *framework.CycleState case claim.Status.DeallocationRequested: // We shouldn't get here. PreFilter already checked this. return statusUnschedulable(logger, "resourceclaim must be reallocated", "pod", klog.KObj(pod), "node", klog.KObj(node), "resourceclaim", klog.KObj(claim)) - case claim.Spec.AllocationMode == resourcev1alpha1.AllocationModeWaitForFirstConsumer: + case claim.Spec.AllocationMode == resourcev1alpha2.AllocationModeWaitForFirstConsumer: // The ResourceClass might have a node filter. This is // useful for trimming the initial set of potential // nodes before we ask the driver(s) for information @@ -468,7 +468,7 @@ func (pl *dynamicResources) Filter(ctx context.Context, cs *framework.CycleState // delayed allocation. Claims with immediate allocation // would just get allocated again for a random node, // which is unlikely to help the pod. - if claim.Spec.AllocationMode == resourcev1alpha1.AllocationModeWaitForFirstConsumer { + if claim.Spec.AllocationMode == resourcev1alpha2.AllocationModeWaitForFirstConsumer { state.unavailableClaims.Insert(unavailableClaims...) 
} } @@ -548,8 +548,8 @@ func (pl *dynamicResources) PreScore(ctx context.Context, cs *framework.CycleSta logger.V(5).Info("remembering potential nodes", "pod", klog.KObj(pod), "potentialnodes", klog.KObjSlice(nodes)) podScheduling = podScheduling.DeepCopy() numNodes := len(nodes) - if numNodes > resourcev1alpha1.PodSchedulingNodeListMaxSize { - numNodes = resourcev1alpha1.PodSchedulingNodeListMaxSize + if numNodes > resourcev1alpha2.PodSchedulingNodeListMaxSize { + numNodes = resourcev1alpha2.PodSchedulingNodeListMaxSize } podScheduling.Spec.PotentialNodes = make([]string, 0, numNodes) if numNodes == len(nodes) { @@ -567,7 +567,7 @@ func (pl *dynamicResources) PreScore(ctx context.Context, cs *framework.CycleSta nodeNames[node.Name] = struct{}{} } for nodeName := range nodeNames { - if len(podScheduling.Spec.PotentialNodes) >= resourcev1alpha1.PodSchedulingNodeListMaxSize { + if len(podScheduling.Spec.PotentialNodes) >= resourcev1alpha2.PodSchedulingNodeListMaxSize { break } podScheduling.Spec.PotentialNodes = append(podScheduling.Spec.PotentialNodes, nodeName) @@ -627,13 +627,13 @@ func (pl *dynamicResources) Reserve(ctx context.Context, cs *framework.CycleStat } claim := claim.DeepCopy() claim.Status.ReservedFor = append(claim.Status.ReservedFor, - resourcev1alpha1.ResourceClaimConsumerReference{ + resourcev1alpha2.ResourceClaimConsumerReference{ Resource: "pods", Name: pod.Name, UID: pod.UID, }) logger.V(5).Info("reserve", "pod", klog.KObj(pod), "node", klog.ObjectRef{Name: nodeName}, "resourceclaim", klog.KObj(claim)) - _, err := pl.clientset.ResourceV1alpha1().ResourceClaims(claim.Namespace).UpdateStatus(ctx, claim, metav1.UpdateOptions{}) + _, err := pl.clientset.ResourceV1alpha2().ResourceClaims(claim.Namespace).UpdateStatus(ctx, claim, metav1.UpdateOptions{}) // TODO: metric for update errors. if err != nil { return statusError(logger, err) @@ -727,7 +727,7 @@ func (pl *dynamicResources) Unreserve(ctx context.Context, cs *framework.CycleSt resourceclaim.IsReservedForPod(pod, claim) { // Remove pod from ReservedFor. claim := claim.DeepCopy() - reservedFor := make([]resourcev1alpha1.ResourceClaimConsumerReference, 0, len(claim.Status.ReservedFor)-1) + reservedFor := make([]resourcev1alpha2.ResourceClaimConsumerReference, 0, len(claim.Status.ReservedFor)-1) for _, reserved := range claim.Status.ReservedFor { // TODO: can UID be assumed to be unique all resources or do we also need to compare Group/Version/Resource? if reserved.UID != pod.UID { @@ -767,7 +767,7 @@ func (pl *dynamicResources) PostBind(ctx context.Context, cs *framework.CycleSta // have it in our informer cache yet. Let's try to delete, just to be // on the safe side. 
logger := klog.FromContext(ctx) - err = pl.clientset.ResourceV1alpha1().PodSchedulings(pod.Namespace).Delete(ctx, pod.Name, metav1.DeleteOptions{}) + err = pl.clientset.ResourceV1alpha2().PodSchedulings(pod.Namespace).Delete(ctx, pod.Name, metav1.DeleteOptions{}) switch { case apierrors.IsNotFound(err): logger.V(5).Info("no PodScheduling object to delete") diff --git a/pkg/scheduler/framework/plugins/dynamicresources/dynamicresources_test.go b/pkg/scheduler/framework/plugins/dynamicresources/dynamicresources_test.go index 2d135e8bf82..8af72dadd30 100644 --- a/pkg/scheduler/framework/plugins/dynamicresources/dynamicresources_test.go +++ b/pkg/scheduler/framework/plugins/dynamicresources/dynamicresources_test.go @@ -29,7 +29,7 @@ import ( "github.com/stretchr/testify/require" v1 "k8s.io/api/core/v1" - resourcev1alpha1 "k8s.io/api/resource/v1alpha1" + resourcev1alpha2 "k8s.io/api/resource/v1alpha2" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" apiruntime "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" @@ -57,7 +57,7 @@ var ( className = "my-resource-class" namespace = "default" - resourceClass = &resourcev1alpha1.ResourceClass{ + resourceClass = &resourcev1alpha2.ResourceClass{ ObjectMeta: metav1.ObjectMeta{ Name: className, }, @@ -90,34 +90,34 @@ var ( ResourceClassName(className). Obj() pendingImmediateClaim = st.FromResourceClaim(claim). - AllocationMode(resourcev1alpha1.AllocationModeImmediate). + AllocationMode(resourcev1alpha2.AllocationModeImmediate). Obj() pendingDelayedClaim = st.FromResourceClaim(claim). - AllocationMode(resourcev1alpha1.AllocationModeWaitForFirstConsumer). + AllocationMode(resourcev1alpha2.AllocationModeWaitForFirstConsumer). Obj() pendingDelayedClaim2 = st.FromResourceClaim(pendingDelayedClaim). Name(claimName2). Obj() deallocatingClaim = st.FromResourceClaim(pendingImmediateClaim). - Allocation(&resourcev1alpha1.AllocationResult{}). + Allocation(&resourcev1alpha2.AllocationResult{}). DeallocationRequested(true). Obj() inUseClaim = st.FromResourceClaim(pendingImmediateClaim). - Allocation(&resourcev1alpha1.AllocationResult{}). - ReservedFor(resourcev1alpha1.ResourceClaimConsumerReference{UID: types.UID(podUID)}). + Allocation(&resourcev1alpha2.AllocationResult{}). + ReservedFor(resourcev1alpha2.ResourceClaimConsumerReference{UID: types.UID(podUID)}). Obj() allocatedClaim = st.FromResourceClaim(pendingDelayedClaim). OwnerReference(podName, podUID, podKind). - Allocation(&resourcev1alpha1.AllocationResult{}). + Allocation(&resourcev1alpha2.AllocationResult{}). Obj() allocatedDelayedClaimWithWrongTopology = st.FromResourceClaim(allocatedClaim). - Allocation(&resourcev1alpha1.AllocationResult{AvailableOnNodes: st.MakeNodeSelector().In("no-such-label", []string{"no-such-value"}).Obj()}). + Allocation(&resourcev1alpha2.AllocationResult{AvailableOnNodes: st.MakeNodeSelector().In("no-such-label", []string{"no-such-value"}).Obj()}). Obj() allocatedImmediateClaimWithWrongTopology = st.FromResourceClaim(allocatedDelayedClaimWithWrongTopology). - AllocationMode(resourcev1alpha1.AllocationModeImmediate). + AllocationMode(resourcev1alpha2.AllocationModeImmediate). Obj() allocatedClaimWithGoodTopology = st.FromResourceClaim(allocatedClaim). - Allocation(&resourcev1alpha1.AllocationResult{AvailableOnNodes: st.MakeNodeSelector().In("nodename", []string{"worker"}).Obj()}). + Allocation(&resourcev1alpha2.AllocationResult{AvailableOnNodes: st.MakeNodeSelector().In("nodename", []string{"worker"}).Obj()}). Obj() otherClaim = st.MakeResourceClaim(). 
Name("not-my-claim"). @@ -135,8 +135,8 @@ var ( SelectedNode(workerNode.Name). Obj() schedulingInfo = st.FromPodScheduling(schedulingPotential). - ResourceClaims(resourcev1alpha1.ResourceClaimSchedulingStatus{Name: resourceName}, - resourcev1alpha1.ResourceClaimSchedulingStatus{Name: resourceName2}). + ResourceClaims(resourcev1alpha2.ResourceClaimSchedulingStatus{Name: resourceName}, + resourcev1alpha2.ResourceClaimSchedulingStatus{Name: resourceName2}). Obj() ) @@ -160,8 +160,8 @@ type result struct { // functions will get called for all objects of that type. If they needs to // make changes only to a particular instance, then it must check the name. type change struct { - scheduling func(*resourcev1alpha1.PodScheduling) *resourcev1alpha1.PodScheduling - claim func(*resourcev1alpha1.ResourceClaim) *resourcev1alpha1.ResourceClaim + scheduling func(*resourcev1alpha2.PodScheduling) *resourcev1alpha2.PodScheduling + claim func(*resourcev1alpha2.ResourceClaim) *resourcev1alpha2.ResourceClaim } type perNodeResult map[string]result @@ -201,9 +201,9 @@ func TestPlugin(t *testing.T) { testcases := map[string]struct { nodes []*v1.Node // default if unset is workerNode pod *v1.Pod - claims []*resourcev1alpha1.ResourceClaim - classes []*resourcev1alpha1.ResourceClass - schedulings []*resourcev1alpha1.PodScheduling + claims []*resourcev1alpha2.ResourceClaim + classes []*resourcev1alpha2.ResourceClass + schedulings []*resourcev1alpha2.PodScheduling prepare prepare want want @@ -213,11 +213,11 @@ func TestPlugin(t *testing.T) { }, "claim-reference": { pod: podWithClaimName, - claims: []*resourcev1alpha1.ResourceClaim{allocatedClaim, otherClaim}, + claims: []*resourcev1alpha2.ResourceClaim{allocatedClaim, otherClaim}, }, "claim-template": { pod: podWithClaimTemplate, - claims: []*resourcev1alpha1.ResourceClaim{allocatedClaim, otherClaim}, + claims: []*resourcev1alpha2.ResourceClaim{allocatedClaim, otherClaim}, }, "missing-claim": { pod: podWithClaimTemplate, @@ -232,7 +232,7 @@ func TestPlugin(t *testing.T) { }, "waiting-for-immediate-allocation": { pod: podWithClaimName, - claims: []*resourcev1alpha1.ResourceClaim{pendingImmediateClaim}, + claims: []*resourcev1alpha2.ResourceClaim{pendingImmediateClaim}, want: want{ prefilter: result{ status: framework.NewStatus(framework.UnschedulableAndUnresolvable, `unallocated immediate resourceclaim`), @@ -244,7 +244,7 @@ func TestPlugin(t *testing.T) { }, "waiting-for-deallocation": { pod: podWithClaimName, - claims: []*resourcev1alpha1.ResourceClaim{deallocatingClaim}, + claims: []*resourcev1alpha2.ResourceClaim{deallocatingClaim}, want: want{ prefilter: result{ status: framework.NewStatus(framework.UnschedulableAndUnresolvable, `resourceclaim must be reallocated`), @@ -256,7 +256,7 @@ func TestPlugin(t *testing.T) { }, "delayed-allocation-missing-class": { pod: podWithClaimName, - claims: []*resourcev1alpha1.ResourceClaim{pendingDelayedClaim}, + claims: []*resourcev1alpha2.ResourceClaim{pendingDelayedClaim}, want: want{ filter: perNodeResult{ workerNode.Name: { @@ -272,8 +272,8 @@ func TestPlugin(t *testing.T) { // Create the PodScheduling object, ask for information // and select a node. 
pod: podWithClaimName, - claims: []*resourcev1alpha1.ResourceClaim{pendingDelayedClaim}, - classes: []*resourcev1alpha1.ResourceClass{resourceClass}, + claims: []*resourcev1alpha2.ResourceClaim{pendingDelayedClaim}, + classes: []*resourcev1alpha2.ResourceClass{resourceClass}, want: want{ reserve: result{ status: framework.NewStatus(framework.UnschedulableAndUnresolvable, `waiting for resource driver to allocate resource`), @@ -286,8 +286,8 @@ func TestPlugin(t *testing.T) { // information, but do not select a node because // there are multiple claims. pod: podWithTwoClaimNames, - claims: []*resourcev1alpha1.ResourceClaim{pendingDelayedClaim, pendingDelayedClaim2}, - classes: []*resourcev1alpha1.ResourceClass{resourceClass}, + claims: []*resourcev1alpha2.ResourceClaim{pendingDelayedClaim, pendingDelayedClaim2}, + classes: []*resourcev1alpha2.ResourceClass{resourceClass}, want: want{ reserve: result{ status: framework.NewStatus(framework.UnschedulableAndUnresolvable, `waiting for resource driver to provide information`), @@ -299,14 +299,14 @@ func TestPlugin(t *testing.T) { // Use the populated PodScheduling object to select a // node. pod: podWithClaimName, - claims: []*resourcev1alpha1.ResourceClaim{pendingDelayedClaim}, - schedulings: []*resourcev1alpha1.PodScheduling{schedulingInfo}, - classes: []*resourcev1alpha1.ResourceClass{resourceClass}, + claims: []*resourcev1alpha2.ResourceClaim{pendingDelayedClaim}, + schedulings: []*resourcev1alpha2.PodScheduling{schedulingInfo}, + classes: []*resourcev1alpha2.ResourceClass{resourceClass}, want: want{ reserve: result{ status: framework.NewStatus(framework.UnschedulableAndUnresolvable, `waiting for resource driver to allocate resource`), changes: change{ - scheduling: func(in *resourcev1alpha1.PodScheduling) *resourcev1alpha1.PodScheduling { + scheduling: func(in *resourcev1alpha2.PodScheduling) *resourcev1alpha2.PodScheduling { return st.FromPodScheduling(in). SelectedNode(workerNode.Name). Obj() @@ -319,12 +319,12 @@ func TestPlugin(t *testing.T) { // Use the populated PodScheduling object to select a // node. pod: podWithClaimName, - claims: []*resourcev1alpha1.ResourceClaim{pendingDelayedClaim}, - schedulings: []*resourcev1alpha1.PodScheduling{schedulingInfo}, - classes: []*resourcev1alpha1.ResourceClass{resourceClass}, + claims: []*resourcev1alpha2.ResourceClaim{pendingDelayedClaim}, + schedulings: []*resourcev1alpha2.PodScheduling{schedulingInfo}, + classes: []*resourcev1alpha2.ResourceClass{resourceClass}, prepare: prepare{ reserve: change{ - scheduling: func(in *resourcev1alpha1.PodScheduling) *resourcev1alpha1.PodScheduling { + scheduling: func(in *resourcev1alpha2.PodScheduling) *resourcev1alpha2.PodScheduling { // This does not actually conflict with setting the // selected node, but because the plugin is not using // patching yet, Update nonetheless fails. @@ -343,15 +343,15 @@ func TestPlugin(t *testing.T) { "delayed-allocation-scheduling-completed": { // Remove PodScheduling object once the pod is scheduled. 
pod: podWithClaimName, - claims: []*resourcev1alpha1.ResourceClaim{allocatedClaim}, - schedulings: []*resourcev1alpha1.PodScheduling{schedulingInfo}, - classes: []*resourcev1alpha1.ResourceClass{resourceClass}, + claims: []*resourcev1alpha2.ResourceClaim{allocatedClaim}, + schedulings: []*resourcev1alpha2.PodScheduling{schedulingInfo}, + classes: []*resourcev1alpha2.ResourceClass{resourceClass}, want: want{ reserve: result{ changes: change{ - claim: func(in *resourcev1alpha1.ResourceClaim) *resourcev1alpha1.ResourceClaim { + claim: func(in *resourcev1alpha2.ResourceClaim) *resourcev1alpha2.ResourceClaim { return st.FromResourceClaim(in). - ReservedFor(resourcev1alpha1.ResourceClaimConsumerReference{Resource: "pods", Name: podName, UID: types.UID(podUID)}). + ReservedFor(resourcev1alpha2.ResourceClaimConsumerReference{Resource: "pods", Name: podName, UID: types.UID(podUID)}). Obj() }, }, @@ -364,9 +364,9 @@ func TestPlugin(t *testing.T) { "in-use-by-other": { nodes: []*v1.Node{}, pod: otherPodWithClaimName, - claims: []*resourcev1alpha1.ResourceClaim{inUseClaim}, - classes: []*resourcev1alpha1.ResourceClass{}, - schedulings: []*resourcev1alpha1.PodScheduling{}, + claims: []*resourcev1alpha2.ResourceClaim{inUseClaim}, + classes: []*resourcev1alpha2.ResourceClass{}, + schedulings: []*resourcev1alpha2.PodScheduling{}, prepare: prepare{}, want: want{ prefilter: result{ @@ -381,7 +381,7 @@ func TestPlugin(t *testing.T) { // PostFilter tries to get the pod scheduleable by // deallocating the claim. pod: podWithClaimName, - claims: []*resourcev1alpha1.ResourceClaim{allocatedDelayedClaimWithWrongTopology}, + claims: []*resourcev1alpha2.ResourceClaim{allocatedDelayedClaimWithWrongTopology}, want: want{ filter: perNodeResult{ workerNode.Name: { @@ -391,7 +391,7 @@ func TestPlugin(t *testing.T) { postfilter: result{ // Claims with delayed allocation get deallocated. changes: change{ - claim: func(in *resourcev1alpha1.ResourceClaim) *resourcev1alpha1.ResourceClaim { + claim: func(in *resourcev1alpha2.ResourceClaim) *resourcev1alpha2.ResourceClaim { return st.FromResourceClaim(in). DeallocationRequested(true). Obj() @@ -404,7 +404,7 @@ func TestPlugin(t *testing.T) { // PostFilter tries to get the pod scheduleable by // deallocating the claim. pod: podWithClaimName, - claims: []*resourcev1alpha1.ResourceClaim{allocatedImmediateClaimWithWrongTopology}, + claims: []*resourcev1alpha2.ResourceClaim{allocatedImmediateClaimWithWrongTopology}, want: want{ filter: perNodeResult{ workerNode.Name: { @@ -420,13 +420,13 @@ func TestPlugin(t *testing.T) { }, "good-topology": { pod: podWithClaimName, - claims: []*resourcev1alpha1.ResourceClaim{allocatedClaimWithGoodTopology}, + claims: []*resourcev1alpha2.ResourceClaim{allocatedClaimWithGoodTopology}, want: want{ reserve: result{ changes: change{ - claim: func(in *resourcev1alpha1.ResourceClaim) *resourcev1alpha1.ResourceClaim { + claim: func(in *resourcev1alpha2.ResourceClaim) *resourcev1alpha2.ResourceClaim { return st.FromResourceClaim(in). - ReservedFor(resourcev1alpha1.ResourceClaimConsumerReference{Resource: "pods", Name: podName, UID: types.UID(podUID)}). + ReservedFor(resourcev1alpha2.ResourceClaimConsumerReference{Resource: "pods", Name: podName, UID: types.UID(podUID)}). 
Obj() }, }, @@ -435,7 +435,7 @@ func TestPlugin(t *testing.T) { }, "reserved-okay": { pod: podWithClaimName, - claims: []*resourcev1alpha1.ResourceClaim{inUseClaim}, + claims: []*resourcev1alpha2.ResourceClaim{inUseClaim}, }, } @@ -586,12 +586,12 @@ func stripObjects(objects []metav1.Object) { func (tc *testContext) listAll(t *testing.T) (objects []metav1.Object) { t.Helper() - claims, err := tc.client.ResourceV1alpha1().ResourceClaims("").List(tc.ctx, metav1.ListOptions{}) + claims, err := tc.client.ResourceV1alpha2().ResourceClaims("").List(tc.ctx, metav1.ListOptions{}) require.NoError(t, err, "list claims") for _, claim := range claims.Items { objects = append(objects, &claim) } - schedulings, err := tc.client.ResourceV1alpha1().PodSchedulings("").List(tc.ctx, metav1.ListOptions{}) + schedulings, err := tc.client.ResourceV1alpha2().PodSchedulings("").List(tc.ctx, metav1.ListOptions{}) require.NoError(t, err, "list pod scheduling") for _, scheduling := range schedulings.Items { objects = append(objects, &scheduling) @@ -609,14 +609,14 @@ func (tc *testContext) updateAPIServer(t *testing.T, objects []metav1.Object, up if diff := cmp.Diff(objects[i], obj); diff != "" { t.Logf("Updating %T %q, diff (-old, +new):\n%s", obj, obj.GetName(), diff) switch obj := obj.(type) { - case *resourcev1alpha1.ResourceClaim: - obj, err := tc.client.ResourceV1alpha1().ResourceClaims(obj.Namespace).Update(tc.ctx, obj, metav1.UpdateOptions{}) + case *resourcev1alpha2.ResourceClaim: + obj, err := tc.client.ResourceV1alpha2().ResourceClaims(obj.Namespace).Update(tc.ctx, obj, metav1.UpdateOptions{}) if err != nil { t.Fatalf("unexpected error during prepare update: %v", err) } modified[i] = obj - case *resourcev1alpha1.PodScheduling: - obj, err := tc.client.ResourceV1alpha1().PodSchedulings(obj.Namespace).Update(tc.ctx, obj, metav1.UpdateOptions{}) + case *resourcev1alpha2.PodScheduling: + obj, err := tc.client.ResourceV1alpha2().PodSchedulings(obj.Namespace).Update(tc.ctx, obj, metav1.UpdateOptions{}) if err != nil { t.Fatalf("unexpected error during prepare update: %v", err) } @@ -646,11 +646,11 @@ func update(t *testing.T, objects []metav1.Object, updates change) []metav1.Obje for _, obj := range objects { switch in := obj.(type) { - case *resourcev1alpha1.ResourceClaim: + case *resourcev1alpha2.ResourceClaim: if updates.claim != nil { obj = updates.claim(in) } - case *resourcev1alpha1.PodScheduling: + case *resourcev1alpha2.PodScheduling: if updates.scheduling != nil { obj = updates.scheduling(in) } @@ -661,7 +661,7 @@ func update(t *testing.T, objects []metav1.Object, updates change) []metav1.Obje return updated } -func setup(t *testing.T, nodes []*v1.Node, claims []*resourcev1alpha1.ResourceClaim, classes []*resourcev1alpha1.ResourceClass, schedulings []*resourcev1alpha1.PodScheduling) (result *testContext) { +func setup(t *testing.T, nodes []*v1.Node, claims []*resourcev1alpha2.ResourceClaim, classes []*resourcev1alpha2.ResourceClass, schedulings []*resourcev1alpha2.PodScheduling) (result *testContext) { t.Helper() tc := &testContext{} @@ -694,15 +694,15 @@ func setup(t *testing.T, nodes []*v1.Node, claims []*resourcev1alpha1.ResourceCl // The tests use the API to create the objects because then reactors // get triggered. 
for _, claim := range claims { - _, err := tc.client.ResourceV1alpha1().ResourceClaims(claim.Namespace).Create(tc.ctx, claim, metav1.CreateOptions{}) + _, err := tc.client.ResourceV1alpha2().ResourceClaims(claim.Namespace).Create(tc.ctx, claim, metav1.CreateOptions{}) require.NoError(t, err, "create resource claim") } for _, class := range classes { - _, err := tc.client.ResourceV1alpha1().ResourceClasses().Create(tc.ctx, class, metav1.CreateOptions{}) + _, err := tc.client.ResourceV1alpha2().ResourceClasses().Create(tc.ctx, class, metav1.CreateOptions{}) require.NoError(t, err, "create resource class") } for _, scheduling := range schedulings { - _, err := tc.client.ResourceV1alpha1().PodSchedulings(scheduling.Namespace).Create(tc.ctx, scheduling, metav1.CreateOptions{}) + _, err := tc.client.ResourceV1alpha2().PodSchedulings(scheduling.Namespace).Create(tc.ctx, scheduling, metav1.CreateOptions{}) require.NoError(t, err, "create pod scheduling") } diff --git a/pkg/scheduler/testing/wrappers.go b/pkg/scheduler/testing/wrappers.go index 70579dd800e..16fd5040f3b 100644 --- a/pkg/scheduler/testing/wrappers.go +++ b/pkg/scheduler/testing/wrappers.go @@ -20,7 +20,7 @@ import ( "fmt" v1 "k8s.io/api/core/v1" - resourcev1alpha1 "k8s.io/api/resource/v1alpha1" + resourcev1alpha2 "k8s.io/api/resource/v1alpha2" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" @@ -846,20 +846,20 @@ func (p *PersistentVolumeWrapper) HostPathVolumeSource(src *v1.HostPathVolumeSou } // ResourceClaimWrapper wraps a ResourceClaim inside. -type ResourceClaimWrapper struct{ resourcev1alpha1.ResourceClaim } +type ResourceClaimWrapper struct{ resourcev1alpha2.ResourceClaim } // MakeResourceClaim creates a ResourceClaim wrapper. func MakeResourceClaim() *ResourceClaimWrapper { - return &ResourceClaimWrapper{resourcev1alpha1.ResourceClaim{}} + return &ResourceClaimWrapper{resourcev1alpha2.ResourceClaim{}} } // FromResourceClaim creates a ResourceClaim wrapper from some existing object. -func FromResourceClaim(other *resourcev1alpha1.ResourceClaim) *ResourceClaimWrapper { +func FromResourceClaim(other *resourcev1alpha2.ResourceClaim) *ResourceClaimWrapper { return &ResourceClaimWrapper{*other.DeepCopy()} } // Obj returns the inner ResourceClaim. -func (wrapper *ResourceClaimWrapper) Obj() *resourcev1alpha1.ResourceClaim { +func (wrapper *ResourceClaimWrapper) Obj() *resourcev1alpha2.ResourceClaim { return &wrapper.ResourceClaim } @@ -896,7 +896,7 @@ func (wrapper *ResourceClaimWrapper) OwnerReference(name, uid string, gvk schema } // AllocationMode sets the allocation mode of the inner object. -func (wrapper *ResourceClaimWrapper) AllocationMode(a resourcev1alpha1.AllocationMode) *ResourceClaimWrapper { +func (wrapper *ResourceClaimWrapper) AllocationMode(a resourcev1alpha2.AllocationMode) *ResourceClaimWrapper { wrapper.ResourceClaim.Spec.AllocationMode = a return wrapper } @@ -908,7 +908,7 @@ func (wrapper *ResourceClaimWrapper) ResourceClassName(name string) *ResourceCla } // Allocation sets the allocation of the inner object. 
-func (wrapper *ResourceClaimWrapper) Allocation(allocation *resourcev1alpha1.AllocationResult) *ResourceClaimWrapper { +func (wrapper *ResourceClaimWrapper) Allocation(allocation *resourcev1alpha2.AllocationResult) *ResourceClaimWrapper { wrapper.ResourceClaim.Status.Allocation = allocation return wrapper } @@ -920,26 +920,26 @@ func (wrapper *ResourceClaimWrapper) DeallocationRequested(deallocationRequested } // ReservedFor sets that field of the inner object. -func (wrapper *ResourceClaimWrapper) ReservedFor(consumers ...resourcev1alpha1.ResourceClaimConsumerReference) *ResourceClaimWrapper { +func (wrapper *ResourceClaimWrapper) ReservedFor(consumers ...resourcev1alpha2.ResourceClaimConsumerReference) *ResourceClaimWrapper { wrapper.ResourceClaim.Status.ReservedFor = consumers return wrapper } // PodSchedulingWrapper wraps a PodScheduling inside. -type PodSchedulingWrapper struct{ resourcev1alpha1.PodScheduling } +type PodSchedulingWrapper struct{ resourcev1alpha2.PodScheduling } // MakePodScheduling creates a PodScheduling wrapper. func MakePodScheduling() *PodSchedulingWrapper { - return &PodSchedulingWrapper{resourcev1alpha1.PodScheduling{}} + return &PodSchedulingWrapper{resourcev1alpha2.PodScheduling{}} } // FromPodScheduling creates a PodScheduling wrapper from some existing object. -func FromPodScheduling(other *resourcev1alpha1.PodScheduling) *PodSchedulingWrapper { +func FromPodScheduling(other *resourcev1alpha2.PodScheduling) *PodSchedulingWrapper { return &PodSchedulingWrapper{*other.DeepCopy()} } // Obj returns the inner object. -func (wrapper *PodSchedulingWrapper) Obj() *resourcev1alpha1.PodScheduling { +func (wrapper *PodSchedulingWrapper) Obj() *resourcev1alpha2.PodScheduling { return &wrapper.PodScheduling } @@ -997,7 +997,7 @@ func (wrapper *PodSchedulingWrapper) PotentialNodes(nodes ...string) *PodSchedul } // ResourceClaims sets that field of the inner object. -func (wrapper *PodSchedulingWrapper) ResourceClaims(statuses ...resourcev1alpha1.ResourceClaimSchedulingStatus) *PodSchedulingWrapper { +func (wrapper *PodSchedulingWrapper) ResourceClaims(statuses ...resourcev1alpha2.ResourceClaimSchedulingStatus) *PodSchedulingWrapper { wrapper.Status.ResourceClaims = statuses return wrapper } diff --git a/staging/src/k8s.io/api/resource/v1alpha1/doc.go b/staging/src/k8s.io/api/resource/v1alpha2/doc.go similarity index 84% rename from staging/src/k8s.io/api/resource/v1alpha1/doc.go rename to staging/src/k8s.io/api/resource/v1alpha2/doc.go index 8fa577fabc8..d9c20e089d9 100644 --- a/staging/src/k8s.io/api/resource/v1alpha1/doc.go +++ b/staging/src/k8s.io/api/resource/v1alpha2/doc.go @@ -20,5 +20,5 @@ limitations under the License. // +groupName=resource.k8s.io -// Package v1alpha1 is the v1alpha1 version of the resource API. -package v1alpha1 // import "k8s.io/api/resource/v1alpha1" +// Package v1alpha2 is the v1alpha2 version of the resource API. +package v1alpha2 // import "k8s.io/api/resource/v1alpha2" diff --git a/staging/src/k8s.io/api/resource/v1alpha1/generated.pb.go b/staging/src/k8s.io/api/resource/v1alpha2/generated.pb.go similarity index 92% rename from staging/src/k8s.io/api/resource/v1alpha1/generated.pb.go rename to staging/src/k8s.io/api/resource/v1alpha2/generated.pb.go index 632ad04259e..630eb53a5b8 100644 --- a/staging/src/k8s.io/api/resource/v1alpha1/generated.pb.go +++ b/staging/src/k8s.io/api/resource/v1alpha2/generated.pb.go @@ -15,9 +15,9 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. 
-// source: k8s.io/kubernetes/vendor/k8s.io/api/resource/v1alpha1/generated.proto +// source: k8s.io/kubernetes/vendor/k8s.io/api/resource/v1alpha2/generated.proto -package v1alpha1 +package v1alpha2 import ( fmt "fmt" @@ -49,7 +49,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *AllocationResult) Reset() { *m = AllocationResult{} } func (*AllocationResult) ProtoMessage() {} func (*AllocationResult) Descriptor() ([]byte, []int) { - return fileDescriptor_a66b2ee03d862be2, []int{0} + return fileDescriptor_3add37bbd52889e0, []int{0} } func (m *AllocationResult) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -77,7 +77,7 @@ var xxx_messageInfo_AllocationResult proto.InternalMessageInfo func (m *PodScheduling) Reset() { *m = PodScheduling{} } func (*PodScheduling) ProtoMessage() {} func (*PodScheduling) Descriptor() ([]byte, []int) { - return fileDescriptor_a66b2ee03d862be2, []int{1} + return fileDescriptor_3add37bbd52889e0, []int{1} } func (m *PodScheduling) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -105,7 +105,7 @@ var xxx_messageInfo_PodScheduling proto.InternalMessageInfo func (m *PodSchedulingList) Reset() { *m = PodSchedulingList{} } func (*PodSchedulingList) ProtoMessage() {} func (*PodSchedulingList) Descriptor() ([]byte, []int) { - return fileDescriptor_a66b2ee03d862be2, []int{2} + return fileDescriptor_3add37bbd52889e0, []int{2} } func (m *PodSchedulingList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -133,7 +133,7 @@ var xxx_messageInfo_PodSchedulingList proto.InternalMessageInfo func (m *PodSchedulingSpec) Reset() { *m = PodSchedulingSpec{} } func (*PodSchedulingSpec) ProtoMessage() {} func (*PodSchedulingSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_a66b2ee03d862be2, []int{3} + return fileDescriptor_3add37bbd52889e0, []int{3} } func (m *PodSchedulingSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -161,7 +161,7 @@ var xxx_messageInfo_PodSchedulingSpec proto.InternalMessageInfo func (m *PodSchedulingStatus) Reset() { *m = PodSchedulingStatus{} } func (*PodSchedulingStatus) ProtoMessage() {} func (*PodSchedulingStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_a66b2ee03d862be2, []int{4} + return fileDescriptor_3add37bbd52889e0, []int{4} } func (m *PodSchedulingStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -189,7 +189,7 @@ var xxx_messageInfo_PodSchedulingStatus proto.InternalMessageInfo func (m *ResourceClaim) Reset() { *m = ResourceClaim{} } func (*ResourceClaim) ProtoMessage() {} func (*ResourceClaim) Descriptor() ([]byte, []int) { - return fileDescriptor_a66b2ee03d862be2, []int{5} + return fileDescriptor_3add37bbd52889e0, []int{5} } func (m *ResourceClaim) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -217,7 +217,7 @@ var xxx_messageInfo_ResourceClaim proto.InternalMessageInfo func (m *ResourceClaimConsumerReference) Reset() { *m = ResourceClaimConsumerReference{} } func (*ResourceClaimConsumerReference) ProtoMessage() {} func (*ResourceClaimConsumerReference) Descriptor() ([]byte, []int) { - return fileDescriptor_a66b2ee03d862be2, []int{6} + return fileDescriptor_3add37bbd52889e0, []int{6} } func (m *ResourceClaimConsumerReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -245,7 +245,7 @@ var xxx_messageInfo_ResourceClaimConsumerReference proto.InternalMessageInfo func (m *ResourceClaimList) Reset() { *m = ResourceClaimList{} } func (*ResourceClaimList) ProtoMessage() {} func (*ResourceClaimList) Descriptor() 
([]byte, []int) { - return fileDescriptor_a66b2ee03d862be2, []int{7} + return fileDescriptor_3add37bbd52889e0, []int{7} } func (m *ResourceClaimList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -273,7 +273,7 @@ var xxx_messageInfo_ResourceClaimList proto.InternalMessageInfo func (m *ResourceClaimParametersReference) Reset() { *m = ResourceClaimParametersReference{} } func (*ResourceClaimParametersReference) ProtoMessage() {} func (*ResourceClaimParametersReference) Descriptor() ([]byte, []int) { - return fileDescriptor_a66b2ee03d862be2, []int{8} + return fileDescriptor_3add37bbd52889e0, []int{8} } func (m *ResourceClaimParametersReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -301,7 +301,7 @@ var xxx_messageInfo_ResourceClaimParametersReference proto.InternalMessageInfo func (m *ResourceClaimSchedulingStatus) Reset() { *m = ResourceClaimSchedulingStatus{} } func (*ResourceClaimSchedulingStatus) ProtoMessage() {} func (*ResourceClaimSchedulingStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_a66b2ee03d862be2, []int{9} + return fileDescriptor_3add37bbd52889e0, []int{9} } func (m *ResourceClaimSchedulingStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -329,7 +329,7 @@ var xxx_messageInfo_ResourceClaimSchedulingStatus proto.InternalMessageInfo func (m *ResourceClaimSpec) Reset() { *m = ResourceClaimSpec{} } func (*ResourceClaimSpec) ProtoMessage() {} func (*ResourceClaimSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_a66b2ee03d862be2, []int{10} + return fileDescriptor_3add37bbd52889e0, []int{10} } func (m *ResourceClaimSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -357,7 +357,7 @@ var xxx_messageInfo_ResourceClaimSpec proto.InternalMessageInfo func (m *ResourceClaimStatus) Reset() { *m = ResourceClaimStatus{} } func (*ResourceClaimStatus) ProtoMessage() {} func (*ResourceClaimStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_a66b2ee03d862be2, []int{11} + return fileDescriptor_3add37bbd52889e0, []int{11} } func (m *ResourceClaimStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -385,7 +385,7 @@ var xxx_messageInfo_ResourceClaimStatus proto.InternalMessageInfo func (m *ResourceClaimTemplate) Reset() { *m = ResourceClaimTemplate{} } func (*ResourceClaimTemplate) ProtoMessage() {} func (*ResourceClaimTemplate) Descriptor() ([]byte, []int) { - return fileDescriptor_a66b2ee03d862be2, []int{12} + return fileDescriptor_3add37bbd52889e0, []int{12} } func (m *ResourceClaimTemplate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -413,7 +413,7 @@ var xxx_messageInfo_ResourceClaimTemplate proto.InternalMessageInfo func (m *ResourceClaimTemplateList) Reset() { *m = ResourceClaimTemplateList{} } func (*ResourceClaimTemplateList) ProtoMessage() {} func (*ResourceClaimTemplateList) Descriptor() ([]byte, []int) { - return fileDescriptor_a66b2ee03d862be2, []int{13} + return fileDescriptor_3add37bbd52889e0, []int{13} } func (m *ResourceClaimTemplateList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -441,7 +441,7 @@ var xxx_messageInfo_ResourceClaimTemplateList proto.InternalMessageInfo func (m *ResourceClaimTemplateSpec) Reset() { *m = ResourceClaimTemplateSpec{} } func (*ResourceClaimTemplateSpec) ProtoMessage() {} func (*ResourceClaimTemplateSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_a66b2ee03d862be2, []int{14} + return fileDescriptor_3add37bbd52889e0, []int{14} } func (m *ResourceClaimTemplateSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ 
-469,7 +469,7 @@ var xxx_messageInfo_ResourceClaimTemplateSpec proto.InternalMessageInfo func (m *ResourceClass) Reset() { *m = ResourceClass{} } func (*ResourceClass) ProtoMessage() {} func (*ResourceClass) Descriptor() ([]byte, []int) { - return fileDescriptor_a66b2ee03d862be2, []int{15} + return fileDescriptor_3add37bbd52889e0, []int{15} } func (m *ResourceClass) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -497,7 +497,7 @@ var xxx_messageInfo_ResourceClass proto.InternalMessageInfo func (m *ResourceClassList) Reset() { *m = ResourceClassList{} } func (*ResourceClassList) ProtoMessage() {} func (*ResourceClassList) Descriptor() ([]byte, []int) { - return fileDescriptor_a66b2ee03d862be2, []int{16} + return fileDescriptor_3add37bbd52889e0, []int{16} } func (m *ResourceClassList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -525,7 +525,7 @@ var xxx_messageInfo_ResourceClassList proto.InternalMessageInfo func (m *ResourceClassParametersReference) Reset() { *m = ResourceClassParametersReference{} } func (*ResourceClassParametersReference) ProtoMessage() {} func (*ResourceClassParametersReference) Descriptor() ([]byte, []int) { - return fileDescriptor_a66b2ee03d862be2, []int{17} + return fileDescriptor_3add37bbd52889e0, []int{17} } func (m *ResourceClassParametersReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -551,106 +551,106 @@ func (m *ResourceClassParametersReference) XXX_DiscardUnknown() { var xxx_messageInfo_ResourceClassParametersReference proto.InternalMessageInfo func init() { - proto.RegisterType((*AllocationResult)(nil), "k8s.io.api.resource.v1alpha1.AllocationResult") - proto.RegisterType((*PodScheduling)(nil), "k8s.io.api.resource.v1alpha1.PodScheduling") - proto.RegisterType((*PodSchedulingList)(nil), "k8s.io.api.resource.v1alpha1.PodSchedulingList") - proto.RegisterType((*PodSchedulingSpec)(nil), "k8s.io.api.resource.v1alpha1.PodSchedulingSpec") - proto.RegisterType((*PodSchedulingStatus)(nil), "k8s.io.api.resource.v1alpha1.PodSchedulingStatus") - proto.RegisterType((*ResourceClaim)(nil), "k8s.io.api.resource.v1alpha1.ResourceClaim") - proto.RegisterType((*ResourceClaimConsumerReference)(nil), "k8s.io.api.resource.v1alpha1.ResourceClaimConsumerReference") - proto.RegisterType((*ResourceClaimList)(nil), "k8s.io.api.resource.v1alpha1.ResourceClaimList") - proto.RegisterType((*ResourceClaimParametersReference)(nil), "k8s.io.api.resource.v1alpha1.ResourceClaimParametersReference") - proto.RegisterType((*ResourceClaimSchedulingStatus)(nil), "k8s.io.api.resource.v1alpha1.ResourceClaimSchedulingStatus") - proto.RegisterType((*ResourceClaimSpec)(nil), "k8s.io.api.resource.v1alpha1.ResourceClaimSpec") - proto.RegisterType((*ResourceClaimStatus)(nil), "k8s.io.api.resource.v1alpha1.ResourceClaimStatus") - proto.RegisterType((*ResourceClaimTemplate)(nil), "k8s.io.api.resource.v1alpha1.ResourceClaimTemplate") - proto.RegisterType((*ResourceClaimTemplateList)(nil), "k8s.io.api.resource.v1alpha1.ResourceClaimTemplateList") - proto.RegisterType((*ResourceClaimTemplateSpec)(nil), "k8s.io.api.resource.v1alpha1.ResourceClaimTemplateSpec") - proto.RegisterType((*ResourceClass)(nil), "k8s.io.api.resource.v1alpha1.ResourceClass") - proto.RegisterType((*ResourceClassList)(nil), "k8s.io.api.resource.v1alpha1.ResourceClassList") - proto.RegisterType((*ResourceClassParametersReference)(nil), "k8s.io.api.resource.v1alpha1.ResourceClassParametersReference") + proto.RegisterType((*AllocationResult)(nil), "k8s.io.api.resource.v1alpha2.AllocationResult") + 
proto.RegisterType((*PodScheduling)(nil), "k8s.io.api.resource.v1alpha2.PodScheduling") + proto.RegisterType((*PodSchedulingList)(nil), "k8s.io.api.resource.v1alpha2.PodSchedulingList") + proto.RegisterType((*PodSchedulingSpec)(nil), "k8s.io.api.resource.v1alpha2.PodSchedulingSpec") + proto.RegisterType((*PodSchedulingStatus)(nil), "k8s.io.api.resource.v1alpha2.PodSchedulingStatus") + proto.RegisterType((*ResourceClaim)(nil), "k8s.io.api.resource.v1alpha2.ResourceClaim") + proto.RegisterType((*ResourceClaimConsumerReference)(nil), "k8s.io.api.resource.v1alpha2.ResourceClaimConsumerReference") + proto.RegisterType((*ResourceClaimList)(nil), "k8s.io.api.resource.v1alpha2.ResourceClaimList") + proto.RegisterType((*ResourceClaimParametersReference)(nil), "k8s.io.api.resource.v1alpha2.ResourceClaimParametersReference") + proto.RegisterType((*ResourceClaimSchedulingStatus)(nil), "k8s.io.api.resource.v1alpha2.ResourceClaimSchedulingStatus") + proto.RegisterType((*ResourceClaimSpec)(nil), "k8s.io.api.resource.v1alpha2.ResourceClaimSpec") + proto.RegisterType((*ResourceClaimStatus)(nil), "k8s.io.api.resource.v1alpha2.ResourceClaimStatus") + proto.RegisterType((*ResourceClaimTemplate)(nil), "k8s.io.api.resource.v1alpha2.ResourceClaimTemplate") + proto.RegisterType((*ResourceClaimTemplateList)(nil), "k8s.io.api.resource.v1alpha2.ResourceClaimTemplateList") + proto.RegisterType((*ResourceClaimTemplateSpec)(nil), "k8s.io.api.resource.v1alpha2.ResourceClaimTemplateSpec") + proto.RegisterType((*ResourceClass)(nil), "k8s.io.api.resource.v1alpha2.ResourceClass") + proto.RegisterType((*ResourceClassList)(nil), "k8s.io.api.resource.v1alpha2.ResourceClassList") + proto.RegisterType((*ResourceClassParametersReference)(nil), "k8s.io.api.resource.v1alpha2.ResourceClassParametersReference") } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/resource/v1alpha1/generated.proto", fileDescriptor_a66b2ee03d862be2) + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/resource/v1alpha2/generated.proto", fileDescriptor_3add37bbd52889e0) } -var fileDescriptor_a66b2ee03d862be2 = []byte{ +var fileDescriptor_3add37bbd52889e0 = []byte{ // 1174 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x58, 0xcd, 0x6f, 0x1b, 0x45, - 0x14, 0xcf, 0xc6, 0x6e, 0x95, 0x8c, 0x1b, 0x37, 0xd9, 0x34, 0xc8, 0x8d, 0x5a, 0xdb, 0xec, 0xc9, - 0x12, 0xb0, 0xdb, 0x04, 0x04, 0x15, 0x1f, 0x95, 0xb2, 0x0d, 0x94, 0x08, 0x9a, 0x9a, 0x31, 0x91, + 0x14, 0xcf, 0xc6, 0x6e, 0x95, 0x8c, 0x1b, 0x37, 0xd9, 0xb4, 0xc8, 0xad, 0x5a, 0xdb, 0xec, 0xc9, + 0x12, 0xb0, 0xdb, 0x18, 0x04, 0x15, 0x1f, 0x95, 0xb2, 0x0d, 0x94, 0x08, 0x9a, 0x9a, 0x31, 0x91, 0x08, 0x42, 0x88, 0xf1, 0xee, 0xab, 0xbd, 0x64, 0xbf, 0xd8, 0xd9, 0x35, 0xaa, 0xb8, 0xf4, 0xca, 0x0d, 0x21, 0xee, 0x1c, 0xf9, 0x43, 0x10, 0x52, 0x8e, 0x91, 0xe0, 0xd0, 0x93, 0x45, 0xcc, 0x81, 0x3f, 0x80, 0x13, 0x3d, 0xa1, 0x19, 0xef, 0xae, 0x77, 0xd6, 0x1f, 0xc4, 0x11, 0x8a, 0xc2, 0x29, - 0x99, 0x79, 0xbf, 0xf7, 0x9b, 0xf7, 0x31, 0xef, 0xcd, 0x5b, 0xa3, 0x77, 0x8f, 0xee, 0x52, 0xd5, - 0xf2, 0xb4, 0xa3, 0xa8, 0x0d, 0x81, 0x0b, 0x21, 0x50, 0xad, 0x07, 0xae, 0xe9, 0x05, 0x5a, 0x2c, - 0x20, 0xbe, 0xa5, 0x05, 0x40, 0xbd, 0x28, 0x30, 0x40, 0xeb, 0x6d, 0x11, 0xdb, 0xef, 0x92, 0x2d, - 0xad, 0x03, 0x2e, 0x04, 0x24, 0x04, 0x53, 0xf5, 0x03, 0x2f, 0xf4, 0xe4, 0x5b, 0x43, 0xb4, 0x4a, - 0x7c, 0x4b, 0x4d, 0xd0, 0x6a, 0x82, 0xde, 0x7c, 0xa5, 0x63, 0x85, 0xdd, 0xa8, 0xad, 0x1a, 0x9e, - 0xa3, 0x75, 0xbc, 0x8e, 0xa7, 0x71, 0xa5, 0x76, 0xf4, 0x98, 0xaf, 0xf8, 
0x82, 0xff, 0x37, 0x24, - 0xdb, 0x54, 0x32, 0x47, 0x1b, 0x5e, 0xc0, 0x8e, 0xcd, 0x1f, 0xb8, 0xf9, 0xda, 0x08, 0xe3, 0x10, - 0xa3, 0x6b, 0xb9, 0x10, 0x3c, 0xd1, 0xfc, 0xa3, 0x0e, 0xdb, 0xa0, 0x9a, 0x03, 0x21, 0x99, 0xa4, - 0xa5, 0x4d, 0xd3, 0x0a, 0x22, 0x37, 0xb4, 0x1c, 0x18, 0x53, 0x78, 0xfd, 0xdf, 0x14, 0xa8, 0xd1, - 0x05, 0x87, 0xe4, 0xf5, 0x94, 0x3f, 0x25, 0xb4, 0xba, 0x63, 0xdb, 0x9e, 0x41, 0x42, 0xcb, 0x73, - 0x31, 0xd0, 0xc8, 0x0e, 0xe5, 0x7b, 0xa8, 0x9c, 0xc4, 0xe6, 0x7d, 0xe2, 0x9a, 0x36, 0x54, 0xa4, - 0xba, 0xd4, 0x58, 0xd6, 0x5f, 0x38, 0xee, 0xd7, 0x16, 0x06, 0xfd, 0x5a, 0x19, 0x0b, 0x52, 0x9c, - 0x43, 0xcb, 0x6d, 0xb4, 0x4a, 0x7a, 0xc4, 0xb2, 0x49, 0xdb, 0x86, 0x47, 0xee, 0xbe, 0x67, 0x02, - 0xad, 0x2c, 0xd6, 0xa5, 0x46, 0x69, 0xbb, 0xae, 0x66, 0xe2, 0xcf, 0x42, 0xa6, 0xf6, 0xb6, 0x54, - 0x06, 0x68, 0x81, 0x0d, 0x46, 0xe8, 0x05, 0xfa, 0x8d, 0x41, 0xbf, 0xb6, 0xba, 0x93, 0xd3, 0xc6, - 0x63, 0x7c, 0xb2, 0x86, 0x96, 0x69, 0x97, 0x04, 0xc0, 0xf6, 0x2a, 0x85, 0xba, 0xd4, 0x58, 0xd2, - 0xd7, 0x62, 0xf3, 0x96, 0x5b, 0x89, 0x00, 0x8f, 0x30, 0xca, 0x8f, 0x8b, 0x68, 0xa5, 0xe9, 0x99, - 0x2d, 0xa3, 0x0b, 0x66, 0x64, 0x5b, 0x6e, 0x47, 0xfe, 0x02, 0x2d, 0xb1, 0xf8, 0x9b, 0x24, 0x24, - 0xdc, 0xc1, 0xd2, 0xf6, 0x9d, 0x8c, 0x79, 0x69, 0x18, 0x55, 0xff, 0xa8, 0xc3, 0x36, 0xa8, 0xca, - 0xd0, 0xcc, 0xe0, 0x47, 0xed, 0x2f, 0xc1, 0x08, 0x1f, 0x42, 0x48, 0x74, 0x39, 0x3e, 0x13, 0x8d, - 0xf6, 0x70, 0xca, 0x2a, 0x7f, 0x84, 0x8a, 0xd4, 0x07, 0x23, 0x76, 0x5e, 0x53, 0x67, 0x5d, 0x3e, - 0x55, 0x30, 0xae, 0xe5, 0x83, 0xa1, 0x5f, 0x8b, 0xc9, 0x8b, 0x6c, 0x85, 0x39, 0x95, 0x7c, 0x88, - 0xae, 0xd2, 0x90, 0x84, 0x11, 0xe5, 0x4e, 0x97, 0xb6, 0xb7, 0xe6, 0x21, 0xe5, 0x8a, 0x7a, 0x39, - 0xa6, 0xbd, 0x3a, 0x5c, 0xe3, 0x98, 0x50, 0xf9, 0x59, 0x42, 0x6b, 0x02, 0xfe, 0x43, 0x8b, 0x86, - 0xf2, 0x67, 0x63, 0x51, 0x52, 0xcf, 0x16, 0x25, 0xa6, 0xcd, 0x63, 0xb4, 0x1a, 0x9f, 0xb7, 0x94, - 0xec, 0x64, 0x22, 0xd4, 0x44, 0x57, 0xac, 0x10, 0x1c, 0x76, 0x3f, 0x0a, 0x8d, 0xd2, 0xf6, 0x4b, - 0x73, 0x78, 0xa3, 0xaf, 0xc4, 0xbc, 0x57, 0xf6, 0x18, 0x03, 0x1e, 0x12, 0x29, 0xdf, 0xe6, 0xbd, - 0x60, 0xc1, 0x93, 0xef, 0xa2, 0x6b, 0x94, 0x5f, 0x31, 0x30, 0xd9, 0xfd, 0x89, 0x2f, 0xf4, 0x8d, - 0x98, 0xe1, 0x5a, 0x2b, 0x23, 0xc3, 0x02, 0x52, 0x7e, 0x13, 0x95, 0x7d, 0x2f, 0x04, 0x37, 0xb4, - 0x88, 0x9d, 0x5c, 0xe5, 0x42, 0x63, 0x59, 0x97, 0x59, 0x21, 0x34, 0x05, 0x09, 0xce, 0x21, 0x95, - 0xef, 0x25, 0xb4, 0x3e, 0x21, 0x03, 0xf2, 0x37, 0xa3, 0x02, 0xbb, 0x6f, 0x13, 0xcb, 0xa1, 0x15, - 0x89, 0xbb, 0xff, 0xd6, 0x6c, 0xf7, 0x71, 0x56, 0x67, 0x2c, 0xad, 0x63, 0xd5, 0x39, 0xa4, 0xc6, - 0xb9, 0xa3, 0x78, 0x21, 0x08, 0x90, 0xcb, 0x56, 0x08, 0xa2, 0x9b, 0xff, 0x51, 0x21, 0x88, 0xa4, - 0xb3, 0x0b, 0x61, 0x20, 0xa1, 0xaa, 0x80, 0xbf, 0xef, 0xb9, 0x34, 0x72, 0x20, 0xc0, 0xf0, 0x18, - 0x02, 0x70, 0x0d, 0x90, 0x5f, 0x46, 0x4b, 0xc4, 0xb7, 0x1e, 0x04, 0x5e, 0xe4, 0xc7, 0x77, 0x29, - 0xbd, 0xe5, 0x3b, 0xcd, 0x3d, 0xbe, 0x8f, 0x53, 0x04, 0x43, 0x27, 0x16, 0x71, 0x6b, 0x33, 0xe8, - 0xe4, 0x1c, 0x9c, 0x22, 0xe4, 0x3a, 0x2a, 0xba, 0xc4, 0x81, 0x4a, 0x91, 0x23, 0x53, 0xdf, 0xf7, - 0x89, 0x03, 0x98, 0x4b, 0x64, 0x1d, 0x15, 0x22, 0xcb, 0xac, 0x5c, 0xe1, 0x80, 0x3b, 0x31, 0xa0, - 0x70, 0xb0, 0xb7, 0xfb, 0xbc, 0x5f, 0x7b, 0x71, 0xda, 0x4b, 0x10, 0x3e, 0xf1, 0x81, 0xaa, 0x07, - 0x7b, 0xbb, 0x98, 0x29, 0xf3, 0x6a, 0x17, 0x9c, 0xbc, 0x74, 0xd5, 0x2e, 0x58, 0x37, 0xa5, 0xda, - 0x7f, 0x90, 0x50, 0x5d, 0xc0, 0x35, 0x49, 0x40, 0x1c, 0x08, 0x21, 0xa0, 0xe7, 0x4d, 0x56, 0x1d, - 0x15, 0x8f, 0x2c, 0xd7, 0xe4, 0x77, 0x35, 0x13, 0xfe, 0x0f, 0x2c, 0xd7, 0xc4, 0x5c, 0x92, 0x26, - 
0xa8, 0x30, 0x2d, 0x41, 0xca, 0x53, 0x09, 0xdd, 0x9e, 0x59, 0xad, 0x29, 0x87, 0x34, 0x35, 0xc9, - 0xef, 0xa0, 0xeb, 0x91, 0x4b, 0x23, 0x2b, 0x64, 0xcf, 0x57, 0xb6, 0xf3, 0xac, 0x0f, 0xfa, 0xb5, - 0xeb, 0x07, 0xa2, 0x08, 0xe7, 0xb1, 0xca, 0x4f, 0x8b, 0xb9, 0xfc, 0xf2, 0x3e, 0xf8, 0x00, 0xad, - 0x65, 0xda, 0x01, 0xa5, 0xfb, 0x23, 0x1b, 0x6e, 0xc6, 0x36, 0x64, 0xb5, 0x86, 0x00, 0x3c, 0xae, - 0x23, 0x7f, 0x8d, 0x56, 0xfc, 0x6c, 0xa8, 0xe3, 0xd2, 0xbe, 0x37, 0x47, 0x4a, 0x27, 0xa4, 0x4a, - 0x5f, 0x1b, 0xf4, 0x6b, 0x2b, 0x82, 0x00, 0x8b, 0xe7, 0xc8, 0x4d, 0x54, 0x26, 0xe9, 0xc0, 0xf2, - 0x90, 0xf5, 0xf2, 0x61, 0x1a, 0x1a, 0x49, 0xfb, 0xdb, 0x11, 0xa4, 0xcf, 0xc7, 0x76, 0x70, 0x4e, - 0x5f, 0xf9, 0x6b, 0x11, 0xad, 0x4f, 0x68, 0x0f, 0xf2, 0x36, 0x42, 0x66, 0x60, 0xf5, 0x20, 0xc8, - 0x04, 0x29, 0x6d, 0x73, 0xbb, 0xa9, 0x04, 0x67, 0x50, 0xf2, 0xe7, 0x08, 0x8d, 0xd8, 0xe3, 0x98, - 0xa8, 0xb3, 0x63, 0x92, 0x1f, 0xbf, 0xf4, 0x32, 0xe3, 0xcf, 0xec, 0x66, 0x18, 0x65, 0x8a, 0x4a, - 0x01, 0x50, 0x08, 0x7a, 0x60, 0xbe, 0xe7, 0x05, 0x95, 0x02, 0xaf, 0xa3, 0xb7, 0xe7, 0x08, 0xfa, - 0x58, 0x2b, 0xd3, 0xd7, 0x63, 0x97, 0x4a, 0x78, 0x44, 0x8c, 0xb3, 0xa7, 0xc8, 0x2d, 0xb4, 0x61, - 0x02, 0xc9, 0x98, 0xf9, 0x55, 0x04, 0x34, 0x04, 0x93, 0x77, 0xa8, 0x25, 0xfd, 0x76, 0x4c, 0xb0, - 0xb1, 0x3b, 0x09, 0x84, 0x27, 0xeb, 0x2a, 0xbf, 0x49, 0x68, 0x43, 0xb0, 0xec, 0x63, 0x70, 0x7c, - 0x9b, 0x84, 0x70, 0x01, 0xcf, 0xd1, 0xa1, 0xf0, 0x1c, 0xbd, 0x31, 0x47, 0xf8, 0x12, 0x23, 0xa7, - 0x3d, 0x4b, 0xca, 0xaf, 0x12, 0xba, 0x39, 0x51, 0xe3, 0x02, 0xda, 0xeb, 0x27, 0x62, 0x7b, 0x7d, - 0xf5, 0x1c, 0x7e, 0x4d, 0x69, 0xb3, 0x27, 0xd3, 0xbc, 0xe2, 0x4d, 0xe5, 0xff, 0x38, 0x3f, 0x28, - 0x7f, 0x8b, 0x63, 0x10, 0xa5, 0x17, 0xe0, 0x86, 0xd8, 0x51, 0x16, 0xcf, 0xd4, 0x51, 0xc6, 0x1a, - 0x6d, 0x61, 0xce, 0x46, 0x4b, 0xe9, 0xf9, 0x1a, 0xed, 0x21, 0x5a, 0x11, 0x5f, 0x9f, 0xe2, 0x19, - 0x3f, 0xe1, 0x38, 0x75, 0x4b, 0x78, 0x9d, 0x44, 0xa6, 0xfc, 0xec, 0x41, 0xe9, 0x65, 0x9e, 0x3d, - 0x28, 0x9d, 0x52, 0x14, 0xbf, 0x88, 0xb3, 0xc7, 0xc4, 0x38, 0x5f, 0xfc, 0xec, 0xc1, 0xbe, 0x8c, - 0xd9, 0x5f, 0xea, 0x13, 0x23, 0x99, 0x21, 0xd3, 0x2f, 0xe3, 0xfd, 0x44, 0x80, 0x47, 0x18, 0x5d, - 0x3f, 0x3e, 0xad, 0x2e, 0x9c, 0x9c, 0x56, 0x17, 0x9e, 0x9d, 0x56, 0x17, 0x9e, 0x0e, 0xaa, 0xd2, - 0xf1, 0xa0, 0x2a, 0x9d, 0x0c, 0xaa, 0xd2, 0xb3, 0x41, 0x55, 0xfa, 0x7d, 0x50, 0x95, 0xbe, 0xfb, - 0xa3, 0xba, 0xf0, 0xe9, 0xad, 0x59, 0xbf, 0xb3, 0xfc, 0x13, 0x00, 0x00, 0xff, 0xff, 0xe7, 0x0a, - 0x8b, 0x49, 0x9f, 0x11, 0x00, 0x00, + 0x99, 0x79, 0xbf, 0xf7, 0x9b, 0xf7, 0x31, 0xef, 0xcd, 0x5b, 0xa3, 0x77, 0x0f, 0xef, 0x52, 0xd5, + 0xf2, 0xb4, 0xc3, 0xa8, 0x03, 0x81, 0x0b, 0x21, 0x50, 0xad, 0x0f, 0xae, 0xe9, 0x05, 0x5a, 0x2c, + 0x20, 0xbe, 0xa5, 0x05, 0x40, 0xbd, 0x28, 0x30, 0x40, 0xeb, 0x6f, 0x11, 0xdb, 0xef, 0x91, 0xa6, + 0xd6, 0x05, 0x17, 0x02, 0x12, 0x82, 0xa9, 0xfa, 0x81, 0x17, 0x7a, 0xf2, 0xad, 0x11, 0x5a, 0x25, + 0xbe, 0xa5, 0x26, 0x68, 0x35, 0x41, 0xdf, 0x7c, 0xa5, 0x6b, 0x85, 0xbd, 0xa8, 0xa3, 0x1a, 0x9e, + 0xa3, 0x75, 0xbd, 0xae, 0xa7, 0x71, 0xa5, 0x4e, 0xf4, 0x98, 0xaf, 0xf8, 0x82, 0xff, 0x37, 0x22, + 0xbb, 0xa9, 0x64, 0x8e, 0x36, 0xbc, 0x80, 0x1d, 0x9b, 0x3f, 0xf0, 0xe6, 0x6b, 0x63, 0x8c, 0x43, + 0x8c, 0x9e, 0xe5, 0x42, 0xf0, 0x44, 0xf3, 0x0f, 0xbb, 0x6c, 0x83, 0x6a, 0x0e, 0x84, 0x64, 0x9a, + 0x96, 0x36, 0x4b, 0x2b, 0x88, 0xdc, 0xd0, 0x72, 0x60, 0x42, 0xe1, 0xf5, 0x7f, 0x53, 0xa0, 0x46, + 0x0f, 0x1c, 0x92, 0xd7, 0x53, 0xfe, 0x94, 0xd0, 0xfa, 0xb6, 0x6d, 0x7b, 0x06, 0x09, 0x2d, 0xcf, + 0xc5, 0x40, 0x23, 0x3b, 0x94, 0xef, 0xa1, 0x72, 0x12, 0x9b, 0xf7, 0x89, 0x6b, 0xda, 
0x50, 0x91, + 0xea, 0x52, 0x63, 0x55, 0x7f, 0xe1, 0x68, 0x50, 0x5b, 0x1a, 0x0e, 0x6a, 0x65, 0x2c, 0x48, 0x71, + 0x0e, 0x2d, 0x77, 0xd0, 0x3a, 0xe9, 0x13, 0xcb, 0x26, 0x1d, 0x1b, 0x1e, 0xb9, 0x7b, 0x9e, 0x09, + 0xb4, 0xb2, 0x5c, 0x97, 0x1a, 0xa5, 0x66, 0x5d, 0xcd, 0xc4, 0x9f, 0x85, 0x4c, 0xed, 0x6f, 0xa9, + 0x0c, 0xd0, 0x06, 0x1b, 0x8c, 0xd0, 0x0b, 0xf4, 0x6b, 0xc3, 0x41, 0x6d, 0x7d, 0x3b, 0xa7, 0x8d, + 0x27, 0xf8, 0x64, 0x0d, 0xad, 0xd2, 0x1e, 0x09, 0x80, 0xed, 0x55, 0x0a, 0x75, 0xa9, 0xb1, 0xa2, + 0x6f, 0xc4, 0xe6, 0xad, 0xb6, 0x13, 0x01, 0x1e, 0x63, 0x94, 0x1f, 0x97, 0xd1, 0x5a, 0xcb, 0x33, + 0xdb, 0x46, 0x0f, 0xcc, 0xc8, 0xb6, 0xdc, 0xae, 0xfc, 0x05, 0x5a, 0x61, 0xf1, 0x37, 0x49, 0x48, + 0xb8, 0x83, 0xa5, 0xe6, 0x9d, 0x8c, 0x79, 0x69, 0x18, 0x55, 0xff, 0xb0, 0xcb, 0x36, 0xa8, 0xca, + 0xd0, 0xcc, 0xe0, 0x47, 0x9d, 0x2f, 0xc1, 0x08, 0x1f, 0x42, 0x48, 0x74, 0x39, 0x3e, 0x13, 0x8d, + 0xf7, 0x70, 0xca, 0x2a, 0x7f, 0x84, 0x8a, 0xd4, 0x07, 0x23, 0x76, 0x5e, 0x53, 0xe7, 0x5d, 0x3e, + 0x55, 0x30, 0xae, 0xed, 0x83, 0xa1, 0x5f, 0x89, 0xc9, 0x8b, 0x6c, 0x85, 0x39, 0x95, 0x7c, 0x80, + 0x2e, 0xd3, 0x90, 0x84, 0x11, 0xe5, 0x4e, 0x97, 0x9a, 0x5b, 0x8b, 0x90, 0x72, 0x45, 0xbd, 0x1c, + 0xd3, 0x5e, 0x1e, 0xad, 0x71, 0x4c, 0xa8, 0xfc, 0x2c, 0xa1, 0x0d, 0x01, 0xff, 0xa1, 0x45, 0x43, + 0xf9, 0xb3, 0x89, 0x28, 0xa9, 0xa7, 0x8b, 0x12, 0xd3, 0xe6, 0x31, 0x5a, 0x8f, 0xcf, 0x5b, 0x49, + 0x76, 0x32, 0x11, 0x6a, 0xa1, 0x4b, 0x56, 0x08, 0x0e, 0xbb, 0x1f, 0x85, 0x46, 0xa9, 0xf9, 0xd2, + 0x02, 0xde, 0xe8, 0x6b, 0x31, 0xef, 0xa5, 0x5d, 0xc6, 0x80, 0x47, 0x44, 0xca, 0xb7, 0x79, 0x2f, + 0x58, 0xf0, 0xe4, 0xbb, 0xe8, 0x0a, 0xe5, 0x57, 0x0c, 0x4c, 0x76, 0x7f, 0xe2, 0x0b, 0x7d, 0x2d, + 0x66, 0xb8, 0xd2, 0xce, 0xc8, 0xb0, 0x80, 0x94, 0xdf, 0x44, 0x65, 0xdf, 0x0b, 0xc1, 0x0d, 0x2d, + 0x62, 0x27, 0x57, 0xb9, 0xd0, 0x58, 0xd5, 0x65, 0x56, 0x08, 0x2d, 0x41, 0x82, 0x73, 0x48, 0xe5, + 0x7b, 0x09, 0x6d, 0x4e, 0xc9, 0x80, 0xfc, 0xcd, 0xb8, 0xc0, 0xee, 0xdb, 0xc4, 0x72, 0x68, 0x45, + 0xe2, 0xee, 0xbf, 0x35, 0xdf, 0x7d, 0x9c, 0xd5, 0x99, 0x48, 0xeb, 0x44, 0x75, 0x8e, 0xa8, 0x71, + 0xee, 0x28, 0x5e, 0x08, 0x02, 0xe4, 0xa2, 0x15, 0x82, 0xe8, 0xe6, 0x7f, 0x54, 0x08, 0x22, 0xe9, + 0xfc, 0x42, 0x18, 0x4a, 0xa8, 0x2a, 0xe0, 0xef, 0x7b, 0x2e, 0x8d, 0x1c, 0x08, 0x30, 0x3c, 0x86, + 0x00, 0x5c, 0x03, 0xe4, 0x97, 0xd1, 0x0a, 0xf1, 0xad, 0x07, 0x81, 0x17, 0xf9, 0xf1, 0x5d, 0x4a, + 0x6f, 0xf9, 0x76, 0x6b, 0x97, 0xef, 0xe3, 0x14, 0xc1, 0xd0, 0x89, 0x45, 0xdc, 0xda, 0x0c, 0x3a, + 0x39, 0x07, 0xa7, 0x08, 0xb9, 0x8e, 0x8a, 0x2e, 0x71, 0xa0, 0x52, 0xe4, 0xc8, 0xd4, 0xf7, 0x3d, + 0xe2, 0x00, 0xe6, 0x12, 0x59, 0x47, 0x85, 0xc8, 0x32, 0x2b, 0x97, 0x38, 0xe0, 0x4e, 0x0c, 0x28, + 0xec, 0xef, 0xee, 0x3c, 0x1f, 0xd4, 0x5e, 0x9c, 0xf5, 0x12, 0x84, 0x4f, 0x7c, 0xa0, 0xea, 0xfe, + 0xee, 0x0e, 0x66, 0xca, 0xbc, 0xda, 0x05, 0x27, 0x2f, 0x5c, 0xb5, 0x0b, 0xd6, 0xcd, 0xa8, 0xf6, + 0x1f, 0x24, 0x54, 0x17, 0x70, 0x2d, 0x12, 0x10, 0x07, 0x42, 0x08, 0xe8, 0x59, 0x93, 0x55, 0x47, + 0xc5, 0x43, 0xcb, 0x35, 0xf9, 0x5d, 0xcd, 0x84, 0xff, 0x03, 0xcb, 0x35, 0x31, 0x97, 0xa4, 0x09, + 0x2a, 0xcc, 0x4a, 0x90, 0xf2, 0x54, 0x42, 0xb7, 0xe7, 0x56, 0x6b, 0xca, 0x21, 0xcd, 0x4c, 0xf2, + 0x3b, 0xe8, 0x6a, 0xe4, 0xd2, 0xc8, 0x0a, 0xd9, 0xf3, 0x95, 0xed, 0x3c, 0x9b, 0xc3, 0x41, 0xed, + 0xea, 0xbe, 0x28, 0xc2, 0x79, 0xac, 0xf2, 0xd3, 0x72, 0x2e, 0xbf, 0xbc, 0x0f, 0x3e, 0x40, 0x1b, + 0x99, 0x76, 0x40, 0xe9, 0xde, 0xd8, 0x86, 0x1b, 0xb1, 0x0d, 0x59, 0xad, 0x11, 0x00, 0x4f, 0xea, + 0xc8, 0x5f, 0xa3, 0x35, 0x3f, 0x1b, 0xea, 0xb8, 0xb4, 0xef, 0x2d, 0x90, 0xd2, 0x29, 0xa9, 0xd2, + 0x37, 0x86, 
0x83, 0xda, 0x9a, 0x20, 0xc0, 0xe2, 0x39, 0x72, 0x0b, 0x95, 0x49, 0x3a, 0xb0, 0x3c, + 0x64, 0xbd, 0x7c, 0x94, 0x86, 0x46, 0xd2, 0xfe, 0xb6, 0x05, 0xe9, 0xf3, 0x89, 0x1d, 0x9c, 0xd3, + 0x57, 0xfe, 0x5a, 0x46, 0x9b, 0x53, 0xda, 0x83, 0xdc, 0x44, 0xc8, 0x0c, 0xac, 0x3e, 0x04, 0x99, + 0x20, 0xa5, 0x6d, 0x6e, 0x27, 0x95, 0xe0, 0x0c, 0x4a, 0xfe, 0x1c, 0xa1, 0x31, 0x7b, 0x1c, 0x13, + 0x75, 0x7e, 0x4c, 0xf2, 0xe3, 0x97, 0x5e, 0x66, 0xfc, 0x99, 0xdd, 0x0c, 0xa3, 0x4c, 0x51, 0x29, + 0x00, 0x0a, 0x41, 0x1f, 0xcc, 0xf7, 0xbc, 0xa0, 0x52, 0xe0, 0x75, 0xf4, 0xf6, 0x02, 0x41, 0x9f, + 0x68, 0x65, 0xfa, 0x66, 0xec, 0x52, 0x09, 0x8f, 0x89, 0x71, 0xf6, 0x14, 0xb9, 0x8d, 0xae, 0x9b, + 0x40, 0x32, 0x66, 0x7e, 0x15, 0x01, 0x0d, 0xc1, 0xe4, 0x1d, 0x6a, 0x45, 0xbf, 0x1d, 0x13, 0x5c, + 0xdf, 0x99, 0x06, 0xc2, 0xd3, 0x75, 0x95, 0xdf, 0x24, 0x74, 0x5d, 0xb0, 0xec, 0x63, 0x70, 0x7c, + 0x9b, 0x84, 0x70, 0x0e, 0xcf, 0xd1, 0x81, 0xf0, 0x1c, 0xbd, 0xb1, 0x40, 0xf8, 0x12, 0x23, 0x67, + 0x3d, 0x4b, 0xca, 0xaf, 0x12, 0xba, 0x31, 0x55, 0xe3, 0x1c, 0xda, 0xeb, 0x27, 0x62, 0x7b, 0x7d, + 0xf5, 0x0c, 0x7e, 0xcd, 0x68, 0xb3, 0xc7, 0xb3, 0xbc, 0xe2, 0x4d, 0xe5, 0xff, 0x38, 0x3f, 0x28, + 0x7f, 0x8b, 0x63, 0x10, 0xa5, 0xe7, 0xe0, 0x86, 0xd8, 0x51, 0x96, 0x4f, 0xd5, 0x51, 0x26, 0x1a, + 0x6d, 0x61, 0xc1, 0x46, 0x4b, 0xe9, 0xd9, 0x1a, 0xed, 0x01, 0x5a, 0x13, 0x5f, 0x9f, 0xe2, 0x29, + 0x3f, 0xe1, 0x38, 0x75, 0x5b, 0x78, 0x9d, 0x44, 0xa6, 0xfc, 0xec, 0x41, 0xe9, 0x45, 0x9e, 0x3d, + 0x28, 0x9d, 0x51, 0x14, 0xbf, 0x88, 0xb3, 0xc7, 0xd4, 0x38, 0x9f, 0xff, 0xec, 0xc1, 0xbe, 0x8c, + 0xd9, 0x5f, 0xea, 0x13, 0x23, 0x99, 0x21, 0xd3, 0x2f, 0xe3, 0xbd, 0x44, 0x80, 0xc7, 0x18, 0x5d, + 0x3f, 0x3a, 0xa9, 0x2e, 0x1d, 0x9f, 0x54, 0x97, 0x9e, 0x9d, 0x54, 0x97, 0x9e, 0x0e, 0xab, 0xd2, + 0xd1, 0xb0, 0x2a, 0x1d, 0x0f, 0xab, 0xd2, 0xb3, 0x61, 0x55, 0xfa, 0x7d, 0x58, 0x95, 0xbe, 0xfb, + 0xa3, 0xba, 0xf4, 0xe9, 0xad, 0x79, 0xbf, 0xb3, 0xfc, 0x13, 0x00, 0x00, 0xff, 0xff, 0x93, 0xfa, + 0x8c, 0x28, 0x9f, 0x11, 0x00, 0x00, } func (m *AllocationResult) Marshal() (dAtA []byte, err error) { diff --git a/staging/src/k8s.io/api/resource/v1alpha1/generated.proto b/staging/src/k8s.io/api/resource/v1alpha2/generated.proto similarity index 99% rename from staging/src/k8s.io/api/resource/v1alpha1/generated.proto rename to staging/src/k8s.io/api/resource/v1alpha2/generated.proto index 2e814d155b3..7f50d46d91b 100644 --- a/staging/src/k8s.io/api/resource/v1alpha1/generated.proto +++ b/staging/src/k8s.io/api/resource/v1alpha2/generated.proto @@ -19,7 +19,7 @@ limitations under the License. syntax = "proto2"; -package k8s.io.api.resource.v1alpha1; +package k8s.io.api.resource.v1alpha2; import "k8s.io/api/core/v1/generated.proto"; import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; @@ -27,7 +27,7 @@ import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; // Package-wide variables from generator "generated". -option go_package = "k8s.io/api/resource/v1alpha1"; +option go_package = "k8s.io/api/resource/v1alpha2"; // AllocationResult contains attributed of an allocated resource. 
message AllocationResult { diff --git a/staging/src/k8s.io/api/resource/v1alpha1/register.go b/staging/src/k8s.io/api/resource/v1alpha2/register.go similarity index 98% rename from staging/src/k8s.io/api/resource/v1alpha1/register.go rename to staging/src/k8s.io/api/resource/v1alpha2/register.go index 8245b9aee5d..4a6c4381f1c 100644 --- a/staging/src/k8s.io/api/resource/v1alpha1/register.go +++ b/staging/src/k8s.io/api/resource/v1alpha2/register.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package v1alpha1 +package v1alpha2 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -26,7 +26,7 @@ import ( const GroupName = "resource.k8s.io" // SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha2"} // Resource takes an unqualified resource and returns a Group qualified GroupResource func Resource(resource string) schema.GroupResource { diff --git a/staging/src/k8s.io/api/resource/v1alpha1/types.go b/staging/src/k8s.io/api/resource/v1alpha2/types.go similarity index 99% rename from staging/src/k8s.io/api/resource/v1alpha1/types.go rename to staging/src/k8s.io/api/resource/v1alpha2/types.go index af570384039..7eb76242988 100644 --- a/staging/src/k8s.io/api/resource/v1alpha1/types.go +++ b/staging/src/k8s.io/api/resource/v1alpha2/types.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package v1alpha1 +package v1alpha2 import ( v1 "k8s.io/api/core/v1" diff --git a/staging/src/k8s.io/api/resource/v1alpha1/types_swagger_doc_generated.go b/staging/src/k8s.io/api/resource/v1alpha2/types_swagger_doc_generated.go similarity index 99% rename from staging/src/k8s.io/api/resource/v1alpha1/types_swagger_doc_generated.go rename to staging/src/k8s.io/api/resource/v1alpha2/types_swagger_doc_generated.go index 4c2d1b7b23d..b1ec3e15118 100644 --- a/staging/src/k8s.io/api/resource/v1alpha1/types_swagger_doc_generated.go +++ b/staging/src/k8s.io/api/resource/v1alpha2/types_swagger_doc_generated.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package v1alpha1 +package v1alpha2 // This file contains a collection of methods that can be used from go-restful to // generate Swagger API documentation for its models. Please read this PR for more diff --git a/staging/src/k8s.io/api/resource/v1alpha1/zz_generated.deepcopy.go b/staging/src/k8s.io/api/resource/v1alpha2/zz_generated.deepcopy.go similarity index 99% rename from staging/src/k8s.io/api/resource/v1alpha1/zz_generated.deepcopy.go rename to staging/src/k8s.io/api/resource/v1alpha2/zz_generated.deepcopy.go index c00fbfd1d4e..bc6772bdb59 100644 --- a/staging/src/k8s.io/api/resource/v1alpha1/zz_generated.deepcopy.go +++ b/staging/src/k8s.io/api/resource/v1alpha2/zz_generated.deepcopy.go @@ -19,7 +19,7 @@ limitations under the License. // Code generated by deepcopy-gen. DO NOT EDIT. 
-package v1alpha1 +package v1alpha2 import ( v1 "k8s.io/api/core/v1" diff --git a/staging/src/k8s.io/api/roundtrip_test.go b/staging/src/k8s.io/api/roundtrip_test.go index 1c326766fe5..59fa7f06d16 100644 --- a/staging/src/k8s.io/api/roundtrip_test.go +++ b/staging/src/k8s.io/api/roundtrip_test.go @@ -66,7 +66,7 @@ import ( rbacv1 "k8s.io/api/rbac/v1" rbacv1alpha1 "k8s.io/api/rbac/v1alpha1" rbacv1beta1 "k8s.io/api/rbac/v1beta1" - resourcev1alpha1 "k8s.io/api/resource/v1alpha1" + resourcev1alpha2 "k8s.io/api/resource/v1alpha2" schedulingv1 "k8s.io/api/scheduling/v1" schedulingv1alpha1 "k8s.io/api/scheduling/v1alpha1" schedulingv1beta1 "k8s.io/api/scheduling/v1beta1" @@ -129,7 +129,7 @@ var groups = []runtime.SchemeBuilder{ rbacv1alpha1.SchemeBuilder, rbacv1beta1.SchemeBuilder, rbacv1.SchemeBuilder, - resourcev1alpha1.SchemeBuilder, + resourcev1alpha2.SchemeBuilder, schedulingv1alpha1.SchemeBuilder, schedulingv1beta1.SchemeBuilder, schedulingv1.SchemeBuilder, diff --git a/staging/src/k8s.io/api/testdata/HEAD/resource.k8s.io.v1alpha1.PodScheduling.json b/staging/src/k8s.io/api/testdata/HEAD/resource.k8s.io.v1alpha2.PodScheduling.json similarity index 96% rename from staging/src/k8s.io/api/testdata/HEAD/resource.k8s.io.v1alpha1.PodScheduling.json rename to staging/src/k8s.io/api/testdata/HEAD/resource.k8s.io.v1alpha2.PodScheduling.json index 5590555162c..f06bf8bd6ac 100644 --- a/staging/src/k8s.io/api/testdata/HEAD/resource.k8s.io.v1alpha1.PodScheduling.json +++ b/staging/src/k8s.io/api/testdata/HEAD/resource.k8s.io.v1alpha2.PodScheduling.json @@ -1,6 +1,6 @@ { "kind": "PodScheduling", - "apiVersion": "resource.k8s.io/v1alpha1", + "apiVersion": "resource.k8s.io/v1alpha2", "metadata": { "name": "nameValue", "generateName": "generateNameValue", diff --git a/staging/src/k8s.io/api/testdata/HEAD/resource.k8s.io.v1alpha1.PodScheduling.pb b/staging/src/k8s.io/api/testdata/HEAD/resource.k8s.io.v1alpha2.PodScheduling.pb similarity index 90% rename from staging/src/k8s.io/api/testdata/HEAD/resource.k8s.io.v1alpha1.PodScheduling.pb rename to staging/src/k8s.io/api/testdata/HEAD/resource.k8s.io.v1alpha2.PodScheduling.pb index 4dc9623d52c..fe4a7b6753a 100644 Binary files a/staging/src/k8s.io/api/testdata/HEAD/resource.k8s.io.v1alpha1.PodScheduling.pb and b/staging/src/k8s.io/api/testdata/HEAD/resource.k8s.io.v1alpha2.PodScheduling.pb differ diff --git a/staging/src/k8s.io/api/testdata/HEAD/resource.k8s.io.v1alpha1.PodScheduling.yaml b/staging/src/k8s.io/api/testdata/HEAD/resource.k8s.io.v1alpha2.PodScheduling.yaml similarity index 96% rename from staging/src/k8s.io/api/testdata/HEAD/resource.k8s.io.v1alpha1.PodScheduling.yaml rename to staging/src/k8s.io/api/testdata/HEAD/resource.k8s.io.v1alpha2.PodScheduling.yaml index 6f5627deec8..f9a4e5ef577 100644 --- a/staging/src/k8s.io/api/testdata/HEAD/resource.k8s.io.v1alpha1.PodScheduling.yaml +++ b/staging/src/k8s.io/api/testdata/HEAD/resource.k8s.io.v1alpha2.PodScheduling.yaml @@ -1,4 +1,4 @@ -apiVersion: resource.k8s.io/v1alpha1 +apiVersion: resource.k8s.io/v1alpha2 kind: PodScheduling metadata: annotations: diff --git a/staging/src/k8s.io/api/testdata/HEAD/resource.k8s.io.v1alpha1.ResourceClaim.json b/staging/src/k8s.io/api/testdata/HEAD/resource.k8s.io.v1alpha2.ResourceClaim.json similarity index 98% rename from staging/src/k8s.io/api/testdata/HEAD/resource.k8s.io.v1alpha1.ResourceClaim.json rename to staging/src/k8s.io/api/testdata/HEAD/resource.k8s.io.v1alpha2.ResourceClaim.json index f1501251c69..a0473d05ffd 100644 --- 
a/staging/src/k8s.io/api/testdata/HEAD/resource.k8s.io.v1alpha1.ResourceClaim.json +++ b/staging/src/k8s.io/api/testdata/HEAD/resource.k8s.io.v1alpha2.ResourceClaim.json @@ -1,6 +1,6 @@ { "kind": "ResourceClaim", - "apiVersion": "resource.k8s.io/v1alpha1", + "apiVersion": "resource.k8s.io/v1alpha2", "metadata": { "name": "nameValue", "generateName": "generateNameValue", diff --git a/staging/src/k8s.io/api/testdata/HEAD/resource.k8s.io.v1alpha1.ResourceClaim.pb b/staging/src/k8s.io/api/testdata/HEAD/resource.k8s.io.v1alpha2.ResourceClaim.pb similarity index 93% rename from staging/src/k8s.io/api/testdata/HEAD/resource.k8s.io.v1alpha1.ResourceClaim.pb rename to staging/src/k8s.io/api/testdata/HEAD/resource.k8s.io.v1alpha2.ResourceClaim.pb index c50b8f0126d..9ede6c011c3 100644 Binary files a/staging/src/k8s.io/api/testdata/HEAD/resource.k8s.io.v1alpha1.ResourceClaim.pb and b/staging/src/k8s.io/api/testdata/HEAD/resource.k8s.io.v1alpha2.ResourceClaim.pb differ diff --git a/staging/src/k8s.io/api/testdata/HEAD/resource.k8s.io.v1alpha1.ResourceClaim.yaml b/staging/src/k8s.io/api/testdata/HEAD/resource.k8s.io.v1alpha2.ResourceClaim.yaml similarity index 97% rename from staging/src/k8s.io/api/testdata/HEAD/resource.k8s.io.v1alpha1.ResourceClaim.yaml rename to staging/src/k8s.io/api/testdata/HEAD/resource.k8s.io.v1alpha2.ResourceClaim.yaml index d7b52c4e160..7e150cc0883 100644 --- a/staging/src/k8s.io/api/testdata/HEAD/resource.k8s.io.v1alpha1.ResourceClaim.yaml +++ b/staging/src/k8s.io/api/testdata/HEAD/resource.k8s.io.v1alpha2.ResourceClaim.yaml @@ -1,4 +1,4 @@ -apiVersion: resource.k8s.io/v1alpha1 +apiVersion: resource.k8s.io/v1alpha2 kind: ResourceClaim metadata: annotations: diff --git a/staging/src/k8s.io/api/testdata/HEAD/resource.k8s.io.v1alpha1.ResourceClaimTemplate.json b/staging/src/k8s.io/api/testdata/HEAD/resource.k8s.io.v1alpha2.ResourceClaimTemplate.json similarity index 98% rename from staging/src/k8s.io/api/testdata/HEAD/resource.k8s.io.v1alpha1.ResourceClaimTemplate.json rename to staging/src/k8s.io/api/testdata/HEAD/resource.k8s.io.v1alpha2.ResourceClaimTemplate.json index 12358250d4f..b1e363fa351 100644 --- a/staging/src/k8s.io/api/testdata/HEAD/resource.k8s.io.v1alpha1.ResourceClaimTemplate.json +++ b/staging/src/k8s.io/api/testdata/HEAD/resource.k8s.io.v1alpha2.ResourceClaimTemplate.json @@ -1,6 +1,6 @@ { "kind": "ResourceClaimTemplate", - "apiVersion": "resource.k8s.io/v1alpha1", + "apiVersion": "resource.k8s.io/v1alpha2", "metadata": { "name": "nameValue", "generateName": "generateNameValue", diff --git a/staging/src/k8s.io/api/testdata/HEAD/resource.k8s.io.v1alpha1.ResourceClaimTemplate.pb b/staging/src/k8s.io/api/testdata/HEAD/resource.k8s.io.v1alpha2.ResourceClaimTemplate.pb similarity index 93% rename from staging/src/k8s.io/api/testdata/HEAD/resource.k8s.io.v1alpha1.ResourceClaimTemplate.pb rename to staging/src/k8s.io/api/testdata/HEAD/resource.k8s.io.v1alpha2.ResourceClaimTemplate.pb index 7177e570fcd..b251edf164d 100644 Binary files a/staging/src/k8s.io/api/testdata/HEAD/resource.k8s.io.v1alpha1.ResourceClaimTemplate.pb and b/staging/src/k8s.io/api/testdata/HEAD/resource.k8s.io.v1alpha2.ResourceClaimTemplate.pb differ diff --git a/staging/src/k8s.io/api/testdata/HEAD/resource.k8s.io.v1alpha1.ResourceClaimTemplate.yaml b/staging/src/k8s.io/api/testdata/HEAD/resource.k8s.io.v1alpha2.ResourceClaimTemplate.yaml similarity index 98% rename from staging/src/k8s.io/api/testdata/HEAD/resource.k8s.io.v1alpha1.ResourceClaimTemplate.yaml rename to 
staging/src/k8s.io/api/testdata/HEAD/resource.k8s.io.v1alpha2.ResourceClaimTemplate.yaml index 6204ec79ccd..63d726ed623 100644 --- a/staging/src/k8s.io/api/testdata/HEAD/resource.k8s.io.v1alpha1.ResourceClaimTemplate.yaml +++ b/staging/src/k8s.io/api/testdata/HEAD/resource.k8s.io.v1alpha2.ResourceClaimTemplate.yaml @@ -1,4 +1,4 @@ -apiVersion: resource.k8s.io/v1alpha1 +apiVersion: resource.k8s.io/v1alpha2 kind: ResourceClaimTemplate metadata: annotations: diff --git a/staging/src/k8s.io/api/testdata/HEAD/resource.k8s.io.v1alpha1.ResourceClass.json b/staging/src/k8s.io/api/testdata/HEAD/resource.k8s.io.v1alpha2.ResourceClass.json similarity index 97% rename from staging/src/k8s.io/api/testdata/HEAD/resource.k8s.io.v1alpha1.ResourceClass.json rename to staging/src/k8s.io/api/testdata/HEAD/resource.k8s.io.v1alpha2.ResourceClass.json index a40268178a9..90738786b4b 100644 --- a/staging/src/k8s.io/api/testdata/HEAD/resource.k8s.io.v1alpha1.ResourceClass.json +++ b/staging/src/k8s.io/api/testdata/HEAD/resource.k8s.io.v1alpha2.ResourceClass.json @@ -1,6 +1,6 @@ { "kind": "ResourceClass", - "apiVersion": "resource.k8s.io/v1alpha1", + "apiVersion": "resource.k8s.io/v1alpha2", "metadata": { "name": "nameValue", "generateName": "generateNameValue", diff --git a/staging/src/k8s.io/api/testdata/HEAD/resource.k8s.io.v1alpha1.ResourceClass.pb b/staging/src/k8s.io/api/testdata/HEAD/resource.k8s.io.v1alpha2.ResourceClass.pb similarity index 92% rename from staging/src/k8s.io/api/testdata/HEAD/resource.k8s.io.v1alpha1.ResourceClass.pb rename to staging/src/k8s.io/api/testdata/HEAD/resource.k8s.io.v1alpha2.ResourceClass.pb index fef3714f662..99f93ce8e17 100644 Binary files a/staging/src/k8s.io/api/testdata/HEAD/resource.k8s.io.v1alpha1.ResourceClass.pb and b/staging/src/k8s.io/api/testdata/HEAD/resource.k8s.io.v1alpha2.ResourceClass.pb differ diff --git a/staging/src/k8s.io/api/testdata/HEAD/resource.k8s.io.v1alpha1.ResourceClass.yaml b/staging/src/k8s.io/api/testdata/HEAD/resource.k8s.io.v1alpha2.ResourceClass.yaml similarity index 97% rename from staging/src/k8s.io/api/testdata/HEAD/resource.k8s.io.v1alpha1.ResourceClass.yaml rename to staging/src/k8s.io/api/testdata/HEAD/resource.k8s.io.v1alpha2.ResourceClass.yaml index 251d6a2bfd1..8dce3e3cbbc 100644 --- a/staging/src/k8s.io/api/testdata/HEAD/resource.k8s.io.v1alpha1.ResourceClass.yaml +++ b/staging/src/k8s.io/api/testdata/HEAD/resource.k8s.io.v1alpha2.ResourceClass.yaml @@ -1,4 +1,4 @@ -apiVersion: resource.k8s.io/v1alpha1 +apiVersion: resource.k8s.io/v1alpha2 driverName: driverNameValue kind: ResourceClass metadata: diff --git a/staging/src/k8s.io/api/testdata/HEAD/resource.k8s.io.v1alpha1.Status.json b/staging/src/k8s.io/api/testdata/HEAD/resource.k8s.io.v1alpha2.Status.json similarity index 92% rename from staging/src/k8s.io/api/testdata/HEAD/resource.k8s.io.v1alpha1.Status.json rename to staging/src/k8s.io/api/testdata/HEAD/resource.k8s.io.v1alpha2.Status.json index 13e0bdbcbef..bcfd157fa93 100644 --- a/staging/src/k8s.io/api/testdata/HEAD/resource.k8s.io.v1alpha1.Status.json +++ b/staging/src/k8s.io/api/testdata/HEAD/resource.k8s.io.v1alpha2.Status.json @@ -1,6 +1,6 @@ { "kind": "Status", - "apiVersion": "resource.k8s.io/v1alpha1", + "apiVersion": "resource.k8s.io/v1alpha2", "metadata": { "selfLink": "selfLinkValue", "resourceVersion": "resourceVersionValue", diff --git a/staging/src/k8s.io/api/testdata/HEAD/resource.k8s.io.v1alpha1.Status.pb b/staging/src/k8s.io/api/testdata/HEAD/resource.k8s.io.v1alpha2.Status.pb similarity index 84% rename from 
staging/src/k8s.io/api/testdata/HEAD/resource.k8s.io.v1alpha1.Status.pb rename to staging/src/k8s.io/api/testdata/HEAD/resource.k8s.io.v1alpha2.Status.pb index ac73b5ccdfb..66b5e430af7 100644 Binary files a/staging/src/k8s.io/api/testdata/HEAD/resource.k8s.io.v1alpha1.Status.pb and b/staging/src/k8s.io/api/testdata/HEAD/resource.k8s.io.v1alpha2.Status.pb differ diff --git a/staging/src/k8s.io/api/testdata/HEAD/resource.k8s.io.v1alpha1.Status.yaml b/staging/src/k8s.io/api/testdata/HEAD/resource.k8s.io.v1alpha2.Status.yaml similarity index 91% rename from staging/src/k8s.io/api/testdata/HEAD/resource.k8s.io.v1alpha1.Status.yaml rename to staging/src/k8s.io/api/testdata/HEAD/resource.k8s.io.v1alpha2.Status.yaml index f78f424e9c9..5507b94519f 100644 --- a/staging/src/k8s.io/api/testdata/HEAD/resource.k8s.io.v1alpha1.Status.yaml +++ b/staging/src/k8s.io/api/testdata/HEAD/resource.k8s.io.v1alpha2.Status.yaml @@ -1,4 +1,4 @@ -apiVersion: resource.k8s.io/v1alpha1 +apiVersion: resource.k8s.io/v1alpha2 code: 6 details: causes: diff --git a/staging/src/k8s.io/client-go/applyconfigurations/internal/internal.go b/staging/src/k8s.io/client-go/applyconfigurations/internal/internal.go index 6d14312d858..0bb14401ae2 100644 --- a/staging/src/k8s.io/client-go/applyconfigurations/internal/internal.go +++ b/staging/src/k8s.io/client-go/applyconfigurations/internal/internal.go @@ -11364,7 +11364,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: namespace type: scalar: string -- name: io.k8s.api.resource.v1alpha1.AllocationResult +- name: io.k8s.api.resource.v1alpha2.AllocationResult map: fields: - name: availableOnNodes @@ -11376,7 +11376,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: shareable type: scalar: boolean -- name: io.k8s.api.resource.v1alpha1.PodScheduling +- name: io.k8s.api.resource.v1alpha2.PodScheduling map: fields: - name: apiVersion @@ -11391,13 +11391,13 @@ var schemaYAML = typed.YAMLObject(`types: default: {} - name: spec type: - namedType: io.k8s.api.resource.v1alpha1.PodSchedulingSpec + namedType: io.k8s.api.resource.v1alpha2.PodSchedulingSpec default: {} - name: status type: - namedType: io.k8s.api.resource.v1alpha1.PodSchedulingStatus + namedType: io.k8s.api.resource.v1alpha2.PodSchedulingStatus default: {} -- name: io.k8s.api.resource.v1alpha1.PodSchedulingSpec +- name: io.k8s.api.resource.v1alpha2.PodSchedulingSpec map: fields: - name: potentialNodes @@ -11409,18 +11409,18 @@ var schemaYAML = typed.YAMLObject(`types: - name: selectedNode type: scalar: string -- name: io.k8s.api.resource.v1alpha1.PodSchedulingStatus +- name: io.k8s.api.resource.v1alpha2.PodSchedulingStatus map: fields: - name: resourceClaims type: list: elementType: - namedType: io.k8s.api.resource.v1alpha1.ResourceClaimSchedulingStatus + namedType: io.k8s.api.resource.v1alpha2.ResourceClaimSchedulingStatus elementRelationship: associative keys: - name -- name: io.k8s.api.resource.v1alpha1.ResourceClaim +- name: io.k8s.api.resource.v1alpha2.ResourceClaim map: fields: - name: apiVersion @@ -11435,13 +11435,13 @@ var schemaYAML = typed.YAMLObject(`types: default: {} - name: spec type: - namedType: io.k8s.api.resource.v1alpha1.ResourceClaimSpec + namedType: io.k8s.api.resource.v1alpha2.ResourceClaimSpec default: {} - name: status type: - namedType: io.k8s.api.resource.v1alpha1.ResourceClaimStatus + namedType: io.k8s.api.resource.v1alpha2.ResourceClaimStatus default: {} -- name: io.k8s.api.resource.v1alpha1.ResourceClaimConsumerReference +- name: io.k8s.api.resource.v1alpha2.ResourceClaimConsumerReference map: 
fields: - name: apiGroup @@ -11459,7 +11459,7 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" -- name: io.k8s.api.resource.v1alpha1.ResourceClaimParametersReference +- name: io.k8s.api.resource.v1alpha2.ResourceClaimParametersReference map: fields: - name: apiGroup @@ -11473,7 +11473,7 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" -- name: io.k8s.api.resource.v1alpha1.ResourceClaimSchedulingStatus +- name: io.k8s.api.resource.v1alpha2.ResourceClaimSchedulingStatus map: fields: - name: name @@ -11485,7 +11485,7 @@ var schemaYAML = typed.YAMLObject(`types: elementType: scalar: string elementRelationship: associative -- name: io.k8s.api.resource.v1alpha1.ResourceClaimSpec +- name: io.k8s.api.resource.v1alpha2.ResourceClaimSpec map: fields: - name: allocationMode @@ -11493,17 +11493,17 @@ var schemaYAML = typed.YAMLObject(`types: scalar: string - name: parametersRef type: - namedType: io.k8s.api.resource.v1alpha1.ResourceClaimParametersReference + namedType: io.k8s.api.resource.v1alpha2.ResourceClaimParametersReference - name: resourceClassName type: scalar: string default: "" -- name: io.k8s.api.resource.v1alpha1.ResourceClaimStatus +- name: io.k8s.api.resource.v1alpha2.ResourceClaimStatus map: fields: - name: allocation type: - namedType: io.k8s.api.resource.v1alpha1.AllocationResult + namedType: io.k8s.api.resource.v1alpha2.AllocationResult - name: deallocationRequested type: scalar: boolean @@ -11514,11 +11514,11 @@ var schemaYAML = typed.YAMLObject(`types: type: list: elementType: - namedType: io.k8s.api.resource.v1alpha1.ResourceClaimConsumerReference + namedType: io.k8s.api.resource.v1alpha2.ResourceClaimConsumerReference elementRelationship: associative keys: - uid -- name: io.k8s.api.resource.v1alpha1.ResourceClaimTemplate +- name: io.k8s.api.resource.v1alpha2.ResourceClaimTemplate map: fields: - name: apiVersion @@ -11533,9 +11533,9 @@ var schemaYAML = typed.YAMLObject(`types: default: {} - name: spec type: - namedType: io.k8s.api.resource.v1alpha1.ResourceClaimTemplateSpec + namedType: io.k8s.api.resource.v1alpha2.ResourceClaimTemplateSpec default: {} -- name: io.k8s.api.resource.v1alpha1.ResourceClaimTemplateSpec +- name: io.k8s.api.resource.v1alpha2.ResourceClaimTemplateSpec map: fields: - name: metadata @@ -11544,9 +11544,9 @@ var schemaYAML = typed.YAMLObject(`types: default: {} - name: spec type: - namedType: io.k8s.api.resource.v1alpha1.ResourceClaimSpec + namedType: io.k8s.api.resource.v1alpha2.ResourceClaimSpec default: {} -- name: io.k8s.api.resource.v1alpha1.ResourceClass +- name: io.k8s.api.resource.v1alpha2.ResourceClass map: fields: - name: apiVersion @@ -11565,11 +11565,11 @@ var schemaYAML = typed.YAMLObject(`types: default: {} - name: parametersRef type: - namedType: io.k8s.api.resource.v1alpha1.ResourceClassParametersReference + namedType: io.k8s.api.resource.v1alpha2.ResourceClassParametersReference - name: suitableNodes type: namedType: io.k8s.api.core.v1.NodeSelector -- name: io.k8s.api.resource.v1alpha1.ResourceClassParametersReference +- name: io.k8s.api.resource.v1alpha2.ResourceClassParametersReference map: fields: - name: apiGroup diff --git a/staging/src/k8s.io/client-go/applyconfigurations/resource/v1alpha1/allocationresult.go b/staging/src/k8s.io/client-go/applyconfigurations/resource/v1alpha2/allocationresult.go similarity index 99% rename from staging/src/k8s.io/client-go/applyconfigurations/resource/v1alpha1/allocationresult.go rename to 
staging/src/k8s.io/client-go/applyconfigurations/resource/v1alpha2/allocationresult.go index a2ad3adf1af..ec97dc83ce5 100644 --- a/staging/src/k8s.io/client-go/applyconfigurations/resource/v1alpha1/allocationresult.go +++ b/staging/src/k8s.io/client-go/applyconfigurations/resource/v1alpha2/allocationresult.go @@ -16,7 +16,7 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha1 +package v1alpha2 import ( v1 "k8s.io/client-go/applyconfigurations/core/v1" diff --git a/staging/src/k8s.io/client-go/applyconfigurations/resource/v1alpha1/podscheduling.go b/staging/src/k8s.io/client-go/applyconfigurations/resource/v1alpha2/podscheduling.go similarity index 96% rename from staging/src/k8s.io/client-go/applyconfigurations/resource/v1alpha1/podscheduling.go rename to staging/src/k8s.io/client-go/applyconfigurations/resource/v1alpha2/podscheduling.go index 44890c2d92e..c69e9bacc40 100644 --- a/staging/src/k8s.io/client-go/applyconfigurations/resource/v1alpha1/podscheduling.go +++ b/staging/src/k8s.io/client-go/applyconfigurations/resource/v1alpha2/podscheduling.go @@ -16,10 +16,10 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha1 +package v1alpha2 import ( - resourcev1alpha1 "k8s.io/api/resource/v1alpha1" + resourcev1alpha2 "k8s.io/api/resource/v1alpha2" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" @@ -43,7 +43,7 @@ func PodScheduling(name, namespace string) *PodSchedulingApplyConfiguration { b.WithName(name) b.WithNamespace(namespace) b.WithKind("PodScheduling") - b.WithAPIVersion("resource.k8s.io/v1alpha1") + b.WithAPIVersion("resource.k8s.io/v1alpha2") return b } @@ -58,20 +58,20 @@ func PodScheduling(name, namespace string) *PodSchedulingApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractPodScheduling(podScheduling *resourcev1alpha1.PodScheduling, fieldManager string) (*PodSchedulingApplyConfiguration, error) { +func ExtractPodScheduling(podScheduling *resourcev1alpha2.PodScheduling, fieldManager string) (*PodSchedulingApplyConfiguration, error) { return extractPodScheduling(podScheduling, fieldManager, "") } // ExtractPodSchedulingStatus is the same as ExtractPodScheduling except // that it extracts the status subresource applied configuration. // Experimental! 
-func ExtractPodSchedulingStatus(podScheduling *resourcev1alpha1.PodScheduling, fieldManager string) (*PodSchedulingApplyConfiguration, error) { +func ExtractPodSchedulingStatus(podScheduling *resourcev1alpha2.PodScheduling, fieldManager string) (*PodSchedulingApplyConfiguration, error) { return extractPodScheduling(podScheduling, fieldManager, "status") } -func extractPodScheduling(podScheduling *resourcev1alpha1.PodScheduling, fieldManager string, subresource string) (*PodSchedulingApplyConfiguration, error) { +func extractPodScheduling(podScheduling *resourcev1alpha2.PodScheduling, fieldManager string, subresource string) (*PodSchedulingApplyConfiguration, error) { b := &PodSchedulingApplyConfiguration{} - err := managedfields.ExtractInto(podScheduling, internal.Parser().Type("io.k8s.api.resource.v1alpha1.PodScheduling"), fieldManager, b, subresource) + err := managedfields.ExtractInto(podScheduling, internal.Parser().Type("io.k8s.api.resource.v1alpha2.PodScheduling"), fieldManager, b, subresource) if err != nil { return nil, err } @@ -79,7 +79,7 @@ func extractPodScheduling(podScheduling *resourcev1alpha1.PodScheduling, fieldMa b.WithNamespace(podScheduling.Namespace) b.WithKind("PodScheduling") - b.WithAPIVersion("resource.k8s.io/v1alpha1") + b.WithAPIVersion("resource.k8s.io/v1alpha2") return b, nil } diff --git a/staging/src/k8s.io/client-go/applyconfigurations/resource/v1alpha1/podschedulingspec.go b/staging/src/k8s.io/client-go/applyconfigurations/resource/v1alpha2/podschedulingspec.go similarity index 99% rename from staging/src/k8s.io/client-go/applyconfigurations/resource/v1alpha1/podschedulingspec.go rename to staging/src/k8s.io/client-go/applyconfigurations/resource/v1alpha2/podschedulingspec.go index 9fd3c1ee53e..1cfb016150a 100644 --- a/staging/src/k8s.io/client-go/applyconfigurations/resource/v1alpha1/podschedulingspec.go +++ b/staging/src/k8s.io/client-go/applyconfigurations/resource/v1alpha2/podschedulingspec.go @@ -16,7 +16,7 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha1 +package v1alpha2 // PodSchedulingSpecApplyConfiguration represents an declarative configuration of the PodSchedulingSpec type for use // with apply. diff --git a/staging/src/k8s.io/client-go/applyconfigurations/resource/v1alpha1/podschedulingstatus.go b/staging/src/k8s.io/client-go/applyconfigurations/resource/v1alpha2/podschedulingstatus.go similarity index 99% rename from staging/src/k8s.io/client-go/applyconfigurations/resource/v1alpha1/podschedulingstatus.go rename to staging/src/k8s.io/client-go/applyconfigurations/resource/v1alpha2/podschedulingstatus.go index 5744f6c3eb2..6ee383817e3 100644 --- a/staging/src/k8s.io/client-go/applyconfigurations/resource/v1alpha1/podschedulingstatus.go +++ b/staging/src/k8s.io/client-go/applyconfigurations/resource/v1alpha2/podschedulingstatus.go @@ -16,7 +16,7 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha1 +package v1alpha2 // PodSchedulingStatusApplyConfiguration represents an declarative configuration of the PodSchedulingStatus type for use // with apply. 
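Editor's note: the apply-configuration builders above keep their shape across the rename; only the package path and the API version they stamp change. The following is an illustrative sketch (not part of this diff) of server-side apply against the new group version. The namespace, node names, and field manager are hypothetical, and the With* setters for PodSchedulingSpec fields are assumed to follow the usual applyconfiguration-gen pattern.

package main

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	resourcev1alpha2 "k8s.io/client-go/applyconfigurations/resource/v1alpha2"
	"k8s.io/client-go/kubernetes"
)

// applyPodScheduling server-side-applies a PodScheduling built with the
// renamed v1alpha2 apply-configuration package.
func applyPodScheduling(ctx context.Context, cs kubernetes.Interface) error {
	// PodScheduling(name, namespace) now stamps kind "PodScheduling" and
	// apiVersion "resource.k8s.io/v1alpha2", as shown in the diff above.
	ps := resourcev1alpha2.PodScheduling("my-pod", "default").
		WithSpec(resourcev1alpha2.PodSchedulingSpec().
			WithPotentialNodes("node-1", "node-2"))

	// The typed client accessor is renamed in lockstep (see clientset.go later
	// in this diff). "example-manager" is a hypothetical field manager name.
	_, err := cs.ResourceV1alpha2().PodSchedulings("default").
		Apply(ctx, ps, metav1.ApplyOptions{FieldManager: "example-manager", Force: true})
	return err
}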
diff --git a/staging/src/k8s.io/client-go/applyconfigurations/resource/v1alpha1/resourceclaim.go b/staging/src/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaim.go similarity index 96% rename from staging/src/k8s.io/client-go/applyconfigurations/resource/v1alpha1/resourceclaim.go rename to staging/src/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaim.go index f94811a9b10..6c219f837b6 100644 --- a/staging/src/k8s.io/client-go/applyconfigurations/resource/v1alpha1/resourceclaim.go +++ b/staging/src/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaim.go @@ -16,10 +16,10 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha1 +package v1alpha2 import ( - resourcev1alpha1 "k8s.io/api/resource/v1alpha1" + resourcev1alpha2 "k8s.io/api/resource/v1alpha2" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" @@ -43,7 +43,7 @@ func ResourceClaim(name, namespace string) *ResourceClaimApplyConfiguration { b.WithName(name) b.WithNamespace(namespace) b.WithKind("ResourceClaim") - b.WithAPIVersion("resource.k8s.io/v1alpha1") + b.WithAPIVersion("resource.k8s.io/v1alpha2") return b } @@ -58,20 +58,20 @@ func ResourceClaim(name, namespace string) *ResourceClaimApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractResourceClaim(resourceClaim *resourcev1alpha1.ResourceClaim, fieldManager string) (*ResourceClaimApplyConfiguration, error) { +func ExtractResourceClaim(resourceClaim *resourcev1alpha2.ResourceClaim, fieldManager string) (*ResourceClaimApplyConfiguration, error) { return extractResourceClaim(resourceClaim, fieldManager, "") } // ExtractResourceClaimStatus is the same as ExtractResourceClaim except // that it extracts the status subresource applied configuration. // Experimental! 
-func ExtractResourceClaimStatus(resourceClaim *resourcev1alpha1.ResourceClaim, fieldManager string) (*ResourceClaimApplyConfiguration, error) { +func ExtractResourceClaimStatus(resourceClaim *resourcev1alpha2.ResourceClaim, fieldManager string) (*ResourceClaimApplyConfiguration, error) { return extractResourceClaim(resourceClaim, fieldManager, "status") } -func extractResourceClaim(resourceClaim *resourcev1alpha1.ResourceClaim, fieldManager string, subresource string) (*ResourceClaimApplyConfiguration, error) { +func extractResourceClaim(resourceClaim *resourcev1alpha2.ResourceClaim, fieldManager string, subresource string) (*ResourceClaimApplyConfiguration, error) { b := &ResourceClaimApplyConfiguration{} - err := managedfields.ExtractInto(resourceClaim, internal.Parser().Type("io.k8s.api.resource.v1alpha1.ResourceClaim"), fieldManager, b, subresource) + err := managedfields.ExtractInto(resourceClaim, internal.Parser().Type("io.k8s.api.resource.v1alpha2.ResourceClaim"), fieldManager, b, subresource) if err != nil { return nil, err } @@ -79,7 +79,7 @@ func extractResourceClaim(resourceClaim *resourcev1alpha1.ResourceClaim, fieldMa b.WithNamespace(resourceClaim.Namespace) b.WithKind("ResourceClaim") - b.WithAPIVersion("resource.k8s.io/v1alpha1") + b.WithAPIVersion("resource.k8s.io/v1alpha2") return b, nil } diff --git a/staging/src/k8s.io/client-go/applyconfigurations/resource/v1alpha1/resourceclaimconsumerreference.go b/staging/src/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimconsumerreference.go similarity index 99% rename from staging/src/k8s.io/client-go/applyconfigurations/resource/v1alpha1/resourceclaimconsumerreference.go rename to staging/src/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimconsumerreference.go index 477099cd7a0..41bb9e9a141 100644 --- a/staging/src/k8s.io/client-go/applyconfigurations/resource/v1alpha1/resourceclaimconsumerreference.go +++ b/staging/src/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimconsumerreference.go @@ -16,7 +16,7 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha1 +package v1alpha2 import ( types "k8s.io/apimachinery/pkg/types" diff --git a/staging/src/k8s.io/client-go/applyconfigurations/resource/v1alpha1/resourceclaimparametersreference.go b/staging/src/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimparametersreference.go similarity index 99% rename from staging/src/k8s.io/client-go/applyconfigurations/resource/v1alpha1/resourceclaimparametersreference.go rename to staging/src/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimparametersreference.go index d7b25d75eb4..27820ede60c 100644 --- a/staging/src/k8s.io/client-go/applyconfigurations/resource/v1alpha1/resourceclaimparametersreference.go +++ b/staging/src/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimparametersreference.go @@ -16,7 +16,7 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha1 +package v1alpha2 // ResourceClaimParametersReferenceApplyConfiguration represents an declarative configuration of the ResourceClaimParametersReference type for use // with apply. 
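Editor's note: the Extract helpers above likewise keep their signatures; they now accept the v1alpha2 API types and parse managed fields against the io.k8s.api.resource.v1alpha2.* schema names. A minimal sketch of how a controller might call the renamed helper (illustrative only; the manager name is hypothetical):

package main

import (
	"fmt"

	resourceapi "k8s.io/api/resource/v1alpha2"
	resourcev1alpha2 "k8s.io/client-go/applyconfigurations/resource/v1alpha2"
)

// extractOwnedFields recovers the apply configuration owned by a single
// field manager from a ResourceClaim read back from the cluster.
func extractOwnedFields(claim *resourceapi.ResourceClaim, manager string) error {
	// ExtractResourceClaim accepts *resourceapi.ResourceClaim (v1alpha2) and
	// parses against "io.k8s.api.resource.v1alpha2.ResourceClaim", per the diff.
	ac, err := resourcev1alpha2.ExtractResourceClaim(claim, manager)
	if err != nil {
		return err
	}
	fmt.Printf("fields owned by %q: %+v\n", manager, ac)
	return nil
}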
diff --git a/staging/src/k8s.io/client-go/applyconfigurations/resource/v1alpha1/resourceclaimschedulingstatus.go b/staging/src/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimschedulingstatus.go similarity index 99% rename from staging/src/k8s.io/client-go/applyconfigurations/resource/v1alpha1/resourceclaimschedulingstatus.go rename to staging/src/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimschedulingstatus.go index 35ff34abab2..e74679aed3a 100644 --- a/staging/src/k8s.io/client-go/applyconfigurations/resource/v1alpha1/resourceclaimschedulingstatus.go +++ b/staging/src/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimschedulingstatus.go @@ -16,7 +16,7 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha1 +package v1alpha2 // ResourceClaimSchedulingStatusApplyConfiguration represents an declarative configuration of the ResourceClaimSchedulingStatus type for use // with apply. diff --git a/staging/src/k8s.io/client-go/applyconfigurations/resource/v1alpha1/resourceclaimspec.go b/staging/src/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimspec.go similarity index 93% rename from staging/src/k8s.io/client-go/applyconfigurations/resource/v1alpha1/resourceclaimspec.go rename to staging/src/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimspec.go index d3261904627..0c73e64e9ed 100644 --- a/staging/src/k8s.io/client-go/applyconfigurations/resource/v1alpha1/resourceclaimspec.go +++ b/staging/src/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimspec.go @@ -16,10 +16,10 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha1 +package v1alpha2 import ( - resourcev1alpha1 "k8s.io/api/resource/v1alpha1" + resourcev1alpha2 "k8s.io/api/resource/v1alpha2" ) // ResourceClaimSpecApplyConfiguration represents an declarative configuration of the ResourceClaimSpec type for use @@ -27,7 +27,7 @@ import ( type ResourceClaimSpecApplyConfiguration struct { ResourceClassName *string `json:"resourceClassName,omitempty"` ParametersRef *ResourceClaimParametersReferenceApplyConfiguration `json:"parametersRef,omitempty"` - AllocationMode *resourcev1alpha1.AllocationMode `json:"allocationMode,omitempty"` + AllocationMode *resourcev1alpha2.AllocationMode `json:"allocationMode,omitempty"` } // ResourceClaimSpecApplyConfiguration constructs an declarative configuration of the ResourceClaimSpec type for use with @@ -55,7 +55,7 @@ func (b *ResourceClaimSpecApplyConfiguration) WithParametersRef(value *ResourceC // WithAllocationMode sets the AllocationMode field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the AllocationMode field is set to the value of the last call. 
-func (b *ResourceClaimSpecApplyConfiguration) WithAllocationMode(value resourcev1alpha1.AllocationMode) *ResourceClaimSpecApplyConfiguration { +func (b *ResourceClaimSpecApplyConfiguration) WithAllocationMode(value resourcev1alpha2.AllocationMode) *ResourceClaimSpecApplyConfiguration { b.AllocationMode = &value return b } diff --git a/staging/src/k8s.io/client-go/applyconfigurations/resource/v1alpha1/resourceclaimstatus.go b/staging/src/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimstatus.go similarity index 99% rename from staging/src/k8s.io/client-go/applyconfigurations/resource/v1alpha1/resourceclaimstatus.go rename to staging/src/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimstatus.go index e2283f8b07f..c6fa610906f 100644 --- a/staging/src/k8s.io/client-go/applyconfigurations/resource/v1alpha1/resourceclaimstatus.go +++ b/staging/src/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimstatus.go @@ -16,7 +16,7 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha1 +package v1alpha2 // ResourceClaimStatusApplyConfiguration represents an declarative configuration of the ResourceClaimStatus type for use // with apply. diff --git a/staging/src/k8s.io/client-go/applyconfigurations/resource/v1alpha1/resourceclaimtemplate.go b/staging/src/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimtemplate.go similarity index 96% rename from staging/src/k8s.io/client-go/applyconfigurations/resource/v1alpha1/resourceclaimtemplate.go rename to staging/src/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimtemplate.go index e3c602cb652..fc2209b8f09 100644 --- a/staging/src/k8s.io/client-go/applyconfigurations/resource/v1alpha1/resourceclaimtemplate.go +++ b/staging/src/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimtemplate.go @@ -16,10 +16,10 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha1 +package v1alpha2 import ( - resourcev1alpha1 "k8s.io/api/resource/v1alpha1" + resourcev1alpha2 "k8s.io/api/resource/v1alpha2" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" @@ -42,7 +42,7 @@ func ResourceClaimTemplate(name, namespace string) *ResourceClaimTemplateApplyCo b.WithName(name) b.WithNamespace(namespace) b.WithKind("ResourceClaimTemplate") - b.WithAPIVersion("resource.k8s.io/v1alpha1") + b.WithAPIVersion("resource.k8s.io/v1alpha2") return b } @@ -57,20 +57,20 @@ func ResourceClaimTemplate(name, namespace string) *ResourceClaimTemplateApplyCo // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractResourceClaimTemplate(resourceClaimTemplate *resourcev1alpha1.ResourceClaimTemplate, fieldManager string) (*ResourceClaimTemplateApplyConfiguration, error) { +func ExtractResourceClaimTemplate(resourceClaimTemplate *resourcev1alpha2.ResourceClaimTemplate, fieldManager string) (*ResourceClaimTemplateApplyConfiguration, error) { return extractResourceClaimTemplate(resourceClaimTemplate, fieldManager, "") } // ExtractResourceClaimTemplateStatus is the same as ExtractResourceClaimTemplate except // that it extracts the status subresource applied configuration. // Experimental! 
-func ExtractResourceClaimTemplateStatus(resourceClaimTemplate *resourcev1alpha1.ResourceClaimTemplate, fieldManager string) (*ResourceClaimTemplateApplyConfiguration, error) { +func ExtractResourceClaimTemplateStatus(resourceClaimTemplate *resourcev1alpha2.ResourceClaimTemplate, fieldManager string) (*ResourceClaimTemplateApplyConfiguration, error) { return extractResourceClaimTemplate(resourceClaimTemplate, fieldManager, "status") } -func extractResourceClaimTemplate(resourceClaimTemplate *resourcev1alpha1.ResourceClaimTemplate, fieldManager string, subresource string) (*ResourceClaimTemplateApplyConfiguration, error) { +func extractResourceClaimTemplate(resourceClaimTemplate *resourcev1alpha2.ResourceClaimTemplate, fieldManager string, subresource string) (*ResourceClaimTemplateApplyConfiguration, error) { b := &ResourceClaimTemplateApplyConfiguration{} - err := managedfields.ExtractInto(resourceClaimTemplate, internal.Parser().Type("io.k8s.api.resource.v1alpha1.ResourceClaimTemplate"), fieldManager, b, subresource) + err := managedfields.ExtractInto(resourceClaimTemplate, internal.Parser().Type("io.k8s.api.resource.v1alpha2.ResourceClaimTemplate"), fieldManager, b, subresource) if err != nil { return nil, err } @@ -78,7 +78,7 @@ func extractResourceClaimTemplate(resourceClaimTemplate *resourcev1alpha1.Resour b.WithNamespace(resourceClaimTemplate.Namespace) b.WithKind("ResourceClaimTemplate") - b.WithAPIVersion("resource.k8s.io/v1alpha1") + b.WithAPIVersion("resource.k8s.io/v1alpha2") return b, nil } diff --git a/staging/src/k8s.io/client-go/applyconfigurations/resource/v1alpha1/resourceclaimtemplatespec.go b/staging/src/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimtemplatespec.go similarity index 99% rename from staging/src/k8s.io/client-go/applyconfigurations/resource/v1alpha1/resourceclaimtemplatespec.go rename to staging/src/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimtemplatespec.go index 88058e066e1..2f38ea03668 100644 --- a/staging/src/k8s.io/client-go/applyconfigurations/resource/v1alpha1/resourceclaimtemplatespec.go +++ b/staging/src/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimtemplatespec.go @@ -16,7 +16,7 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha1 +package v1alpha2 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" diff --git a/staging/src/k8s.io/client-go/applyconfigurations/resource/v1alpha1/resourceclass.go b/staging/src/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclass.go similarity index 96% rename from staging/src/k8s.io/client-go/applyconfigurations/resource/v1alpha1/resourceclass.go rename to staging/src/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclass.go index 5f980acdb1f..724c9e88e00 100644 --- a/staging/src/k8s.io/client-go/applyconfigurations/resource/v1alpha1/resourceclass.go +++ b/staging/src/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclass.go @@ -16,10 +16,10 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. 
-package v1alpha1 +package v1alpha2 import ( - resourcev1alpha1 "k8s.io/api/resource/v1alpha1" + resourcev1alpha2 "k8s.io/api/resource/v1alpha2" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" @@ -44,7 +44,7 @@ func ResourceClass(name string) *ResourceClassApplyConfiguration { b := &ResourceClassApplyConfiguration{} b.WithName(name) b.WithKind("ResourceClass") - b.WithAPIVersion("resource.k8s.io/v1alpha1") + b.WithAPIVersion("resource.k8s.io/v1alpha2") return b } @@ -59,27 +59,27 @@ func ResourceClass(name string) *ResourceClassApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractResourceClass(resourceClass *resourcev1alpha1.ResourceClass, fieldManager string) (*ResourceClassApplyConfiguration, error) { +func ExtractResourceClass(resourceClass *resourcev1alpha2.ResourceClass, fieldManager string) (*ResourceClassApplyConfiguration, error) { return extractResourceClass(resourceClass, fieldManager, "") } // ExtractResourceClassStatus is the same as ExtractResourceClass except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractResourceClassStatus(resourceClass *resourcev1alpha1.ResourceClass, fieldManager string) (*ResourceClassApplyConfiguration, error) { +func ExtractResourceClassStatus(resourceClass *resourcev1alpha2.ResourceClass, fieldManager string) (*ResourceClassApplyConfiguration, error) { return extractResourceClass(resourceClass, fieldManager, "status") } -func extractResourceClass(resourceClass *resourcev1alpha1.ResourceClass, fieldManager string, subresource string) (*ResourceClassApplyConfiguration, error) { +func extractResourceClass(resourceClass *resourcev1alpha2.ResourceClass, fieldManager string, subresource string) (*ResourceClassApplyConfiguration, error) { b := &ResourceClassApplyConfiguration{} - err := managedfields.ExtractInto(resourceClass, internal.Parser().Type("io.k8s.api.resource.v1alpha1.ResourceClass"), fieldManager, b, subresource) + err := managedfields.ExtractInto(resourceClass, internal.Parser().Type("io.k8s.api.resource.v1alpha2.ResourceClass"), fieldManager, b, subresource) if err != nil { return nil, err } b.WithName(resourceClass.Name) b.WithKind("ResourceClass") - b.WithAPIVersion("resource.k8s.io/v1alpha1") + b.WithAPIVersion("resource.k8s.io/v1alpha2") return b, nil } diff --git a/staging/src/k8s.io/client-go/applyconfigurations/resource/v1alpha1/resourceclassparametersreference.go b/staging/src/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclassparametersreference.go similarity index 99% rename from staging/src/k8s.io/client-go/applyconfigurations/resource/v1alpha1/resourceclassparametersreference.go rename to staging/src/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclassparametersreference.go index b03a9a6da4b..d67e4d39771 100644 --- a/staging/src/k8s.io/client-go/applyconfigurations/resource/v1alpha1/resourceclassparametersreference.go +++ b/staging/src/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclassparametersreference.go @@ -16,7 +16,7 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. 
-package v1alpha1 +package v1alpha2 // ResourceClassParametersReferenceApplyConfiguration represents an declarative configuration of the ResourceClassParametersReference type for use // with apply. diff --git a/staging/src/k8s.io/client-go/applyconfigurations/utils.go b/staging/src/k8s.io/client-go/applyconfigurations/utils.go index 2d8b1daad5f..90de5fbc2fa 100644 --- a/staging/src/k8s.io/client-go/applyconfigurations/utils.go +++ b/staging/src/k8s.io/client-go/applyconfigurations/utils.go @@ -58,7 +58,7 @@ import ( rbacv1 "k8s.io/api/rbac/v1" rbacv1alpha1 "k8s.io/api/rbac/v1alpha1" rbacv1beta1 "k8s.io/api/rbac/v1beta1" - resourcev1alpha1 "k8s.io/api/resource/v1alpha1" + v1alpha2 "k8s.io/api/resource/v1alpha2" schedulingv1 "k8s.io/api/scheduling/v1" schedulingv1alpha1 "k8s.io/api/scheduling/v1alpha1" schedulingv1beta1 "k8s.io/api/scheduling/v1beta1" @@ -107,7 +107,7 @@ import ( applyconfigurationsrbacv1 "k8s.io/client-go/applyconfigurations/rbac/v1" applyconfigurationsrbacv1alpha1 "k8s.io/client-go/applyconfigurations/rbac/v1alpha1" applyconfigurationsrbacv1beta1 "k8s.io/client-go/applyconfigurations/rbac/v1beta1" - applyconfigurationsresourcev1alpha1 "k8s.io/client-go/applyconfigurations/resource/v1alpha1" + resourcev1alpha2 "k8s.io/client-go/applyconfigurations/resource/v1alpha2" applyconfigurationsschedulingv1 "k8s.io/client-go/applyconfigurations/scheduling/v1" applyconfigurationsschedulingv1alpha1 "k8s.io/client-go/applyconfigurations/scheduling/v1alpha1" applyconfigurationsschedulingv1beta1 "k8s.io/client-go/applyconfigurations/scheduling/v1beta1" @@ -1448,35 +1448,35 @@ func ForKind(kind schema.GroupVersionKind) interface{} { case rbacv1beta1.SchemeGroupVersion.WithKind("Subject"): return &applyconfigurationsrbacv1beta1.SubjectApplyConfiguration{} - // Group=resource.k8s.io, Version=v1alpha1 - case resourcev1alpha1.SchemeGroupVersion.WithKind("AllocationResult"): - return &applyconfigurationsresourcev1alpha1.AllocationResultApplyConfiguration{} - case resourcev1alpha1.SchemeGroupVersion.WithKind("PodScheduling"): - return &applyconfigurationsresourcev1alpha1.PodSchedulingApplyConfiguration{} - case resourcev1alpha1.SchemeGroupVersion.WithKind("PodSchedulingSpec"): - return &applyconfigurationsresourcev1alpha1.PodSchedulingSpecApplyConfiguration{} - case resourcev1alpha1.SchemeGroupVersion.WithKind("PodSchedulingStatus"): - return &applyconfigurationsresourcev1alpha1.PodSchedulingStatusApplyConfiguration{} - case resourcev1alpha1.SchemeGroupVersion.WithKind("ResourceClaim"): - return &applyconfigurationsresourcev1alpha1.ResourceClaimApplyConfiguration{} - case resourcev1alpha1.SchemeGroupVersion.WithKind("ResourceClaimConsumerReference"): - return &applyconfigurationsresourcev1alpha1.ResourceClaimConsumerReferenceApplyConfiguration{} - case resourcev1alpha1.SchemeGroupVersion.WithKind("ResourceClaimParametersReference"): - return &applyconfigurationsresourcev1alpha1.ResourceClaimParametersReferenceApplyConfiguration{} - case resourcev1alpha1.SchemeGroupVersion.WithKind("ResourceClaimSchedulingStatus"): - return &applyconfigurationsresourcev1alpha1.ResourceClaimSchedulingStatusApplyConfiguration{} - case resourcev1alpha1.SchemeGroupVersion.WithKind("ResourceClaimSpec"): - return &applyconfigurationsresourcev1alpha1.ResourceClaimSpecApplyConfiguration{} - case resourcev1alpha1.SchemeGroupVersion.WithKind("ResourceClaimStatus"): - return &applyconfigurationsresourcev1alpha1.ResourceClaimStatusApplyConfiguration{} - case 
resourcev1alpha1.SchemeGroupVersion.WithKind("ResourceClaimTemplate"): - return &applyconfigurationsresourcev1alpha1.ResourceClaimTemplateApplyConfiguration{} - case resourcev1alpha1.SchemeGroupVersion.WithKind("ResourceClaimTemplateSpec"): - return &applyconfigurationsresourcev1alpha1.ResourceClaimTemplateSpecApplyConfiguration{} - case resourcev1alpha1.SchemeGroupVersion.WithKind("ResourceClass"): - return &applyconfigurationsresourcev1alpha1.ResourceClassApplyConfiguration{} - case resourcev1alpha1.SchemeGroupVersion.WithKind("ResourceClassParametersReference"): - return &applyconfigurationsresourcev1alpha1.ResourceClassParametersReferenceApplyConfiguration{} + // Group=resource.k8s.io, Version=v1alpha2 + case v1alpha2.SchemeGroupVersion.WithKind("AllocationResult"): + return &resourcev1alpha2.AllocationResultApplyConfiguration{} + case v1alpha2.SchemeGroupVersion.WithKind("PodScheduling"): + return &resourcev1alpha2.PodSchedulingApplyConfiguration{} + case v1alpha2.SchemeGroupVersion.WithKind("PodSchedulingSpec"): + return &resourcev1alpha2.PodSchedulingSpecApplyConfiguration{} + case v1alpha2.SchemeGroupVersion.WithKind("PodSchedulingStatus"): + return &resourcev1alpha2.PodSchedulingStatusApplyConfiguration{} + case v1alpha2.SchemeGroupVersion.WithKind("ResourceClaim"): + return &resourcev1alpha2.ResourceClaimApplyConfiguration{} + case v1alpha2.SchemeGroupVersion.WithKind("ResourceClaimConsumerReference"): + return &resourcev1alpha2.ResourceClaimConsumerReferenceApplyConfiguration{} + case v1alpha2.SchemeGroupVersion.WithKind("ResourceClaimParametersReference"): + return &resourcev1alpha2.ResourceClaimParametersReferenceApplyConfiguration{} + case v1alpha2.SchemeGroupVersion.WithKind("ResourceClaimSchedulingStatus"): + return &resourcev1alpha2.ResourceClaimSchedulingStatusApplyConfiguration{} + case v1alpha2.SchemeGroupVersion.WithKind("ResourceClaimSpec"): + return &resourcev1alpha2.ResourceClaimSpecApplyConfiguration{} + case v1alpha2.SchemeGroupVersion.WithKind("ResourceClaimStatus"): + return &resourcev1alpha2.ResourceClaimStatusApplyConfiguration{} + case v1alpha2.SchemeGroupVersion.WithKind("ResourceClaimTemplate"): + return &resourcev1alpha2.ResourceClaimTemplateApplyConfiguration{} + case v1alpha2.SchemeGroupVersion.WithKind("ResourceClaimTemplateSpec"): + return &resourcev1alpha2.ResourceClaimTemplateSpecApplyConfiguration{} + case v1alpha2.SchemeGroupVersion.WithKind("ResourceClass"): + return &resourcev1alpha2.ResourceClassApplyConfiguration{} + case v1alpha2.SchemeGroupVersion.WithKind("ResourceClassParametersReference"): + return &resourcev1alpha2.ResourceClassParametersReferenceApplyConfiguration{} // Group=scheduling.k8s.io, Version=v1 case schedulingv1.SchemeGroupVersion.WithKind("PriorityClass"): diff --git a/staging/src/k8s.io/client-go/informers/generic.go b/staging/src/k8s.io/client-go/informers/generic.go index ba4d4f429a0..ecdc82c8684 100644 --- a/staging/src/k8s.io/client-go/informers/generic.go +++ b/staging/src/k8s.io/client-go/informers/generic.go @@ -59,7 +59,7 @@ import ( rbacv1 "k8s.io/api/rbac/v1" rbacv1alpha1 "k8s.io/api/rbac/v1alpha1" rbacv1beta1 "k8s.io/api/rbac/v1beta1" - resourcev1alpha1 "k8s.io/api/resource/v1alpha1" + v1alpha2 "k8s.io/api/resource/v1alpha2" schedulingv1 "k8s.io/api/scheduling/v1" schedulingv1alpha1 "k8s.io/api/scheduling/v1alpha1" schedulingv1beta1 "k8s.io/api/scheduling/v1beta1" @@ -348,15 +348,15 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource case 
rbacv1beta1.SchemeGroupVersion.WithResource("rolebindings"): return &genericInformer{resource: resource.GroupResource(), informer: f.Rbac().V1beta1().RoleBindings().Informer()}, nil - // Group=resource.k8s.io, Version=v1alpha1 - case resourcev1alpha1.SchemeGroupVersion.WithResource("podschedulings"): - return &genericInformer{resource: resource.GroupResource(), informer: f.Resource().V1alpha1().PodSchedulings().Informer()}, nil - case resourcev1alpha1.SchemeGroupVersion.WithResource("resourceclaims"): - return &genericInformer{resource: resource.GroupResource(), informer: f.Resource().V1alpha1().ResourceClaims().Informer()}, nil - case resourcev1alpha1.SchemeGroupVersion.WithResource("resourceclaimtemplates"): - return &genericInformer{resource: resource.GroupResource(), informer: f.Resource().V1alpha1().ResourceClaimTemplates().Informer()}, nil - case resourcev1alpha1.SchemeGroupVersion.WithResource("resourceclasses"): - return &genericInformer{resource: resource.GroupResource(), informer: f.Resource().V1alpha1().ResourceClasses().Informer()}, nil + // Group=resource.k8s.io, Version=v1alpha2 + case v1alpha2.SchemeGroupVersion.WithResource("podschedulings"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Resource().V1alpha2().PodSchedulings().Informer()}, nil + case v1alpha2.SchemeGroupVersion.WithResource("resourceclaims"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Resource().V1alpha2().ResourceClaims().Informer()}, nil + case v1alpha2.SchemeGroupVersion.WithResource("resourceclaimtemplates"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Resource().V1alpha2().ResourceClaimTemplates().Informer()}, nil + case v1alpha2.SchemeGroupVersion.WithResource("resourceclasses"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Resource().V1alpha2().ResourceClasses().Informer()}, nil // Group=scheduling.k8s.io, Version=v1 case schedulingv1.SchemeGroupVersion.WithResource("priorityclasses"): diff --git a/staging/src/k8s.io/client-go/informers/resource/interface.go b/staging/src/k8s.io/client-go/informers/resource/interface.go index 6cf95b0d473..3fcce8ae9dc 100644 --- a/staging/src/k8s.io/client-go/informers/resource/interface.go +++ b/staging/src/k8s.io/client-go/informers/resource/interface.go @@ -20,13 +20,13 @@ package resource import ( internalinterfaces "k8s.io/client-go/informers/internalinterfaces" - v1alpha1 "k8s.io/client-go/informers/resource/v1alpha1" + v1alpha2 "k8s.io/client-go/informers/resource/v1alpha2" ) // Interface provides access to each of this group's versions. type Interface interface { - // V1alpha1 provides access to shared informers for resources in V1alpha1. - V1alpha1() v1alpha1.Interface + // V1alpha2 provides access to shared informers for resources in V1alpha2. + V1alpha2() v1alpha2.Interface } type group struct { @@ -40,7 +40,7 @@ func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakList return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } -// V1alpha1 returns a new v1alpha1.Interface. -func (g *group) V1alpha1() v1alpha1.Interface { - return v1alpha1.New(g.factory, g.namespace, g.tweakListOptions) +// V1alpha2 returns a new v1alpha2.Interface. 
+func (g *group) V1alpha2() v1alpha2.Interface { + return v1alpha2.New(g.factory, g.namespace, g.tweakListOptions) } diff --git a/staging/src/k8s.io/client-go/informers/resource/v1alpha1/interface.go b/staging/src/k8s.io/client-go/informers/resource/v1alpha2/interface.go similarity index 99% rename from staging/src/k8s.io/client-go/informers/resource/v1alpha1/interface.go rename to staging/src/k8s.io/client-go/informers/resource/v1alpha2/interface.go index 4449dfa652e..cde5ae86cbd 100644 --- a/staging/src/k8s.io/client-go/informers/resource/v1alpha1/interface.go +++ b/staging/src/k8s.io/client-go/informers/resource/v1alpha2/interface.go @@ -16,7 +16,7 @@ limitations under the License. // Code generated by informer-gen. DO NOT EDIT. -package v1alpha1 +package v1alpha2 import ( internalinterfaces "k8s.io/client-go/informers/internalinterfaces" diff --git a/staging/src/k8s.io/client-go/informers/resource/v1alpha1/podscheduling.go b/staging/src/k8s.io/client-go/informers/resource/v1alpha2/podscheduling.go similarity index 85% rename from staging/src/k8s.io/client-go/informers/resource/v1alpha1/podscheduling.go rename to staging/src/k8s.io/client-go/informers/resource/v1alpha2/podscheduling.go index 87b4c34e15d..9ecf3c434a8 100644 --- a/staging/src/k8s.io/client-go/informers/resource/v1alpha1/podscheduling.go +++ b/staging/src/k8s.io/client-go/informers/resource/v1alpha2/podscheduling.go @@ -16,19 +16,19 @@ limitations under the License. // Code generated by informer-gen. DO NOT EDIT. -package v1alpha1 +package v1alpha2 import ( "context" time "time" - resourcev1alpha1 "k8s.io/api/resource/v1alpha1" + resourcev1alpha2 "k8s.io/api/resource/v1alpha2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1alpha1 "k8s.io/client-go/listers/resource/v1alpha1" + v1alpha2 "k8s.io/client-go/listers/resource/v1alpha2" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // PodSchedulings. 
type PodSchedulingInformer interface { Informer() cache.SharedIndexInformer - Lister() v1alpha1.PodSchedulingLister + Lister() v1alpha2.PodSchedulingLister } type podSchedulingInformer struct { @@ -62,16 +62,16 @@ func NewFilteredPodSchedulingInformer(client kubernetes.Interface, namespace str if tweakListOptions != nil { tweakListOptions(&options) } - return client.ResourceV1alpha1().PodSchedulings(namespace).List(context.TODO(), options) + return client.ResourceV1alpha2().PodSchedulings(namespace).List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.ResourceV1alpha1().PodSchedulings(namespace).Watch(context.TODO(), options) + return client.ResourceV1alpha2().PodSchedulings(namespace).Watch(context.TODO(), options) }, }, - &resourcev1alpha1.PodScheduling{}, + &resourcev1alpha2.PodScheduling{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *podSchedulingInformer) defaultInformer(client kubernetes.Interface, res } func (f *podSchedulingInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&resourcev1alpha1.PodScheduling{}, f.defaultInformer) + return f.factory.InformerFor(&resourcev1alpha2.PodScheduling{}, f.defaultInformer) } -func (f *podSchedulingInformer) Lister() v1alpha1.PodSchedulingLister { - return v1alpha1.NewPodSchedulingLister(f.Informer().GetIndexer()) +func (f *podSchedulingInformer) Lister() v1alpha2.PodSchedulingLister { + return v1alpha2.NewPodSchedulingLister(f.Informer().GetIndexer()) } diff --git a/staging/src/k8s.io/client-go/informers/resource/v1alpha1/resourceclaim.go b/staging/src/k8s.io/client-go/informers/resource/v1alpha2/resourceclaim.go similarity index 85% rename from staging/src/k8s.io/client-go/informers/resource/v1alpha1/resourceclaim.go rename to staging/src/k8s.io/client-go/informers/resource/v1alpha2/resourceclaim.go index 10150c02078..3af93689191 100644 --- a/staging/src/k8s.io/client-go/informers/resource/v1alpha1/resourceclaim.go +++ b/staging/src/k8s.io/client-go/informers/resource/v1alpha2/resourceclaim.go @@ -16,19 +16,19 @@ limitations under the License. // Code generated by informer-gen. DO NOT EDIT. -package v1alpha1 +package v1alpha2 import ( "context" time "time" - resourcev1alpha1 "k8s.io/api/resource/v1alpha1" + resourcev1alpha2 "k8s.io/api/resource/v1alpha2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1alpha1 "k8s.io/client-go/listers/resource/v1alpha1" + v1alpha2 "k8s.io/client-go/listers/resource/v1alpha2" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // ResourceClaims. 
type ResourceClaimInformer interface { Informer() cache.SharedIndexInformer - Lister() v1alpha1.ResourceClaimLister + Lister() v1alpha2.ResourceClaimLister } type resourceClaimInformer struct { @@ -62,16 +62,16 @@ func NewFilteredResourceClaimInformer(client kubernetes.Interface, namespace str if tweakListOptions != nil { tweakListOptions(&options) } - return client.ResourceV1alpha1().ResourceClaims(namespace).List(context.TODO(), options) + return client.ResourceV1alpha2().ResourceClaims(namespace).List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.ResourceV1alpha1().ResourceClaims(namespace).Watch(context.TODO(), options) + return client.ResourceV1alpha2().ResourceClaims(namespace).Watch(context.TODO(), options) }, }, - &resourcev1alpha1.ResourceClaim{}, + &resourcev1alpha2.ResourceClaim{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *resourceClaimInformer) defaultInformer(client kubernetes.Interface, res } func (f *resourceClaimInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&resourcev1alpha1.ResourceClaim{}, f.defaultInformer) + return f.factory.InformerFor(&resourcev1alpha2.ResourceClaim{}, f.defaultInformer) } -func (f *resourceClaimInformer) Lister() v1alpha1.ResourceClaimLister { - return v1alpha1.NewResourceClaimLister(f.Informer().GetIndexer()) +func (f *resourceClaimInformer) Lister() v1alpha2.ResourceClaimLister { + return v1alpha2.NewResourceClaimLister(f.Informer().GetIndexer()) } diff --git a/staging/src/k8s.io/client-go/informers/resource/v1alpha1/resourceclaimtemplate.go b/staging/src/k8s.io/client-go/informers/resource/v1alpha2/resourceclaimtemplate.go similarity index 86% rename from staging/src/k8s.io/client-go/informers/resource/v1alpha1/resourceclaimtemplate.go rename to staging/src/k8s.io/client-go/informers/resource/v1alpha2/resourceclaimtemplate.go index cdffa49db7f..13f4ad835cf 100644 --- a/staging/src/k8s.io/client-go/informers/resource/v1alpha1/resourceclaimtemplate.go +++ b/staging/src/k8s.io/client-go/informers/resource/v1alpha2/resourceclaimtemplate.go @@ -16,19 +16,19 @@ limitations under the License. // Code generated by informer-gen. DO NOT EDIT. -package v1alpha1 +package v1alpha2 import ( "context" time "time" - resourcev1alpha1 "k8s.io/api/resource/v1alpha1" + resourcev1alpha2 "k8s.io/api/resource/v1alpha2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1alpha1 "k8s.io/client-go/listers/resource/v1alpha1" + v1alpha2 "k8s.io/client-go/listers/resource/v1alpha2" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // ResourceClaimTemplates. 
type ResourceClaimTemplateInformer interface { Informer() cache.SharedIndexInformer - Lister() v1alpha1.ResourceClaimTemplateLister + Lister() v1alpha2.ResourceClaimTemplateLister } type resourceClaimTemplateInformer struct { @@ -62,16 +62,16 @@ func NewFilteredResourceClaimTemplateInformer(client kubernetes.Interface, names if tweakListOptions != nil { tweakListOptions(&options) } - return client.ResourceV1alpha1().ResourceClaimTemplates(namespace).List(context.TODO(), options) + return client.ResourceV1alpha2().ResourceClaimTemplates(namespace).List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.ResourceV1alpha1().ResourceClaimTemplates(namespace).Watch(context.TODO(), options) + return client.ResourceV1alpha2().ResourceClaimTemplates(namespace).Watch(context.TODO(), options) }, }, - &resourcev1alpha1.ResourceClaimTemplate{}, + &resourcev1alpha2.ResourceClaimTemplate{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *resourceClaimTemplateInformer) defaultInformer(client kubernetes.Interf } func (f *resourceClaimTemplateInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&resourcev1alpha1.ResourceClaimTemplate{}, f.defaultInformer) + return f.factory.InformerFor(&resourcev1alpha2.ResourceClaimTemplate{}, f.defaultInformer) } -func (f *resourceClaimTemplateInformer) Lister() v1alpha1.ResourceClaimTemplateLister { - return v1alpha1.NewResourceClaimTemplateLister(f.Informer().GetIndexer()) +func (f *resourceClaimTemplateInformer) Lister() v1alpha2.ResourceClaimTemplateLister { + return v1alpha2.NewResourceClaimTemplateLister(f.Informer().GetIndexer()) } diff --git a/staging/src/k8s.io/client-go/informers/resource/v1alpha1/resourceclass.go b/staging/src/k8s.io/client-go/informers/resource/v1alpha2/resourceclass.go similarity index 85% rename from staging/src/k8s.io/client-go/informers/resource/v1alpha1/resourceclass.go rename to staging/src/k8s.io/client-go/informers/resource/v1alpha2/resourceclass.go index e6faa5d02ea..cb76d78fe49 100644 --- a/staging/src/k8s.io/client-go/informers/resource/v1alpha1/resourceclass.go +++ b/staging/src/k8s.io/client-go/informers/resource/v1alpha2/resourceclass.go @@ -16,19 +16,19 @@ limitations under the License. // Code generated by informer-gen. DO NOT EDIT. -package v1alpha1 +package v1alpha2 import ( "context" time "time" - resourcev1alpha1 "k8s.io/api/resource/v1alpha1" + resourcev1alpha2 "k8s.io/api/resource/v1alpha2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1alpha1 "k8s.io/client-go/listers/resource/v1alpha1" + v1alpha2 "k8s.io/client-go/listers/resource/v1alpha2" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // ResourceClasses. 
type ResourceClassInformer interface { Informer() cache.SharedIndexInformer - Lister() v1alpha1.ResourceClassLister + Lister() v1alpha2.ResourceClassLister } type resourceClassInformer struct { @@ -61,16 +61,16 @@ func NewFilteredResourceClassInformer(client kubernetes.Interface, resyncPeriod if tweakListOptions != nil { tweakListOptions(&options) } - return client.ResourceV1alpha1().ResourceClasses().List(context.TODO(), options) + return client.ResourceV1alpha2().ResourceClasses().List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.ResourceV1alpha1().ResourceClasses().Watch(context.TODO(), options) + return client.ResourceV1alpha2().ResourceClasses().Watch(context.TODO(), options) }, }, - &resourcev1alpha1.ResourceClass{}, + &resourcev1alpha2.ResourceClass{}, resyncPeriod, indexers, ) @@ -81,9 +81,9 @@ func (f *resourceClassInformer) defaultInformer(client kubernetes.Interface, res } func (f *resourceClassInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&resourcev1alpha1.ResourceClass{}, f.defaultInformer) + return f.factory.InformerFor(&resourcev1alpha2.ResourceClass{}, f.defaultInformer) } -func (f *resourceClassInformer) Lister() v1alpha1.ResourceClassLister { - return v1alpha1.NewResourceClassLister(f.Informer().GetIndexer()) +func (f *resourceClassInformer) Lister() v1alpha2.ResourceClassLister { + return v1alpha2.NewResourceClassLister(f.Informer().GetIndexer()) } diff --git a/staging/src/k8s.io/client-go/kubernetes/clientset.go b/staging/src/k8s.io/client-go/kubernetes/clientset.go index 9eecbb2a805..4c329415494 100644 --- a/staging/src/k8s.io/client-go/kubernetes/clientset.go +++ b/staging/src/k8s.io/client-go/kubernetes/clientset.go @@ -66,7 +66,7 @@ import ( rbacv1 "k8s.io/client-go/kubernetes/typed/rbac/v1" rbacv1alpha1 "k8s.io/client-go/kubernetes/typed/rbac/v1alpha1" rbacv1beta1 "k8s.io/client-go/kubernetes/typed/rbac/v1beta1" - resourcev1alpha1 "k8s.io/client-go/kubernetes/typed/resource/v1alpha1" + resourcev1alpha2 "k8s.io/client-go/kubernetes/typed/resource/v1alpha2" schedulingv1 "k8s.io/client-go/kubernetes/typed/scheduling/v1" schedulingv1alpha1 "k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1" schedulingv1beta1 "k8s.io/client-go/kubernetes/typed/scheduling/v1beta1" @@ -122,7 +122,7 @@ type Interface interface { RbacV1() rbacv1.RbacV1Interface RbacV1beta1() rbacv1beta1.RbacV1beta1Interface RbacV1alpha1() rbacv1alpha1.RbacV1alpha1Interface - ResourceV1alpha1() resourcev1alpha1.ResourceV1alpha1Interface + ResourceV1alpha2() resourcev1alpha2.ResourceV1alpha2Interface SchedulingV1alpha1() schedulingv1alpha1.SchedulingV1alpha1Interface SchedulingV1beta1() schedulingv1beta1.SchedulingV1beta1Interface SchedulingV1() schedulingv1.SchedulingV1Interface @@ -177,7 +177,7 @@ type Clientset struct { rbacV1 *rbacv1.RbacV1Client rbacV1beta1 *rbacv1beta1.RbacV1beta1Client rbacV1alpha1 *rbacv1alpha1.RbacV1alpha1Client - resourceV1alpha1 *resourcev1alpha1.ResourceV1alpha1Client + resourceV1alpha2 *resourcev1alpha2.ResourceV1alpha2Client schedulingV1alpha1 *schedulingv1alpha1.SchedulingV1alpha1Client schedulingV1beta1 *schedulingv1beta1.SchedulingV1beta1Client schedulingV1 *schedulingv1.SchedulingV1Client @@ -401,9 +401,9 @@ func (c *Clientset) RbacV1alpha1() rbacv1alpha1.RbacV1alpha1Interface { return c.rbacV1alpha1 } -// ResourceV1alpha1 retrieves the ResourceV1alpha1Client -func (c *Clientset) ResourceV1alpha1() 
resourcev1alpha1.ResourceV1alpha1Interface { - return c.resourceV1alpha1 +// ResourceV1alpha2 retrieves the ResourceV1alpha2Client +func (c *Clientset) ResourceV1alpha2() resourcev1alpha2.ResourceV1alpha2Interface { + return c.resourceV1alpha2 } // SchedulingV1alpha1 retrieves the SchedulingV1alpha1Client @@ -652,7 +652,7 @@ func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset, if err != nil { return nil, err } - cs.resourceV1alpha1, err = resourcev1alpha1.NewForConfigAndClient(&configShallowCopy, httpClient) + cs.resourceV1alpha2, err = resourcev1alpha2.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err } @@ -744,7 +744,7 @@ func New(c rest.Interface) *Clientset { cs.rbacV1 = rbacv1.New(c) cs.rbacV1beta1 = rbacv1beta1.New(c) cs.rbacV1alpha1 = rbacv1alpha1.New(c) - cs.resourceV1alpha1 = resourcev1alpha1.New(c) + cs.resourceV1alpha2 = resourcev1alpha2.New(c) cs.schedulingV1alpha1 = schedulingv1alpha1.New(c) cs.schedulingV1beta1 = schedulingv1beta1.New(c) cs.schedulingV1 = schedulingv1.New(c) diff --git a/staging/src/k8s.io/client-go/kubernetes/fake/clientset_generated.go b/staging/src/k8s.io/client-go/kubernetes/fake/clientset_generated.go index f4d621941e9..03c7c8ae11e 100644 --- a/staging/src/k8s.io/client-go/kubernetes/fake/clientset_generated.go +++ b/staging/src/k8s.io/client-go/kubernetes/fake/clientset_generated.go @@ -110,8 +110,8 @@ import ( fakerbacv1alpha1 "k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake" rbacv1beta1 "k8s.io/client-go/kubernetes/typed/rbac/v1beta1" fakerbacv1beta1 "k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake" - resourcev1alpha1 "k8s.io/client-go/kubernetes/typed/resource/v1alpha1" - fakeresourcev1alpha1 "k8s.io/client-go/kubernetes/typed/resource/v1alpha1/fake" + resourcev1alpha2 "k8s.io/client-go/kubernetes/typed/resource/v1alpha2" + fakeresourcev1alpha2 "k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake" schedulingv1 "k8s.io/client-go/kubernetes/typed/scheduling/v1" fakeschedulingv1 "k8s.io/client-go/kubernetes/typed/scheduling/v1/fake" schedulingv1alpha1 "k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1" @@ -392,9 +392,9 @@ func (c *Clientset) RbacV1alpha1() rbacv1alpha1.RbacV1alpha1Interface { return &fakerbacv1alpha1.FakeRbacV1alpha1{Fake: &c.Fake} } -// ResourceV1alpha1 retrieves the ResourceV1alpha1Client -func (c *Clientset) ResourceV1alpha1() resourcev1alpha1.ResourceV1alpha1Interface { - return &fakeresourcev1alpha1.FakeResourceV1alpha1{Fake: &c.Fake} +// ResourceV1alpha2 retrieves the ResourceV1alpha2Client +func (c *Clientset) ResourceV1alpha2() resourcev1alpha2.ResourceV1alpha2Interface { + return &fakeresourcev1alpha2.FakeResourceV1alpha2{Fake: &c.Fake} } // SchedulingV1alpha1 retrieves the SchedulingV1alpha1Client diff --git a/staging/src/k8s.io/client-go/kubernetes/fake/register.go b/staging/src/k8s.io/client-go/kubernetes/fake/register.go index 30f42e25dae..5c85e6be0f5 100644 --- a/staging/src/k8s.io/client-go/kubernetes/fake/register.go +++ b/staging/src/k8s.io/client-go/kubernetes/fake/register.go @@ -62,7 +62,7 @@ import ( rbacv1 "k8s.io/api/rbac/v1" rbacv1alpha1 "k8s.io/api/rbac/v1alpha1" rbacv1beta1 "k8s.io/api/rbac/v1beta1" - resourcev1alpha1 "k8s.io/api/resource/v1alpha1" + resourcev1alpha2 "k8s.io/api/resource/v1alpha2" schedulingv1 "k8s.io/api/scheduling/v1" schedulingv1alpha1 "k8s.io/api/scheduling/v1alpha1" schedulingv1beta1 "k8s.io/api/scheduling/v1beta1" @@ -123,7 +123,7 @@ var localSchemeBuilder = runtime.SchemeBuilder{ rbacv1.AddToScheme, 
rbacv1beta1.AddToScheme, rbacv1alpha1.AddToScheme, - resourcev1alpha1.AddToScheme, + resourcev1alpha2.AddToScheme, schedulingv1alpha1.AddToScheme, schedulingv1beta1.AddToScheme, schedulingv1.AddToScheme, diff --git a/staging/src/k8s.io/client-go/kubernetes/scheme/register.go b/staging/src/k8s.io/client-go/kubernetes/scheme/register.go index e43780529b2..5ececbbaf2d 100644 --- a/staging/src/k8s.io/client-go/kubernetes/scheme/register.go +++ b/staging/src/k8s.io/client-go/kubernetes/scheme/register.go @@ -62,7 +62,7 @@ import ( rbacv1 "k8s.io/api/rbac/v1" rbacv1alpha1 "k8s.io/api/rbac/v1alpha1" rbacv1beta1 "k8s.io/api/rbac/v1beta1" - resourcev1alpha1 "k8s.io/api/resource/v1alpha1" + resourcev1alpha2 "k8s.io/api/resource/v1alpha2" schedulingv1 "k8s.io/api/scheduling/v1" schedulingv1alpha1 "k8s.io/api/scheduling/v1alpha1" schedulingv1beta1 "k8s.io/api/scheduling/v1beta1" @@ -123,7 +123,7 @@ var localSchemeBuilder = runtime.SchemeBuilder{ rbacv1.AddToScheme, rbacv1beta1.AddToScheme, rbacv1alpha1.AddToScheme, - resourcev1alpha1.AddToScheme, + resourcev1alpha2.AddToScheme, schedulingv1alpha1.AddToScheme, schedulingv1beta1.AddToScheme, schedulingv1.AddToScheme, diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/resource/v1alpha1/doc.go b/staging/src/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/doc.go similarity index 97% rename from staging/src/k8s.io/client-go/kubernetes/typed/resource/v1alpha1/doc.go rename to staging/src/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/doc.go index df51baa4d4c..baaf2d98537 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/resource/v1alpha1/doc.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/doc.go @@ -17,4 +17,4 @@ limitations under the License. // Code generated by client-gen. DO NOT EDIT. // This package has the automatically generated typed clients. 
-package v1alpha1 +package v1alpha2 diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/resource/v1alpha1/fake/doc.go b/staging/src/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/doc.go similarity index 100% rename from staging/src/k8s.io/client-go/kubernetes/typed/resource/v1alpha1/fake/doc.go rename to staging/src/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/doc.go diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/resource/v1alpha1/fake/fake_podscheduling.go b/staging/src/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/fake_podscheduling.go similarity index 73% rename from staging/src/k8s.io/client-go/kubernetes/typed/resource/v1alpha1/fake/fake_podscheduling.go rename to staging/src/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/fake_podscheduling.go index 3e43fee49a7..958b50d4504 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/resource/v1alpha1/fake/fake_podscheduling.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/fake_podscheduling.go @@ -23,40 +23,40 @@ import ( json "encoding/json" "fmt" - v1alpha1 "k8s.io/api/resource/v1alpha1" + v1alpha2 "k8s.io/api/resource/v1alpha2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - resourcev1alpha1 "k8s.io/client-go/applyconfigurations/resource/v1alpha1" + resourcev1alpha2 "k8s.io/client-go/applyconfigurations/resource/v1alpha2" testing "k8s.io/client-go/testing" ) // FakePodSchedulings implements PodSchedulingInterface type FakePodSchedulings struct { - Fake *FakeResourceV1alpha1 + Fake *FakeResourceV1alpha2 ns string } -var podschedulingsResource = v1alpha1.SchemeGroupVersion.WithResource("podschedulings") +var podschedulingsResource = v1alpha2.SchemeGroupVersion.WithResource("podschedulings") -var podschedulingsKind = v1alpha1.SchemeGroupVersion.WithKind("PodScheduling") +var podschedulingsKind = v1alpha2.SchemeGroupVersion.WithKind("PodScheduling") // Get takes name of the podScheduling, and returns the corresponding podScheduling object, and an error if there is any. -func (c *FakePodSchedulings) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.PodScheduling, err error) { +func (c *FakePodSchedulings) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha2.PodScheduling, err error) { obj, err := c.Fake. - Invokes(testing.NewGetAction(podschedulingsResource, c.ns, name), &v1alpha1.PodScheduling{}) + Invokes(testing.NewGetAction(podschedulingsResource, c.ns, name), &v1alpha2.PodScheduling{}) if obj == nil { return nil, err } - return obj.(*v1alpha1.PodScheduling), err + return obj.(*v1alpha2.PodScheduling), err } // List takes label and field selectors, and returns the list of PodSchedulings that match those selectors. -func (c *FakePodSchedulings) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.PodSchedulingList, err error) { +func (c *FakePodSchedulings) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha2.PodSchedulingList, err error) { obj, err := c.Fake. 
- Invokes(testing.NewListAction(podschedulingsResource, podschedulingsKind, c.ns, opts), &v1alpha1.PodSchedulingList{}) + Invokes(testing.NewListAction(podschedulingsResource, podschedulingsKind, c.ns, opts), &v1alpha2.PodSchedulingList{}) if obj == nil { return nil, err @@ -66,8 +66,8 @@ func (c *FakePodSchedulings) List(ctx context.Context, opts v1.ListOptions) (res if label == nil { label = labels.Everything() } - list := &v1alpha1.PodSchedulingList{ListMeta: obj.(*v1alpha1.PodSchedulingList).ListMeta} - for _, item := range obj.(*v1alpha1.PodSchedulingList).Items { + list := &v1alpha2.PodSchedulingList{ListMeta: obj.(*v1alpha2.PodSchedulingList).ListMeta} + for _, item := range obj.(*v1alpha2.PodSchedulingList).Items { if label.Matches(labels.Set(item.Labels)) { list.Items = append(list.Items, item) } @@ -83,43 +83,43 @@ func (c *FakePodSchedulings) Watch(ctx context.Context, opts v1.ListOptions) (wa } // Create takes the representation of a podScheduling and creates it. Returns the server's representation of the podScheduling, and an error, if there is any. -func (c *FakePodSchedulings) Create(ctx context.Context, podScheduling *v1alpha1.PodScheduling, opts v1.CreateOptions) (result *v1alpha1.PodScheduling, err error) { +func (c *FakePodSchedulings) Create(ctx context.Context, podScheduling *v1alpha2.PodScheduling, opts v1.CreateOptions) (result *v1alpha2.PodScheduling, err error) { obj, err := c.Fake. - Invokes(testing.NewCreateAction(podschedulingsResource, c.ns, podScheduling), &v1alpha1.PodScheduling{}) + Invokes(testing.NewCreateAction(podschedulingsResource, c.ns, podScheduling), &v1alpha2.PodScheduling{}) if obj == nil { return nil, err } - return obj.(*v1alpha1.PodScheduling), err + return obj.(*v1alpha2.PodScheduling), err } // Update takes the representation of a podScheduling and updates it. Returns the server's representation of the podScheduling, and an error, if there is any. -func (c *FakePodSchedulings) Update(ctx context.Context, podScheduling *v1alpha1.PodScheduling, opts v1.UpdateOptions) (result *v1alpha1.PodScheduling, err error) { +func (c *FakePodSchedulings) Update(ctx context.Context, podScheduling *v1alpha2.PodScheduling, opts v1.UpdateOptions) (result *v1alpha2.PodScheduling, err error) { obj, err := c.Fake. - Invokes(testing.NewUpdateAction(podschedulingsResource, c.ns, podScheduling), &v1alpha1.PodScheduling{}) + Invokes(testing.NewUpdateAction(podschedulingsResource, c.ns, podScheduling), &v1alpha2.PodScheduling{}) if obj == nil { return nil, err } - return obj.(*v1alpha1.PodScheduling), err + return obj.(*v1alpha2.PodScheduling), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakePodSchedulings) UpdateStatus(ctx context.Context, podScheduling *v1alpha1.PodScheduling, opts v1.UpdateOptions) (*v1alpha1.PodScheduling, error) { +func (c *FakePodSchedulings) UpdateStatus(ctx context.Context, podScheduling *v1alpha2.PodScheduling, opts v1.UpdateOptions) (*v1alpha2.PodScheduling, error) { obj, err := c.Fake. 
- Invokes(testing.NewUpdateSubresourceAction(podschedulingsResource, "status", c.ns, podScheduling), &v1alpha1.PodScheduling{}) + Invokes(testing.NewUpdateSubresourceAction(podschedulingsResource, "status", c.ns, podScheduling), &v1alpha2.PodScheduling{}) if obj == nil { return nil, err } - return obj.(*v1alpha1.PodScheduling), err + return obj.(*v1alpha2.PodScheduling), err } // Delete takes name of the podScheduling and deletes it. Returns an error if one occurs. func (c *FakePodSchedulings) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(podschedulingsResource, c.ns, name, opts), &v1alpha1.PodScheduling{}) + Invokes(testing.NewDeleteActionWithOptions(podschedulingsResource, c.ns, name, opts), &v1alpha2.PodScheduling{}) return err } @@ -128,23 +128,23 @@ func (c *FakePodSchedulings) Delete(ctx context.Context, name string, opts v1.De func (c *FakePodSchedulings) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { action := testing.NewDeleteCollectionAction(podschedulingsResource, c.ns, listOpts) - _, err := c.Fake.Invokes(action, &v1alpha1.PodSchedulingList{}) + _, err := c.Fake.Invokes(action, &v1alpha2.PodSchedulingList{}) return err } // Patch applies the patch and returns the patched podScheduling. -func (c *FakePodSchedulings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.PodScheduling, err error) { +func (c *FakePodSchedulings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.PodScheduling, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(podschedulingsResource, c.ns, name, pt, data, subresources...), &v1alpha1.PodScheduling{}) + Invokes(testing.NewPatchSubresourceAction(podschedulingsResource, c.ns, name, pt, data, subresources...), &v1alpha2.PodScheduling{}) if obj == nil { return nil, err } - return obj.(*v1alpha1.PodScheduling), err + return obj.(*v1alpha2.PodScheduling), err } // Apply takes the given apply declarative configuration, applies it and returns the applied podScheduling. -func (c *FakePodSchedulings) Apply(ctx context.Context, podScheduling *resourcev1alpha1.PodSchedulingApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.PodScheduling, err error) { +func (c *FakePodSchedulings) Apply(ctx context.Context, podScheduling *resourcev1alpha2.PodSchedulingApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.PodScheduling, err error) { if podScheduling == nil { return nil, fmt.Errorf("podScheduling provided to Apply must not be nil") } @@ -157,17 +157,17 @@ func (c *FakePodSchedulings) Apply(ctx context.Context, podScheduling *resourcev return nil, fmt.Errorf("podScheduling.Name must be provided to Apply") } obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(podschedulingsResource, c.ns, *name, types.ApplyPatchType, data), &v1alpha1.PodScheduling{}) + Invokes(testing.NewPatchSubresourceAction(podschedulingsResource, c.ns, *name, types.ApplyPatchType, data), &v1alpha2.PodScheduling{}) if obj == nil { return nil, err } - return obj.(*v1alpha1.PodScheduling), err + return obj.(*v1alpha2.PodScheduling), err } // ApplyStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
-func (c *FakePodSchedulings) ApplyStatus(ctx context.Context, podScheduling *resourcev1alpha1.PodSchedulingApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.PodScheduling, err error) { +func (c *FakePodSchedulings) ApplyStatus(ctx context.Context, podScheduling *resourcev1alpha2.PodSchedulingApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.PodScheduling, err error) { if podScheduling == nil { return nil, fmt.Errorf("podScheduling provided to Apply must not be nil") } @@ -180,10 +180,10 @@ func (c *FakePodSchedulings) ApplyStatus(ctx context.Context, podScheduling *res return nil, fmt.Errorf("podScheduling.Name must be provided to Apply") } obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(podschedulingsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1alpha1.PodScheduling{}) + Invokes(testing.NewPatchSubresourceAction(podschedulingsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1alpha2.PodScheduling{}) if obj == nil { return nil, err } - return obj.(*v1alpha1.PodScheduling), err + return obj.(*v1alpha2.PodScheduling), err } diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/resource/v1alpha1/fake/fake_resource_client.go b/staging/src/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/fake_resource_client.go similarity index 66% rename from staging/src/k8s.io/client-go/kubernetes/typed/resource/v1alpha1/fake/fake_resource_client.go rename to staging/src/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/fake_resource_client.go index c4776ee54f4..5e983c645b7 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/resource/v1alpha1/fake/fake_resource_client.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/fake_resource_client.go @@ -19,34 +19,34 @@ limitations under the License. package fake import ( - v1alpha1 "k8s.io/client-go/kubernetes/typed/resource/v1alpha1" + v1alpha2 "k8s.io/client-go/kubernetes/typed/resource/v1alpha2" rest "k8s.io/client-go/rest" testing "k8s.io/client-go/testing" ) -type FakeResourceV1alpha1 struct { +type FakeResourceV1alpha2 struct { *testing.Fake } -func (c *FakeResourceV1alpha1) PodSchedulings(namespace string) v1alpha1.PodSchedulingInterface { +func (c *FakeResourceV1alpha2) PodSchedulings(namespace string) v1alpha2.PodSchedulingInterface { return &FakePodSchedulings{c, namespace} } -func (c *FakeResourceV1alpha1) ResourceClaims(namespace string) v1alpha1.ResourceClaimInterface { +func (c *FakeResourceV1alpha2) ResourceClaims(namespace string) v1alpha2.ResourceClaimInterface { return &FakeResourceClaims{c, namespace} } -func (c *FakeResourceV1alpha1) ResourceClaimTemplates(namespace string) v1alpha1.ResourceClaimTemplateInterface { +func (c *FakeResourceV1alpha2) ResourceClaimTemplates(namespace string) v1alpha2.ResourceClaimTemplateInterface { return &FakeResourceClaimTemplates{c, namespace} } -func (c *FakeResourceV1alpha1) ResourceClasses() v1alpha1.ResourceClassInterface { +func (c *FakeResourceV1alpha2) ResourceClasses() v1alpha2.ResourceClassInterface { return &FakeResourceClasses{c} } // RESTClient returns a RESTClient that is used to communicate // with API server by this client implementation. 
-func (c *FakeResourceV1alpha1) RESTClient() rest.Interface { +func (c *FakeResourceV1alpha2) RESTClient() rest.Interface { var ret *rest.RESTClient return ret } diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/resource/v1alpha1/fake/fake_resourceclaim.go b/staging/src/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/fake_resourceclaim.go similarity index 73% rename from staging/src/k8s.io/client-go/kubernetes/typed/resource/v1alpha1/fake/fake_resourceclaim.go rename to staging/src/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/fake_resourceclaim.go index a5fe28dc5a3..087e51f7144 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/resource/v1alpha1/fake/fake_resourceclaim.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/fake_resourceclaim.go @@ -23,40 +23,40 @@ import ( json "encoding/json" "fmt" - v1alpha1 "k8s.io/api/resource/v1alpha1" + v1alpha2 "k8s.io/api/resource/v1alpha2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - resourcev1alpha1 "k8s.io/client-go/applyconfigurations/resource/v1alpha1" + resourcev1alpha2 "k8s.io/client-go/applyconfigurations/resource/v1alpha2" testing "k8s.io/client-go/testing" ) // FakeResourceClaims implements ResourceClaimInterface type FakeResourceClaims struct { - Fake *FakeResourceV1alpha1 + Fake *FakeResourceV1alpha2 ns string } -var resourceclaimsResource = v1alpha1.SchemeGroupVersion.WithResource("resourceclaims") +var resourceclaimsResource = v1alpha2.SchemeGroupVersion.WithResource("resourceclaims") -var resourceclaimsKind = v1alpha1.SchemeGroupVersion.WithKind("ResourceClaim") +var resourceclaimsKind = v1alpha2.SchemeGroupVersion.WithKind("ResourceClaim") // Get takes name of the resourceClaim, and returns the corresponding resourceClaim object, and an error if there is any. -func (c *FakeResourceClaims) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ResourceClaim, err error) { +func (c *FakeResourceClaims) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha2.ResourceClaim, err error) { obj, err := c.Fake. - Invokes(testing.NewGetAction(resourceclaimsResource, c.ns, name), &v1alpha1.ResourceClaim{}) + Invokes(testing.NewGetAction(resourceclaimsResource, c.ns, name), &v1alpha2.ResourceClaim{}) if obj == nil { return nil, err } - return obj.(*v1alpha1.ResourceClaim), err + return obj.(*v1alpha2.ResourceClaim), err } // List takes label and field selectors, and returns the list of ResourceClaims that match those selectors. -func (c *FakeResourceClaims) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ResourceClaimList, err error) { +func (c *FakeResourceClaims) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha2.ResourceClaimList, err error) { obj, err := c.Fake. 
- Invokes(testing.NewListAction(resourceclaimsResource, resourceclaimsKind, c.ns, opts), &v1alpha1.ResourceClaimList{}) + Invokes(testing.NewListAction(resourceclaimsResource, resourceclaimsKind, c.ns, opts), &v1alpha2.ResourceClaimList{}) if obj == nil { return nil, err @@ -66,8 +66,8 @@ func (c *FakeResourceClaims) List(ctx context.Context, opts v1.ListOptions) (res if label == nil { label = labels.Everything() } - list := &v1alpha1.ResourceClaimList{ListMeta: obj.(*v1alpha1.ResourceClaimList).ListMeta} - for _, item := range obj.(*v1alpha1.ResourceClaimList).Items { + list := &v1alpha2.ResourceClaimList{ListMeta: obj.(*v1alpha2.ResourceClaimList).ListMeta} + for _, item := range obj.(*v1alpha2.ResourceClaimList).Items { if label.Matches(labels.Set(item.Labels)) { list.Items = append(list.Items, item) } @@ -83,43 +83,43 @@ func (c *FakeResourceClaims) Watch(ctx context.Context, opts v1.ListOptions) (wa } // Create takes the representation of a resourceClaim and creates it. Returns the server's representation of the resourceClaim, and an error, if there is any. -func (c *FakeResourceClaims) Create(ctx context.Context, resourceClaim *v1alpha1.ResourceClaim, opts v1.CreateOptions) (result *v1alpha1.ResourceClaim, err error) { +func (c *FakeResourceClaims) Create(ctx context.Context, resourceClaim *v1alpha2.ResourceClaim, opts v1.CreateOptions) (result *v1alpha2.ResourceClaim, err error) { obj, err := c.Fake. - Invokes(testing.NewCreateAction(resourceclaimsResource, c.ns, resourceClaim), &v1alpha1.ResourceClaim{}) + Invokes(testing.NewCreateAction(resourceclaimsResource, c.ns, resourceClaim), &v1alpha2.ResourceClaim{}) if obj == nil { return nil, err } - return obj.(*v1alpha1.ResourceClaim), err + return obj.(*v1alpha2.ResourceClaim), err } // Update takes the representation of a resourceClaim and updates it. Returns the server's representation of the resourceClaim, and an error, if there is any. -func (c *FakeResourceClaims) Update(ctx context.Context, resourceClaim *v1alpha1.ResourceClaim, opts v1.UpdateOptions) (result *v1alpha1.ResourceClaim, err error) { +func (c *FakeResourceClaims) Update(ctx context.Context, resourceClaim *v1alpha2.ResourceClaim, opts v1.UpdateOptions) (result *v1alpha2.ResourceClaim, err error) { obj, err := c.Fake. - Invokes(testing.NewUpdateAction(resourceclaimsResource, c.ns, resourceClaim), &v1alpha1.ResourceClaim{}) + Invokes(testing.NewUpdateAction(resourceclaimsResource, c.ns, resourceClaim), &v1alpha2.ResourceClaim{}) if obj == nil { return nil, err } - return obj.(*v1alpha1.ResourceClaim), err + return obj.(*v1alpha2.ResourceClaim), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeResourceClaims) UpdateStatus(ctx context.Context, resourceClaim *v1alpha1.ResourceClaim, opts v1.UpdateOptions) (*v1alpha1.ResourceClaim, error) { +func (c *FakeResourceClaims) UpdateStatus(ctx context.Context, resourceClaim *v1alpha2.ResourceClaim, opts v1.UpdateOptions) (*v1alpha2.ResourceClaim, error) { obj, err := c.Fake. 
- Invokes(testing.NewUpdateSubresourceAction(resourceclaimsResource, "status", c.ns, resourceClaim), &v1alpha1.ResourceClaim{}) + Invokes(testing.NewUpdateSubresourceAction(resourceclaimsResource, "status", c.ns, resourceClaim), &v1alpha2.ResourceClaim{}) if obj == nil { return nil, err } - return obj.(*v1alpha1.ResourceClaim), err + return obj.(*v1alpha2.ResourceClaim), err } // Delete takes name of the resourceClaim and deletes it. Returns an error if one occurs. func (c *FakeResourceClaims) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(resourceclaimsResource, c.ns, name, opts), &v1alpha1.ResourceClaim{}) + Invokes(testing.NewDeleteActionWithOptions(resourceclaimsResource, c.ns, name, opts), &v1alpha2.ResourceClaim{}) return err } @@ -128,23 +128,23 @@ func (c *FakeResourceClaims) Delete(ctx context.Context, name string, opts v1.De func (c *FakeResourceClaims) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { action := testing.NewDeleteCollectionAction(resourceclaimsResource, c.ns, listOpts) - _, err := c.Fake.Invokes(action, &v1alpha1.ResourceClaimList{}) + _, err := c.Fake.Invokes(action, &v1alpha2.ResourceClaimList{}) return err } // Patch applies the patch and returns the patched resourceClaim. -func (c *FakeResourceClaims) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ResourceClaim, err error) { +func (c *FakeResourceClaims) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.ResourceClaim, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(resourceclaimsResource, c.ns, name, pt, data, subresources...), &v1alpha1.ResourceClaim{}) + Invokes(testing.NewPatchSubresourceAction(resourceclaimsResource, c.ns, name, pt, data, subresources...), &v1alpha2.ResourceClaim{}) if obj == nil { return nil, err } - return obj.(*v1alpha1.ResourceClaim), err + return obj.(*v1alpha2.ResourceClaim), err } // Apply takes the given apply declarative configuration, applies it and returns the applied resourceClaim. -func (c *FakeResourceClaims) Apply(ctx context.Context, resourceClaim *resourcev1alpha1.ResourceClaimApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ResourceClaim, err error) { +func (c *FakeResourceClaims) Apply(ctx context.Context, resourceClaim *resourcev1alpha2.ResourceClaimApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceClaim, err error) { if resourceClaim == nil { return nil, fmt.Errorf("resourceClaim provided to Apply must not be nil") } @@ -157,17 +157,17 @@ func (c *FakeResourceClaims) Apply(ctx context.Context, resourceClaim *resourcev return nil, fmt.Errorf("resourceClaim.Name must be provided to Apply") } obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(resourceclaimsResource, c.ns, *name, types.ApplyPatchType, data), &v1alpha1.ResourceClaim{}) + Invokes(testing.NewPatchSubresourceAction(resourceclaimsResource, c.ns, *name, types.ApplyPatchType, data), &v1alpha2.ResourceClaim{}) if obj == nil { return nil, err } - return obj.(*v1alpha1.ResourceClaim), err + return obj.(*v1alpha2.ResourceClaim), err } // ApplyStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
-func (c *FakeResourceClaims) ApplyStatus(ctx context.Context, resourceClaim *resourcev1alpha1.ResourceClaimApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ResourceClaim, err error) { +func (c *FakeResourceClaims) ApplyStatus(ctx context.Context, resourceClaim *resourcev1alpha2.ResourceClaimApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceClaim, err error) { if resourceClaim == nil { return nil, fmt.Errorf("resourceClaim provided to Apply must not be nil") } @@ -180,10 +180,10 @@ func (c *FakeResourceClaims) ApplyStatus(ctx context.Context, resourceClaim *res return nil, fmt.Errorf("resourceClaim.Name must be provided to Apply") } obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(resourceclaimsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1alpha1.ResourceClaim{}) + Invokes(testing.NewPatchSubresourceAction(resourceclaimsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1alpha2.ResourceClaim{}) if obj == nil { return nil, err } - return obj.(*v1alpha1.ResourceClaim), err + return obj.(*v1alpha2.ResourceClaim), err } diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/resource/v1alpha1/fake/fake_resourceclaimtemplate.go b/staging/src/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/fake_resourceclaimtemplate.go similarity index 72% rename from staging/src/k8s.io/client-go/kubernetes/typed/resource/v1alpha1/fake/fake_resourceclaimtemplate.go rename to staging/src/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/fake_resourceclaimtemplate.go index 47b78e22a3f..2a1b4554ebe 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/resource/v1alpha1/fake/fake_resourceclaimtemplate.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/fake_resourceclaimtemplate.go @@ -23,40 +23,40 @@ import ( json "encoding/json" "fmt" - v1alpha1 "k8s.io/api/resource/v1alpha1" + v1alpha2 "k8s.io/api/resource/v1alpha2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - resourcev1alpha1 "k8s.io/client-go/applyconfigurations/resource/v1alpha1" + resourcev1alpha2 "k8s.io/client-go/applyconfigurations/resource/v1alpha2" testing "k8s.io/client-go/testing" ) // FakeResourceClaimTemplates implements ResourceClaimTemplateInterface type FakeResourceClaimTemplates struct { - Fake *FakeResourceV1alpha1 + Fake *FakeResourceV1alpha2 ns string } -var resourceclaimtemplatesResource = v1alpha1.SchemeGroupVersion.WithResource("resourceclaimtemplates") +var resourceclaimtemplatesResource = v1alpha2.SchemeGroupVersion.WithResource("resourceclaimtemplates") -var resourceclaimtemplatesKind = v1alpha1.SchemeGroupVersion.WithKind("ResourceClaimTemplate") +var resourceclaimtemplatesKind = v1alpha2.SchemeGroupVersion.WithKind("ResourceClaimTemplate") // Get takes name of the resourceClaimTemplate, and returns the corresponding resourceClaimTemplate object, and an error if there is any. -func (c *FakeResourceClaimTemplates) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ResourceClaimTemplate, err error) { +func (c *FakeResourceClaimTemplates) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha2.ResourceClaimTemplate, err error) { obj, err := c.Fake. 
- Invokes(testing.NewGetAction(resourceclaimtemplatesResource, c.ns, name), &v1alpha1.ResourceClaimTemplate{}) + Invokes(testing.NewGetAction(resourceclaimtemplatesResource, c.ns, name), &v1alpha2.ResourceClaimTemplate{}) if obj == nil { return nil, err } - return obj.(*v1alpha1.ResourceClaimTemplate), err + return obj.(*v1alpha2.ResourceClaimTemplate), err } // List takes label and field selectors, and returns the list of ResourceClaimTemplates that match those selectors. -func (c *FakeResourceClaimTemplates) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ResourceClaimTemplateList, err error) { +func (c *FakeResourceClaimTemplates) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha2.ResourceClaimTemplateList, err error) { obj, err := c.Fake. - Invokes(testing.NewListAction(resourceclaimtemplatesResource, resourceclaimtemplatesKind, c.ns, opts), &v1alpha1.ResourceClaimTemplateList{}) + Invokes(testing.NewListAction(resourceclaimtemplatesResource, resourceclaimtemplatesKind, c.ns, opts), &v1alpha2.ResourceClaimTemplateList{}) if obj == nil { return nil, err @@ -66,8 +66,8 @@ func (c *FakeResourceClaimTemplates) List(ctx context.Context, opts v1.ListOptio if label == nil { label = labels.Everything() } - list := &v1alpha1.ResourceClaimTemplateList{ListMeta: obj.(*v1alpha1.ResourceClaimTemplateList).ListMeta} - for _, item := range obj.(*v1alpha1.ResourceClaimTemplateList).Items { + list := &v1alpha2.ResourceClaimTemplateList{ListMeta: obj.(*v1alpha2.ResourceClaimTemplateList).ListMeta} + for _, item := range obj.(*v1alpha2.ResourceClaimTemplateList).Items { if label.Matches(labels.Set(item.Labels)) { list.Items = append(list.Items, item) } @@ -83,31 +83,31 @@ func (c *FakeResourceClaimTemplates) Watch(ctx context.Context, opts v1.ListOpti } // Create takes the representation of a resourceClaimTemplate and creates it. Returns the server's representation of the resourceClaimTemplate, and an error, if there is any. -func (c *FakeResourceClaimTemplates) Create(ctx context.Context, resourceClaimTemplate *v1alpha1.ResourceClaimTemplate, opts v1.CreateOptions) (result *v1alpha1.ResourceClaimTemplate, err error) { +func (c *FakeResourceClaimTemplates) Create(ctx context.Context, resourceClaimTemplate *v1alpha2.ResourceClaimTemplate, opts v1.CreateOptions) (result *v1alpha2.ResourceClaimTemplate, err error) { obj, err := c.Fake. - Invokes(testing.NewCreateAction(resourceclaimtemplatesResource, c.ns, resourceClaimTemplate), &v1alpha1.ResourceClaimTemplate{}) + Invokes(testing.NewCreateAction(resourceclaimtemplatesResource, c.ns, resourceClaimTemplate), &v1alpha2.ResourceClaimTemplate{}) if obj == nil { return nil, err } - return obj.(*v1alpha1.ResourceClaimTemplate), err + return obj.(*v1alpha2.ResourceClaimTemplate), err } // Update takes the representation of a resourceClaimTemplate and updates it. Returns the server's representation of the resourceClaimTemplate, and an error, if there is any. -func (c *FakeResourceClaimTemplates) Update(ctx context.Context, resourceClaimTemplate *v1alpha1.ResourceClaimTemplate, opts v1.UpdateOptions) (result *v1alpha1.ResourceClaimTemplate, err error) { +func (c *FakeResourceClaimTemplates) Update(ctx context.Context, resourceClaimTemplate *v1alpha2.ResourceClaimTemplate, opts v1.UpdateOptions) (result *v1alpha2.ResourceClaimTemplate, err error) { obj, err := c.Fake. 
- Invokes(testing.NewUpdateAction(resourceclaimtemplatesResource, c.ns, resourceClaimTemplate), &v1alpha1.ResourceClaimTemplate{}) + Invokes(testing.NewUpdateAction(resourceclaimtemplatesResource, c.ns, resourceClaimTemplate), &v1alpha2.ResourceClaimTemplate{}) if obj == nil { return nil, err } - return obj.(*v1alpha1.ResourceClaimTemplate), err + return obj.(*v1alpha2.ResourceClaimTemplate), err } // Delete takes name of the resourceClaimTemplate and deletes it. Returns an error if one occurs. func (c *FakeResourceClaimTemplates) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(resourceclaimtemplatesResource, c.ns, name, opts), &v1alpha1.ResourceClaimTemplate{}) + Invokes(testing.NewDeleteActionWithOptions(resourceclaimtemplatesResource, c.ns, name, opts), &v1alpha2.ResourceClaimTemplate{}) return err } @@ -116,23 +116,23 @@ func (c *FakeResourceClaimTemplates) Delete(ctx context.Context, name string, op func (c *FakeResourceClaimTemplates) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { action := testing.NewDeleteCollectionAction(resourceclaimtemplatesResource, c.ns, listOpts) - _, err := c.Fake.Invokes(action, &v1alpha1.ResourceClaimTemplateList{}) + _, err := c.Fake.Invokes(action, &v1alpha2.ResourceClaimTemplateList{}) return err } // Patch applies the patch and returns the patched resourceClaimTemplate. -func (c *FakeResourceClaimTemplates) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ResourceClaimTemplate, err error) { +func (c *FakeResourceClaimTemplates) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.ResourceClaimTemplate, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(resourceclaimtemplatesResource, c.ns, name, pt, data, subresources...), &v1alpha1.ResourceClaimTemplate{}) + Invokes(testing.NewPatchSubresourceAction(resourceclaimtemplatesResource, c.ns, name, pt, data, subresources...), &v1alpha2.ResourceClaimTemplate{}) if obj == nil { return nil, err } - return obj.(*v1alpha1.ResourceClaimTemplate), err + return obj.(*v1alpha2.ResourceClaimTemplate), err } // Apply takes the given apply declarative configuration, applies it and returns the applied resourceClaimTemplate. -func (c *FakeResourceClaimTemplates) Apply(ctx context.Context, resourceClaimTemplate *resourcev1alpha1.ResourceClaimTemplateApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ResourceClaimTemplate, err error) { +func (c *FakeResourceClaimTemplates) Apply(ctx context.Context, resourceClaimTemplate *resourcev1alpha2.ResourceClaimTemplateApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceClaimTemplate, err error) { if resourceClaimTemplate == nil { return nil, fmt.Errorf("resourceClaimTemplate provided to Apply must not be nil") } @@ -145,10 +145,10 @@ func (c *FakeResourceClaimTemplates) Apply(ctx context.Context, resourceClaimTem return nil, fmt.Errorf("resourceClaimTemplate.Name must be provided to Apply") } obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(resourceclaimtemplatesResource, c.ns, *name, types.ApplyPatchType, data), &v1alpha1.ResourceClaimTemplate{}) + Invokes(testing.NewPatchSubresourceAction(resourceclaimtemplatesResource, c.ns, *name, types.ApplyPatchType, data), &v1alpha2.ResourceClaimTemplate{}) if obj == nil { return nil, err } - return obj.(*v1alpha1.ResourceClaimTemplate), err + return obj.(*v1alpha2.ResourceClaimTemplate), err } diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/resource/v1alpha1/fake/fake_resourceclass.go b/staging/src/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/fake_resourceclass.go similarity index 73% rename from staging/src/k8s.io/client-go/kubernetes/typed/resource/v1alpha1/fake/fake_resourceclass.go rename to staging/src/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/fake_resourceclass.go index 47ccbd6310b..4d247c5136a 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/resource/v1alpha1/fake/fake_resourceclass.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/fake_resourceclass.go @@ -23,38 +23,38 @@ import ( json "encoding/json" "fmt" - v1alpha1 "k8s.io/api/resource/v1alpha1" + v1alpha2 "k8s.io/api/resource/v1alpha2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - resourcev1alpha1 "k8s.io/client-go/applyconfigurations/resource/v1alpha1" + resourcev1alpha2 "k8s.io/client-go/applyconfigurations/resource/v1alpha2" testing "k8s.io/client-go/testing" ) // FakeResourceClasses implements ResourceClassInterface type FakeResourceClasses struct { - Fake *FakeResourceV1alpha1 + Fake *FakeResourceV1alpha2 } -var resourceclassesResource = v1alpha1.SchemeGroupVersion.WithResource("resourceclasses") +var resourceclassesResource = v1alpha2.SchemeGroupVersion.WithResource("resourceclasses") -var resourceclassesKind = v1alpha1.SchemeGroupVersion.WithKind("ResourceClass") +var resourceclassesKind = v1alpha2.SchemeGroupVersion.WithKind("ResourceClass") // Get takes name of the resourceClass, and returns the corresponding resourceClass object, and an error if there is any. -func (c *FakeResourceClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ResourceClass, err error) { +func (c *FakeResourceClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha2.ResourceClass, err error) { obj, err := c.Fake. - Invokes(testing.NewRootGetAction(resourceclassesResource, name), &v1alpha1.ResourceClass{}) + Invokes(testing.NewRootGetAction(resourceclassesResource, name), &v1alpha2.ResourceClass{}) if obj == nil { return nil, err } - return obj.(*v1alpha1.ResourceClass), err + return obj.(*v1alpha2.ResourceClass), err } // List takes label and field selectors, and returns the list of ResourceClasses that match those selectors. -func (c *FakeResourceClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ResourceClassList, err error) { +func (c *FakeResourceClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha2.ResourceClassList, err error) { obj, err := c.Fake. 
- Invokes(testing.NewRootListAction(resourceclassesResource, resourceclassesKind, opts), &v1alpha1.ResourceClassList{}) + Invokes(testing.NewRootListAction(resourceclassesResource, resourceclassesKind, opts), &v1alpha2.ResourceClassList{}) if obj == nil { return nil, err } @@ -63,8 +63,8 @@ func (c *FakeResourceClasses) List(ctx context.Context, opts v1.ListOptions) (re if label == nil { label = labels.Everything() } - list := &v1alpha1.ResourceClassList{ListMeta: obj.(*v1alpha1.ResourceClassList).ListMeta} - for _, item := range obj.(*v1alpha1.ResourceClassList).Items { + list := &v1alpha2.ResourceClassList{ListMeta: obj.(*v1alpha2.ResourceClassList).ListMeta} + for _, item := range obj.(*v1alpha2.ResourceClassList).Items { if label.Matches(labels.Set(item.Labels)) { list.Items = append(list.Items, item) } @@ -79,29 +79,29 @@ func (c *FakeResourceClasses) Watch(ctx context.Context, opts v1.ListOptions) (w } // Create takes the representation of a resourceClass and creates it. Returns the server's representation of the resourceClass, and an error, if there is any. -func (c *FakeResourceClasses) Create(ctx context.Context, resourceClass *v1alpha1.ResourceClass, opts v1.CreateOptions) (result *v1alpha1.ResourceClass, err error) { +func (c *FakeResourceClasses) Create(ctx context.Context, resourceClass *v1alpha2.ResourceClass, opts v1.CreateOptions) (result *v1alpha2.ResourceClass, err error) { obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(resourceclassesResource, resourceClass), &v1alpha1.ResourceClass{}) + Invokes(testing.NewRootCreateAction(resourceclassesResource, resourceClass), &v1alpha2.ResourceClass{}) if obj == nil { return nil, err } - return obj.(*v1alpha1.ResourceClass), err + return obj.(*v1alpha2.ResourceClass), err } // Update takes the representation of a resourceClass and updates it. Returns the server's representation of the resourceClass, and an error, if there is any. -func (c *FakeResourceClasses) Update(ctx context.Context, resourceClass *v1alpha1.ResourceClass, opts v1.UpdateOptions) (result *v1alpha1.ResourceClass, err error) { +func (c *FakeResourceClasses) Update(ctx context.Context, resourceClass *v1alpha2.ResourceClass, opts v1.UpdateOptions) (result *v1alpha2.ResourceClass, err error) { obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(resourceclassesResource, resourceClass), &v1alpha1.ResourceClass{}) + Invokes(testing.NewRootUpdateAction(resourceclassesResource, resourceClass), &v1alpha2.ResourceClass{}) if obj == nil { return nil, err } - return obj.(*v1alpha1.ResourceClass), err + return obj.(*v1alpha2.ResourceClass), err } // Delete takes name of the resourceClass and deletes it. Returns an error if one occurs. func (c *FakeResourceClasses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. 
- Invokes(testing.NewRootDeleteActionWithOptions(resourceclassesResource, name, opts), &v1alpha1.ResourceClass{}) + Invokes(testing.NewRootDeleteActionWithOptions(resourceclassesResource, name, opts), &v1alpha2.ResourceClass{}) return err } @@ -109,22 +109,22 @@ func (c *FakeResourceClasses) Delete(ctx context.Context, name string, opts v1.D func (c *FakeResourceClasses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { action := testing.NewRootDeleteCollectionAction(resourceclassesResource, listOpts) - _, err := c.Fake.Invokes(action, &v1alpha1.ResourceClassList{}) + _, err := c.Fake.Invokes(action, &v1alpha2.ResourceClassList{}) return err } // Patch applies the patch and returns the patched resourceClass. -func (c *FakeResourceClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ResourceClass, err error) { +func (c *FakeResourceClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.ResourceClass, err error) { obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(resourceclassesResource, name, pt, data, subresources...), &v1alpha1.ResourceClass{}) + Invokes(testing.NewRootPatchSubresourceAction(resourceclassesResource, name, pt, data, subresources...), &v1alpha2.ResourceClass{}) if obj == nil { return nil, err } - return obj.(*v1alpha1.ResourceClass), err + return obj.(*v1alpha2.ResourceClass), err } // Apply takes the given apply declarative configuration, applies it and returns the applied resourceClass. -func (c *FakeResourceClasses) Apply(ctx context.Context, resourceClass *resourcev1alpha1.ResourceClassApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ResourceClass, err error) { +func (c *FakeResourceClasses) Apply(ctx context.Context, resourceClass *resourcev1alpha2.ResourceClassApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceClass, err error) { if resourceClass == nil { return nil, fmt.Errorf("resourceClass provided to Apply must not be nil") } @@ -137,9 +137,9 @@ func (c *FakeResourceClasses) Apply(ctx context.Context, resourceClass *resource return nil, fmt.Errorf("resourceClass.Name must be provided to Apply") } obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(resourceclassesResource, *name, types.ApplyPatchType, data), &v1alpha1.ResourceClass{}) + Invokes(testing.NewRootPatchSubresourceAction(resourceclassesResource, *name, types.ApplyPatchType, data), &v1alpha2.ResourceClass{}) if obj == nil { return nil, err } - return obj.(*v1alpha1.ResourceClass), err + return obj.(*v1alpha2.ResourceClass), err } diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/resource/v1alpha1/generated_expansion.go b/staging/src/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/generated_expansion.go similarity index 97% rename from staging/src/k8s.io/client-go/kubernetes/typed/resource/v1alpha1/generated_expansion.go rename to staging/src/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/generated_expansion.go index df88c2f93be..2c1ff795c7d 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/resource/v1alpha1/generated_expansion.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/generated_expansion.go @@ -16,7 +16,7 @@ limitations under the License. // Code generated by client-gen. DO NOT EDIT. 
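A minimal sketch of how a unit test might exercise the renamed fake clients above, assuming the fake clientset in k8s.io/client-go/kubernetes/fake exposes a ResourceV1alpha2() accessor after this rename; the claim name and resource class name are placeholders.

package example_test

import (
	"context"
	"testing"

	resourcev1alpha2 "k8s.io/api/resource/v1alpha2"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

func TestResourceClaimWithFakeClient(t *testing.T) {
	ctx := context.Background()
	// The fake clientset backs FakeResourceV1alpha2 (and the other fakes above)
	// with an in-memory object tracker, so Create/List round-trip without a server.
	client := fake.NewSimpleClientset()

	claim := &resourcev1alpha2.ResourceClaim{
		ObjectMeta: metav1.ObjectMeta{Name: "example-claim", Namespace: "default"}, // placeholder names
		Spec: resourcev1alpha2.ResourceClaimSpec{
			ResourceClassName: "example-class",
			AllocationMode:    resourcev1alpha2.AllocationModeWaitForFirstConsumer,
		},
	}

	// Create and List go through FakeResourceClaims, now keyed to the v1alpha2 types.
	if _, err := client.ResourceV1alpha2().ResourceClaims("default").Create(ctx, claim, metav1.CreateOptions{}); err != nil {
		t.Fatalf("create: %v", err)
	}
	list, err := client.ResourceV1alpha2().ResourceClaims("default").List(ctx, metav1.ListOptions{})
	if err != nil {
		t.Fatalf("list: %v", err)
	}
	if len(list.Items) != 1 {
		t.Fatalf("expected 1 ResourceClaim, got %d", len(list.Items))
	}
}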
-package v1alpha1 +package v1alpha2 type PodSchedulingExpansion interface{} diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/resource/v1alpha1/podscheduling.go b/staging/src/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/podscheduling.go similarity index 79% rename from staging/src/k8s.io/client-go/kubernetes/typed/resource/v1alpha1/podscheduling.go rename to staging/src/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/podscheduling.go index e163a845615..49de8ed1617 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/resource/v1alpha1/podscheduling.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/podscheduling.go @@ -16,7 +16,7 @@ limitations under the License. // Code generated by client-gen. DO NOT EDIT. -package v1alpha1 +package v1alpha2 import ( "context" @@ -24,11 +24,11 @@ import ( "fmt" "time" - v1alpha1 "k8s.io/api/resource/v1alpha1" + v1alpha2 "k8s.io/api/resource/v1alpha2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - resourcev1alpha1 "k8s.io/client-go/applyconfigurations/resource/v1alpha1" + resourcev1alpha2 "k8s.io/client-go/applyconfigurations/resource/v1alpha2" scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -41,17 +41,17 @@ type PodSchedulingsGetter interface { // PodSchedulingInterface has methods to work with PodScheduling resources. type PodSchedulingInterface interface { - Create(ctx context.Context, podScheduling *v1alpha1.PodScheduling, opts v1.CreateOptions) (*v1alpha1.PodScheduling, error) - Update(ctx context.Context, podScheduling *v1alpha1.PodScheduling, opts v1.UpdateOptions) (*v1alpha1.PodScheduling, error) - UpdateStatus(ctx context.Context, podScheduling *v1alpha1.PodScheduling, opts v1.UpdateOptions) (*v1alpha1.PodScheduling, error) + Create(ctx context.Context, podScheduling *v1alpha2.PodScheduling, opts v1.CreateOptions) (*v1alpha2.PodScheduling, error) + Update(ctx context.Context, podScheduling *v1alpha2.PodScheduling, opts v1.UpdateOptions) (*v1alpha2.PodScheduling, error) + UpdateStatus(ctx context.Context, podScheduling *v1alpha2.PodScheduling, opts v1.UpdateOptions) (*v1alpha2.PodScheduling, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.PodScheduling, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.PodSchedulingList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha2.PodScheduling, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha2.PodSchedulingList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.PodScheduling, err error) - Apply(ctx context.Context, podScheduling *resourcev1alpha1.PodSchedulingApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.PodScheduling, err error) - ApplyStatus(ctx context.Context, podScheduling *resourcev1alpha1.PodSchedulingApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.PodScheduling, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.PodScheduling, err error) + Apply(ctx context.Context, podScheduling 
*resourcev1alpha2.PodSchedulingApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.PodScheduling, err error) + ApplyStatus(ctx context.Context, podScheduling *resourcev1alpha2.PodSchedulingApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.PodScheduling, err error) PodSchedulingExpansion } @@ -62,7 +62,7 @@ type podSchedulings struct { } // newPodSchedulings returns a PodSchedulings -func newPodSchedulings(c *ResourceV1alpha1Client, namespace string) *podSchedulings { +func newPodSchedulings(c *ResourceV1alpha2Client, namespace string) *podSchedulings { return &podSchedulings{ client: c.RESTClient(), ns: namespace, @@ -70,8 +70,8 @@ func newPodSchedulings(c *ResourceV1alpha1Client, namespace string) *podScheduli } // Get takes name of the podScheduling, and returns the corresponding podScheduling object, and an error if there is any. -func (c *podSchedulings) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.PodScheduling, err error) { - result = &v1alpha1.PodScheduling{} +func (c *podSchedulings) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha2.PodScheduling, err error) { + result = &v1alpha2.PodScheduling{} err = c.client.Get(). Namespace(c.ns). Resource("podschedulings"). @@ -83,12 +83,12 @@ func (c *podSchedulings) Get(ctx context.Context, name string, options v1.GetOpt } // List takes label and field selectors, and returns the list of PodSchedulings that match those selectors. -func (c *podSchedulings) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.PodSchedulingList, err error) { +func (c *podSchedulings) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha2.PodSchedulingList, err error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second } - result = &v1alpha1.PodSchedulingList{} + result = &v1alpha2.PodSchedulingList{} err = c.client.Get(). Namespace(c.ns). Resource("podschedulings"). @@ -115,8 +115,8 @@ func (c *podSchedulings) Watch(ctx context.Context, opts v1.ListOptions) (watch. } // Create takes the representation of a podScheduling and creates it. Returns the server's representation of the podScheduling, and an error, if there is any. -func (c *podSchedulings) Create(ctx context.Context, podScheduling *v1alpha1.PodScheduling, opts v1.CreateOptions) (result *v1alpha1.PodScheduling, err error) { - result = &v1alpha1.PodScheduling{} +func (c *podSchedulings) Create(ctx context.Context, podScheduling *v1alpha2.PodScheduling, opts v1.CreateOptions) (result *v1alpha2.PodScheduling, err error) { + result = &v1alpha2.PodScheduling{} err = c.client.Post(). Namespace(c.ns). Resource("podschedulings"). @@ -128,8 +128,8 @@ func (c *podSchedulings) Create(ctx context.Context, podScheduling *v1alpha1.Pod } // Update takes the representation of a podScheduling and updates it. Returns the server's representation of the podScheduling, and an error, if there is any. -func (c *podSchedulings) Update(ctx context.Context, podScheduling *v1alpha1.PodScheduling, opts v1.UpdateOptions) (result *v1alpha1.PodScheduling, err error) { - result = &v1alpha1.PodScheduling{} +func (c *podSchedulings) Update(ctx context.Context, podScheduling *v1alpha2.PodScheduling, opts v1.UpdateOptions) (result *v1alpha2.PodScheduling, err error) { + result = &v1alpha2.PodScheduling{} err = c.client.Put(). Namespace(c.ns). Resource("podschedulings"). 
@@ -143,8 +143,8 @@ func (c *podSchedulings) Update(ctx context.Context, podScheduling *v1alpha1.Pod // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *podSchedulings) UpdateStatus(ctx context.Context, podScheduling *v1alpha1.PodScheduling, opts v1.UpdateOptions) (result *v1alpha1.PodScheduling, err error) { - result = &v1alpha1.PodScheduling{} +func (c *podSchedulings) UpdateStatus(ctx context.Context, podScheduling *v1alpha2.PodScheduling, opts v1.UpdateOptions) (result *v1alpha2.PodScheduling, err error) { + result = &v1alpha2.PodScheduling{} err = c.client.Put(). Namespace(c.ns). Resource("podschedulings"). @@ -185,8 +185,8 @@ func (c *podSchedulings) DeleteCollection(ctx context.Context, opts v1.DeleteOpt } // Patch applies the patch and returns the patched podScheduling. -func (c *podSchedulings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.PodScheduling, err error) { - result = &v1alpha1.PodScheduling{} +func (c *podSchedulings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.PodScheduling, err error) { + result = &v1alpha2.PodScheduling{} err = c.client.Patch(pt). Namespace(c.ns). Resource("podschedulings"). @@ -200,7 +200,7 @@ func (c *podSchedulings) Patch(ctx context.Context, name string, pt types.PatchT } // Apply takes the given apply declarative configuration, applies it and returns the applied podScheduling. -func (c *podSchedulings) Apply(ctx context.Context, podScheduling *resourcev1alpha1.PodSchedulingApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.PodScheduling, err error) { +func (c *podSchedulings) Apply(ctx context.Context, podScheduling *resourcev1alpha2.PodSchedulingApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.PodScheduling, err error) { if podScheduling == nil { return nil, fmt.Errorf("podScheduling provided to Apply must not be nil") } @@ -213,7 +213,7 @@ func (c *podSchedulings) Apply(ctx context.Context, podScheduling *resourcev1alp if name == nil { return nil, fmt.Errorf("podScheduling.Name must be provided to Apply") } - result = &v1alpha1.PodScheduling{} + result = &v1alpha2.PodScheduling{} err = c.client.Patch(types.ApplyPatchType). Namespace(c.ns). Resource("podschedulings"). @@ -227,7 +227,7 @@ func (c *podSchedulings) Apply(ctx context.Context, podScheduling *resourcev1alp // ApplyStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *podSchedulings) ApplyStatus(ctx context.Context, podScheduling *resourcev1alpha1.PodSchedulingApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.PodScheduling, err error) { +func (c *podSchedulings) ApplyStatus(ctx context.Context, podScheduling *resourcev1alpha2.PodSchedulingApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.PodScheduling, err error) { if podScheduling == nil { return nil, fmt.Errorf("podScheduling provided to Apply must not be nil") } @@ -242,7 +242,7 @@ func (c *podSchedulings) ApplyStatus(ctx context.Context, podScheduling *resourc return nil, fmt.Errorf("podScheduling.Name must be provided to Apply") } - result = &v1alpha1.PodScheduling{} + result = &v1alpha2.PodScheduling{} err = c.client.Patch(types.ApplyPatchType). Namespace(c.ns). 
Resource("podschedulings"). diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/resource/v1alpha1/resource_client.go b/staging/src/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resource_client.go similarity index 69% rename from staging/src/k8s.io/client-go/kubernetes/typed/resource/v1alpha1/resource_client.go rename to staging/src/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resource_client.go index 2355bf7ccbe..2aa6290b161 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/resource/v1alpha1/resource_client.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resource_client.go @@ -16,17 +16,17 @@ limitations under the License. // Code generated by client-gen. DO NOT EDIT. -package v1alpha1 +package v1alpha2 import ( "net/http" - v1alpha1 "k8s.io/api/resource/v1alpha1" + v1alpha2 "k8s.io/api/resource/v1alpha2" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) -type ResourceV1alpha1Interface interface { +type ResourceV1alpha2Interface interface { RESTClient() rest.Interface PodSchedulingsGetter ResourceClaimsGetter @@ -34,31 +34,31 @@ type ResourceV1alpha1Interface interface { ResourceClassesGetter } -// ResourceV1alpha1Client is used to interact with features provided by the resource.k8s.io group. -type ResourceV1alpha1Client struct { +// ResourceV1alpha2Client is used to interact with features provided by the resource.k8s.io group. +type ResourceV1alpha2Client struct { restClient rest.Interface } -func (c *ResourceV1alpha1Client) PodSchedulings(namespace string) PodSchedulingInterface { +func (c *ResourceV1alpha2Client) PodSchedulings(namespace string) PodSchedulingInterface { return newPodSchedulings(c, namespace) } -func (c *ResourceV1alpha1Client) ResourceClaims(namespace string) ResourceClaimInterface { +func (c *ResourceV1alpha2Client) ResourceClaims(namespace string) ResourceClaimInterface { return newResourceClaims(c, namespace) } -func (c *ResourceV1alpha1Client) ResourceClaimTemplates(namespace string) ResourceClaimTemplateInterface { +func (c *ResourceV1alpha2Client) ResourceClaimTemplates(namespace string) ResourceClaimTemplateInterface { return newResourceClaimTemplates(c, namespace) } -func (c *ResourceV1alpha1Client) ResourceClasses() ResourceClassInterface { +func (c *ResourceV1alpha2Client) ResourceClasses() ResourceClassInterface { return newResourceClasses(c) } -// NewForConfig creates a new ResourceV1alpha1Client for the given config. +// NewForConfig creates a new ResourceV1alpha2Client for the given config. // NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), // where httpClient was generated with rest.HTTPClientFor(c). -func NewForConfig(c *rest.Config) (*ResourceV1alpha1Client, error) { +func NewForConfig(c *rest.Config) (*ResourceV1alpha2Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err @@ -70,9 +70,9 @@ func NewForConfig(c *rest.Config) (*ResourceV1alpha1Client, error) { return NewForConfigAndClient(&config, httpClient) } -// NewForConfigAndClient creates a new ResourceV1alpha1Client for the given config and http client. +// NewForConfigAndClient creates a new ResourceV1alpha2Client for the given config and http client. // Note the http client provided takes precedence over the configured transport values. 
-func NewForConfigAndClient(c *rest.Config, h *http.Client) (*ResourceV1alpha1Client, error) { +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*ResourceV1alpha2Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err @@ -81,12 +81,12 @@ func NewForConfigAndClient(c *rest.Config, h *http.Client) (*ResourceV1alpha1Cli if err != nil { return nil, err } - return &ResourceV1alpha1Client{client}, nil + return &ResourceV1alpha2Client{client}, nil } -// NewForConfigOrDie creates a new ResourceV1alpha1Client for the given config and +// NewForConfigOrDie creates a new ResourceV1alpha2Client for the given config and // panics if there is an error in the config. -func NewForConfigOrDie(c *rest.Config) *ResourceV1alpha1Client { +func NewForConfigOrDie(c *rest.Config) *ResourceV1alpha2Client { client, err := NewForConfig(c) if err != nil { panic(err) @@ -94,13 +94,13 @@ func NewForConfigOrDie(c *rest.Config) *ResourceV1alpha1Client { return client } -// New creates a new ResourceV1alpha1Client for the given RESTClient. -func New(c rest.Interface) *ResourceV1alpha1Client { - return &ResourceV1alpha1Client{c} +// New creates a new ResourceV1alpha2Client for the given RESTClient. +func New(c rest.Interface) *ResourceV1alpha2Client { + return &ResourceV1alpha2Client{c} } func setConfigDefaults(config *rest.Config) error { - gv := v1alpha1.SchemeGroupVersion + gv := v1alpha2.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() @@ -114,7 +114,7 @@ func setConfigDefaults(config *rest.Config) error { // RESTClient returns a RESTClient that is used to communicate // with API server by this client implementation. -func (c *ResourceV1alpha1Client) RESTClient() rest.Interface { +func (c *ResourceV1alpha2Client) RESTClient() rest.Interface { if c == nil { return nil } diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/resource/v1alpha1/resourceclaim.go b/staging/src/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceclaim.go similarity index 79% rename from staging/src/k8s.io/client-go/kubernetes/typed/resource/v1alpha1/resourceclaim.go rename to staging/src/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceclaim.go index cd2d0c78214..cfb27c9db68 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/resource/v1alpha1/resourceclaim.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceclaim.go @@ -16,7 +16,7 @@ limitations under the License. // Code generated by client-gen. DO NOT EDIT. -package v1alpha1 +package v1alpha2 import ( "context" @@ -24,11 +24,11 @@ import ( "fmt" "time" - v1alpha1 "k8s.io/api/resource/v1alpha1" + v1alpha2 "k8s.io/api/resource/v1alpha2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - resourcev1alpha1 "k8s.io/client-go/applyconfigurations/resource/v1alpha1" + resourcev1alpha2 "k8s.io/client-go/applyconfigurations/resource/v1alpha2" scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -41,17 +41,17 @@ type ResourceClaimsGetter interface { // ResourceClaimInterface has methods to work with ResourceClaim resources. 
type ResourceClaimInterface interface { - Create(ctx context.Context, resourceClaim *v1alpha1.ResourceClaim, opts v1.CreateOptions) (*v1alpha1.ResourceClaim, error) - Update(ctx context.Context, resourceClaim *v1alpha1.ResourceClaim, opts v1.UpdateOptions) (*v1alpha1.ResourceClaim, error) - UpdateStatus(ctx context.Context, resourceClaim *v1alpha1.ResourceClaim, opts v1.UpdateOptions) (*v1alpha1.ResourceClaim, error) + Create(ctx context.Context, resourceClaim *v1alpha2.ResourceClaim, opts v1.CreateOptions) (*v1alpha2.ResourceClaim, error) + Update(ctx context.Context, resourceClaim *v1alpha2.ResourceClaim, opts v1.UpdateOptions) (*v1alpha2.ResourceClaim, error) + UpdateStatus(ctx context.Context, resourceClaim *v1alpha2.ResourceClaim, opts v1.UpdateOptions) (*v1alpha2.ResourceClaim, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.ResourceClaim, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.ResourceClaimList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha2.ResourceClaim, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha2.ResourceClaimList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ResourceClaim, err error) - Apply(ctx context.Context, resourceClaim *resourcev1alpha1.ResourceClaimApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ResourceClaim, err error) - ApplyStatus(ctx context.Context, resourceClaim *resourcev1alpha1.ResourceClaimApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ResourceClaim, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.ResourceClaim, err error) + Apply(ctx context.Context, resourceClaim *resourcev1alpha2.ResourceClaimApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceClaim, err error) + ApplyStatus(ctx context.Context, resourceClaim *resourcev1alpha2.ResourceClaimApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceClaim, err error) ResourceClaimExpansion } @@ -62,7 +62,7 @@ type resourceClaims struct { } // newResourceClaims returns a ResourceClaims -func newResourceClaims(c *ResourceV1alpha1Client, namespace string) *resourceClaims { +func newResourceClaims(c *ResourceV1alpha2Client, namespace string) *resourceClaims { return &resourceClaims{ client: c.RESTClient(), ns: namespace, @@ -70,8 +70,8 @@ func newResourceClaims(c *ResourceV1alpha1Client, namespace string) *resourceCla } // Get takes name of the resourceClaim, and returns the corresponding resourceClaim object, and an error if there is any. -func (c *resourceClaims) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ResourceClaim, err error) { - result = &v1alpha1.ResourceClaim{} +func (c *resourceClaims) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha2.ResourceClaim, err error) { + result = &v1alpha2.ResourceClaim{} err = c.client.Get(). Namespace(c.ns). Resource("resourceclaims"). 
@@ -83,12 +83,12 @@ func (c *resourceClaims) Get(ctx context.Context, name string, options v1.GetOpt } // List takes label and field selectors, and returns the list of ResourceClaims that match those selectors. -func (c *resourceClaims) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ResourceClaimList, err error) { +func (c *resourceClaims) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha2.ResourceClaimList, err error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second } - result = &v1alpha1.ResourceClaimList{} + result = &v1alpha2.ResourceClaimList{} err = c.client.Get(). Namespace(c.ns). Resource("resourceclaims"). @@ -115,8 +115,8 @@ func (c *resourceClaims) Watch(ctx context.Context, opts v1.ListOptions) (watch. } // Create takes the representation of a resourceClaim and creates it. Returns the server's representation of the resourceClaim, and an error, if there is any. -func (c *resourceClaims) Create(ctx context.Context, resourceClaim *v1alpha1.ResourceClaim, opts v1.CreateOptions) (result *v1alpha1.ResourceClaim, err error) { - result = &v1alpha1.ResourceClaim{} +func (c *resourceClaims) Create(ctx context.Context, resourceClaim *v1alpha2.ResourceClaim, opts v1.CreateOptions) (result *v1alpha2.ResourceClaim, err error) { + result = &v1alpha2.ResourceClaim{} err = c.client.Post(). Namespace(c.ns). Resource("resourceclaims"). @@ -128,8 +128,8 @@ func (c *resourceClaims) Create(ctx context.Context, resourceClaim *v1alpha1.Res } // Update takes the representation of a resourceClaim and updates it. Returns the server's representation of the resourceClaim, and an error, if there is any. -func (c *resourceClaims) Update(ctx context.Context, resourceClaim *v1alpha1.ResourceClaim, opts v1.UpdateOptions) (result *v1alpha1.ResourceClaim, err error) { - result = &v1alpha1.ResourceClaim{} +func (c *resourceClaims) Update(ctx context.Context, resourceClaim *v1alpha2.ResourceClaim, opts v1.UpdateOptions) (result *v1alpha2.ResourceClaim, err error) { + result = &v1alpha2.ResourceClaim{} err = c.client.Put(). Namespace(c.ns). Resource("resourceclaims"). @@ -143,8 +143,8 @@ func (c *resourceClaims) Update(ctx context.Context, resourceClaim *v1alpha1.Res // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *resourceClaims) UpdateStatus(ctx context.Context, resourceClaim *v1alpha1.ResourceClaim, opts v1.UpdateOptions) (result *v1alpha1.ResourceClaim, err error) { - result = &v1alpha1.ResourceClaim{} +func (c *resourceClaims) UpdateStatus(ctx context.Context, resourceClaim *v1alpha2.ResourceClaim, opts v1.UpdateOptions) (result *v1alpha2.ResourceClaim, err error) { + result = &v1alpha2.ResourceClaim{} err = c.client.Put(). Namespace(c.ns). Resource("resourceclaims"). @@ -185,8 +185,8 @@ func (c *resourceClaims) DeleteCollection(ctx context.Context, opts v1.DeleteOpt } // Patch applies the patch and returns the patched resourceClaim. 
-func (c *resourceClaims) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ResourceClaim, err error) { - result = &v1alpha1.ResourceClaim{} +func (c *resourceClaims) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.ResourceClaim, err error) { + result = &v1alpha2.ResourceClaim{} err = c.client.Patch(pt). Namespace(c.ns). Resource("resourceclaims"). @@ -200,7 +200,7 @@ func (c *resourceClaims) Patch(ctx context.Context, name string, pt types.PatchT } // Apply takes the given apply declarative configuration, applies it and returns the applied resourceClaim. -func (c *resourceClaims) Apply(ctx context.Context, resourceClaim *resourcev1alpha1.ResourceClaimApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ResourceClaim, err error) { +func (c *resourceClaims) Apply(ctx context.Context, resourceClaim *resourcev1alpha2.ResourceClaimApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceClaim, err error) { if resourceClaim == nil { return nil, fmt.Errorf("resourceClaim provided to Apply must not be nil") } @@ -213,7 +213,7 @@ func (c *resourceClaims) Apply(ctx context.Context, resourceClaim *resourcev1alp if name == nil { return nil, fmt.Errorf("resourceClaim.Name must be provided to Apply") } - result = &v1alpha1.ResourceClaim{} + result = &v1alpha2.ResourceClaim{} err = c.client.Patch(types.ApplyPatchType). Namespace(c.ns). Resource("resourceclaims"). @@ -227,7 +227,7 @@ func (c *resourceClaims) Apply(ctx context.Context, resourceClaim *resourcev1alp // ApplyStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *resourceClaims) ApplyStatus(ctx context.Context, resourceClaim *resourcev1alpha1.ResourceClaimApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ResourceClaim, err error) { +func (c *resourceClaims) ApplyStatus(ctx context.Context, resourceClaim *resourcev1alpha2.ResourceClaimApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceClaim, err error) { if resourceClaim == nil { return nil, fmt.Errorf("resourceClaim provided to Apply must not be nil") } @@ -242,7 +242,7 @@ func (c *resourceClaims) ApplyStatus(ctx context.Context, resourceClaim *resourc return nil, fmt.Errorf("resourceClaim.Name must be provided to Apply") } - result = &v1alpha1.ResourceClaim{} + result = &v1alpha2.ResourceClaim{} err = c.client.Patch(types.ApplyPatchType). Namespace(c.ns). Resource("resourceclaims"). diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/resource/v1alpha1/resourceclaimtemplate.go b/staging/src/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceclaimtemplate.go similarity index 80% rename from staging/src/k8s.io/client-go/kubernetes/typed/resource/v1alpha1/resourceclaimtemplate.go rename to staging/src/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceclaimtemplate.go index b6cc3d96eca..3f4e3200642 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/resource/v1alpha1/resourceclaimtemplate.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceclaimtemplate.go @@ -16,7 +16,7 @@ limitations under the License. // Code generated by client-gen. DO NOT EDIT. 
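// Sketch (not part of the patch): server-side apply through the v1alpha2 Apply
// method above, assuming the generated apply-configuration constructors follow
// the usual client-go pattern (ResourceClaim(name, namespace), WithSpec,
// WithResourceClassName). The claim, class, and field manager names are illustrative.
package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	resourcev1alpha2ac "k8s.io/client-go/applyconfigurations/resource/v1alpha2"
	"k8s.io/client-go/kubernetes"
)

func applyClaim(ctx context.Context, clientset kubernetes.Interface) error {
	claim := resourcev1alpha2ac.ResourceClaim("gpu-claim", "default").
		WithSpec(resourcev1alpha2ac.ResourceClaimSpec().
			WithResourceClassName("example-gpu-class"))
	_, err := clientset.ResourceV1alpha2().
		ResourceClaims("default").
		Apply(ctx, claim, metav1.ApplyOptions{FieldManager: "example-controller", Force: true})
	return err
}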
-package v1alpha1 +package v1alpha2 import ( "context" @@ -24,11 +24,11 @@ import ( "fmt" "time" - v1alpha1 "k8s.io/api/resource/v1alpha1" + v1alpha2 "k8s.io/api/resource/v1alpha2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - resourcev1alpha1 "k8s.io/client-go/applyconfigurations/resource/v1alpha1" + resourcev1alpha2 "k8s.io/client-go/applyconfigurations/resource/v1alpha2" scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -41,15 +41,15 @@ type ResourceClaimTemplatesGetter interface { // ResourceClaimTemplateInterface has methods to work with ResourceClaimTemplate resources. type ResourceClaimTemplateInterface interface { - Create(ctx context.Context, resourceClaimTemplate *v1alpha1.ResourceClaimTemplate, opts v1.CreateOptions) (*v1alpha1.ResourceClaimTemplate, error) - Update(ctx context.Context, resourceClaimTemplate *v1alpha1.ResourceClaimTemplate, opts v1.UpdateOptions) (*v1alpha1.ResourceClaimTemplate, error) + Create(ctx context.Context, resourceClaimTemplate *v1alpha2.ResourceClaimTemplate, opts v1.CreateOptions) (*v1alpha2.ResourceClaimTemplate, error) + Update(ctx context.Context, resourceClaimTemplate *v1alpha2.ResourceClaimTemplate, opts v1.UpdateOptions) (*v1alpha2.ResourceClaimTemplate, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.ResourceClaimTemplate, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.ResourceClaimTemplateList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha2.ResourceClaimTemplate, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha2.ResourceClaimTemplateList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ResourceClaimTemplate, err error) - Apply(ctx context.Context, resourceClaimTemplate *resourcev1alpha1.ResourceClaimTemplateApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ResourceClaimTemplate, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.ResourceClaimTemplate, err error) + Apply(ctx context.Context, resourceClaimTemplate *resourcev1alpha2.ResourceClaimTemplateApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceClaimTemplate, err error) ResourceClaimTemplateExpansion } @@ -60,7 +60,7 @@ type resourceClaimTemplates struct { } // newResourceClaimTemplates returns a ResourceClaimTemplates -func newResourceClaimTemplates(c *ResourceV1alpha1Client, namespace string) *resourceClaimTemplates { +func newResourceClaimTemplates(c *ResourceV1alpha2Client, namespace string) *resourceClaimTemplates { return &resourceClaimTemplates{ client: c.RESTClient(), ns: namespace, @@ -68,8 +68,8 @@ func newResourceClaimTemplates(c *ResourceV1alpha1Client, namespace string) *res } // Get takes name of the resourceClaimTemplate, and returns the corresponding resourceClaimTemplate object, and an error if there is any. 
-func (c *resourceClaimTemplates) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ResourceClaimTemplate, err error) { - result = &v1alpha1.ResourceClaimTemplate{} +func (c *resourceClaimTemplates) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha2.ResourceClaimTemplate, err error) { + result = &v1alpha2.ResourceClaimTemplate{} err = c.client.Get(). Namespace(c.ns). Resource("resourceclaimtemplates"). @@ -81,12 +81,12 @@ func (c *resourceClaimTemplates) Get(ctx context.Context, name string, options v } // List takes label and field selectors, and returns the list of ResourceClaimTemplates that match those selectors. -func (c *resourceClaimTemplates) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ResourceClaimTemplateList, err error) { +func (c *resourceClaimTemplates) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha2.ResourceClaimTemplateList, err error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second } - result = &v1alpha1.ResourceClaimTemplateList{} + result = &v1alpha2.ResourceClaimTemplateList{} err = c.client.Get(). Namespace(c.ns). Resource("resourceclaimtemplates"). @@ -113,8 +113,8 @@ func (c *resourceClaimTemplates) Watch(ctx context.Context, opts v1.ListOptions) } // Create takes the representation of a resourceClaimTemplate and creates it. Returns the server's representation of the resourceClaimTemplate, and an error, if there is any. -func (c *resourceClaimTemplates) Create(ctx context.Context, resourceClaimTemplate *v1alpha1.ResourceClaimTemplate, opts v1.CreateOptions) (result *v1alpha1.ResourceClaimTemplate, err error) { - result = &v1alpha1.ResourceClaimTemplate{} +func (c *resourceClaimTemplates) Create(ctx context.Context, resourceClaimTemplate *v1alpha2.ResourceClaimTemplate, opts v1.CreateOptions) (result *v1alpha2.ResourceClaimTemplate, err error) { + result = &v1alpha2.ResourceClaimTemplate{} err = c.client.Post(). Namespace(c.ns). Resource("resourceclaimtemplates"). @@ -126,8 +126,8 @@ func (c *resourceClaimTemplates) Create(ctx context.Context, resourceClaimTempla } // Update takes the representation of a resourceClaimTemplate and updates it. Returns the server's representation of the resourceClaimTemplate, and an error, if there is any. -func (c *resourceClaimTemplates) Update(ctx context.Context, resourceClaimTemplate *v1alpha1.ResourceClaimTemplate, opts v1.UpdateOptions) (result *v1alpha1.ResourceClaimTemplate, err error) { - result = &v1alpha1.ResourceClaimTemplate{} +func (c *resourceClaimTemplates) Update(ctx context.Context, resourceClaimTemplate *v1alpha2.ResourceClaimTemplate, opts v1.UpdateOptions) (result *v1alpha2.ResourceClaimTemplate, err error) { + result = &v1alpha2.ResourceClaimTemplate{} err = c.client.Put(). Namespace(c.ns). Resource("resourceclaimtemplates"). @@ -167,8 +167,8 @@ func (c *resourceClaimTemplates) DeleteCollection(ctx context.Context, opts v1.D } // Patch applies the patch and returns the patched resourceClaimTemplate. 
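// Sketch (not part of the patch): constructing and creating a v1alpha2
// ResourceClaimTemplate with the regenerated client. The embedded Spec.Spec is
// an ordinary ResourceClaimSpec; names and namespace are illustrative.
package example

import (
	"context"

	resourcev1alpha2 "k8s.io/api/resource/v1alpha2"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

func createTemplate(ctx context.Context, clientset kubernetes.Interface) error {
	template := &resourcev1alpha2.ResourceClaimTemplate{
		ObjectMeta: metav1.ObjectMeta{Name: "gpu-template", Namespace: "default"},
		Spec: resourcev1alpha2.ResourceClaimTemplateSpec{
			Spec: resourcev1alpha2.ResourceClaimSpec{
				ResourceClassName: "example-gpu-class",
				AllocationMode:    resourcev1alpha2.AllocationModeWaitForFirstConsumer,
			},
		},
	}
	_, err := clientset.ResourceV1alpha2().
		ResourceClaimTemplates("default").
		Create(ctx, template, metav1.CreateOptions{})
	return err
}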
-func (c *resourceClaimTemplates) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ResourceClaimTemplate, err error) { - result = &v1alpha1.ResourceClaimTemplate{} +func (c *resourceClaimTemplates) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.ResourceClaimTemplate, err error) { + result = &v1alpha2.ResourceClaimTemplate{} err = c.client.Patch(pt). Namespace(c.ns). Resource("resourceclaimtemplates"). @@ -182,7 +182,7 @@ func (c *resourceClaimTemplates) Patch(ctx context.Context, name string, pt type } // Apply takes the given apply declarative configuration, applies it and returns the applied resourceClaimTemplate. -func (c *resourceClaimTemplates) Apply(ctx context.Context, resourceClaimTemplate *resourcev1alpha1.ResourceClaimTemplateApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ResourceClaimTemplate, err error) { +func (c *resourceClaimTemplates) Apply(ctx context.Context, resourceClaimTemplate *resourcev1alpha2.ResourceClaimTemplateApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceClaimTemplate, err error) { if resourceClaimTemplate == nil { return nil, fmt.Errorf("resourceClaimTemplate provided to Apply must not be nil") } @@ -195,7 +195,7 @@ func (c *resourceClaimTemplates) Apply(ctx context.Context, resourceClaimTemplat if name == nil { return nil, fmt.Errorf("resourceClaimTemplate.Name must be provided to Apply") } - result = &v1alpha1.ResourceClaimTemplate{} + result = &v1alpha2.ResourceClaimTemplate{} err = c.client.Patch(types.ApplyPatchType). Namespace(c.ns). Resource("resourceclaimtemplates"). diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/resource/v1alpha1/resourceclass.go b/staging/src/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceclass.go similarity index 80% rename from staging/src/k8s.io/client-go/kubernetes/typed/resource/v1alpha1/resourceclass.go rename to staging/src/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceclass.go index 9c8b454639a..95a4ac5668e 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/resource/v1alpha1/resourceclass.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceclass.go @@ -16,7 +16,7 @@ limitations under the License. // Code generated by client-gen. DO NOT EDIT. -package v1alpha1 +package v1alpha2 import ( "context" @@ -24,11 +24,11 @@ import ( "fmt" "time" - v1alpha1 "k8s.io/api/resource/v1alpha1" + v1alpha2 "k8s.io/api/resource/v1alpha2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - resourcev1alpha1 "k8s.io/client-go/applyconfigurations/resource/v1alpha1" + resourcev1alpha2 "k8s.io/client-go/applyconfigurations/resource/v1alpha2" scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -41,15 +41,15 @@ type ResourceClassesGetter interface { // ResourceClassInterface has methods to work with ResourceClass resources. 
type ResourceClassInterface interface { - Create(ctx context.Context, resourceClass *v1alpha1.ResourceClass, opts v1.CreateOptions) (*v1alpha1.ResourceClass, error) - Update(ctx context.Context, resourceClass *v1alpha1.ResourceClass, opts v1.UpdateOptions) (*v1alpha1.ResourceClass, error) + Create(ctx context.Context, resourceClass *v1alpha2.ResourceClass, opts v1.CreateOptions) (*v1alpha2.ResourceClass, error) + Update(ctx context.Context, resourceClass *v1alpha2.ResourceClass, opts v1.UpdateOptions) (*v1alpha2.ResourceClass, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.ResourceClass, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.ResourceClassList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha2.ResourceClass, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha2.ResourceClassList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ResourceClass, err error) - Apply(ctx context.Context, resourceClass *resourcev1alpha1.ResourceClassApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ResourceClass, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.ResourceClass, err error) + Apply(ctx context.Context, resourceClass *resourcev1alpha2.ResourceClassApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceClass, err error) ResourceClassExpansion } @@ -59,15 +59,15 @@ type resourceClasses struct { } // newResourceClasses returns a ResourceClasses -func newResourceClasses(c *ResourceV1alpha1Client) *resourceClasses { +func newResourceClasses(c *ResourceV1alpha2Client) *resourceClasses { return &resourceClasses{ client: c.RESTClient(), } } // Get takes name of the resourceClass, and returns the corresponding resourceClass object, and an error if there is any. -func (c *resourceClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ResourceClass, err error) { - result = &v1alpha1.ResourceClass{} +func (c *resourceClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha2.ResourceClass, err error) { + result = &v1alpha2.ResourceClass{} err = c.client.Get(). Resource("resourceclasses"). Name(name). @@ -78,12 +78,12 @@ func (c *resourceClasses) Get(ctx context.Context, name string, options v1.GetOp } // List takes label and field selectors, and returns the list of ResourceClasses that match those selectors. -func (c *resourceClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ResourceClassList, err error) { +func (c *resourceClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha2.ResourceClassList, err error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second } - result = &v1alpha1.ResourceClassList{} + result = &v1alpha2.ResourceClassList{} err = c.client.Get(). Resource("resourceclasses"). VersionedParams(&opts, scheme.ParameterCodec). 
@@ -108,8 +108,8 @@ func (c *resourceClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch } // Create takes the representation of a resourceClass and creates it. Returns the server's representation of the resourceClass, and an error, if there is any. -func (c *resourceClasses) Create(ctx context.Context, resourceClass *v1alpha1.ResourceClass, opts v1.CreateOptions) (result *v1alpha1.ResourceClass, err error) { - result = &v1alpha1.ResourceClass{} +func (c *resourceClasses) Create(ctx context.Context, resourceClass *v1alpha2.ResourceClass, opts v1.CreateOptions) (result *v1alpha2.ResourceClass, err error) { + result = &v1alpha2.ResourceClass{} err = c.client.Post(). Resource("resourceclasses"). VersionedParams(&opts, scheme.ParameterCodec). @@ -120,8 +120,8 @@ func (c *resourceClasses) Create(ctx context.Context, resourceClass *v1alpha1.Re } // Update takes the representation of a resourceClass and updates it. Returns the server's representation of the resourceClass, and an error, if there is any. -func (c *resourceClasses) Update(ctx context.Context, resourceClass *v1alpha1.ResourceClass, opts v1.UpdateOptions) (result *v1alpha1.ResourceClass, err error) { - result = &v1alpha1.ResourceClass{} +func (c *resourceClasses) Update(ctx context.Context, resourceClass *v1alpha2.ResourceClass, opts v1.UpdateOptions) (result *v1alpha2.ResourceClass, err error) { + result = &v1alpha2.ResourceClass{} err = c.client.Put(). Resource("resourceclasses"). Name(resourceClass.Name). @@ -158,8 +158,8 @@ func (c *resourceClasses) DeleteCollection(ctx context.Context, opts v1.DeleteOp } // Patch applies the patch and returns the patched resourceClass. -func (c *resourceClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ResourceClass, err error) { - result = &v1alpha1.ResourceClass{} +func (c *resourceClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.ResourceClass, err error) { + result = &v1alpha2.ResourceClass{} err = c.client.Patch(pt). Resource("resourceclasses"). Name(name). @@ -172,7 +172,7 @@ func (c *resourceClasses) Patch(ctx context.Context, name string, pt types.Patch } // Apply takes the given apply declarative configuration, applies it and returns the applied resourceClass. -func (c *resourceClasses) Apply(ctx context.Context, resourceClass *resourcev1alpha1.ResourceClassApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ResourceClass, err error) { +func (c *resourceClasses) Apply(ctx context.Context, resourceClass *resourcev1alpha2.ResourceClassApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceClass, err error) { if resourceClass == nil { return nil, fmt.Errorf("resourceClass provided to Apply must not be nil") } @@ -185,7 +185,7 @@ func (c *resourceClasses) Apply(ctx context.Context, resourceClass *resourcev1al if name == nil { return nil, fmt.Errorf("resourceClass.Name must be provided to Apply") } - result = &v1alpha1.ResourceClass{} + result = &v1alpha2.ResourceClass{} err = c.client.Patch(types.ApplyPatchType). Resource("resourceclasses"). Name(*name). 
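// Sketch (not part of the patch): ResourceClass remains cluster-scoped, so the
// regenerated client takes no namespace argument. The output format is illustrative.
package example

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

func printClassDrivers(ctx context.Context, clientset kubernetes.Interface) error {
	classes, err := clientset.ResourceV1alpha2().ResourceClasses().List(ctx, metav1.ListOptions{})
	if err != nil {
		return err
	}
	for _, class := range classes.Items {
		fmt.Printf("class %s is handled by driver %s\n", class.Name, class.DriverName)
	}
	return nil
}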
diff --git a/staging/src/k8s.io/client-go/listers/resource/v1alpha1/expansion_generated.go b/staging/src/k8s.io/client-go/listers/resource/v1alpha2/expansion_generated.go similarity index 99% rename from staging/src/k8s.io/client-go/listers/resource/v1alpha1/expansion_generated.go rename to staging/src/k8s.io/client-go/listers/resource/v1alpha2/expansion_generated.go index 94885e784fb..d5ca6ffdc00 100644 --- a/staging/src/k8s.io/client-go/listers/resource/v1alpha1/expansion_generated.go +++ b/staging/src/k8s.io/client-go/listers/resource/v1alpha2/expansion_generated.go @@ -16,7 +16,7 @@ limitations under the License. // Code generated by lister-gen. DO NOT EDIT. -package v1alpha1 +package v1alpha2 // PodSchedulingListerExpansion allows custom methods to be added to // PodSchedulingLister. diff --git a/staging/src/k8s.io/client-go/listers/resource/v1alpha1/podscheduling.go b/staging/src/k8s.io/client-go/listers/resource/v1alpha2/podscheduling.go similarity index 84% rename from staging/src/k8s.io/client-go/listers/resource/v1alpha1/podscheduling.go rename to staging/src/k8s.io/client-go/listers/resource/v1alpha2/podscheduling.go index fe43713710a..f8e0dc181fa 100644 --- a/staging/src/k8s.io/client-go/listers/resource/v1alpha1/podscheduling.go +++ b/staging/src/k8s.io/client-go/listers/resource/v1alpha2/podscheduling.go @@ -16,10 +16,10 @@ limitations under the License. // Code generated by lister-gen. DO NOT EDIT. -package v1alpha1 +package v1alpha2 import ( - v1alpha1 "k8s.io/api/resource/v1alpha1" + v1alpha2 "k8s.io/api/resource/v1alpha2" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" "k8s.io/client-go/tools/cache" @@ -30,7 +30,7 @@ import ( type PodSchedulingLister interface { // List lists all PodSchedulings in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha1.PodScheduling, err error) + List(selector labels.Selector) (ret []*v1alpha2.PodScheduling, err error) // PodSchedulings returns an object that can list and get PodSchedulings. PodSchedulings(namespace string) PodSchedulingNamespaceLister PodSchedulingListerExpansion @@ -47,9 +47,9 @@ func NewPodSchedulingLister(indexer cache.Indexer) PodSchedulingLister { } // List lists all PodSchedulings in the indexer. -func (s *podSchedulingLister) List(selector labels.Selector) (ret []*v1alpha1.PodScheduling, err error) { +func (s *podSchedulingLister) List(selector labels.Selector) (ret []*v1alpha2.PodScheduling, err error) { err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.PodScheduling)) + ret = append(ret, m.(*v1alpha2.PodScheduling)) }) return ret, err } @@ -64,10 +64,10 @@ func (s *podSchedulingLister) PodSchedulings(namespace string) PodSchedulingName type PodSchedulingNamespaceLister interface { // List lists all PodSchedulings in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha1.PodScheduling, err error) + List(selector labels.Selector) (ret []*v1alpha2.PodScheduling, err error) // Get retrieves the PodScheduling from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v1alpha1.PodScheduling, error) + Get(name string) (*v1alpha2.PodScheduling, error) PodSchedulingNamespaceListerExpansion } @@ -79,21 +79,21 @@ type podSchedulingNamespaceLister struct { } // List lists all PodSchedulings in the indexer for a given namespace. 
-func (s podSchedulingNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.PodScheduling, err error) { +func (s podSchedulingNamespaceLister) List(selector labels.Selector) (ret []*v1alpha2.PodScheduling, err error) { err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.PodScheduling)) + ret = append(ret, m.(*v1alpha2.PodScheduling)) }) return ret, err } // Get retrieves the PodScheduling from the indexer for a given namespace and name. -func (s podSchedulingNamespaceLister) Get(name string) (*v1alpha1.PodScheduling, error) { +func (s podSchedulingNamespaceLister) Get(name string) (*v1alpha2.PodScheduling, error) { obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) if err != nil { return nil, err } if !exists { - return nil, errors.NewNotFound(v1alpha1.Resource("podscheduling"), name) + return nil, errors.NewNotFound(v1alpha2.Resource("podscheduling"), name) } - return obj.(*v1alpha1.PodScheduling), nil + return obj.(*v1alpha2.PodScheduling), nil } diff --git a/staging/src/k8s.io/client-go/listers/resource/v1alpha1/resourceclaim.go b/staging/src/k8s.io/client-go/listers/resource/v1alpha2/resourceclaim.go similarity index 84% rename from staging/src/k8s.io/client-go/listers/resource/v1alpha1/resourceclaim.go rename to staging/src/k8s.io/client-go/listers/resource/v1alpha2/resourceclaim.go index 05d5e0cfa4d..273f16af31f 100644 --- a/staging/src/k8s.io/client-go/listers/resource/v1alpha1/resourceclaim.go +++ b/staging/src/k8s.io/client-go/listers/resource/v1alpha2/resourceclaim.go @@ -16,10 +16,10 @@ limitations under the License. // Code generated by lister-gen. DO NOT EDIT. -package v1alpha1 +package v1alpha2 import ( - v1alpha1 "k8s.io/api/resource/v1alpha1" + v1alpha2 "k8s.io/api/resource/v1alpha2" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" "k8s.io/client-go/tools/cache" @@ -30,7 +30,7 @@ import ( type ResourceClaimLister interface { // List lists all ResourceClaims in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha1.ResourceClaim, err error) + List(selector labels.Selector) (ret []*v1alpha2.ResourceClaim, err error) // ResourceClaims returns an object that can list and get ResourceClaims. ResourceClaims(namespace string) ResourceClaimNamespaceLister ResourceClaimListerExpansion @@ -47,9 +47,9 @@ func NewResourceClaimLister(indexer cache.Indexer) ResourceClaimLister { } // List lists all ResourceClaims in the indexer. -func (s *resourceClaimLister) List(selector labels.Selector) (ret []*v1alpha1.ResourceClaim, err error) { +func (s *resourceClaimLister) List(selector labels.Selector) (ret []*v1alpha2.ResourceClaim, err error) { err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.ResourceClaim)) + ret = append(ret, m.(*v1alpha2.ResourceClaim)) }) return ret, err } @@ -64,10 +64,10 @@ func (s *resourceClaimLister) ResourceClaims(namespace string) ResourceClaimName type ResourceClaimNamespaceLister interface { // List lists all ResourceClaims in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha1.ResourceClaim, err error) + List(selector labels.Selector) (ret []*v1alpha2.ResourceClaim, err error) // Get retrieves the ResourceClaim from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. 
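// Sketch (not part of the patch): consuming the renamed listers through a shared
// informer factory. The factory accessor moves from Resource().V1alpha1() to
// Resource().V1alpha2(); the resync period and namespace are illustrative.
package example

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
)

func listCachedClaims(clientset kubernetes.Interface, stopCh <-chan struct{}) error {
	factory := informers.NewSharedInformerFactory(clientset, 10*time.Minute)
	claimLister := factory.Resource().V1alpha2().ResourceClaims().Lister()

	factory.Start(stopCh)
	factory.WaitForCacheSync(stopCh)

	// The namespaced lister serves reads from the informer cache, not the API server.
	claims, err := claimLister.ResourceClaims("default").List(labels.Everything())
	if err != nil {
		return err
	}
	for _, claim := range claims {
		fmt.Println(claim.Name)
	}
	return nil
}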
- Get(name string) (*v1alpha1.ResourceClaim, error) + Get(name string) (*v1alpha2.ResourceClaim, error) ResourceClaimNamespaceListerExpansion } @@ -79,21 +79,21 @@ type resourceClaimNamespaceLister struct { } // List lists all ResourceClaims in the indexer for a given namespace. -func (s resourceClaimNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.ResourceClaim, err error) { +func (s resourceClaimNamespaceLister) List(selector labels.Selector) (ret []*v1alpha2.ResourceClaim, err error) { err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.ResourceClaim)) + ret = append(ret, m.(*v1alpha2.ResourceClaim)) }) return ret, err } // Get retrieves the ResourceClaim from the indexer for a given namespace and name. -func (s resourceClaimNamespaceLister) Get(name string) (*v1alpha1.ResourceClaim, error) { +func (s resourceClaimNamespaceLister) Get(name string) (*v1alpha2.ResourceClaim, error) { obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) if err != nil { return nil, err } if !exists { - return nil, errors.NewNotFound(v1alpha1.Resource("resourceclaim"), name) + return nil, errors.NewNotFound(v1alpha2.Resource("resourceclaim"), name) } - return obj.(*v1alpha1.ResourceClaim), nil + return obj.(*v1alpha2.ResourceClaim), nil } diff --git a/staging/src/k8s.io/client-go/listers/resource/v1alpha1/resourceclaimtemplate.go b/staging/src/k8s.io/client-go/listers/resource/v1alpha2/resourceclaimtemplate.go similarity index 84% rename from staging/src/k8s.io/client-go/listers/resource/v1alpha1/resourceclaimtemplate.go rename to staging/src/k8s.io/client-go/listers/resource/v1alpha2/resourceclaimtemplate.go index 97acddc7af5..91a488b1749 100644 --- a/staging/src/k8s.io/client-go/listers/resource/v1alpha1/resourceclaimtemplate.go +++ b/staging/src/k8s.io/client-go/listers/resource/v1alpha2/resourceclaimtemplate.go @@ -16,10 +16,10 @@ limitations under the License. // Code generated by lister-gen. DO NOT EDIT. -package v1alpha1 +package v1alpha2 import ( - v1alpha1 "k8s.io/api/resource/v1alpha1" + v1alpha2 "k8s.io/api/resource/v1alpha2" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" "k8s.io/client-go/tools/cache" @@ -30,7 +30,7 @@ import ( type ResourceClaimTemplateLister interface { // List lists all ResourceClaimTemplates in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha1.ResourceClaimTemplate, err error) + List(selector labels.Selector) (ret []*v1alpha2.ResourceClaimTemplate, err error) // ResourceClaimTemplates returns an object that can list and get ResourceClaimTemplates. ResourceClaimTemplates(namespace string) ResourceClaimTemplateNamespaceLister ResourceClaimTemplateListerExpansion @@ -47,9 +47,9 @@ func NewResourceClaimTemplateLister(indexer cache.Indexer) ResourceClaimTemplate } // List lists all ResourceClaimTemplates in the indexer. 
-func (s *resourceClaimTemplateLister) List(selector labels.Selector) (ret []*v1alpha1.ResourceClaimTemplate, err error) { +func (s *resourceClaimTemplateLister) List(selector labels.Selector) (ret []*v1alpha2.ResourceClaimTemplate, err error) { err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.ResourceClaimTemplate)) + ret = append(ret, m.(*v1alpha2.ResourceClaimTemplate)) }) return ret, err } @@ -64,10 +64,10 @@ func (s *resourceClaimTemplateLister) ResourceClaimTemplates(namespace string) R type ResourceClaimTemplateNamespaceLister interface { // List lists all ResourceClaimTemplates in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha1.ResourceClaimTemplate, err error) + List(selector labels.Selector) (ret []*v1alpha2.ResourceClaimTemplate, err error) // Get retrieves the ResourceClaimTemplate from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v1alpha1.ResourceClaimTemplate, error) + Get(name string) (*v1alpha2.ResourceClaimTemplate, error) ResourceClaimTemplateNamespaceListerExpansion } @@ -79,21 +79,21 @@ type resourceClaimTemplateNamespaceLister struct { } // List lists all ResourceClaimTemplates in the indexer for a given namespace. -func (s resourceClaimTemplateNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.ResourceClaimTemplate, err error) { +func (s resourceClaimTemplateNamespaceLister) List(selector labels.Selector) (ret []*v1alpha2.ResourceClaimTemplate, err error) { err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.ResourceClaimTemplate)) + ret = append(ret, m.(*v1alpha2.ResourceClaimTemplate)) }) return ret, err } // Get retrieves the ResourceClaimTemplate from the indexer for a given namespace and name. -func (s resourceClaimTemplateNamespaceLister) Get(name string) (*v1alpha1.ResourceClaimTemplate, error) { +func (s resourceClaimTemplateNamespaceLister) Get(name string) (*v1alpha2.ResourceClaimTemplate, error) { obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) if err != nil { return nil, err } if !exists { - return nil, errors.NewNotFound(v1alpha1.Resource("resourceclaimtemplate"), name) + return nil, errors.NewNotFound(v1alpha2.Resource("resourceclaimtemplate"), name) } - return obj.(*v1alpha1.ResourceClaimTemplate), nil + return obj.(*v1alpha2.ResourceClaimTemplate), nil } diff --git a/staging/src/k8s.io/client-go/listers/resource/v1alpha1/resourceclass.go b/staging/src/k8s.io/client-go/listers/resource/v1alpha2/resourceclass.go similarity index 81% rename from staging/src/k8s.io/client-go/listers/resource/v1alpha1/resourceclass.go rename to staging/src/k8s.io/client-go/listers/resource/v1alpha2/resourceclass.go index 8d4dbf4d04c..eeb2fc33794 100644 --- a/staging/src/k8s.io/client-go/listers/resource/v1alpha1/resourceclass.go +++ b/staging/src/k8s.io/client-go/listers/resource/v1alpha2/resourceclass.go @@ -16,10 +16,10 @@ limitations under the License. // Code generated by lister-gen. DO NOT EDIT. -package v1alpha1 +package v1alpha2 import ( - v1alpha1 "k8s.io/api/resource/v1alpha1" + v1alpha2 "k8s.io/api/resource/v1alpha2" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" "k8s.io/client-go/tools/cache" @@ -30,10 +30,10 @@ import ( type ResourceClassLister interface { // List lists all ResourceClasses in the indexer. 
// Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha1.ResourceClass, err error) + List(selector labels.Selector) (ret []*v1alpha2.ResourceClass, err error) // Get retrieves the ResourceClass from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1alpha1.ResourceClass, error) + Get(name string) (*v1alpha2.ResourceClass, error) ResourceClassListerExpansion } @@ -48,21 +48,21 @@ func NewResourceClassLister(indexer cache.Indexer) ResourceClassLister { } // List lists all ResourceClasses in the indexer. -func (s *resourceClassLister) List(selector labels.Selector) (ret []*v1alpha1.ResourceClass, err error) { +func (s *resourceClassLister) List(selector labels.Selector) (ret []*v1alpha2.ResourceClass, err error) { err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.ResourceClass)) + ret = append(ret, m.(*v1alpha2.ResourceClass)) }) return ret, err } // Get retrieves the ResourceClass from the index for a given name. -func (s *resourceClassLister) Get(name string) (*v1alpha1.ResourceClass, error) { +func (s *resourceClassLister) Get(name string) (*v1alpha2.ResourceClass, error) { obj, exists, err := s.indexer.GetByKey(name) if err != nil { return nil, err } if !exists { - return nil, errors.NewNotFound(v1alpha1.Resource("resourceclass"), name) + return nil, errors.NewNotFound(v1alpha2.Resource("resourceclass"), name) } - return obj.(*v1alpha1.ResourceClass), nil + return obj.(*v1alpha2.ResourceClass), nil } diff --git a/staging/src/k8s.io/dynamic-resource-allocation/controller/controller.go b/staging/src/k8s.io/dynamic-resource-allocation/controller/controller.go index 681fcb798a7..bd46b4582a7 100644 --- a/staging/src/k8s.io/dynamic-resource-allocation/controller/controller.go +++ b/staging/src/k8s.io/dynamic-resource-allocation/controller/controller.go @@ -28,7 +28,7 @@ import ( "github.com/google/go-cmp/cmp" v1 "k8s.io/api/core/v1" - resourcev1alpha1 "k8s.io/api/resource/v1alpha1" + resourcev1alpha2 "k8s.io/api/resource/v1alpha2" k8serrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ -37,7 +37,7 @@ import ( "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/scheme" corev1types "k8s.io/client-go/kubernetes/typed/core/v1" - resourcev1alpha1listers "k8s.io/client-go/listers/resource/v1alpha1" + resourcev1alpha2listers "k8s.io/client-go/listers/resource/v1alpha2" "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/record" "k8s.io/client-go/util/workqueue" @@ -59,14 +59,14 @@ type Driver interface { // possible. class.Parameters may be nil. // // The caller will wrap the error to include the parameter reference. - GetClassParameters(ctx context.Context, class *resourcev1alpha1.ResourceClass) (interface{}, error) + GetClassParameters(ctx context.Context, class *resourcev1alpha2.ResourceClass) (interface{}, error) // GetClaimParameters gets called to retrieve the parameter object // referenced by a claim. The content should be validated now if // possible. claim.Spec.Parameters may be nil. // // The caller will wrap the error to include the parameter reference. 
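// Sketch (not part of the patch): the shape of a driver-side implementation of the
// Driver interface above, using the v1alpha2 types. Only the methods whose
// signatures appear in these hunks are shown; Deallocate, UnsuitableNodes, and any
// real parameter handling are omitted.
package example

import (
	"context"

	resourcev1alpha2 "k8s.io/api/resource/v1alpha2"
)

type exampleDriver struct{}

func (d *exampleDriver) GetClassParameters(ctx context.Context, class *resourcev1alpha2.ResourceClass) (interface{}, error) {
	// A real driver would resolve class.ParametersRef here; nil parameters are allowed.
	return nil, nil
}

func (d *exampleDriver) GetClaimParameters(ctx context.Context, claim *resourcev1alpha2.ResourceClaim, class *resourcev1alpha2.ResourceClass, classParameters interface{}) (interface{}, error) {
	// Likewise resolve claim.Spec.ParametersRef; nil means "no extra parameters".
	return nil, nil
}

func (d *exampleDriver) Allocate(ctx context.Context, claim *resourcev1alpha2.ResourceClaim, claimParameters interface{}, class *resourcev1alpha2.ResourceClass, classParameters interface{}, selectedNode string) (*resourcev1alpha2.AllocationResult, error) {
	// selectedNode is empty for immediate allocation; for delayed allocation it is
	// the node chosen by the scheduler.
	return &resourcev1alpha2.AllocationResult{}, nil
}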
- GetClaimParameters(ctx context.Context, claim *resourcev1alpha1.ResourceClaim, class *resourcev1alpha1.ResourceClass, classParameters interface{}) (interface{}, error) + GetClaimParameters(ctx context.Context, claim *resourcev1alpha2.ResourceClaim, class *resourcev1alpha2.ResourceClass, classParameters interface{}) (interface{}, error) // Allocate gets called when a ResourceClaim is ready to be allocated. // The selectedNode is empty for ResourceClaims with immediate @@ -85,7 +85,7 @@ type Driver interface { // // The objects are read-only and must not be modified. This call // must be idempotent. - Allocate(ctx context.Context, claim *resourcev1alpha1.ResourceClaim, claimParameters interface{}, class *resourcev1alpha1.ResourceClass, classParameters interface{}, selectedNode string) (*resourcev1alpha1.AllocationResult, error) + Allocate(ctx context.Context, claim *resourcev1alpha2.ResourceClaim, claimParameters interface{}, class *resourcev1alpha2.ResourceClass, classParameters interface{}, selectedNode string) (*resourcev1alpha2.AllocationResult, error) // Deallocate gets called when a ResourceClaim is ready to be // freed. @@ -97,7 +97,7 @@ type Driver interface { // Deallocate may get called when a previous allocation got // interrupted. Deallocate must then stop any on-going allocation // activity and free resources before returning without an error. - Deallocate(ctx context.Context, claim *resourcev1alpha1.ResourceClaim) error + Deallocate(ctx context.Context, claim *resourcev1alpha2.ResourceClaim) error // UnsuitableNodes checks all pending claims with delayed allocation // for a pod. All claims are ready for allocation by the driver @@ -117,8 +117,8 @@ type Driver interface { // pod.Spec.ResourceClaim entry. type ClaimAllocation struct { PodClaimName string - Claim *resourcev1alpha1.ResourceClaim - Class *resourcev1alpha1.ResourceClass + Claim *resourcev1alpha2.ResourceClaim + Class *resourcev1alpha2.ResourceClass ClaimParameters interface{} ClassParameters interface{} @@ -136,10 +136,10 @@ type controller struct { kubeClient kubernetes.Interface queue workqueue.RateLimitingInterface eventRecorder record.EventRecorder - rcLister resourcev1alpha1listers.ResourceClassLister + rcLister resourcev1alpha2listers.ResourceClassLister rcSynced cache.InformerSynced claimCache cache.MutationCache - podSchedulingLister resourcev1alpha1listers.PodSchedulingLister + podSchedulingLister resourcev1alpha2listers.PodSchedulingLister claimSynced cache.InformerSynced podSchedulingSynced cache.InformerSynced } @@ -155,9 +155,9 @@ func New( kubeClient kubernetes.Interface, informerFactory informers.SharedInformerFactory) Controller { logger := klog.LoggerWithName(klog.FromContext(ctx), "resource controller") - rcInformer := informerFactory.Resource().V1alpha1().ResourceClasses() - claimInformer := informerFactory.Resource().V1alpha1().ResourceClaims() - podSchedulingInformer := informerFactory.Resource().V1alpha1().PodSchedulings() + rcInformer := informerFactory.Resource().V1alpha2().ResourceClasses() + claimInformer := informerFactory.Resource().V1alpha2().ResourceClaims() + podSchedulingInformer := informerFactory.Resource().V1alpha2().PodSchedulings() eventBroadcaster := record.NewBroadcaster() go func() { @@ -277,9 +277,9 @@ func getKey(obj interface{}) (string, error) { } prefix := "" switch obj.(type) { - case *resourcev1alpha1.ResourceClaim: + case *resourcev1alpha2.ResourceClaim: prefix = claimKeyPrefix - case *resourcev1alpha1.PodScheduling: + case *resourcev1alpha2.PodScheduling: prefix = 
podSchedulingKeyPrefix default: return "", fmt.Errorf("unexpected object: %T", obj) @@ -384,7 +384,7 @@ func (ctrl *controller) syncKey(ctx context.Context, key string) (obj runtime.Ob return } -func (ctrl *controller) getCachedClaim(ctx context.Context, key string) (*resourcev1alpha1.ResourceClaim, error) { +func (ctrl *controller) getCachedClaim(ctx context.Context, key string) (*resourcev1alpha2.ResourceClaim, error) { claimObj, exists, err := ctrl.claimCache.GetByKey(key) if !exists || k8serrors.IsNotFound(err) { klog.FromContext(ctx).V(5).Info("ResourceClaim not found, no need to process it") @@ -393,16 +393,16 @@ func (ctrl *controller) getCachedClaim(ctx context.Context, key string) (*resour if err != nil { return nil, err } - claim, ok := claimObj.(*resourcev1alpha1.ResourceClaim) + claim, ok := claimObj.(*resourcev1alpha2.ResourceClaim) if !ok { - return nil, fmt.Errorf("internal error: got %T instead of *resourcev1alpha1.ResourceClaim from claim cache", claimObj) + return nil, fmt.Errorf("internal error: got %T instead of *resourcev1alpha2.ResourceClaim from claim cache", claimObj) } return claim, nil } // syncClaim determines which next action may be needed for a ResourceClaim // and does it. -func (ctrl *controller) syncClaim(ctx context.Context, claim *resourcev1alpha1.ResourceClaim) error { +func (ctrl *controller) syncClaim(ctx context.Context, claim *resourcev1alpha2.ResourceClaim) error { var err error logger := klog.FromContext(ctx) @@ -433,7 +433,7 @@ func (ctrl *controller) syncClaim(ctx context.Context, claim *resourcev1alpha1.R claim.Status.Allocation = nil claim.Status.DriverName = "" claim.Status.DeallocationRequested = false - claim, err = ctrl.kubeClient.ResourceV1alpha1().ResourceClaims(claim.Namespace).UpdateStatus(ctx, claim, metav1.UpdateOptions{}) + claim, err = ctrl.kubeClient.ResourceV1alpha2().ResourceClaims(claim.Namespace).UpdateStatus(ctx, claim, metav1.UpdateOptions{}) if err != nil { return fmt.Errorf("remove allocation: %v", err) } @@ -448,7 +448,7 @@ func (ctrl *controller) syncClaim(ctx context.Context, claim *resourcev1alpha1.R if claim.Status.DeallocationRequested { // Still need to remove it. 
claim.Status.DeallocationRequested = false - claim, err = ctrl.kubeClient.ResourceV1alpha1().ResourceClaims(claim.Namespace).UpdateStatus(ctx, claim, metav1.UpdateOptions{}) + claim, err = ctrl.kubeClient.ResourceV1alpha2().ResourceClaims(claim.Namespace).UpdateStatus(ctx, claim, metav1.UpdateOptions{}) if err != nil { return fmt.Errorf("remove deallocation: %v", err) } @@ -456,7 +456,7 @@ func (ctrl *controller) syncClaim(ctx context.Context, claim *resourcev1alpha1.R } claim.Finalizers = ctrl.removeFinalizer(claim.Finalizers) - claim, err = ctrl.kubeClient.ResourceV1alpha1().ResourceClaims(claim.Namespace).Update(ctx, claim, metav1.UpdateOptions{}) + claim, err = ctrl.kubeClient.ResourceV1alpha2().ResourceClaims(claim.Namespace).Update(ctx, claim, metav1.UpdateOptions{}) if err != nil { return fmt.Errorf("remove finalizer: %v", err) } @@ -472,7 +472,7 @@ func (ctrl *controller) syncClaim(ctx context.Context, claim *resourcev1alpha1.R logger.V(5).Info("ResourceClaim is allocated") return nil } - if claim.Spec.AllocationMode != resourcev1alpha1.AllocationModeImmediate { + if claim.Spec.AllocationMode != resourcev1alpha2.AllocationModeImmediate { logger.V(5).Info("ResourceClaim waiting for first consumer") return nil } @@ -503,7 +503,7 @@ func (ctrl *controller) syncClaim(ctx context.Context, claim *resourcev1alpha1.R return ctrl.allocateClaim(ctx, claim, claimParameters, class, classParameters, "", nil) } -func (ctrl *controller) getParameters(ctx context.Context, claim *resourcev1alpha1.ResourceClaim, class *resourcev1alpha1.ResourceClass) (claimParameters, classParameters interface{}, err error) { +func (ctrl *controller) getParameters(ctx context.Context, claim *resourcev1alpha2.ResourceClaim, class *resourcev1alpha2.ResourceClass) (claimParameters, classParameters interface{}, err error) { classParameters, err = ctrl.driver.GetClassParameters(ctx, class) if err != nil { err = fmt.Errorf("class parameters %s: %v", class.ParametersRef, err) @@ -518,10 +518,10 @@ func (ctrl *controller) getParameters(ctx context.Context, claim *resourcev1alph } func (ctrl *controller) allocateClaim(ctx context.Context, - claim *resourcev1alpha1.ResourceClaim, claimParameters interface{}, - class *resourcev1alpha1.ResourceClass, classParameters interface{}, + claim *resourcev1alpha2.ResourceClaim, claimParameters interface{}, + class *resourcev1alpha2.ResourceClass, classParameters interface{}, selectedNode string, - selectedUser *resourcev1alpha1.ResourceClaimConsumerReference) error { + selectedUser *resourcev1alpha2.ResourceClaimConsumerReference) error { logger := klog.FromContext(ctx) if claim.Status.Allocation != nil { @@ -538,7 +538,7 @@ func (ctrl *controller) allocateClaim(ctx context.Context, logger.V(5).Info("Adding finalizer") claim.Finalizers = append(claim.Finalizers, ctrl.finalizer) var err error - claim, err = ctrl.kubeClient.ResourceV1alpha1().ResourceClaims(claim.Namespace).Update(ctx, claim, metav1.UpdateOptions{}) + claim, err = ctrl.kubeClient.ResourceV1alpha2().ResourceClaims(claim.Namespace).Update(ctx, claim, metav1.UpdateOptions{}) if err != nil { return fmt.Errorf("add finalizer: %v", err) } @@ -556,7 +556,7 @@ func (ctrl *controller) allocateClaim(ctx context.Context, claim.Status.ReservedFor = append(claim.Status.ReservedFor, *selectedUser) } logger.V(6).Info("Updating claim after allocation", "claim", claim) - claim, err = ctrl.kubeClient.ResourceV1alpha1().ResourceClaims(claim.Namespace).UpdateStatus(ctx, claim, metav1.UpdateOptions{}) + claim, err = 
ctrl.kubeClient.ResourceV1alpha2().ResourceClaims(claim.Namespace).UpdateStatus(ctx, claim, metav1.UpdateOptions{}) if err != nil { return fmt.Errorf("add allocation: %v", err) } @@ -576,7 +576,7 @@ func (ctrl *controller) checkPodClaim(ctx context.Context, pod *v1.Pod, podClaim return nil, err } } - if claim.Spec.AllocationMode != resourcev1alpha1.AllocationModeWaitForFirstConsumer { + if claim.Spec.AllocationMode != resourcev1alpha2.AllocationModeWaitForFirstConsumer { // Nothing to do for it as part of pod scheduling. return nil, nil } @@ -603,7 +603,7 @@ func (ctrl *controller) checkPodClaim(ctx context.Context, pod *v1.Pod, podClaim // syncClaim determines which next action may be needed for a PodScheduling object // and does it. -func (ctrl *controller) syncPodScheduling(ctx context.Context, podScheduling *resourcev1alpha1.PodScheduling) error { +func (ctrl *controller) syncPodScheduling(ctx context.Context, podScheduling *resourcev1alpha2.PodScheduling) error { logger := klog.FromContext(ctx) // Ignore deleted objects. @@ -685,7 +685,7 @@ func (ctrl *controller) syncPodScheduling(ctx context.Context, podScheduling *re logger.V(2).Info("skipping allocation for unsuitable selected node", "node", selectedNode) } else { logger.V(2).Info("allocation for selected node", "node", selectedNode) - selectedUser := &resourcev1alpha1.ResourceClaimConsumerReference{ + selectedUser := &resourcev1alpha2.ResourceClaimConsumerReference{ Resource: "pods", Name: pod.Name, UID: pod.UID, @@ -709,7 +709,7 @@ func (ctrl *controller) syncPodScheduling(ctx context.Context, podScheduling *re if i < 0 { // Add new entry. podScheduling.Status.ResourceClaims = append(podScheduling.Status.ResourceClaims, - resourcev1alpha1.ResourceClaimSchedulingStatus{ + resourcev1alpha2.ResourceClaimSchedulingStatus{ Name: delayed.PodClaimName, UnsuitableNodes: delayed.UnsuitableNodes, }) @@ -722,7 +722,7 @@ func (ctrl *controller) syncPodScheduling(ctx context.Context, podScheduling *re } if modified { logger.V(6).Info("Updating pod scheduling with modified unsuitable nodes", "podScheduling", podScheduling) - if _, err := ctrl.kubeClient.ResourceV1alpha1().PodSchedulings(podScheduling.Namespace).UpdateStatus(ctx, podScheduling, metav1.UpdateOptions{}); err != nil { + if _, err := ctrl.kubeClient.ResourceV1alpha2().PodSchedulings(podScheduling.Namespace).UpdateStatus(ctx, podScheduling, metav1.UpdateOptions{}); err != nil { return fmt.Errorf("update unsuitable node status: %v", err) } } @@ -747,7 +747,7 @@ func (claims claimAllocations) MarshalLog() interface{} { var _ logr.Marshaler = claimAllocations{} // findClaim returns the index of the specified pod claim, -1 if not found. -func findClaim(claims []resourcev1alpha1.ResourceClaimSchedulingStatus, podClaimName string) int { +func findClaim(claims []resourcev1alpha2.ResourceClaimSchedulingStatus, podClaimName string) int { for i := range claims { if claims[i].Name == podClaimName { return i @@ -780,7 +780,7 @@ func stringsDiffer(a, b []string) bool { } // hasFinalizer checks if the claim has the finalizer of the driver. 
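// Sketch (not part of the patch): the reservedFor entry that the controller adds
// when it allocates for a selected node, extracted into a standalone helper for
// illustration. APIGroup is left empty because the consumer is a core/v1 Pod.
package example

import (
	v1 "k8s.io/api/core/v1"
	resourcev1alpha2 "k8s.io/api/resource/v1alpha2"
)

func podConsumerReference(pod *v1.Pod) resourcev1alpha2.ResourceClaimConsumerReference {
	return resourcev1alpha2.ResourceClaimConsumerReference{
		Resource: "pods",
		Name:     pod.Name,
		UID:      pod.UID,
	}
}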
-func (ctrl *controller) hasFinalizer(claim *resourcev1alpha1.ResourceClaim) bool { +func (ctrl *controller) hasFinalizer(claim *resourcev1alpha2.ResourceClaim) bool { for _, finalizer := range claim.Finalizers { if finalizer == ctrl.finalizer { return true diff --git a/staging/src/k8s.io/dynamic-resource-allocation/controller/controller_test.go b/staging/src/k8s.io/dynamic-resource-allocation/controller/controller_test.go index d9b6a3eb586..824762e6265 100644 --- a/staging/src/k8s.io/dynamic-resource-allocation/controller/controller_test.go +++ b/staging/src/k8s.io/dynamic-resource-allocation/controller/controller_test.go @@ -25,7 +25,7 @@ import ( "github.com/stretchr/testify/require" corev1 "k8s.io/api/core/v1" - resourcev1alpha1 "k8s.io/api/resource/v1alpha1" + resourcev1alpha2 "k8s.io/api/resource/v1alpha2" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/informers" @@ -46,14 +46,14 @@ func TestController(t *testing.T) { otherClassName := "other-class" ourFinalizer := driverName + "/deletion-protection" otherFinalizer := otherDriverName + "/deletion-protection" - classes := []*resourcev1alpha1.ResourceClass{ + classes := []*resourcev1alpha2.ResourceClass{ createClass(className, driverName), createClass(otherClassName, otherDriverName), } claim := createClaim(claimName, claimNamespace, className) otherClaim := createClaim(claimName, claimNamespace, otherClassName) delayedClaim := claim.DeepCopy() - delayedClaim.Spec.AllocationMode = resourcev1alpha1.AllocationModeWaitForFirstConsumer + delayedClaim.Spec.AllocationMode = resourcev1alpha2.AllocationModeWaitForFirstConsumer podName := "pod" podKey := "podscheduling:default/pod" pod := createPod(podName, claimNamespace, nil) @@ -64,51 +64,51 @@ func TestController(t *testing.T) { otherNodeName := "worker-2" unsuitableNodes := []string{otherNodeName} potentialNodes := []string{nodeName, otherNodeName} - withDeletionTimestamp := func(claim *resourcev1alpha1.ResourceClaim) *resourcev1alpha1.ResourceClaim { + withDeletionTimestamp := func(claim *resourcev1alpha2.ResourceClaim) *resourcev1alpha2.ResourceClaim { var deleted metav1.Time claim = claim.DeepCopy() claim.DeletionTimestamp = &deleted return claim } - withReservedFor := func(claim *resourcev1alpha1.ResourceClaim, pod *corev1.Pod) *resourcev1alpha1.ResourceClaim { + withReservedFor := func(claim *resourcev1alpha2.ResourceClaim, pod *corev1.Pod) *resourcev1alpha2.ResourceClaim { claim = claim.DeepCopy() - claim.Status.ReservedFor = append(claim.Status.ReservedFor, resourcev1alpha1.ResourceClaimConsumerReference{ + claim.Status.ReservedFor = append(claim.Status.ReservedFor, resourcev1alpha2.ResourceClaimConsumerReference{ Resource: "pods", Name: pod.Name, UID: pod.UID, }) return claim } - withFinalizer := func(claim *resourcev1alpha1.ResourceClaim, finalizer string) *resourcev1alpha1.ResourceClaim { + withFinalizer := func(claim *resourcev1alpha2.ResourceClaim, finalizer string) *resourcev1alpha2.ResourceClaim { claim = claim.DeepCopy() claim.Finalizers = append(claim.Finalizers, finalizer) return claim } - allocation := resourcev1alpha1.AllocationResult{} - withAllocate := func(claim *resourcev1alpha1.ResourceClaim) *resourcev1alpha1.ResourceClaim { + allocation := resourcev1alpha2.AllocationResult{} + withAllocate := func(claim *resourcev1alpha2.ResourceClaim) *resourcev1alpha2.ResourceClaim { // Any allocated claim must have our finalizer. 
claim = withFinalizer(claim, ourFinalizer) claim.Status.Allocation = &allocation claim.Status.DriverName = driverName return claim } - withDeallocate := func(claim *resourcev1alpha1.ResourceClaim) *resourcev1alpha1.ResourceClaim { + withDeallocate := func(claim *resourcev1alpha2.ResourceClaim) *resourcev1alpha2.ResourceClaim { claim.Status.DeallocationRequested = true return claim } - withSelectedNode := func(podScheduling *resourcev1alpha1.PodScheduling) *resourcev1alpha1.PodScheduling { + withSelectedNode := func(podScheduling *resourcev1alpha2.PodScheduling) *resourcev1alpha2.PodScheduling { podScheduling = podScheduling.DeepCopy() podScheduling.Spec.SelectedNode = nodeName return podScheduling } - withUnsuitableNodes := func(podScheduling *resourcev1alpha1.PodScheduling) *resourcev1alpha1.PodScheduling { + withUnsuitableNodes := func(podScheduling *resourcev1alpha2.PodScheduling) *resourcev1alpha2.PodScheduling { podScheduling = podScheduling.DeepCopy() podScheduling.Status.ResourceClaims = append(podScheduling.Status.ResourceClaims, - resourcev1alpha1.ResourceClaimSchedulingStatus{Name: podClaimName, UnsuitableNodes: unsuitableNodes}, + resourcev1alpha2.ResourceClaimSchedulingStatus{Name: podClaimName, UnsuitableNodes: unsuitableNodes}, ) return podScheduling } - withPotentialNodes := func(podScheduling *resourcev1alpha1.PodScheduling) *resourcev1alpha1.PodScheduling { + withPotentialNodes := func(podScheduling *resourcev1alpha2.PodScheduling) *resourcev1alpha2.PodScheduling { podScheduling = podScheduling.DeepCopy() podScheduling.Spec.PotentialNodes = potentialNodes return podScheduling @@ -119,10 +119,10 @@ func TestController(t *testing.T) { for name, test := range map[string]struct { key string driver mockDriver - classes []*resourcev1alpha1.ResourceClass + classes []*resourcev1alpha2.ResourceClass pod *corev1.Pod - podScheduling, expectedPodScheduling *resourcev1alpha1.PodScheduling - claim, expectedClaim *resourcev1alpha1.ResourceClaim + podScheduling, expectedPodScheduling *resourcev1alpha2.PodScheduling + claim, expectedClaim *resourcev1alpha2.ResourceClaim expectedError string }{ "invalid-key": { @@ -395,10 +395,10 @@ func TestController(t *testing.T) { initialObjects = append(initialObjects, test.claim) } kubeClient, informerFactory := fakeK8s(initialObjects) - rcInformer := informerFactory.Resource().V1alpha1().ResourceClasses() - claimInformer := informerFactory.Resource().V1alpha1().ResourceClaims() + rcInformer := informerFactory.Resource().V1alpha2().ResourceClasses() + claimInformer := informerFactory.Resource().V1alpha2().ResourceClaims() podInformer := informerFactory.Core().V1().Pods() - podSchedulingInformer := informerFactory.Resource().V1alpha1().PodSchedulings() + podSchedulingInformer := informerFactory.Resource().V1alpha2().PodSchedulings() // Order is important: on function exit, we first must // cancel, then wait (last-in-first-out). 
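// Sketch (not part of the patch): the informer wiring the test depends on, using
// the fake clientset directly instead of the test's fakeK8s helper. All three
// v1alpha2 informers must be synced before the controller is exercised.
package example

import (
	"context"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes/fake"
	"k8s.io/client-go/tools/cache"
)

func newTestInformers(ctx context.Context, objs ...runtime.Object) (informers.SharedInformerFactory, bool) {
	kubeClient := fake.NewSimpleClientset(objs...)
	factory := informers.NewSharedInformerFactory(kubeClient, 0)

	// Instantiate the informers before Start so the factory knows about them.
	rcInformer := factory.Resource().V1alpha2().ResourceClasses().Informer()
	claimInformer := factory.Resource().V1alpha2().ResourceClaims().Informer()
	schedulingInformer := factory.Resource().V1alpha2().PodSchedulings().Informer()

	factory.Start(ctx.Done())
	synced := cache.WaitForCacheSync(ctx.Done(), rcInformer.HasSynced, claimInformer.HasSynced, schedulingInformer.HasSynced)
	return factory, synced
}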
defer informerFactory.Shutdown() @@ -406,13 +406,13 @@ func TestController(t *testing.T) { for _, obj := range initialObjects { switch obj.(type) { - case *resourcev1alpha1.ResourceClass: + case *resourcev1alpha2.ResourceClass: require.NoError(t, rcInformer.Informer().GetStore().Add(obj), "add resource class") - case *resourcev1alpha1.ResourceClaim: + case *resourcev1alpha2.ResourceClaim: require.NoError(t, claimInformer.Informer().GetStore().Add(obj), "add resource claim") case *corev1.Pod: require.NoError(t, podInformer.Informer().GetStore().Add(obj), "add pod") - case *resourcev1alpha1.PodScheduling: + case *resourcev1alpha2.PodScheduling: require.NoError(t, podSchedulingInformer.Informer().GetStore().Add(obj), "add pod scheduling") default: t.Fatalf("unknown initialObject type: %+v", obj) @@ -425,9 +425,9 @@ func TestController(t *testing.T) { ctrl := New(ctx, driverName, driver, kubeClient, informerFactory) informerFactory.Start(ctx.Done()) if !cache.WaitForCacheSync(ctx.Done(), - informerFactory.Resource().V1alpha1().ResourceClasses().Informer().HasSynced, - informerFactory.Resource().V1alpha1().ResourceClaims().Informer().HasSynced, - informerFactory.Resource().V1alpha1().PodSchedulings().Informer().HasSynced, + informerFactory.Resource().V1alpha2().ResourceClasses().Informer().HasSynced, + informerFactory.Resource().V1alpha2().ResourceClaims().Informer().HasSynced, + informerFactory.Resource().V1alpha2().PodSchedulings().Informer().HasSynced, ) { t.Fatal("could not sync caches") } @@ -441,17 +441,17 @@ func TestController(t *testing.T) { if err != nil && err.Error() != test.expectedError { t.Fatalf("expected error %q, got %q", test.expectedError, err.Error()) } - claims, err := kubeClient.ResourceV1alpha1().ResourceClaims("").List(ctx, metav1.ListOptions{}) + claims, err := kubeClient.ResourceV1alpha2().ResourceClaims("").List(ctx, metav1.ListOptions{}) require.NoError(t, err, "list claims") - var expectedClaims []resourcev1alpha1.ResourceClaim + var expectedClaims []resourcev1alpha2.ResourceClaim if test.expectedClaim != nil { expectedClaims = append(expectedClaims, *test.expectedClaim) } assert.Equal(t, expectedClaims, claims.Items) - podSchedulings, err := kubeClient.ResourceV1alpha1().PodSchedulings("").List(ctx, metav1.ListOptions{}) + podSchedulings, err := kubeClient.ResourceV1alpha2().PodSchedulings("").List(ctx, metav1.ListOptions{}) require.NoError(t, err, "list pod schedulings") - var expectedPodSchedulings []resourcev1alpha1.PodScheduling + var expectedPodSchedulings []resourcev1alpha2.PodScheduling if test.expectedPodScheduling != nil { expectedPodSchedulings = append(expectedPodSchedulings, *test.expectedPodScheduling) } @@ -479,7 +479,7 @@ type mockDriver struct { type allocate struct { selectedNode string - allocResult *resourcev1alpha1.AllocationResult + allocResult *resourcev1alpha2.AllocationResult allocErr error } @@ -509,7 +509,7 @@ func (m mockDriver) expectUnsuitableNodes(expected map[string][]string, err erro return m } -func (m mockDriver) GetClassParameters(ctx context.Context, class *resourcev1alpha1.ResourceClass) (interface{}, error) { +func (m mockDriver) GetClassParameters(ctx context.Context, class *resourcev1alpha2.ResourceClass) (interface{}, error) { m.t.Logf("GetClassParameters(%s)", class) result, ok := m.classParameters[class.Name] if !ok { @@ -521,7 +521,7 @@ func (m mockDriver) GetClassParameters(ctx context.Context, class *resourcev1alp return result, nil } -func (m mockDriver) GetClaimParameters(ctx context.Context, claim 
*resourcev1alpha1.ResourceClaim, class *resourcev1alpha1.ResourceClass, classParameters interface{}) (interface{}, error) { +func (m mockDriver) GetClaimParameters(ctx context.Context, claim *resourcev1alpha2.ResourceClaim, class *resourcev1alpha2.ResourceClass, classParameters interface{}) (interface{}, error) { m.t.Logf("GetClaimParameters(%s)", claim) result, ok := m.claimParameters[claim.Name] if !ok { @@ -533,7 +533,7 @@ func (m mockDriver) GetClaimParameters(ctx context.Context, claim *resourcev1alp return result, nil } -func (m mockDriver) Allocate(ctx context.Context, claim *resourcev1alpha1.ResourceClaim, claimParameters interface{}, class *resourcev1alpha1.ResourceClass, classParameters interface{}, selectedNode string) (*resourcev1alpha1.AllocationResult, error) { +func (m mockDriver) Allocate(ctx context.Context, claim *resourcev1alpha2.ResourceClaim, claimParameters interface{}, class *resourcev1alpha2.ResourceClass, classParameters interface{}, selectedNode string) (*resourcev1alpha2.AllocationResult, error) { m.t.Logf("Allocate(%s)", claim) allocate, ok := m.allocate[claim.Name] if !ok { @@ -543,7 +543,7 @@ func (m mockDriver) Allocate(ctx context.Context, claim *resourcev1alpha1.Resour return allocate.allocResult, allocate.allocErr } -func (m mockDriver) Deallocate(ctx context.Context, claim *resourcev1alpha1.ResourceClaim) error { +func (m mockDriver) Deallocate(ctx context.Context, claim *resourcev1alpha2.ResourceClaim) error { m.t.Logf("Deallocate(%s)", claim) err, ok := m.deallocate[claim.Name] if !ok { @@ -577,8 +577,8 @@ func (m mockDriver) UnsuitableNodes(ctx context.Context, pod *corev1.Pod, claims return nil } -func createClass(className, driverName string) *resourcev1alpha1.ResourceClass { - return &resourcev1alpha1.ResourceClass{ +func createClass(className, driverName string) *resourcev1alpha2.ResourceClass { + return &resourcev1alpha2.ResourceClass{ ObjectMeta: metav1.ObjectMeta{ Name: className, }, @@ -586,15 +586,15 @@ func createClass(className, driverName string) *resourcev1alpha1.ResourceClass { } } -func createClaim(claimName, claimNamespace, className string) *resourcev1alpha1.ResourceClaim { - return &resourcev1alpha1.ResourceClaim{ +func createClaim(claimName, claimNamespace, className string) *resourcev1alpha2.ResourceClaim { + return &resourcev1alpha2.ResourceClaim{ ObjectMeta: metav1.ObjectMeta{ Name: claimName, Namespace: claimNamespace, }, - Spec: resourcev1alpha1.ResourceClaimSpec{ + Spec: resourcev1alpha2.ResourceClaimSpec{ ResourceClassName: className, - AllocationMode: resourcev1alpha1.AllocationModeImmediate, + AllocationMode: resourcev1alpha2.AllocationModeImmediate, }, } } @@ -620,9 +620,9 @@ func createPod(podName, podNamespace string, claims map[string]string) *corev1.P return pod } -func createPodScheduling(pod *corev1.Pod) *resourcev1alpha1.PodScheduling { +func createPodScheduling(pod *corev1.Pod) *resourcev1alpha2.PodScheduling { controller := true - return &resourcev1alpha1.PodScheduling{ + return &resourcev1alpha2.PodScheduling{ ObjectMeta: metav1.ObjectMeta{ Name: pod.Name, Namespace: pod.Namespace, diff --git a/staging/src/k8s.io/dynamic-resource-allocation/resourceclaim/resourceclaim.go b/staging/src/k8s.io/dynamic-resource-allocation/resourceclaim/resourceclaim.go index 5d5eb94c5e8..cfe35988962 100644 --- a/staging/src/k8s.io/dynamic-resource-allocation/resourceclaim/resourceclaim.go +++ b/staging/src/k8s.io/dynamic-resource-allocation/resourceclaim/resourceclaim.go @@ -27,7 +27,7 @@ import ( "fmt" v1 "k8s.io/api/core/v1" - 
resourcev1alpha1 "k8s.io/api/resource/v1alpha1" + resourcev1alpha2 "k8s.io/api/resource/v1alpha2" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -53,7 +53,7 @@ func Name(pod *v1.Pod, podClaim *v1.PodResourceClaim) string { // was created for the Pod. It returns an error that is informative // enough to be returned by the caller without adding further details // about the Pod or ResourceClaim. -func IsForPod(pod *v1.Pod, claim *resourcev1alpha1.ResourceClaim) error { +func IsForPod(pod *v1.Pod, claim *resourcev1alpha2.ResourceClaim) error { // Checking the namespaces is just a precaution. The caller should // never pass in a ResourceClaim that isn't from the same namespace as the // Pod. @@ -65,7 +65,7 @@ func IsForPod(pod *v1.Pod, claim *resourcev1alpha1.ResourceClaim) error { // IsReservedForPod checks whether a claim lists the pod as one of the objects // that the claim was reserved for. -func IsReservedForPod(pod *v1.Pod, claim *resourcev1alpha1.ResourceClaim) bool { +func IsReservedForPod(pod *v1.Pod, claim *resourcev1alpha2.ResourceClaim) bool { for _, reserved := range claim.Status.ReservedFor { if reserved.UID == pod.UID { return true @@ -75,7 +75,7 @@ func IsReservedForPod(pod *v1.Pod, claim *resourcev1alpha1.ResourceClaim) bool { } // CanBeReserved checks whether the claim could be reserved for another object. -func CanBeReserved(claim *resourcev1alpha1.ResourceClaim) bool { +func CanBeReserved(claim *resourcev1alpha2.ResourceClaim) bool { return claim.Status.Allocation.Shareable || len(claim.Status.ReservedFor) == 0 } diff --git a/staging/src/k8s.io/dynamic-resource-allocation/resourceclaim/resourceclaim_test.go b/staging/src/k8s.io/dynamic-resource-allocation/resourceclaim/resourceclaim_test.go index 75dd49ca4df..0518ece1e25 100644 --- a/staging/src/k8s.io/dynamic-resource-allocation/resourceclaim/resourceclaim_test.go +++ b/staging/src/k8s.io/dynamic-resource-allocation/resourceclaim/resourceclaim_test.go @@ -21,7 +21,7 @@ import ( "testing" v1 "k8s.io/api/core/v1" - resourcev1alpha1 "k8s.io/api/resource/v1alpha1" + resourcev1alpha2 "k8s.io/api/resource/v1alpha2" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" ) @@ -48,14 +48,14 @@ func TestResourceClaimIsForPod(t *testing.T) { UID: newUID(), }, } - claimNoOwner := &resourcev1alpha1.ResourceClaim{ + claimNoOwner := &resourcev1alpha2.ResourceClaim{ ObjectMeta: metav1.ObjectMeta{ Namespace: "kube-system", Name: "claimNoOwner", UID: newUID(), }, } - claimWithOwner := &resourcev1alpha1.ResourceClaim{ + claimWithOwner := &resourcev1alpha2.ResourceClaim{ ObjectMeta: metav1.ObjectMeta{ Namespace: "kube-system", Name: "claimNoOwner", @@ -68,7 +68,7 @@ func TestResourceClaimIsForPod(t *testing.T) { }, }, } - userClaimWithOwner := &resourcev1alpha1.ResourceClaim{ + userClaimWithOwner := &resourcev1alpha2.ResourceClaim{ ObjectMeta: metav1.ObjectMeta{ Namespace: "user-namespace", Name: "userClaimWithOwner", @@ -84,7 +84,7 @@ func TestResourceClaimIsForPod(t *testing.T) { testcases := map[string]struct { pod *v1.Pod - claim *resourcev1alpha1.ResourceClaim + claim *resourcev1alpha2.ResourceClaim expectedError string }{ "owned": { diff --git a/test/e2e/dra/dra.go b/test/e2e/dra/dra.go index f1cc0b9ff82..59dc3376da3 100644 --- a/test/e2e/dra/dra.go +++ b/test/e2e/dra/dra.go @@ -28,7 +28,7 @@ import ( "github.com/onsi/gomega" v1 "k8s.io/api/core/v1" - resourcev1alpha1 "k8s.io/api/resource/v1alpha1" + resourcev1alpha2 "k8s.io/api/resource/v1alpha2" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 
"k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" @@ -74,7 +74,7 @@ var _ = ginkgo.Describe("[sig-node] DRA [Feature:DynamicResourceAllocation]", fu ginkgo.By("waiting for container startup to fail") parameters := b.parameters() - pod, template := b.podInline(resourcev1alpha1.AllocationModeWaitForFirstConsumer) + pod, template := b.podInline(resourcev1alpha2.AllocationModeWaitForFirstConsumer) b.create(ctx, parameters, pod, template) @@ -97,7 +97,7 @@ var _ = ginkgo.Describe("[sig-node] DRA [Feature:DynamicResourceAllocation]", fu }) ginkgo.It("must not run a pod if a claim is not reserved for it", func(ctx context.Context) { parameters := b.parameters() - claim := b.externalClaim(resourcev1alpha1.AllocationModeImmediate) + claim := b.externalClaim(resourcev1alpha2.AllocationModeImmediate) pod := b.podExternal() // This bypasses scheduling and therefore the pod gets @@ -120,7 +120,7 @@ var _ = ginkgo.Describe("[sig-node] DRA [Feature:DynamicResourceAllocation]", fu }) ginkgo.It("must unprepare resources for force-deleted pod", func(ctx context.Context) { parameters := b.parameters() - claim := b.externalClaim(resourcev1alpha1.AllocationModeImmediate) + claim := b.externalClaim(resourcev1alpha2.AllocationModeImmediate) pod := b.podExternal() zero := int64(0) pod.Spec.TerminationGracePeriodSeconds = &zero @@ -153,7 +153,7 @@ var _ = ginkgo.Describe("[sig-node] DRA [Feature:DynamicResourceAllocation]", fu ginkgo.It("supports claim and class parameters", func(ctx context.Context) { classParameters := b.parameters("x", "y") claimParameters := b.parameters() - pod, template := b.podInline(resourcev1alpha1.AllocationModeWaitForFirstConsumer) + pod, template := b.podInline(resourcev1alpha2.AllocationModeWaitForFirstConsumer) b.create(ctx, classParameters, claimParameters, pod, template) @@ -168,7 +168,7 @@ var _ = ginkgo.Describe("[sig-node] DRA [Feature:DynamicResourceAllocation]", fu // claimTests tries out several different combinations of pods with // claims, both inline and external. 
- claimTests := func(allocationMode resourcev1alpha1.AllocationMode) { + claimTests := func(allocationMode resourcev1alpha2.AllocationMode) { ginkgo.It("supports simple pod referencing inline resource claim", func(ctx context.Context) { parameters := b.parameters() pod, template := b.podInline(allocationMode) @@ -233,11 +233,11 @@ var _ = ginkgo.Describe("[sig-node] DRA [Feature:DynamicResourceAllocation]", fu } ginkgo.Context("with delayed allocation", func() { - claimTests(resourcev1alpha1.AllocationModeWaitForFirstConsumer) + claimTests(resourcev1alpha2.AllocationModeWaitForFirstConsumer) }) ginkgo.Context("with immediate allocation", func() { - claimTests(resourcev1alpha1.AllocationModeImmediate) + claimTests(resourcev1alpha2.AllocationModeImmediate) }) }) @@ -273,7 +273,7 @@ var _ = ginkgo.Describe("[sig-node] DRA [Feature:DynamicResourceAllocation]", fu } pod1 := createPod() pod2 := createPod() - claim := b.externalClaim(resourcev1alpha1.AllocationModeWaitForFirstConsumer) + claim := b.externalClaim(resourcev1alpha2.AllocationModeWaitForFirstConsumer) b.create(ctx, parameters, claim, pod1, pod2) for _, pod := range []*v1.Pod{pod1, pod2} { @@ -293,7 +293,7 @@ var _ = ginkgo.Describe("[sig-node] DRA [Feature:DynamicResourceAllocation]", fu }) b := newBuilder(f, driver) - tests := func(allocationMode resourcev1alpha1.AllocationMode) { + tests := func(allocationMode resourcev1alpha2.AllocationMode) { ginkgo.It("uses all resources", func(ctx context.Context) { var objs = []klog.KMetadata{ b.parameters(), @@ -335,11 +335,11 @@ var _ = ginkgo.Describe("[sig-node] DRA [Feature:DynamicResourceAllocation]", fu } ginkgo.Context("with delayed allocation", func() { - tests(resourcev1alpha1.AllocationModeWaitForFirstConsumer) + tests(resourcev1alpha2.AllocationModeWaitForFirstConsumer) }) ginkgo.Context("with immediate allocation", func() { - tests(resourcev1alpha1.AllocationModeImmediate) + tests(resourcev1alpha2.AllocationModeImmediate) }) }) @@ -351,8 +351,8 @@ var _ = ginkgo.Describe("[sig-node] DRA [Feature:DynamicResourceAllocation]", fu MaxAllocations: 2, Nodes: nodes.NodeNames, - AllocateWrapper: func(ctx context.Context, claim *resourcev1alpha1.ResourceClaim, claimParameters interface{}, class *resourcev1alpha1.ResourceClass, classParameters interface{}, selectedNode string, - handler func(ctx context.Context, claim *resourcev1alpha1.ResourceClaim, claimParameters interface{}, class *resourcev1alpha1.ResourceClass, classParameters interface{}, selectedNode string) (result *resourcev1alpha1.AllocationResult, err error)) (result *resourcev1alpha1.AllocationResult, err error) { + AllocateWrapper: func(ctx context.Context, claim *resourcev1alpha2.ResourceClaim, claimParameters interface{}, class *resourcev1alpha2.ResourceClass, classParameters interface{}, selectedNode string, + handler func(ctx context.Context, claim *resourcev1alpha2.ResourceClaim, claimParameters interface{}, class *resourcev1alpha2.ResourceClass, classParameters interface{}, selectedNode string) (result *resourcev1alpha2.AllocationResult, err error)) (result *resourcev1alpha2.AllocationResult, err error) { return allocateWrapper(ctx, claim, claimParameters, class, classParameters, selectedNode, handler) }, } @@ -373,8 +373,8 @@ var _ = ginkgo.Describe("[sig-node] DRA [Feature:DynamicResourceAllocation]", fu defer cancel() parameters := b.parameters() - claim1 := b.externalClaim(resourcev1alpha1.AllocationModeWaitForFirstConsumer) - claim2 := b.externalClaim(resourcev1alpha1.AllocationModeWaitForFirstConsumer) + claim1 := 
b.externalClaim(resourcev1alpha2.AllocationModeWaitForFirstConsumer) + claim2 := b.externalClaim(resourcev1alpha2.AllocationModeWaitForFirstConsumer) pod1 := b.podExternal() pod1.Spec.ResourceClaims = append(pod1.Spec.ResourceClaims, v1.PodResourceClaim{ @@ -389,11 +389,11 @@ var _ = ginkgo.Describe("[sig-node] DRA [Feature:DynamicResourceAllocation]", fu blockClaim, cancelBlockClaim := context.WithCancel(ctx) defer cancelBlockClaim() var allocated int32 - allocateWrapper = func(ctx context.Context, claim *resourcev1alpha1.ResourceClaim, claimParameters interface{}, - class *resourcev1alpha1.ResourceClass, classParameters interface{}, selectedNode string, - handler func(ctx context.Context, claim *resourcev1alpha1.ResourceClaim, claimParameters interface{}, - class *resourcev1alpha1.ResourceClass, classParameters interface{}, selectedNode string) (result *resourcev1alpha1.AllocationResult, err error), - ) (result *resourcev1alpha1.AllocationResult, err error) { + allocateWrapper = func(ctx context.Context, claim *resourcev1alpha2.ResourceClaim, claimParameters interface{}, + class *resourcev1alpha2.ResourceClass, classParameters interface{}, selectedNode string, + handler func(ctx context.Context, claim *resourcev1alpha2.ResourceClaim, claimParameters interface{}, + class *resourcev1alpha2.ResourceClass, classParameters interface{}, selectedNode string) (result *resourcev1alpha2.AllocationResult, err error), + ) (result *resourcev1alpha2.AllocationResult, err error) { oldAllocated := atomic.AddInt32(&allocated, 0) if oldAllocated == 1 && strings.HasPrefix(claim.Name, "external-claim") { <-blockClaim.Done() @@ -409,7 +409,7 @@ var _ = ginkgo.Describe("[sig-node] DRA [Feature:DynamicResourceAllocation]", fu ginkgo.By("waiting for one claim to be allocated") var nodeSelector *v1.NodeSelector gomega.Eventually(ctx, func(ctx context.Context) (int, error) { - claims, err := f.ClientSet.ResourceV1alpha1().ResourceClaims(f.Namespace.Name).List(ctx, metav1.ListOptions{}) + claims, err := f.ClientSet.ResourceV1alpha2().ResourceClaims(f.Namespace.Name).List(ctx, metav1.ListOptions{}) if err != nil { return 0, err } @@ -429,7 +429,7 @@ var _ = ginkgo.Describe("[sig-node] DRA [Feature:DynamicResourceAllocation]", fu // the node selector looks like and can // directly access the key and value from it. ginkgo.By(fmt.Sprintf("create second pod on the same node %s", nodeSelector)) - pod2, template2 := b.podInline(resourcev1alpha1.AllocationModeWaitForFirstConsumer) + pod2, template2 := b.podInline(resourcev1alpha2.AllocationModeWaitForFirstConsumer) req := nodeSelector.NodeSelectorTerms[0].MatchExpressions[0] node := req.Values[0] pod2.Spec.NodeSelector = map[string]string{req.Key: node} @@ -476,8 +476,8 @@ var _ = ginkgo.Describe("[sig-node] DRA [Feature:DynamicResourceAllocation]", fu ginkgo.It("work", func(ctx context.Context) { parameters1 := b1.parameters() parameters2 := b2.parameters() - claim1 := b1.externalClaim(resourcev1alpha1.AllocationModeWaitForFirstConsumer) - claim2 := b2.externalClaim(resourcev1alpha1.AllocationModeWaitForFirstConsumer) + claim1 := b1.externalClaim(resourcev1alpha2.AllocationModeWaitForFirstConsumer) + claim2 := b2.externalClaim(resourcev1alpha2.AllocationModeWaitForFirstConsumer) pod := b1.podExternal() pod.Spec.ResourceClaims = append(pod.Spec.ResourceClaims, v1.PodResourceClaim{ @@ -513,8 +513,8 @@ func (b *builder) className() string { // class returns the resource class that the builder's other objects // reference. 
-func (b *builder) class() *resourcev1alpha1.ResourceClass { - class := &resourcev1alpha1.ResourceClass{ +func (b *builder) class() *resourcev1alpha2.ResourceClass { + class := &resourcev1alpha2.ResourceClass{ ObjectMeta: metav1.ObjectMeta{ Name: b.className(), }, @@ -522,7 +522,7 @@ func (b *builder) class() *resourcev1alpha1.ResourceClass { SuitableNodes: b.nodeSelector(), } if b.classParametersName != "" { - class.ParametersRef = &resourcev1alpha1.ResourceClassParametersReference{ + class.ParametersRef = &resourcev1alpha2.ResourceClassParametersReference{ Kind: "ConfigMap", Name: b.classParametersName, Namespace: b.f.Namespace.Name, @@ -551,19 +551,19 @@ func (b *builder) nodeSelector() *v1.NodeSelector { // externalClaim returns external resource claim // that test pods can reference -func (b *builder) externalClaim(allocationMode resourcev1alpha1.AllocationMode) *resourcev1alpha1.ResourceClaim { +func (b *builder) externalClaim(allocationMode resourcev1alpha2.AllocationMode) *resourcev1alpha2.ResourceClaim { b.claimCounter++ name := "external-claim" + b.driver.NameSuffix // This is what podExternal expects. if b.claimCounter > 1 { name += fmt.Sprintf("-%d", b.claimCounter) } - return &resourcev1alpha1.ResourceClaim{ + return &resourcev1alpha2.ResourceClaim{ ObjectMeta: metav1.ObjectMeta{ Name: name, }, - Spec: resourcev1alpha1.ResourceClaimSpec{ + Spec: resourcev1alpha2.ResourceClaimSpec{ ResourceClassName: b.className(), - ParametersRef: &resourcev1alpha1.ResourceClaimParametersReference{ + ParametersRef: &resourcev1alpha2.ResourceClaimParametersReference{ Kind: "ConfigMap", Name: b.parametersName(), }, @@ -632,7 +632,7 @@ func (b *builder) pod() *v1.Pod { } // makePodInline adds an inline resource claim with default class name and parameters. 
-func (b *builder) podInline(allocationMode resourcev1alpha1.AllocationMode) (*v1.Pod, *resourcev1alpha1.ResourceClaimTemplate) { +func (b *builder) podInline(allocationMode resourcev1alpha2.AllocationMode) (*v1.Pod, *resourcev1alpha2.ResourceClaimTemplate) { pod := b.pod() pod.Spec.Containers[0].Name = "with-resource" podClaimName := "my-inline-claim" @@ -645,15 +645,15 @@ func (b *builder) podInline(allocationMode resourcev1alpha1.AllocationMode) (*v1 }, }, } - template := &resourcev1alpha1.ResourceClaimTemplate{ + template := &resourcev1alpha2.ResourceClaimTemplate{ ObjectMeta: metav1.ObjectMeta{ Name: pod.Name, Namespace: pod.Namespace, }, - Spec: resourcev1alpha1.ResourceClaimTemplateSpec{ - Spec: resourcev1alpha1.ResourceClaimSpec{ + Spec: resourcev1alpha2.ResourceClaimTemplateSpec{ + Spec: resourcev1alpha2.ResourceClaimSpec{ ResourceClassName: b.className(), - ParametersRef: &resourcev1alpha1.ResourceClaimParametersReference{ + ParametersRef: &resourcev1alpha2.ResourceClaimParametersReference{ Kind: "ConfigMap", Name: b.parametersName(), }, @@ -665,7 +665,7 @@ func (b *builder) podInline(allocationMode resourcev1alpha1.AllocationMode) (*v1 } // podInlineMultiple returns a pod with inline resource claim referenced by 3 containers -func (b *builder) podInlineMultiple(allocationMode resourcev1alpha1.AllocationMode) (*v1.Pod, *resourcev1alpha1.ResourceClaimTemplate) { +func (b *builder) podInlineMultiple(allocationMode resourcev1alpha2.AllocationMode) (*v1.Pod, *resourcev1alpha2.ResourceClaimTemplate) { pod, template := b.podInline(allocationMode) pod.Spec.Containers = append(pod.Spec.Containers, *pod.Spec.Containers[0].DeepCopy(), *pod.Spec.Containers[0].DeepCopy()) pod.Spec.Containers[1].Name = pod.Spec.Containers[1].Name + "-1" @@ -706,16 +706,16 @@ func (b *builder) create(ctx context.Context, objs ...klog.KMetadata) { ginkgo.By(fmt.Sprintf("creating %T %s", obj, obj.GetName()), func() { var err error switch obj := obj.(type) { - case *resourcev1alpha1.ResourceClass: - _, err = b.f.ClientSet.ResourceV1alpha1().ResourceClasses().Create(ctx, obj, metav1.CreateOptions{}) + case *resourcev1alpha2.ResourceClass: + _, err = b.f.ClientSet.ResourceV1alpha2().ResourceClasses().Create(ctx, obj, metav1.CreateOptions{}) case *v1.Pod: _, err = b.f.ClientSet.CoreV1().Pods(b.f.Namespace.Name).Create(ctx, obj, metav1.CreateOptions{}) case *v1.ConfigMap: _, err = b.f.ClientSet.CoreV1().ConfigMaps(b.f.Namespace.Name).Create(ctx, obj, metav1.CreateOptions{}) - case *resourcev1alpha1.ResourceClaim: - _, err = b.f.ClientSet.ResourceV1alpha1().ResourceClaims(b.f.Namespace.Name).Create(ctx, obj, metav1.CreateOptions{}) - case *resourcev1alpha1.ResourceClaimTemplate: - _, err = b.f.ClientSet.ResourceV1alpha1().ResourceClaimTemplates(b.f.Namespace.Name).Create(ctx, obj, metav1.CreateOptions{}) + case *resourcev1alpha2.ResourceClaim: + _, err = b.f.ClientSet.ResourceV1alpha2().ResourceClaims(b.f.Namespace.Name).Create(ctx, obj, metav1.CreateOptions{}) + case *resourcev1alpha2.ResourceClaimTemplate: + _, err = b.f.ClientSet.ResourceV1alpha2().ResourceClaimTemplates(b.f.Namespace.Name).Create(ctx, obj, metav1.CreateOptions{}) default: framework.Fail(fmt.Sprintf("internal error, unsupported type %T", obj), 1) } @@ -763,7 +763,7 @@ func (b *builder) setUp() { } func (b *builder) tearDown(ctx context.Context) { - err := b.f.ClientSet.ResourceV1alpha1().ResourceClasses().Delete(ctx, b.className(), metav1.DeleteOptions{}) + err := b.f.ClientSet.ResourceV1alpha2().ResourceClasses().Delete(ctx, b.className(), 
metav1.DeleteOptions{}) framework.ExpectNoError(err, "delete resource class") // Before we allow the namespace and all objects in it do be deleted by @@ -787,14 +787,14 @@ func (b *builder) tearDown(ctx context.Context) { return b.listTestPods(ctx) }).WithTimeout(time.Minute).Should(gomega.BeEmpty(), "remaining pods despite deletion") - claims, err := b.f.ClientSet.ResourceV1alpha1().ResourceClaims(b.f.Namespace.Name).List(ctx, metav1.ListOptions{}) + claims, err := b.f.ClientSet.ResourceV1alpha2().ResourceClaims(b.f.Namespace.Name).List(ctx, metav1.ListOptions{}) framework.ExpectNoError(err, "get resource claims") for _, claim := range claims.Items { if claim.DeletionTimestamp != nil { continue } ginkgo.By(fmt.Sprintf("deleting %T %s", &claim, klog.KObj(&claim))) - err := b.f.ClientSet.ResourceV1alpha1().ResourceClaims(b.f.Namespace.Name).Delete(ctx, claim.Name, metav1.DeleteOptions{}) + err := b.f.ClientSet.ResourceV1alpha2().ResourceClaims(b.f.Namespace.Name).Delete(ctx, claim.Name, metav1.DeleteOptions{}) if !apierrors.IsNotFound(err) { framework.ExpectNoError(err, "delete claim") } @@ -806,8 +806,8 @@ func (b *builder) tearDown(ctx context.Context) { } ginkgo.By("waiting for claims to be deallocated and deleted") - gomega.Eventually(func() ([]resourcev1alpha1.ResourceClaim, error) { - claims, err := b.f.ClientSet.ResourceV1alpha1().ResourceClaims(b.f.Namespace.Name).List(ctx, metav1.ListOptions{}) + gomega.Eventually(func() ([]resourcev1alpha2.ResourceClaim, error) { + claims, err := b.f.ClientSet.ResourceV1alpha2().ResourceClaims(b.f.Namespace.Name).List(ctx, metav1.ListOptions{}) if err != nil { return nil, err } diff --git a/test/e2e/dra/kind.yaml b/test/e2e/dra/kind.yaml index 9e3e780b8ea..7da297baa37 100644 --- a/test/e2e/dra/kind.yaml +++ b/test/e2e/dra/kind.yaml @@ -13,7 +13,7 @@ nodes: v: "5" apiServer: extraArgs: - runtime-config: "resource.k8s.io/v1alpha1=true" + runtime-config: "resource.k8s.io/v1alpha2=true" - | kind: InitConfiguration nodeRegistration: diff --git a/test/e2e/dra/test-driver/README.md b/test/e2e/dra/test-driver/README.md index 5b507f5c20d..a8362f5850c 100644 --- a/test/e2e/dra/test-driver/README.md +++ b/test/e2e/dra/test-driver/README.md @@ -55,7 +55,7 @@ kubelet<->dynamic resource allocation plugin interaction. 
To try out the feature, build Kubernetes, then in one console run: ```console -RUNTIME_CONFIG="resource.k8s.io/v1alpha1" FEATURE_GATES=DynamicResourceAllocation=true ALLOW_PRIVILEGED=1 ./hack/local-up-cluster.sh -O +RUNTIME_CONFIG="resource.k8s.io/v1alpha2" FEATURE_GATES=DynamicResourceAllocation=true ALLOW_PRIVILEGED=1 ./hack/local-up-cluster.sh -O ``` In another: diff --git a/test/e2e/dra/test-driver/app/controller.go b/test/e2e/dra/test-driver/app/controller.go index 22e12207a92..baaada8a329 100644 --- a/test/e2e/dra/test-driver/app/controller.go +++ b/test/e2e/dra/test-driver/app/controller.go @@ -28,7 +28,7 @@ import ( "sync" v1 "k8s.io/api/core/v1" - resourcev1alpha1 "k8s.io/api/resource/v1alpha1" + resourcev1alpha2 "k8s.io/api/resource/v1alpha2" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/informers" @@ -47,11 +47,11 @@ type Resources struct { AllocateWrapper AllocateWrapperType } -type AllocateWrapperType func(ctx context.Context, claim *resourcev1alpha1.ResourceClaim, claimParameters interface{}, - class *resourcev1alpha1.ResourceClass, classParameters interface{}, selectedNode string, - handler func(ctx context.Context, claim *resourcev1alpha1.ResourceClaim, claimParameters interface{}, - class *resourcev1alpha1.ResourceClass, classParameters interface{}, selectedNode string) (result *resourcev1alpha1.AllocationResult, err error), -) (result *resourcev1alpha1.AllocationResult, err error) +type AllocateWrapperType func(ctx context.Context, claim *resourcev1alpha2.ResourceClaim, claimParameters interface{}, + class *resourcev1alpha2.ResourceClass, classParameters interface{}, selectedNode string, + handler func(ctx context.Context, claim *resourcev1alpha2.ResourceClaim, claimParameters interface{}, + class *resourcev1alpha2.ResourceClass, classParameters interface{}, selectedNode string) (result *resourcev1alpha2.AllocationResult, err error), +) (result *resourcev1alpha2.AllocationResult, err error) type ExampleController struct { clientset kubernetes.Interface @@ -122,7 +122,7 @@ func (c *ExampleController) GetNumDeallocations() int64 { return c.numDeallocations } -func (c *ExampleController) GetClassParameters(ctx context.Context, class *resourcev1alpha1.ResourceClass) (interface{}, error) { +func (c *ExampleController) GetClassParameters(ctx context.Context, class *resourcev1alpha2.ResourceClass) (interface{}, error) { if class.ParametersRef != nil { if class.ParametersRef.APIGroup != "" || class.ParametersRef.Kind != "ConfigMap" { @@ -133,7 +133,7 @@ func (c *ExampleController) GetClassParameters(ctx context.Context, class *resou return nil, nil } -func (c *ExampleController) GetClaimParameters(ctx context.Context, claim *resourcev1alpha1.ResourceClaim, class *resourcev1alpha1.ResourceClass, classParameters interface{}) (interface{}, error) { +func (c *ExampleController) GetClaimParameters(ctx context.Context, claim *resourcev1alpha2.ResourceClaim, class *resourcev1alpha2.ResourceClass, classParameters interface{}) (interface{}, error) { if claim.Spec.ParametersRef != nil { if claim.Spec.ParametersRef.APIGroup != "" || claim.Spec.ParametersRef.Kind != "ConfigMap" { @@ -152,7 +152,7 @@ func (c *ExampleController) readParametersFromConfigMap(ctx context.Context, nam return configMap.Data, nil } -func (c *ExampleController) Allocate(ctx context.Context, claim *resourcev1alpha1.ResourceClaim, claimParameters interface{}, class *resourcev1alpha1.ResourceClass, classParameters interface{}, selectedNode string) (result 
*resourcev1alpha1.AllocationResult, err error) { +func (c *ExampleController) Allocate(ctx context.Context, claim *resourcev1alpha2.ResourceClaim, claimParameters interface{}, class *resourcev1alpha2.ResourceClass, classParameters interface{}, selectedNode string) (result *resourcev1alpha2.AllocationResult, err error) { if c.resources.AllocateWrapper != nil { return c.resources.AllocateWrapper(ctx, claim, claimParameters, class, classParameters, selectedNode, c.allocate) } @@ -160,7 +160,7 @@ func (c *ExampleController) Allocate(ctx context.Context, claim *resourcev1alpha } // allocate simply copies parameters as JSON map into ResourceHandle. -func (c *ExampleController) allocate(ctx context.Context, claim *resourcev1alpha1.ResourceClaim, claimParameters interface{}, class *resourcev1alpha1.ResourceClass, classParameters interface{}, selectedNode string) (result *resourcev1alpha1.AllocationResult, err error) { +func (c *ExampleController) allocate(ctx context.Context, claim *resourcev1alpha2.ResourceClaim, claimParameters interface{}, class *resourcev1alpha2.ResourceClass, classParameters interface{}, selectedNode string) (result *resourcev1alpha2.AllocationResult, err error) { logger := klog.LoggerWithValues(klog.LoggerWithName(klog.FromContext(ctx), "Allocate"), "claim", klog.KObj(claim), "uid", claim.UID) defer func() { logger.V(3).Info("done", "result", result, "err", err) @@ -210,7 +210,7 @@ func (c *ExampleController) allocate(ctx context.Context, claim *resourcev1alpha } } - allocation := &resourcev1alpha1.AllocationResult{ + allocation := &resourcev1alpha2.AllocationResult{ Shareable: c.resources.Shareable, } p := parameters{ @@ -252,7 +252,7 @@ func (c *ExampleController) allocate(ctx context.Context, claim *resourcev1alpha return allocation, nil } -func (c *ExampleController) Deallocate(ctx context.Context, claim *resourcev1alpha1.ResourceClaim) error { +func (c *ExampleController) Deallocate(ctx context.Context, claim *resourcev1alpha2.ResourceClaim) error { logger := klog.LoggerWithValues(klog.LoggerWithName(klog.FromContext(ctx), "Deallocate"), "claim", klog.KObj(claim), "uid", claim.UID) c.mutex.Lock() defer c.mutex.Unlock() diff --git a/test/e2e/dra/test-driver/deploy/example/broken-resourceclass.yaml b/test/e2e/dra/test-driver/deploy/example/broken-resourceclass.yaml index f4ecb6f2eb0..c6cf1c2ec9e 100644 --- a/test/e2e/dra/test-driver/deploy/example/broken-resourceclass.yaml +++ b/test/e2e/dra/test-driver/deploy/example/broken-resourceclass.yaml @@ -2,7 +2,7 @@ # When using it instead of a functional one, scheduling a pod leads to: # Warning FailedScheduling 16s default-scheduler 0/1 nodes are available: 1 excluded via potential node filter in resource class. 
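For context, the fields that actually make this class "broken" are not visible in the hunk above, which only touches apiVersion. Below is an illustrative reconstruction, not part of this diff, of what such a class can look like at the renamed version: the driverName is a placeholder, and the suitableNodes selector is assumed to require a label that no node carries, which is what produces the FailedScheduling warning quoted in the file's comment.

```yaml
# Illustrative sketch only: a ResourceClass whose suitableNodes selector
# matches no node, so every node is "excluded via potential node filter
# in resource class".
apiVersion: resource.k8s.io/v1alpha2
kind: ResourceClass
metadata:
  name: example
driverName: test-driver.example.com   # placeholder driver name
suitableNodes:
  nodeSelectorTerms:
  - matchExpressions:
    - key: no-such-label              # assumed label that no node carries
      operator: Exists
```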
-apiVersion: resource.k8s.io/v1alpha1 +apiVersion: resource.k8s.io/v1alpha2 kind: ResourceClass metadata: name: example diff --git a/test/e2e/dra/test-driver/deploy/example/pod-external.yaml b/test/e2e/dra/test-driver/deploy/example/pod-external.yaml index 4aea64fd0df..60db6bdf9ea 100644 --- a/test/e2e/dra/test-driver/deploy/example/pod-external.yaml +++ b/test/e2e/dra/test-driver/deploy/example/pod-external.yaml @@ -8,7 +8,7 @@ metadata: data: a: b --- -apiVersion: resource.k8s.io/v1alpha1 +apiVersion: resource.k8s.io/v1alpha2 kind: ResourceClaim metadata: name: external-claim diff --git a/test/e2e/dra/test-driver/deploy/example/pod-inline-multiple.yaml b/test/e2e/dra/test-driver/deploy/example/pod-inline-multiple.yaml index 55205b211cb..7d95b3ae026 100644 --- a/test/e2e/dra/test-driver/deploy/example/pod-inline-multiple.yaml +++ b/test/e2e/dra/test-driver/deploy/example/pod-inline-multiple.yaml @@ -6,7 +6,7 @@ metadata: data: a: b --- -apiVersion: resource.k8s.io/v1alpha1 +apiVersion: resource.k8s.io/v1alpha2 kind: ResourceClaimTemplate metadata: name: pause-template diff --git a/test/e2e/dra/test-driver/deploy/example/pod-inline.yaml b/test/e2e/dra/test-driver/deploy/example/pod-inline.yaml index 4b6cf6c5c10..13b177c8426 100644 --- a/test/e2e/dra/test-driver/deploy/example/pod-inline.yaml +++ b/test/e2e/dra/test-driver/deploy/example/pod-inline.yaml @@ -8,7 +8,7 @@ metadata: data: a: b --- -apiVersion: resource.k8s.io/v1alpha1 +apiVersion: resource.k8s.io/v1alpha2 kind: ResourceClaimTemplate metadata: name: test-inline-claim-template diff --git a/test/e2e/dra/test-driver/deploy/example/pod-shared.yaml b/test/e2e/dra/test-driver/deploy/example/pod-shared.yaml index b32a0e40381..c2ea092f421 100644 --- a/test/e2e/dra/test-driver/deploy/example/pod-shared.yaml +++ b/test/e2e/dra/test-driver/deploy/example/pod-shared.yaml @@ -8,7 +8,7 @@ metadata: data: a: b --- -apiVersion: resource.k8s.io/v1alpha1 +apiVersion: resource.k8s.io/v1alpha2 kind: ResourceClaim metadata: name: shared-claim diff --git a/test/e2e/dra/test-driver/deploy/example/resourceclaim.yaml b/test/e2e/dra/test-driver/deploy/example/resourceclaim.yaml index ce37440738d..705891324c9 100644 --- a/test/e2e/dra/test-driver/deploy/example/resourceclaim.yaml +++ b/test/e2e/dra/test-driver/deploy/example/resourceclaim.yaml @@ -6,7 +6,7 @@ metadata: data: a: b --- -apiVersion: resource.k8s.io/v1alpha1 +apiVersion: resource.k8s.io/v1alpha2 kind: ResourceClaim metadata: name: example diff --git a/test/e2e/dra/test-driver/deploy/example/resourceclass.yaml b/test/e2e/dra/test-driver/deploy/example/resourceclass.yaml index 42d5c2c9689..948b39fb18f 100644 --- a/test/e2e/dra/test-driver/deploy/example/resourceclass.yaml +++ b/test/e2e/dra/test-driver/deploy/example/resourceclass.yaml @@ -1,4 +1,4 @@ -apiVersion: resource.k8s.io/v1alpha1 +apiVersion: resource.k8s.io/v1alpha2 kind: ResourceClass metadata: name: example diff --git a/test/integration/apiserver/apply/reset_fields_test.go b/test/integration/apiserver/apply/reset_fields_test.go index a76aeb7fd94..2cefd6c7532 100644 --- a/test/integration/apiserver/apply/reset_fields_test.go +++ b/test/integration/apiserver/apply/reset_fields_test.go @@ -64,8 +64,8 @@ var resetFieldsStatusData = map[schema.GroupVersionResource]string{ gvr("storage.k8s.io", "v1", "volumeattachments"): `{"status": {"attached": false}}`, gvr("policy", "v1", "poddisruptionbudgets"): `{"status": {"currentHealthy": 25}}`, gvr("policy", "v1beta1", "poddisruptionbudgets"): `{"status": {"currentHealthy": 25}}`, - 
gvr("resource.k8s.io", "v1alpha1", "podschedulings"): `{"status": {"resourceClaims": [{"name": "my-claim", "unsuitableNodes": ["node2"]}]}}`, // Not really a conflict with status_test.go: Apply just stores both nodes. Conflict testing therefore gets disabled for podschedulings. - gvr("resource.k8s.io", "v1alpha1", "resourceclaims"): `{"status": {"driverName": "other.example.com"}}`, + gvr("resource.k8s.io", "v1alpha2", "podschedulings"): `{"status": {"resourceClaims": [{"name": "my-claim", "unsuitableNodes": ["node2"]}]}}`, // Not really a conflict with status_test.go: Apply just stores both nodes. Conflict testing therefore gets disabled for podschedulings. + gvr("resource.k8s.io", "v1alpha2", "resourceclaims"): `{"status": {"driverName": "other.example.com"}}`, gvr("internal.apiserver.k8s.io", "v1alpha1", "storageversions"): `{"status": {"commonEncodingVersion":"v1","storageVersions":[{"apiServerID":"1","decodableVersions":["v1","v2"],"encodingVersion":"v1"}],"conditions":[{"type":"AllEncodingVersionsEqual","status":"False","lastTransitionTime":"2020-01-01T00:00:00Z","reason":"allEncodingVersionsEqual","message":"all encoding versions are set to v1"}]}}`, // standard for []metav1.Condition gvr("admissionregistration.k8s.io", "v1alpha1", "validatingadmissionpolicies"): `{"status": {"conditions":[{"type":"Accepted","status":"True","lastTransitionTime":"2020-01-01T00:00:00Z","reason":"RuleApplied","message":"Rule was applied"}]}}`, @@ -148,10 +148,10 @@ var resetFieldsSpecData = map[schema.GroupVersionResource]string{ gvr("awesome.bears.com", "v3", "pandas"): `{"spec": {"replicas": 302}}`, gvr("apiregistration.k8s.io", "v1beta1", "apiservices"): `{"metadata": {"labels": {"a":"c"}}, "spec": {"group": "foo2.com"}}`, gvr("apiregistration.k8s.io", "v1", "apiservices"): `{"metadata": {"labels": {"a":"c"}}, "spec": {"group": "foo2.com"}}`, - gvr("resource.k8s.io", "v1alpha1", "podschedulings"): `{"spec": {"selectedNode": "node2name"}}`, - gvr("resource.k8s.io", "v1alpha1", "resourceclasses"): `{"driverName": "other.example.com"}`, - gvr("resource.k8s.io", "v1alpha1", "resourceclaims"): `{"spec": {"resourceClassName": "class2name"}}`, // ResourceClassName is immutable, but that doesn't matter for the test. - gvr("resource.k8s.io", "v1alpha1", "resourceclaimtemplates"): `{"spec": {"spec": {"resourceClassName": "class2name"}}}`, + gvr("resource.k8s.io", "v1alpha2", "podschedulings"): `{"spec": {"selectedNode": "node2name"}}`, + gvr("resource.k8s.io", "v1alpha2", "resourceclasses"): `{"driverName": "other.example.com"}`, + gvr("resource.k8s.io", "v1alpha2", "resourceclaims"): `{"spec": {"resourceClassName": "class2name"}}`, // ResourceClassName is immutable, but that doesn't matter for the test. 
+ gvr("resource.k8s.io", "v1alpha2", "resourceclaimtemplates"): `{"spec": {"spec": {"resourceClassName": "class2name"}}}`, gvr("internal.apiserver.k8s.io", "v1alpha1", "storageversions"): `{}`, gvr("admissionregistration.k8s.io", "v1alpha1", "validatingadmissionpolicies"): `{"metadata": {"labels": {"a":"c"}}, "spec": {"paramKind": {"apiVersion": "apps/v1", "kind": "Deployment"}}}`, } diff --git a/test/integration/apiserver/apply/status_test.go b/test/integration/apiserver/apply/status_test.go index b729c8baa6f..d6ac9ae27e9 100644 --- a/test/integration/apiserver/apply/status_test.go +++ b/test/integration/apiserver/apply/status_test.go @@ -54,8 +54,8 @@ var statusData = map[schema.GroupVersionResource]string{ gvr("storage.k8s.io", "v1", "volumeattachments"): `{"status": {"attached": true}}`, gvr("policy", "v1", "poddisruptionbudgets"): `{"status": {"currentHealthy": 5}}`, gvr("policy", "v1beta1", "poddisruptionbudgets"): `{"status": {"currentHealthy": 5}}`, - gvr("resource.k8s.io", "v1alpha1", "podschedulings"): `{"status": {"resourceClaims": [{"name": "my-claim", "unsuitableNodes": ["node1"]}]}}`, - gvr("resource.k8s.io", "v1alpha1", "resourceclaims"): `{"status": {"driverName": "example.com"}}`, + gvr("resource.k8s.io", "v1alpha2", "podschedulings"): `{"status": {"resourceClaims": [{"name": "my-claim", "unsuitableNodes": ["node1"]}]}}`, + gvr("resource.k8s.io", "v1alpha2", "resourceclaims"): `{"status": {"driverName": "example.com"}}`, gvr("internal.apiserver.k8s.io", "v1alpha1", "storageversions"): `{"status": {"commonEncodingVersion":"v1","storageVersions":[{"apiServerID":"1","decodableVersions":["v1","v2"],"encodingVersion":"v1"}],"conditions":[{"type":"AllEncodingVersionsEqual","status":"True","lastTransitionTime":"2020-01-01T00:00:00Z","reason":"allEncodingVersionsEqual","message":"all encoding versions are set to v1"}]}}`, // standard for []metav1.Condition gvr("admissionregistration.k8s.io", "v1alpha1", "validatingadmissionpolicies"): `{"status": {"conditions":[{"type":"Accepted","status":"False","lastTransitionTime":"2020-01-01T00:00:00Z","reason":"RuleApplied","message":"Rule was applied"}]}}`, diff --git a/test/integration/etcd/data.go b/test/integration/etcd/data.go index 76926439574..3f7c9c3a29d 100644 --- a/test/integration/etcd/data.go +++ b/test/integration/etcd/data.go @@ -456,20 +456,20 @@ func GetEtcdStorageDataForNamespace(namespace string) map[schema.GroupVersionRes }, // -- - // k8s.io/kubernetes/pkg/apis/resource/v1alpha1 - gvr("resource.k8s.io", "v1alpha1", "resourceclasses"): { + // k8s.io/kubernetes/pkg/apis/resource/v1alpha2 + gvr("resource.k8s.io", "v1alpha2", "resourceclasses"): { Stub: `{"metadata": {"name": "class1name"}, "driverName": "example.com"}`, ExpectedEtcdPath: "/registry/resourceclasses/class1name", }, - gvr("resource.k8s.io", "v1alpha1", "resourceclaims"): { + gvr("resource.k8s.io", "v1alpha2", "resourceclaims"): { Stub: `{"metadata": {"name": "claim1name"}, "spec": {"resourceClassName": "class1name", "allocationMode": "WaitForFirstConsumer"}}`, ExpectedEtcdPath: "/registry/resourceclaims/" + namespace + "/claim1name", }, - gvr("resource.k8s.io", "v1alpha1", "resourceclaimtemplates"): { + gvr("resource.k8s.io", "v1alpha2", "resourceclaimtemplates"): { Stub: `{"metadata": {"name": "claimtemplate1name"}, "spec": {"spec": {"resourceClassName": "class1name", "allocationMode": "WaitForFirstConsumer"}}}`, ExpectedEtcdPath: "/registry/resourceclaimtemplates/" + namespace + "/claimtemplate1name", }, - gvr("resource.k8s.io", "v1alpha1", "podschedulings"): 
{ + gvr("resource.k8s.io", "v1alpha2", "podschedulings"): { Stub: `{"metadata": {"name": "pod1name"}, "spec": {"selectedNode": "node1name", "potentialNodes": ["node1name", "node2name"]}}`, ExpectedEtcdPath: "/registry/podschedulings/" + namespace + "/pod1name", }, diff --git a/vendor/modules.txt b/vendor/modules.txt index 9052b3faa2f..1c00ed6bb52 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1257,7 +1257,7 @@ k8s.io/api/policy/v1beta1 k8s.io/api/rbac/v1 k8s.io/api/rbac/v1alpha1 k8s.io/api/rbac/v1beta1 -k8s.io/api/resource/v1alpha1 +k8s.io/api/resource/v1alpha2 k8s.io/api/scheduling/v1 k8s.io/api/scheduling/v1alpha1 k8s.io/api/scheduling/v1beta1 @@ -1600,7 +1600,7 @@ k8s.io/client-go/applyconfigurations/policy/v1beta1 k8s.io/client-go/applyconfigurations/rbac/v1 k8s.io/client-go/applyconfigurations/rbac/v1alpha1 k8s.io/client-go/applyconfigurations/rbac/v1beta1 -k8s.io/client-go/applyconfigurations/resource/v1alpha1 +k8s.io/client-go/applyconfigurations/resource/v1alpha2 k8s.io/client-go/applyconfigurations/scheduling/v1 k8s.io/client-go/applyconfigurations/scheduling/v1alpha1 k8s.io/client-go/applyconfigurations/scheduling/v1beta1 @@ -1673,7 +1673,7 @@ k8s.io/client-go/informers/rbac/v1 k8s.io/client-go/informers/rbac/v1alpha1 k8s.io/client-go/informers/rbac/v1beta1 k8s.io/client-go/informers/resource -k8s.io/client-go/informers/resource/v1alpha1 +k8s.io/client-go/informers/resource/v1alpha2 k8s.io/client-go/informers/scheduling k8s.io/client-go/informers/scheduling/v1 k8s.io/client-go/informers/scheduling/v1alpha1 @@ -1771,8 +1771,8 @@ k8s.io/client-go/kubernetes/typed/rbac/v1alpha1 k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake k8s.io/client-go/kubernetes/typed/rbac/v1beta1 k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake -k8s.io/client-go/kubernetes/typed/resource/v1alpha1 -k8s.io/client-go/kubernetes/typed/resource/v1alpha1/fake +k8s.io/client-go/kubernetes/typed/resource/v1alpha2 +k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake k8s.io/client-go/kubernetes/typed/scheduling/v1 k8s.io/client-go/kubernetes/typed/scheduling/v1/fake k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1 @@ -1823,7 +1823,7 @@ k8s.io/client-go/listers/policy/v1beta1 k8s.io/client-go/listers/rbac/v1 k8s.io/client-go/listers/rbac/v1alpha1 k8s.io/client-go/listers/rbac/v1beta1 -k8s.io/client-go/listers/resource/v1alpha1 +k8s.io/client-go/listers/resource/v1alpha2 k8s.io/client-go/listers/scheduling/v1 k8s.io/client-go/listers/scheduling/v1alpha1 k8s.io/client-go/listers/scheduling/v1beta1