Merge pull request #116556 from pohly/dra-podschedulingcontext

dra: PodScheduling -> PodSchedulingContext
Commit fbfc887a09 by Kubernetes Prow Robot, 2023-03-14 15:14:34 -07:00 (committed via GitHub)
54 changed files with 1840 additions and 1832 deletions
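The rename is mechanical: the PodScheduling kind, its List/Spec/Status companion types, and the podschedulings REST paths become PodSchedulingContext and podschedulingcontexts throughout the OpenAPI documents shown below. Purely as an illustrative sketch (not part of the diff), a minimal manifest for the renamed kind, built only from fields visible in the schema hunks that follow, might look like this; the object and node names are hypothetical, and the alpha API still requires the DynamicResourceAllocation feature gate:

```json
{
  "apiVersion": "resource.k8s.io/v1alpha2",
  "kind": "PodSchedulingContext",
  "metadata": {
    "name": "my-pod",
    "namespace": "default"
  },
  "spec": {
    "potentialNodes": [
      "node-a",
      "node-b"
    ]
  }
}
```

Before this change the same object would have used kind PodScheduling; the spec and status fields themselves are untouched by the rename.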


@ -13035,8 +13035,8 @@
}, },
"type": "object" "type": "object"
}, },
"io.k8s.api.resource.v1alpha2.PodScheduling": { "io.k8s.api.resource.v1alpha2.PodSchedulingContext": {
"description": "PodScheduling objects hold information that is needed to schedule a Pod with ResourceClaims that use \"WaitForFirstConsumer\" allocation mode.\n\nThis is an alpha type and requires enabling the DynamicResourceAllocation feature gate.", "description": "PodSchedulingContext objects hold information that is needed to schedule a Pod with ResourceClaims that use \"WaitForFirstConsumer\" allocation mode.\n\nThis is an alpha type and requires enabling the DynamicResourceAllocation feature gate.",
"properties": { "properties": {
"apiVersion": { "apiVersion": {
"description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
@ -13051,11 +13051,11 @@
"description": "Standard object metadata" "description": "Standard object metadata"
}, },
"spec": { "spec": {
"$ref": "#/definitions/io.k8s.api.resource.v1alpha2.PodSchedulingSpec", "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.PodSchedulingContextSpec",
"description": "Spec describes where resources for the Pod are needed." "description": "Spec describes where resources for the Pod are needed."
}, },
"status": { "status": {
"$ref": "#/definitions/io.k8s.api.resource.v1alpha2.PodSchedulingStatus", "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.PodSchedulingContextStatus",
"description": "Status describes where resources for the Pod can be allocated." "description": "Status describes where resources for the Pod can be allocated."
} }
}, },
@ -13066,22 +13066,22 @@
"x-kubernetes-group-version-kind": [ "x-kubernetes-group-version-kind": [
{ {
"group": "resource.k8s.io", "group": "resource.k8s.io",
"kind": "PodScheduling", "kind": "PodSchedulingContext",
"version": "v1alpha2" "version": "v1alpha2"
} }
] ]
}, },
"io.k8s.api.resource.v1alpha2.PodSchedulingList": { "io.k8s.api.resource.v1alpha2.PodSchedulingContextList": {
"description": "PodSchedulingList is a collection of Pod scheduling objects.", "description": "PodSchedulingContextList is a collection of Pod scheduling objects.",
"properties": { "properties": {
"apiVersion": { "apiVersion": {
"description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
"type": "string" "type": "string"
}, },
"items": { "items": {
"description": "Items is the list of PodScheduling objects.", "description": "Items is the list of PodSchedulingContext objects.",
"items": { "items": {
"$ref": "#/definitions/io.k8s.api.resource.v1alpha2.PodScheduling" "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.PodSchedulingContext"
}, },
"type": "array" "type": "array"
}, },
@ -13101,13 +13101,13 @@
"x-kubernetes-group-version-kind": [ "x-kubernetes-group-version-kind": [
{ {
"group": "resource.k8s.io", "group": "resource.k8s.io",
"kind": "PodSchedulingList", "kind": "PodSchedulingContextList",
"version": "v1alpha2" "version": "v1alpha2"
} }
] ]
}, },
"io.k8s.api.resource.v1alpha2.PodSchedulingSpec": { "io.k8s.api.resource.v1alpha2.PodSchedulingContextSpec": {
"description": "PodSchedulingSpec describes where resources for the Pod are needed.", "description": "PodSchedulingContextSpec describes where resources for the Pod are needed.",
"properties": { "properties": {
"potentialNodes": { "potentialNodes": {
"description": "PotentialNodes lists nodes where the Pod might be able to run.\n\nThe size of this field is limited to 128. This is large enough for many clusters. Larger clusters may need more attempts to find a node that suits all pending resources. This may get increased in the future, but not reduced.", "description": "PotentialNodes lists nodes where the Pod might be able to run.\n\nThe size of this field is limited to 128. This is large enough for many clusters. Larger clusters may need more attempts to find a node that suits all pending resources. This may get increased in the future, but not reduced.",
@ -13124,8 +13124,8 @@
}, },
"type": "object" "type": "object"
}, },
"io.k8s.api.resource.v1alpha2.PodSchedulingStatus": { "io.k8s.api.resource.v1alpha2.PodSchedulingContextStatus": {
"description": "PodSchedulingStatus describes where resources for the Pod can be allocated.", "description": "PodSchedulingContextStatus describes where resources for the Pod can be allocated.",
"properties": { "properties": {
"resourceClaims": { "resourceClaims": {
"description": "ResourceClaims describes resource availability for each pod.spec.resourceClaim entry where the corresponding ResourceClaim uses \"WaitForFirstConsumer\" allocation mode.", "description": "ResourceClaims describes resource availability for each pod.spec.resourceClaim entry where the corresponding ResourceClaim uses \"WaitForFirstConsumer\" allocation mode.",
@ -78180,13 +78180,13 @@
] ]
} }
}, },
"/apis/resource.k8s.io/v1alpha2/namespaces/{namespace}/podschedulings": { "/apis/resource.k8s.io/v1alpha2/namespaces/{namespace}/podschedulingcontexts": {
"delete": { "delete": {
"consumes": [ "consumes": [
"*/*" "*/*"
], ],
"description": "delete collection of PodScheduling", "description": "delete collection of PodSchedulingContext",
"operationId": "deleteResourceV1alpha2CollectionNamespacedPodScheduling", "operationId": "deleteResourceV1alpha2CollectionNamespacedPodSchedulingContext",
"parameters": [ "parameters": [
{ {
"in": "body", "in": "body",
@ -78305,7 +78305,7 @@
"x-kubernetes-action": "deletecollection", "x-kubernetes-action": "deletecollection",
"x-kubernetes-group-version-kind": { "x-kubernetes-group-version-kind": {
"group": "resource.k8s.io", "group": "resource.k8s.io",
"kind": "PodScheduling", "kind": "PodSchedulingContext",
"version": "v1alpha2" "version": "v1alpha2"
} }
}, },
@ -78313,8 +78313,8 @@
"consumes": [ "consumes": [
"*/*" "*/*"
], ],
"description": "list or watch objects of kind PodScheduling", "description": "list or watch objects of kind PodSchedulingContext",
"operationId": "listResourceV1alpha2NamespacedPodScheduling", "operationId": "listResourceV1alpha2NamespacedPodSchedulingContext",
"parameters": [ "parameters": [
{ {
"description": "allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.", "description": "allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.",
@ -78398,7 +78398,7 @@
"200": { "200": {
"description": "OK", "description": "OK",
"schema": { "schema": {
"$ref": "#/definitions/io.k8s.api.resource.v1alpha2.PodSchedulingList" "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.PodSchedulingContextList"
} }
}, },
"401": { "401": {
@ -78414,7 +78414,7 @@
"x-kubernetes-action": "list", "x-kubernetes-action": "list",
"x-kubernetes-group-version-kind": { "x-kubernetes-group-version-kind": {
"group": "resource.k8s.io", "group": "resource.k8s.io",
"kind": "PodScheduling", "kind": "PodSchedulingContext",
"version": "v1alpha2" "version": "v1alpha2"
} }
}, },
@ -78439,15 +78439,15 @@
"consumes": [ "consumes": [
"*/*" "*/*"
], ],
"description": "create a PodScheduling", "description": "create a PodSchedulingContext",
"operationId": "createResourceV1alpha2NamespacedPodScheduling", "operationId": "createResourceV1alpha2NamespacedPodSchedulingContext",
"parameters": [ "parameters": [
{ {
"in": "body", "in": "body",
"name": "body", "name": "body",
"required": true, "required": true,
"schema": { "schema": {
"$ref": "#/definitions/io.k8s.api.resource.v1alpha2.PodScheduling" "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.PodSchedulingContext"
} }
}, },
{ {
@ -78481,19 +78481,19 @@
"200": { "200": {
"description": "OK", "description": "OK",
"schema": { "schema": {
"$ref": "#/definitions/io.k8s.api.resource.v1alpha2.PodScheduling" "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.PodSchedulingContext"
} }
}, },
"201": { "201": {
"description": "Created", "description": "Created",
"schema": { "schema": {
"$ref": "#/definitions/io.k8s.api.resource.v1alpha2.PodScheduling" "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.PodSchedulingContext"
} }
}, },
"202": { "202": {
"description": "Accepted", "description": "Accepted",
"schema": { "schema": {
"$ref": "#/definitions/io.k8s.api.resource.v1alpha2.PodScheduling" "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.PodSchedulingContext"
} }
}, },
"401": { "401": {
@ -78509,18 +78509,18 @@
"x-kubernetes-action": "post", "x-kubernetes-action": "post",
"x-kubernetes-group-version-kind": { "x-kubernetes-group-version-kind": {
"group": "resource.k8s.io", "group": "resource.k8s.io",
"kind": "PodScheduling", "kind": "PodSchedulingContext",
"version": "v1alpha2" "version": "v1alpha2"
} }
} }
}, },
"/apis/resource.k8s.io/v1alpha2/namespaces/{namespace}/podschedulings/{name}": { "/apis/resource.k8s.io/v1alpha2/namespaces/{namespace}/podschedulingcontexts/{name}": {
"delete": { "delete": {
"consumes": [ "consumes": [
"*/*" "*/*"
], ],
"description": "delete a PodScheduling", "description": "delete a PodSchedulingContext",
"operationId": "deleteResourceV1alpha2NamespacedPodScheduling", "operationId": "deleteResourceV1alpha2NamespacedPodSchedulingContext",
"parameters": [ "parameters": [
{ {
"in": "body", "in": "body",
@ -78567,13 +78567,13 @@
"200": { "200": {
"description": "OK", "description": "OK",
"schema": { "schema": {
"$ref": "#/definitions/io.k8s.api.resource.v1alpha2.PodScheduling" "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.PodSchedulingContext"
} }
}, },
"202": { "202": {
"description": "Accepted", "description": "Accepted",
"schema": { "schema": {
"$ref": "#/definitions/io.k8s.api.resource.v1alpha2.PodScheduling" "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.PodSchedulingContext"
} }
}, },
"401": { "401": {
@ -78589,7 +78589,7 @@
"x-kubernetes-action": "delete", "x-kubernetes-action": "delete",
"x-kubernetes-group-version-kind": { "x-kubernetes-group-version-kind": {
"group": "resource.k8s.io", "group": "resource.k8s.io",
"kind": "PodScheduling", "kind": "PodSchedulingContext",
"version": "v1alpha2" "version": "v1alpha2"
} }
}, },
@ -78597,8 +78597,8 @@
"consumes": [ "consumes": [
"*/*" "*/*"
], ],
"description": "read the specified PodScheduling", "description": "read the specified PodSchedulingContext",
"operationId": "readResourceV1alpha2NamespacedPodScheduling", "operationId": "readResourceV1alpha2NamespacedPodSchedulingContext",
"produces": [ "produces": [
"application/json", "application/json",
"application/yaml", "application/yaml",
@ -78608,7 +78608,7 @@
"200": { "200": {
"description": "OK", "description": "OK",
"schema": { "schema": {
"$ref": "#/definitions/io.k8s.api.resource.v1alpha2.PodScheduling" "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.PodSchedulingContext"
} }
}, },
"401": { "401": {
@ -78624,13 +78624,13 @@
"x-kubernetes-action": "get", "x-kubernetes-action": "get",
"x-kubernetes-group-version-kind": { "x-kubernetes-group-version-kind": {
"group": "resource.k8s.io", "group": "resource.k8s.io",
"kind": "PodScheduling", "kind": "PodSchedulingContext",
"version": "v1alpha2" "version": "v1alpha2"
} }
}, },
"parameters": [ "parameters": [
{ {
"description": "name of the PodScheduling", "description": "name of the PodSchedulingContext",
"in": "path", "in": "path",
"name": "name", "name": "name",
"required": true, "required": true,
@ -78660,8 +78660,8 @@
"application/strategic-merge-patch+json", "application/strategic-merge-patch+json",
"application/apply-patch+yaml" "application/apply-patch+yaml"
], ],
"description": "partially update the specified PodScheduling", "description": "partially update the specified PodSchedulingContext",
"operationId": "patchResourceV1alpha2NamespacedPodScheduling", "operationId": "patchResourceV1alpha2NamespacedPodSchedulingContext",
"parameters": [ "parameters": [
{ {
"in": "body", "in": "body",
@ -78709,13 +78709,13 @@
"200": { "200": {
"description": "OK", "description": "OK",
"schema": { "schema": {
"$ref": "#/definitions/io.k8s.api.resource.v1alpha2.PodScheduling" "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.PodSchedulingContext"
} }
}, },
"201": { "201": {
"description": "Created", "description": "Created",
"schema": { "schema": {
"$ref": "#/definitions/io.k8s.api.resource.v1alpha2.PodScheduling" "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.PodSchedulingContext"
} }
}, },
"401": { "401": {
@ -78731,7 +78731,7 @@
"x-kubernetes-action": "patch", "x-kubernetes-action": "patch",
"x-kubernetes-group-version-kind": { "x-kubernetes-group-version-kind": {
"group": "resource.k8s.io", "group": "resource.k8s.io",
"kind": "PodScheduling", "kind": "PodSchedulingContext",
"version": "v1alpha2" "version": "v1alpha2"
} }
}, },
@ -78739,15 +78739,15 @@
"consumes": [ "consumes": [
"*/*" "*/*"
], ],
"description": "replace the specified PodScheduling", "description": "replace the specified PodSchedulingContext",
"operationId": "replaceResourceV1alpha2NamespacedPodScheduling", "operationId": "replaceResourceV1alpha2NamespacedPodSchedulingContext",
"parameters": [ "parameters": [
{ {
"in": "body", "in": "body",
"name": "body", "name": "body",
"required": true, "required": true,
"schema": { "schema": {
"$ref": "#/definitions/io.k8s.api.resource.v1alpha2.PodScheduling" "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.PodSchedulingContext"
} }
}, },
{ {
@ -78781,13 +78781,13 @@
"200": { "200": {
"description": "OK", "description": "OK",
"schema": { "schema": {
"$ref": "#/definitions/io.k8s.api.resource.v1alpha2.PodScheduling" "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.PodSchedulingContext"
} }
}, },
"201": { "201": {
"description": "Created", "description": "Created",
"schema": { "schema": {
"$ref": "#/definitions/io.k8s.api.resource.v1alpha2.PodScheduling" "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.PodSchedulingContext"
} }
}, },
"401": { "401": {
@ -78803,18 +78803,18 @@
"x-kubernetes-action": "put", "x-kubernetes-action": "put",
"x-kubernetes-group-version-kind": { "x-kubernetes-group-version-kind": {
"group": "resource.k8s.io", "group": "resource.k8s.io",
"kind": "PodScheduling", "kind": "PodSchedulingContext",
"version": "v1alpha2" "version": "v1alpha2"
} }
} }
}, },
"/apis/resource.k8s.io/v1alpha2/namespaces/{namespace}/podschedulings/{name}/status": { "/apis/resource.k8s.io/v1alpha2/namespaces/{namespace}/podschedulingcontexts/{name}/status": {
"get": { "get": {
"consumes": [ "consumes": [
"*/*" "*/*"
], ],
"description": "read status of the specified PodScheduling", "description": "read status of the specified PodSchedulingContext",
"operationId": "readResourceV1alpha2NamespacedPodSchedulingStatus", "operationId": "readResourceV1alpha2NamespacedPodSchedulingContextStatus",
"produces": [ "produces": [
"application/json", "application/json",
"application/yaml", "application/yaml",
@ -78824,7 +78824,7 @@
"200": { "200": {
"description": "OK", "description": "OK",
"schema": { "schema": {
"$ref": "#/definitions/io.k8s.api.resource.v1alpha2.PodScheduling" "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.PodSchedulingContext"
} }
}, },
"401": { "401": {
@ -78840,13 +78840,13 @@
"x-kubernetes-action": "get", "x-kubernetes-action": "get",
"x-kubernetes-group-version-kind": { "x-kubernetes-group-version-kind": {
"group": "resource.k8s.io", "group": "resource.k8s.io",
"kind": "PodScheduling", "kind": "PodSchedulingContext",
"version": "v1alpha2" "version": "v1alpha2"
} }
}, },
"parameters": [ "parameters": [
{ {
"description": "name of the PodScheduling", "description": "name of the PodSchedulingContext",
"in": "path", "in": "path",
"name": "name", "name": "name",
"required": true, "required": true,
@ -78876,8 +78876,8 @@
"application/strategic-merge-patch+json", "application/strategic-merge-patch+json",
"application/apply-patch+yaml" "application/apply-patch+yaml"
], ],
"description": "partially update status of the specified PodScheduling", "description": "partially update status of the specified PodSchedulingContext",
"operationId": "patchResourceV1alpha2NamespacedPodSchedulingStatus", "operationId": "patchResourceV1alpha2NamespacedPodSchedulingContextStatus",
"parameters": [ "parameters": [
{ {
"in": "body", "in": "body",
@ -78925,13 +78925,13 @@
"200": { "200": {
"description": "OK", "description": "OK",
"schema": { "schema": {
"$ref": "#/definitions/io.k8s.api.resource.v1alpha2.PodScheduling" "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.PodSchedulingContext"
} }
}, },
"201": { "201": {
"description": "Created", "description": "Created",
"schema": { "schema": {
"$ref": "#/definitions/io.k8s.api.resource.v1alpha2.PodScheduling" "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.PodSchedulingContext"
} }
}, },
"401": { "401": {
@ -78947,7 +78947,7 @@
"x-kubernetes-action": "patch", "x-kubernetes-action": "patch",
"x-kubernetes-group-version-kind": { "x-kubernetes-group-version-kind": {
"group": "resource.k8s.io", "group": "resource.k8s.io",
"kind": "PodScheduling", "kind": "PodSchedulingContext",
"version": "v1alpha2" "version": "v1alpha2"
} }
}, },
@ -78955,15 +78955,15 @@
"consumes": [ "consumes": [
"*/*" "*/*"
], ],
"description": "replace status of the specified PodScheduling", "description": "replace status of the specified PodSchedulingContext",
"operationId": "replaceResourceV1alpha2NamespacedPodSchedulingStatus", "operationId": "replaceResourceV1alpha2NamespacedPodSchedulingContextStatus",
"parameters": [ "parameters": [
{ {
"in": "body", "in": "body",
"name": "body", "name": "body",
"required": true, "required": true,
"schema": { "schema": {
"$ref": "#/definitions/io.k8s.api.resource.v1alpha2.PodScheduling" "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.PodSchedulingContext"
} }
}, },
{ {
@ -78997,13 +78997,13 @@
"200": { "200": {
"description": "OK", "description": "OK",
"schema": { "schema": {
"$ref": "#/definitions/io.k8s.api.resource.v1alpha2.PodScheduling" "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.PodSchedulingContext"
} }
}, },
"201": { "201": {
"description": "Created", "description": "Created",
"schema": { "schema": {
"$ref": "#/definitions/io.k8s.api.resource.v1alpha2.PodScheduling" "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.PodSchedulingContext"
} }
}, },
"401": { "401": {
@ -79019,7 +79019,7 @@
"x-kubernetes-action": "put", "x-kubernetes-action": "put",
"x-kubernetes-group-version-kind": { "x-kubernetes-group-version-kind": {
"group": "resource.k8s.io", "group": "resource.k8s.io",
"kind": "PodScheduling", "kind": "PodSchedulingContext",
"version": "v1alpha2" "version": "v1alpha2"
} }
} }
@ -80496,13 +80496,13 @@
} }
} }
}, },
"/apis/resource.k8s.io/v1alpha2/podschedulings": { "/apis/resource.k8s.io/v1alpha2/podschedulingcontexts": {
"get": { "get": {
"consumes": [ "consumes": [
"*/*" "*/*"
], ],
"description": "list or watch objects of kind PodScheduling", "description": "list or watch objects of kind PodSchedulingContext",
"operationId": "listResourceV1alpha2PodSchedulingForAllNamespaces", "operationId": "listResourceV1alpha2PodSchedulingContextForAllNamespaces",
"produces": [ "produces": [
"application/json", "application/json",
"application/yaml", "application/yaml",
@ -80514,7 +80514,7 @@
"200": { "200": {
"description": "OK", "description": "OK",
"schema": { "schema": {
"$ref": "#/definitions/io.k8s.api.resource.v1alpha2.PodSchedulingList" "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.PodSchedulingContextList"
} }
}, },
"401": { "401": {
@ -80530,7 +80530,7 @@
"x-kubernetes-action": "list", "x-kubernetes-action": "list",
"x-kubernetes-group-version-kind": { "x-kubernetes-group-version-kind": {
"group": "resource.k8s.io", "group": "resource.k8s.io",
"kind": "PodScheduling", "kind": "PodSchedulingContext",
"version": "v1alpha2" "version": "v1alpha2"
} }
}, },
@ -81462,13 +81462,13 @@
} }
} }
}, },
"/apis/resource.k8s.io/v1alpha2/watch/namespaces/{namespace}/podschedulings": { "/apis/resource.k8s.io/v1alpha2/watch/namespaces/{namespace}/podschedulingcontexts": {
"get": { "get": {
"consumes": [ "consumes": [
"*/*" "*/*"
], ],
"description": "watch individual changes to a list of PodScheduling. deprecated: use the 'watch' parameter with a list operation instead.", "description": "watch individual changes to a list of PodSchedulingContext. deprecated: use the 'watch' parameter with a list operation instead.",
"operationId": "watchResourceV1alpha2NamespacedPodSchedulingList", "operationId": "watchResourceV1alpha2NamespacedPodSchedulingContextList",
"produces": [ "produces": [
"application/json", "application/json",
"application/yaml", "application/yaml",
@ -81496,7 +81496,7 @@
"x-kubernetes-action": "watchlist", "x-kubernetes-action": "watchlist",
"x-kubernetes-group-version-kind": { "x-kubernetes-group-version-kind": {
"group": "resource.k8s.io", "group": "resource.k8s.io",
"kind": "PodScheduling", "kind": "PodSchedulingContext",
"version": "v1alpha2" "version": "v1alpha2"
} }
}, },
@ -81588,13 +81588,13 @@
} }
] ]
}, },
"/apis/resource.k8s.io/v1alpha2/watch/namespaces/{namespace}/podschedulings/{name}": { "/apis/resource.k8s.io/v1alpha2/watch/namespaces/{namespace}/podschedulingcontexts/{name}": {
"get": { "get": {
"consumes": [ "consumes": [
"*/*" "*/*"
], ],
"description": "watch changes to an object of kind PodScheduling. deprecated: use the 'watch' parameter with a list operation instead, filtered to a single item with the 'fieldSelector' parameter.", "description": "watch changes to an object of kind PodSchedulingContext. deprecated: use the 'watch' parameter with a list operation instead, filtered to a single item with the 'fieldSelector' parameter.",
"operationId": "watchResourceV1alpha2NamespacedPodScheduling", "operationId": "watchResourceV1alpha2NamespacedPodSchedulingContext",
"produces": [ "produces": [
"application/json", "application/json",
"application/yaml", "application/yaml",
@ -81622,7 +81622,7 @@
"x-kubernetes-action": "watch", "x-kubernetes-action": "watch",
"x-kubernetes-group-version-kind": { "x-kubernetes-group-version-kind": {
"group": "resource.k8s.io", "group": "resource.k8s.io",
"kind": "PodScheduling", "kind": "PodSchedulingContext",
"version": "v1alpha2" "version": "v1alpha2"
} }
}, },
@ -81663,7 +81663,7 @@
"uniqueItems": true "uniqueItems": true
}, },
{ {
"description": "name of the PodScheduling", "description": "name of the PodSchedulingContext",
"in": "path", "in": "path",
"name": "name", "name": "name",
"required": true, "required": true,
@ -82242,13 +82242,13 @@
} }
] ]
}, },
"/apis/resource.k8s.io/v1alpha2/watch/podschedulings": { "/apis/resource.k8s.io/v1alpha2/watch/podschedulingcontexts": {
"get": { "get": {
"consumes": [ "consumes": [
"*/*" "*/*"
], ],
"description": "watch individual changes to a list of PodScheduling. deprecated: use the 'watch' parameter with a list operation instead.", "description": "watch individual changes to a list of PodSchedulingContext. deprecated: use the 'watch' parameter with a list operation instead.",
"operationId": "watchResourceV1alpha2PodSchedulingListForAllNamespaces", "operationId": "watchResourceV1alpha2PodSchedulingContextListForAllNamespaces",
"produces": [ "produces": [
"application/json", "application/json",
"application/yaml", "application/yaml",
@ -82276,7 +82276,7 @@
"x-kubernetes-action": "watchlist", "x-kubernetes-action": "watchlist",
"x-kubernetes-group-version-kind": { "x-kubernetes-group-version-kind": {
"group": "resource.k8s.io", "group": "resource.k8s.io",
"kind": "PodScheduling", "kind": "PodSchedulingContext",
"version": "v1alpha2" "version": "v1alpha2"
} }
}, },
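The hunks above cover the v2 swagger document, where references use #/definitions; the hunks below apply the identical rename to the OpenAPI v3 document, where references use #/components/schemas. As a hedged sketch only (field values invented, shape taken from the PodSchedulingContextList schema above plus standard Kubernetes list metadata), a GET on the renamed collection path /apis/resource.k8s.io/v1alpha2/namespaces/default/podschedulingcontexts would return roughly:

```json
{
  "apiVersion": "resource.k8s.io/v1alpha2",
  "kind": "PodSchedulingContextList",
  "metadata": {
    "resourceVersion": "12345"
  },
  "items": [
    {
      "metadata": {
        "name": "my-pod",
        "namespace": "default"
      },
      "spec": {
        "potentialNodes": [
          "node-a"
        ]
      },
      "status": {
        "resourceClaims": []
      }
    }
  ]
}
```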


@ -104,8 +104,8 @@
}, },
"type": "object" "type": "object"
}, },
"io.k8s.api.resource.v1alpha2.PodScheduling": { "io.k8s.api.resource.v1alpha2.PodSchedulingContext": {
"description": "PodScheduling objects hold information that is needed to schedule a Pod with ResourceClaims that use \"WaitForFirstConsumer\" allocation mode.\n\nThis is an alpha type and requires enabling the DynamicResourceAllocation feature gate.", "description": "PodSchedulingContext objects hold information that is needed to schedule a Pod with ResourceClaims that use \"WaitForFirstConsumer\" allocation mode.\n\nThis is an alpha type and requires enabling the DynamicResourceAllocation feature gate.",
"properties": { "properties": {
"apiVersion": { "apiVersion": {
"description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
@ -127,7 +127,7 @@
"spec": { "spec": {
"allOf": [ "allOf": [
{ {
"$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodSchedulingSpec" "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodSchedulingContextSpec"
} }
], ],
"default": {}, "default": {},
@ -136,7 +136,7 @@
"status": { "status": {
"allOf": [ "allOf": [
{ {
"$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodSchedulingStatus" "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodSchedulingContextStatus"
} }
], ],
"default": {}, "default": {},
@ -150,24 +150,24 @@
"x-kubernetes-group-version-kind": [ "x-kubernetes-group-version-kind": [
{ {
"group": "resource.k8s.io", "group": "resource.k8s.io",
"kind": "PodScheduling", "kind": "PodSchedulingContext",
"version": "v1alpha2" "version": "v1alpha2"
} }
] ]
}, },
"io.k8s.api.resource.v1alpha2.PodSchedulingList": { "io.k8s.api.resource.v1alpha2.PodSchedulingContextList": {
"description": "PodSchedulingList is a collection of Pod scheduling objects.", "description": "PodSchedulingContextList is a collection of Pod scheduling objects.",
"properties": { "properties": {
"apiVersion": { "apiVersion": {
"description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
"type": "string" "type": "string"
}, },
"items": { "items": {
"description": "Items is the list of PodScheduling objects.", "description": "Items is the list of PodSchedulingContext objects.",
"items": { "items": {
"allOf": [ "allOf": [
{ {
"$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodScheduling" "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodSchedulingContext"
} }
], ],
"default": {} "default": {}
@ -195,13 +195,13 @@
"x-kubernetes-group-version-kind": [ "x-kubernetes-group-version-kind": [
{ {
"group": "resource.k8s.io", "group": "resource.k8s.io",
"kind": "PodSchedulingList", "kind": "PodSchedulingContextList",
"version": "v1alpha2" "version": "v1alpha2"
} }
] ]
}, },
"io.k8s.api.resource.v1alpha2.PodSchedulingSpec": { "io.k8s.api.resource.v1alpha2.PodSchedulingContextSpec": {
"description": "PodSchedulingSpec describes where resources for the Pod are needed.", "description": "PodSchedulingContextSpec describes where resources for the Pod are needed.",
"properties": { "properties": {
"potentialNodes": { "potentialNodes": {
"description": "PotentialNodes lists nodes where the Pod might be able to run.\n\nThe size of this field is limited to 128. This is large enough for many clusters. Larger clusters may need more attempts to find a node that suits all pending resources. This may get increased in the future, but not reduced.", "description": "PotentialNodes lists nodes where the Pod might be able to run.\n\nThe size of this field is limited to 128. This is large enough for many clusters. Larger clusters may need more attempts to find a node that suits all pending resources. This may get increased in the future, but not reduced.",
@ -219,8 +219,8 @@
}, },
"type": "object" "type": "object"
}, },
"io.k8s.api.resource.v1alpha2.PodSchedulingStatus": { "io.k8s.api.resource.v1alpha2.PodSchedulingContextStatus": {
"description": "PodSchedulingStatus describes where resources for the Pod can be allocated.", "description": "PodSchedulingContextStatus describes where resources for the Pod can be allocated.",
"properties": { "properties": {
"resourceClaims": { "resourceClaims": {
"description": "ResourceClaims describes resource availability for each pod.spec.resourceClaim entry where the corresponding ResourceClaim uses \"WaitForFirstConsumer\" allocation mode.", "description": "ResourceClaims describes resource availability for each pod.spec.resourceClaim entry where the corresponding ResourceClaim uses \"WaitForFirstConsumer\" allocation mode.",
@ -1878,10 +1878,10 @@
] ]
} }
}, },
"/apis/resource.k8s.io/v1alpha2/namespaces/{namespace}/podschedulings": { "/apis/resource.k8s.io/v1alpha2/namespaces/{namespace}/podschedulingcontexts": {
"delete": { "delete": {
"description": "delete collection of PodScheduling", "description": "delete collection of PodSchedulingContext",
"operationId": "deleteResourceV1alpha2CollectionNamespacedPodScheduling", "operationId": "deleteResourceV1alpha2CollectionNamespacedPodSchedulingContext",
"parameters": [ "parameters": [
{ {
"description": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.", "description": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.",
@ -2032,13 +2032,13 @@
"x-kubernetes-action": "deletecollection", "x-kubernetes-action": "deletecollection",
"x-kubernetes-group-version-kind": { "x-kubernetes-group-version-kind": {
"group": "resource.k8s.io", "group": "resource.k8s.io",
"kind": "PodScheduling", "kind": "PodSchedulingContext",
"version": "v1alpha2" "version": "v1alpha2"
} }
}, },
"get": { "get": {
"description": "list or watch objects of kind PodScheduling", "description": "list or watch objects of kind PodSchedulingContext",
"operationId": "listResourceV1alpha2NamespacedPodScheduling", "operationId": "listResourceV1alpha2NamespacedPodSchedulingContext",
"parameters": [ "parameters": [
{ {
"description": "allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.", "description": "allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.",
@ -2136,27 +2136,27 @@
"content": { "content": {
"application/json": { "application/json": {
"schema": { "schema": {
"$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodSchedulingList" "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodSchedulingContextList"
} }
}, },
"application/json;stream=watch": { "application/json;stream=watch": {
"schema": { "schema": {
"$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodSchedulingList" "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodSchedulingContextList"
} }
}, },
"application/vnd.kubernetes.protobuf": { "application/vnd.kubernetes.protobuf": {
"schema": { "schema": {
"$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodSchedulingList" "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodSchedulingContextList"
} }
}, },
"application/vnd.kubernetes.protobuf;stream=watch": { "application/vnd.kubernetes.protobuf;stream=watch": {
"schema": { "schema": {
"$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodSchedulingList" "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodSchedulingContextList"
} }
}, },
"application/yaml": { "application/yaml": {
"schema": { "schema": {
"$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodSchedulingList" "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodSchedulingContextList"
} }
} }
}, },
@ -2172,7 +2172,7 @@
"x-kubernetes-action": "list", "x-kubernetes-action": "list",
"x-kubernetes-group-version-kind": { "x-kubernetes-group-version-kind": {
"group": "resource.k8s.io", "group": "resource.k8s.io",
"kind": "PodScheduling", "kind": "PodSchedulingContext",
"version": "v1alpha2" "version": "v1alpha2"
} }
}, },
@ -2198,8 +2198,8 @@
} }
], ],
"post": { "post": {
"description": "create a PodScheduling", "description": "create a PodSchedulingContext",
"operationId": "createResourceV1alpha2NamespacedPodScheduling", "operationId": "createResourceV1alpha2NamespacedPodSchedulingContext",
"parameters": [ "parameters": [
{ {
"description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
@ -2233,7 +2233,7 @@
"content": { "content": {
"*/*": { "*/*": {
"schema": { "schema": {
"$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodScheduling" "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodSchedulingContext"
} }
} }
} }
@ -2243,17 +2243,17 @@
"content": { "content": {
"application/json": { "application/json": {
"schema": { "schema": {
"$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodScheduling" "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodSchedulingContext"
} }
}, },
"application/vnd.kubernetes.protobuf": { "application/vnd.kubernetes.protobuf": {
"schema": { "schema": {
"$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodScheduling" "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodSchedulingContext"
} }
}, },
"application/yaml": { "application/yaml": {
"schema": { "schema": {
"$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodScheduling" "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodSchedulingContext"
} }
} }
}, },
@ -2263,17 +2263,17 @@
"content": { "content": {
"application/json": { "application/json": {
"schema": { "schema": {
"$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodScheduling" "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodSchedulingContext"
} }
}, },
"application/vnd.kubernetes.protobuf": { "application/vnd.kubernetes.protobuf": {
"schema": { "schema": {
"$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodScheduling" "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodSchedulingContext"
} }
}, },
"application/yaml": { "application/yaml": {
"schema": { "schema": {
"$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodScheduling" "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodSchedulingContext"
} }
} }
}, },
@ -2283,17 +2283,17 @@
"content": { "content": {
"application/json": { "application/json": {
"schema": { "schema": {
"$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodScheduling" "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodSchedulingContext"
} }
}, },
"application/vnd.kubernetes.protobuf": { "application/vnd.kubernetes.protobuf": {
"schema": { "schema": {
"$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodScheduling" "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodSchedulingContext"
} }
}, },
"application/yaml": { "application/yaml": {
"schema": { "schema": {
"$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodScheduling" "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodSchedulingContext"
} }
} }
}, },
@ -2309,15 +2309,15 @@
"x-kubernetes-action": "post", "x-kubernetes-action": "post",
"x-kubernetes-group-version-kind": { "x-kubernetes-group-version-kind": {
"group": "resource.k8s.io", "group": "resource.k8s.io",
"kind": "PodScheduling", "kind": "PodSchedulingContext",
"version": "v1alpha2" "version": "v1alpha2"
} }
} }
}, },
"/apis/resource.k8s.io/v1alpha2/namespaces/{namespace}/podschedulings/{name}": { "/apis/resource.k8s.io/v1alpha2/namespaces/{namespace}/podschedulingcontexts/{name}": {
"delete": { "delete": {
"description": "delete a PodScheduling", "description": "delete a PodSchedulingContext",
"operationId": "deleteResourceV1alpha2NamespacedPodScheduling", "operationId": "deleteResourceV1alpha2NamespacedPodSchedulingContext",
"parameters": [ "parameters": [
{ {
"description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
@ -2370,17 +2370,17 @@
"content": { "content": {
"application/json": { "application/json": {
"schema": { "schema": {
"$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodScheduling" "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodSchedulingContext"
} }
}, },
"application/vnd.kubernetes.protobuf": { "application/vnd.kubernetes.protobuf": {
"schema": { "schema": {
"$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodScheduling" "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodSchedulingContext"
} }
}, },
"application/yaml": { "application/yaml": {
"schema": { "schema": {
"$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodScheduling" "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodSchedulingContext"
} }
} }
}, },
@ -2390,17 +2390,17 @@
"content": { "content": {
"application/json": { "application/json": {
"schema": { "schema": {
"$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodScheduling" "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodSchedulingContext"
} }
}, },
"application/vnd.kubernetes.protobuf": { "application/vnd.kubernetes.protobuf": {
"schema": { "schema": {
"$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodScheduling" "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodSchedulingContext"
} }
}, },
"application/yaml": { "application/yaml": {
"schema": { "schema": {
"$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodScheduling" "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodSchedulingContext"
} }
} }
}, },
@ -2416,29 +2416,29 @@
"x-kubernetes-action": "delete", "x-kubernetes-action": "delete",
"x-kubernetes-group-version-kind": { "x-kubernetes-group-version-kind": {
"group": "resource.k8s.io", "group": "resource.k8s.io",
"kind": "PodScheduling", "kind": "PodSchedulingContext",
"version": "v1alpha2" "version": "v1alpha2"
} }
}, },
"get": { "get": {
"description": "read the specified PodScheduling", "description": "read the specified PodSchedulingContext",
"operationId": "readResourceV1alpha2NamespacedPodScheduling", "operationId": "readResourceV1alpha2NamespacedPodSchedulingContext",
"responses": { "responses": {
"200": { "200": {
"content": { "content": {
"application/json": { "application/json": {
"schema": { "schema": {
"$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodScheduling" "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodSchedulingContext"
} }
}, },
"application/vnd.kubernetes.protobuf": { "application/vnd.kubernetes.protobuf": {
"schema": { "schema": {
"$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodScheduling" "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodSchedulingContext"
} }
}, },
"application/yaml": { "application/yaml": {
"schema": { "schema": {
"$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodScheduling" "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodSchedulingContext"
} }
} }
}, },
@ -2454,13 +2454,13 @@
"x-kubernetes-action": "get", "x-kubernetes-action": "get",
"x-kubernetes-group-version-kind": { "x-kubernetes-group-version-kind": {
"group": "resource.k8s.io", "group": "resource.k8s.io",
"kind": "PodScheduling", "kind": "PodSchedulingContext",
"version": "v1alpha2" "version": "v1alpha2"
} }
}, },
"parameters": [ "parameters": [
{ {
"description": "name of the PodScheduling", "description": "name of the PodSchedulingContext",
"in": "path", "in": "path",
"name": "name", "name": "name",
"required": true, "required": true,
@ -2490,8 +2490,8 @@
} }
], ],
"patch": { "patch": {
"description": "partially update the specified PodScheduling", "description": "partially update the specified PodSchedulingContext",
"operationId": "patchResourceV1alpha2NamespacedPodScheduling", "operationId": "patchResourceV1alpha2NamespacedPodSchedulingContext",
"parameters": [ "parameters": [
{ {
"description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
@ -2559,17 +2559,17 @@
"content": { "content": {
"application/json": { "application/json": {
"schema": { "schema": {
"$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodScheduling" "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodSchedulingContext"
} }
}, },
"application/vnd.kubernetes.protobuf": { "application/vnd.kubernetes.protobuf": {
"schema": { "schema": {
"$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodScheduling" "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodSchedulingContext"
} }
}, },
"application/yaml": { "application/yaml": {
"schema": { "schema": {
"$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodScheduling" "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodSchedulingContext"
} }
} }
}, },
@ -2579,17 +2579,17 @@
"content": { "content": {
"application/json": { "application/json": {
"schema": { "schema": {
"$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodScheduling" "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodSchedulingContext"
} }
}, },
"application/vnd.kubernetes.protobuf": { "application/vnd.kubernetes.protobuf": {
"schema": { "schema": {
"$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodScheduling" "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodSchedulingContext"
} }
}, },
"application/yaml": { "application/yaml": {
"schema": { "schema": {
"$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodScheduling" "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodSchedulingContext"
} }
} }
}, },
@ -2605,13 +2605,13 @@
"x-kubernetes-action": "patch", "x-kubernetes-action": "patch",
"x-kubernetes-group-version-kind": { "x-kubernetes-group-version-kind": {
"group": "resource.k8s.io", "group": "resource.k8s.io",
"kind": "PodScheduling", "kind": "PodSchedulingContext",
"version": "v1alpha2" "version": "v1alpha2"
} }
}, },
"put": { "put": {
"description": "replace the specified PodScheduling", "description": "replace the specified PodSchedulingContext",
"operationId": "replaceResourceV1alpha2NamespacedPodScheduling", "operationId": "replaceResourceV1alpha2NamespacedPodSchedulingContext",
"parameters": [ "parameters": [
{ {
"description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
@ -2645,7 +2645,7 @@
"content": { "content": {
"*/*": { "*/*": {
"schema": { "schema": {
"$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodScheduling" "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodSchedulingContext"
} }
} }
} }
@ -2655,17 +2655,17 @@
"content": { "content": {
"application/json": { "application/json": {
"schema": { "schema": {
"$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodScheduling" "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodSchedulingContext"
} }
}, },
"application/vnd.kubernetes.protobuf": { "application/vnd.kubernetes.protobuf": {
"schema": { "schema": {
"$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodScheduling" "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodSchedulingContext"
} }
}, },
"application/yaml": { "application/yaml": {
"schema": { "schema": {
"$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodScheduling" "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodSchedulingContext"
} }
} }
}, },
@ -2675,17 +2675,17 @@
"content": { "content": {
"application/json": { "application/json": {
"schema": { "schema": {
"$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodScheduling" "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodSchedulingContext"
} }
}, },
"application/vnd.kubernetes.protobuf": { "application/vnd.kubernetes.protobuf": {
"schema": { "schema": {
"$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodScheduling" "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodSchedulingContext"
} }
}, },
"application/yaml": { "application/yaml": {
"schema": { "schema": {
"$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodScheduling" "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodSchedulingContext"
} }
} }
}, },
@ -2701,31 +2701,31 @@
"x-kubernetes-action": "put", "x-kubernetes-action": "put",
"x-kubernetes-group-version-kind": { "x-kubernetes-group-version-kind": {
"group": "resource.k8s.io", "group": "resource.k8s.io",
"kind": "PodScheduling", "kind": "PodSchedulingContext",
"version": "v1alpha2" "version": "v1alpha2"
} }
} }
}, },
"/apis/resource.k8s.io/v1alpha2/namespaces/{namespace}/podschedulings/{name}/status": { "/apis/resource.k8s.io/v1alpha2/namespaces/{namespace}/podschedulingcontexts/{name}/status": {
"get": { "get": {
"description": "read status of the specified PodScheduling", "description": "read status of the specified PodSchedulingContext",
"operationId": "readResourceV1alpha2NamespacedPodSchedulingStatus", "operationId": "readResourceV1alpha2NamespacedPodSchedulingContextStatus",
"responses": { "responses": {
"200": { "200": {
"content": { "content": {
"application/json": { "application/json": {
"schema": { "schema": {
"$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodScheduling" "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodSchedulingContext"
} }
}, },
"application/vnd.kubernetes.protobuf": { "application/vnd.kubernetes.protobuf": {
"schema": { "schema": {
"$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodScheduling" "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodSchedulingContext"
} }
}, },
"application/yaml": { "application/yaml": {
"schema": { "schema": {
"$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodScheduling" "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodSchedulingContext"
} }
} }
}, },
@ -2741,13 +2741,13 @@
"x-kubernetes-action": "get", "x-kubernetes-action": "get",
"x-kubernetes-group-version-kind": { "x-kubernetes-group-version-kind": {
"group": "resource.k8s.io", "group": "resource.k8s.io",
"kind": "PodScheduling", "kind": "PodSchedulingContext",
"version": "v1alpha2" "version": "v1alpha2"
} }
}, },
"parameters": [ "parameters": [
{ {
"description": "name of the PodScheduling", "description": "name of the PodSchedulingContext",
"in": "path", "in": "path",
"name": "name", "name": "name",
"required": true, "required": true,
@ -2777,8 +2777,8 @@
} }
], ],
"patch": { "patch": {
"description": "partially update status of the specified PodScheduling", "description": "partially update status of the specified PodSchedulingContext",
"operationId": "patchResourceV1alpha2NamespacedPodSchedulingStatus", "operationId": "patchResourceV1alpha2NamespacedPodSchedulingContextStatus",
"parameters": [ "parameters": [
{ {
"description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
@ -2846,17 +2846,17 @@
"content": { "content": {
"application/json": { "application/json": {
"schema": { "schema": {
"$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodScheduling" "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodSchedulingContext"
} }
}, },
"application/vnd.kubernetes.protobuf": { "application/vnd.kubernetes.protobuf": {
"schema": { "schema": {
"$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodScheduling" "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodSchedulingContext"
} }
}, },
"application/yaml": { "application/yaml": {
"schema": { "schema": {
"$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodScheduling" "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodSchedulingContext"
} }
} }
}, },
@ -2866,17 +2866,17 @@
"content": { "content": {
"application/json": { "application/json": {
"schema": { "schema": {
"$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodScheduling" "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodSchedulingContext"
} }
}, },
"application/vnd.kubernetes.protobuf": { "application/vnd.kubernetes.protobuf": {
"schema": { "schema": {
"$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodScheduling" "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodSchedulingContext"
} }
}, },
"application/yaml": { "application/yaml": {
"schema": { "schema": {
"$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodScheduling" "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodSchedulingContext"
} }
} }
}, },
@ -2892,13 +2892,13 @@
"x-kubernetes-action": "patch", "x-kubernetes-action": "patch",
"x-kubernetes-group-version-kind": { "x-kubernetes-group-version-kind": {
"group": "resource.k8s.io", "group": "resource.k8s.io",
"kind": "PodScheduling", "kind": "PodSchedulingContext",
"version": "v1alpha2" "version": "v1alpha2"
} }
}, },
"put": { "put": {
"description": "replace status of the specified PodScheduling", "description": "replace status of the specified PodSchedulingContext",
"operationId": "replaceResourceV1alpha2NamespacedPodSchedulingStatus", "operationId": "replaceResourceV1alpha2NamespacedPodSchedulingContextStatus",
"parameters": [ "parameters": [
{ {
"description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
@ -2932,7 +2932,7 @@
"content": { "content": {
"*/*": { "*/*": {
"schema": { "schema": {
"$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodScheduling" "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodSchedulingContext"
} }
} }
} }
@ -2942,17 +2942,17 @@
"content": { "content": {
"application/json": { "application/json": {
"schema": { "schema": {
"$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodScheduling" "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodSchedulingContext"
} }
}, },
"application/vnd.kubernetes.protobuf": { "application/vnd.kubernetes.protobuf": {
"schema": { "schema": {
"$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodScheduling" "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodSchedulingContext"
} }
}, },
"application/yaml": { "application/yaml": {
"schema": { "schema": {
"$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodScheduling" "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodSchedulingContext"
} }
} }
}, },
@ -2962,17 +2962,17 @@
"content": { "content": {
"application/json": { "application/json": {
"schema": { "schema": {
"$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodScheduling" "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodSchedulingContext"
} }
}, },
"application/vnd.kubernetes.protobuf": { "application/vnd.kubernetes.protobuf": {
"schema": { "schema": {
"$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodScheduling" "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodSchedulingContext"
} }
}, },
"application/yaml": { "application/yaml": {
"schema": { "schema": {
"$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodScheduling" "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodSchedulingContext"
} }
} }
}, },
@ -2988,7 +2988,7 @@
"x-kubernetes-action": "put", "x-kubernetes-action": "put",
"x-kubernetes-group-version-kind": { "x-kubernetes-group-version-kind": {
"group": "resource.k8s.io", "group": "resource.k8s.io",
"kind": "PodScheduling", "kind": "PodSchedulingContext",
"version": "v1alpha2" "version": "v1alpha2"
} }
} }
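
The renamed status subresource keeps the usual patch and replace semantics; only the kind and path change. As a rough, hedged sketch (the namespace, object name, and claim name below are placeholders, and the dynamic client is just one of several ways to reach this endpoint), a status merge-patch against the renamed resource could look like this:

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/dynamic"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Assumed: a reachable cluster with the DynamicResourceAllocation
	// feature gate enabled and a kubeconfig in the default location.
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	dyn, err := dynamic.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	// The plural matches the new path segment "podschedulingcontexts".
	gvr := schema.GroupVersionResource{Group: "resource.k8s.io", Version: "v1alpha2", Resource: "podschedulingcontexts"}

	// Merge-patch the status subresource; "my-pod" and "my-claim" are placeholders.
	patch := []byte(`{"status":{"resourceClaims":[{"name":"my-claim"}]}}`)
	obj, err := dyn.Resource(gvr).Namespace("default").Patch(
		context.TODO(), "my-pod", types.MergePatchType, patch, metav1.PatchOptions{}, "status")
	if err != nil {
		panic(err)
	}
	fmt.Println("patched", obj.GetName())
}
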
@ -4936,36 +4936,36 @@
} }
} }
}, },
"/apis/resource.k8s.io/v1alpha2/podschedulings": { "/apis/resource.k8s.io/v1alpha2/podschedulingcontexts": {
"get": { "get": {
"description": "list or watch objects of kind PodScheduling", "description": "list or watch objects of kind PodSchedulingContext",
"operationId": "listResourceV1alpha2PodSchedulingForAllNamespaces", "operationId": "listResourceV1alpha2PodSchedulingContextForAllNamespaces",
"responses": { "responses": {
"200": { "200": {
"content": { "content": {
"application/json": { "application/json": {
"schema": { "schema": {
"$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodSchedulingList" "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodSchedulingContextList"
} }
}, },
"application/json;stream=watch": { "application/json;stream=watch": {
"schema": { "schema": {
"$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodSchedulingList" "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodSchedulingContextList"
} }
}, },
"application/vnd.kubernetes.protobuf": { "application/vnd.kubernetes.protobuf": {
"schema": { "schema": {
"$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodSchedulingList" "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodSchedulingContextList"
} }
}, },
"application/vnd.kubernetes.protobuf;stream=watch": { "application/vnd.kubernetes.protobuf;stream=watch": {
"schema": { "schema": {
"$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodSchedulingList" "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodSchedulingContextList"
} }
}, },
"application/yaml": { "application/yaml": {
"schema": { "schema": {
"$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodSchedulingList" "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodSchedulingContextList"
} }
} }
}, },
@ -4981,7 +4981,7 @@
"x-kubernetes-action": "list", "x-kubernetes-action": "list",
"x-kubernetes-group-version-kind": { "x-kubernetes-group-version-kind": {
"group": "resource.k8s.io", "group": "resource.k8s.io",
"kind": "PodScheduling", "kind": "PodSchedulingContext",
"version": "v1alpha2" "version": "v1alpha2"
} }
}, },
@ -6197,10 +6197,10 @@
} }
} }
}, },
"/apis/resource.k8s.io/v1alpha2/watch/namespaces/{namespace}/podschedulings": { "/apis/resource.k8s.io/v1alpha2/watch/namespaces/{namespace}/podschedulingcontexts": {
"get": { "get": {
"description": "watch individual changes to a list of PodScheduling. deprecated: use the 'watch' parameter with a list operation instead.", "description": "watch individual changes to a list of PodSchedulingContext. deprecated: use the 'watch' parameter with a list operation instead.",
"operationId": "watchResourceV1alpha2NamespacedPodSchedulingList", "operationId": "watchResourceV1alpha2NamespacedPodSchedulingContextList",
"responses": { "responses": {
"200": { "200": {
"content": { "content": {
@ -6242,7 +6242,7 @@
"x-kubernetes-action": "watchlist", "x-kubernetes-action": "watchlist",
"x-kubernetes-group-version-kind": { "x-kubernetes-group-version-kind": {
"group": "resource.k8s.io", "group": "resource.k8s.io",
"kind": "PodScheduling", "kind": "PodSchedulingContext",
"version": "v1alpha2" "version": "v1alpha2"
} }
}, },
@ -6358,10 +6358,10 @@
} }
] ]
}, },
"/apis/resource.k8s.io/v1alpha2/watch/namespaces/{namespace}/podschedulings/{name}": { "/apis/resource.k8s.io/v1alpha2/watch/namespaces/{namespace}/podschedulingcontexts/{name}": {
"get": { "get": {
"description": "watch changes to an object of kind PodScheduling. deprecated: use the 'watch' parameter with a list operation instead, filtered to a single item with the 'fieldSelector' parameter.", "description": "watch changes to an object of kind PodSchedulingContext. deprecated: use the 'watch' parameter with a list operation instead, filtered to a single item with the 'fieldSelector' parameter.",
"operationId": "watchResourceV1alpha2NamespacedPodScheduling", "operationId": "watchResourceV1alpha2NamespacedPodSchedulingContext",
"responses": { "responses": {
"200": { "200": {
"content": { "content": {
@ -6403,7 +6403,7 @@
"x-kubernetes-action": "watch", "x-kubernetes-action": "watch",
"x-kubernetes-group-version-kind": { "x-kubernetes-group-version-kind": {
"group": "resource.k8s.io", "group": "resource.k8s.io",
"kind": "PodScheduling", "kind": "PodSchedulingContext",
"version": "v1alpha2" "version": "v1alpha2"
} }
}, },
@ -6454,7 +6454,7 @@
} }
}, },
{ {
"description": "name of the PodScheduling", "description": "name of the PodSchedulingContext",
"in": "path", "in": "path",
"name": "name", "name": "name",
"required": true, "required": true,
@ -7193,10 +7193,10 @@
} }
] ]
}, },
"/apis/resource.k8s.io/v1alpha2/watch/podschedulings": { "/apis/resource.k8s.io/v1alpha2/watch/podschedulingcontexts": {
"get": { "get": {
"description": "watch individual changes to a list of PodScheduling. deprecated: use the 'watch' parameter with a list operation instead.", "description": "watch individual changes to a list of PodSchedulingContext. deprecated: use the 'watch' parameter with a list operation instead.",
"operationId": "watchResourceV1alpha2PodSchedulingListForAllNamespaces", "operationId": "watchResourceV1alpha2PodSchedulingContextListForAllNamespaces",
"responses": { "responses": {
"200": { "200": {
"content": { "content": {
@ -7238,7 +7238,7 @@
"x-kubernetes-action": "watchlist", "x-kubernetes-action": "watchlist",
"x-kubernetes-group-version-kind": { "x-kubernetes-group-version-kind": {
"group": "resource.k8s.io", "group": "resource.k8s.io",
"kind": "PodScheduling", "kind": "PodSchedulingContext",
"version": "v1alpha2" "version": "v1alpha2"
} }
}, },
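
The cluster-wide list and watch paths are renamed the same way. A minimal sketch of consuming the new list endpoint through the dynamic client, assuming a client has already been constructed as in the earlier sketch:

package example

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/dynamic"
)

// listAllPodSchedulingContexts mirrors
// GET /apis/resource.k8s.io/v1alpha2/podschedulingcontexts across all namespaces.
func listAllPodSchedulingContexts(ctx context.Context, dyn dynamic.Interface) error {
	gvr := schema.GroupVersionResource{Group: "resource.k8s.io", Version: "v1alpha2", Resource: "podschedulingcontexts"}
	list, err := dyn.Resource(gvr).List(ctx, metav1.ListOptions{})
	if err != nil {
		return err
	}
	for _, item := range list.Items {
		fmt.Println(item.GetNamespace(), item.GetName())
	}
	return nil
}
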

View File

@ -58,8 +58,8 @@ func addKnownTypes(scheme *runtime.Scheme) error {
&ResourceClaimList{}, &ResourceClaimList{},
&ResourceClaimTemplate{}, &ResourceClaimTemplate{},
&ResourceClaimTemplateList{}, &ResourceClaimTemplateList{},
&PodScheduling{}, &PodSchedulingContext{},
&PodSchedulingList{}, &PodSchedulingContextList{},
) )
return nil return nil
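
Once addKnownTypes has registered the renamed kinds, a scheme can instantiate them from their GroupVersionKind. The sketch below uses the published k8s.io/api/resource/v1alpha2 package, whose AddToScheme and SchemeGroupVersion helpers mirror this internal registration (an assumption, not part of this diff):

package example

import (
	"fmt"

	resourcev1alpha2 "k8s.io/api/resource/v1alpha2"
	"k8s.io/apimachinery/pkg/runtime"
)

// newFromScheme shows the effect of registering the renamed kind: the scheme
// can now mint an instance of it by GroupVersionKind.
func newFromScheme() (runtime.Object, error) {
	scheme := runtime.NewScheme()
	if err := resourcev1alpha2.AddToScheme(scheme); err != nil {
		return nil, err
	}
	obj, err := scheme.New(resourcev1alpha2.SchemeGroupVersion.WithKind("PodSchedulingContext"))
	if err != nil {
		return nil, err
	}
	fmt.Printf("%T\n", obj) // expected: *v1alpha2.PodSchedulingContext
	return obj, nil
}
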

View File

@ -173,27 +173,27 @@ type ResourceClaimList struct {
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// PodScheduling objects hold information that is needed to schedule // PodSchedulingContext objects hold information that is needed to schedule
// a Pod with ResourceClaims that use "WaitForFirstConsumer" allocation // a Pod with ResourceClaims that use "WaitForFirstConsumer" allocation
// mode. // mode.
// //
// This is an alpha type and requires enabling the DynamicResourceAllocation // This is an alpha type and requires enabling the DynamicResourceAllocation
// feature gate. // feature gate.
type PodScheduling struct { type PodSchedulingContext struct {
metav1.TypeMeta metav1.TypeMeta
// Standard object metadata // Standard object metadata
// +optional // +optional
metav1.ObjectMeta metav1.ObjectMeta
// Spec describes where resources for the Pod are needed. // Spec describes where resources for the Pod are needed.
Spec PodSchedulingSpec Spec PodSchedulingContextSpec
// Status describes where resources for the Pod can be allocated. // Status describes where resources for the Pod can be allocated.
Status PodSchedulingStatus Status PodSchedulingContextStatus
} }
// PodSchedulingSpec describes where resources for the Pod are needed. // PodSchedulingContextSpec describes where resources for the Pod are needed.
type PodSchedulingSpec struct { type PodSchedulingContextSpec struct {
// SelectedNode is the node for which allocation of ResourceClaims that // SelectedNode is the node for which allocation of ResourceClaims that
// are referenced by the Pod and that use "WaitForFirstConsumer" // are referenced by the Pod and that use "WaitForFirstConsumer"
// allocation is to be attempted. // allocation is to be attempted.
@ -209,8 +209,8 @@ type PodSchedulingSpec struct {
PotentialNodes []string PotentialNodes []string
} }
// PodSchedulingStatus describes where resources for the Pod can be allocated. // PodSchedulingContextStatus describes where resources for the Pod can be allocated.
type PodSchedulingStatus struct { type PodSchedulingContextStatus struct {
// ResourceClaims describes resource availability for each // ResourceClaims describes resource availability for each
// pod.spec.resourceClaim entry where the corresponding ResourceClaim // pod.spec.resourceClaim entry where the corresponding ResourceClaim
// uses "WaitForFirstConsumer" allocation mode. // uses "WaitForFirstConsumer" allocation mode.
@ -239,21 +239,21 @@ type ResourceClaimSchedulingStatus struct {
} }
// PodSchedulingNodeListMaxSize defines the maximum number of entries in the // PodSchedulingNodeListMaxSize defines the maximum number of entries in the
// node lists that are stored in PodScheduling objects. This limit is part // node lists that are stored in PodSchedulingContext objects. This limit is part
// of the API. // of the API.
const PodSchedulingNodeListMaxSize = 128 const PodSchedulingNodeListMaxSize = 128
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// PodSchedulingList is a collection of Pod scheduling objects. // PodSchedulingContextList is a collection of Pod scheduling objects.
type PodSchedulingList struct { type PodSchedulingContextList struct {
metav1.TypeMeta metav1.TypeMeta
// Standard list metadata // Standard list metadata
// +optional // +optional
metav1.ListMeta metav1.ListMeta
// Items is the list of PodScheduling objects. // Items is the list of PodSchedulingContext objects.
Items []PodScheduling Items []PodSchedulingContext
} }
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
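
For orientation, a minimal object exercising the renamed spec and status fields might look as follows. It uses the versioned k8s.io/api/resource/v1alpha2 types, which mirror the internal types above; the node and claim names are placeholders:

package example

import (
	resourcev1alpha2 "k8s.io/api/resource/v1alpha2"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// examplePodSchedulingContext builds a PodSchedulingContext whose spec lists
// candidate nodes (bounded by PodSchedulingNodeListMaxSize = 128) and whose
// status reports one claim that cannot be satisfied on "worker-2".
func examplePodSchedulingContext() *resourcev1alpha2.PodSchedulingContext {
	return &resourcev1alpha2.PodSchedulingContext{
		ObjectMeta: metav1.ObjectMeta{Name: "my-pod", Namespace: "default"},
		Spec: resourcev1alpha2.PodSchedulingContextSpec{
			SelectedNode:   "worker-1",
			PotentialNodes: []string{"worker-1", "worker-2"},
		},
		Status: resourcev1alpha2.PodSchedulingContextStatus{
			ResourceClaims: []resourcev1alpha2.ResourceClaimSchedulingStatus{
				{Name: "my-claim", UnsuitableNodes: []string{"worker-2"}},
			},
		},
	}
}
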

View File

@ -50,43 +50,43 @@ func RegisterConversions(s *runtime.Scheme) error {
}); err != nil { }); err != nil {
return err return err
} }
if err := s.AddGeneratedConversionFunc((*v1alpha2.PodScheduling)(nil), (*resource.PodScheduling)(nil), func(a, b interface{}, scope conversion.Scope) error { if err := s.AddGeneratedConversionFunc((*v1alpha2.PodSchedulingContext)(nil), (*resource.PodSchedulingContext)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha2_PodScheduling_To_resource_PodScheduling(a.(*v1alpha2.PodScheduling), b.(*resource.PodScheduling), scope) return Convert_v1alpha2_PodSchedulingContext_To_resource_PodSchedulingContext(a.(*v1alpha2.PodSchedulingContext), b.(*resource.PodSchedulingContext), scope)
}); err != nil { }); err != nil {
return err return err
} }
if err := s.AddGeneratedConversionFunc((*resource.PodScheduling)(nil), (*v1alpha2.PodScheduling)(nil), func(a, b interface{}, scope conversion.Scope) error { if err := s.AddGeneratedConversionFunc((*resource.PodSchedulingContext)(nil), (*v1alpha2.PodSchedulingContext)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_PodScheduling_To_v1alpha2_PodScheduling(a.(*resource.PodScheduling), b.(*v1alpha2.PodScheduling), scope) return Convert_resource_PodSchedulingContext_To_v1alpha2_PodSchedulingContext(a.(*resource.PodSchedulingContext), b.(*v1alpha2.PodSchedulingContext), scope)
}); err != nil { }); err != nil {
return err return err
} }
if err := s.AddGeneratedConversionFunc((*v1alpha2.PodSchedulingList)(nil), (*resource.PodSchedulingList)(nil), func(a, b interface{}, scope conversion.Scope) error { if err := s.AddGeneratedConversionFunc((*v1alpha2.PodSchedulingContextList)(nil), (*resource.PodSchedulingContextList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha2_PodSchedulingList_To_resource_PodSchedulingList(a.(*v1alpha2.PodSchedulingList), b.(*resource.PodSchedulingList), scope) return Convert_v1alpha2_PodSchedulingContextList_To_resource_PodSchedulingContextList(a.(*v1alpha2.PodSchedulingContextList), b.(*resource.PodSchedulingContextList), scope)
}); err != nil { }); err != nil {
return err return err
} }
if err := s.AddGeneratedConversionFunc((*resource.PodSchedulingList)(nil), (*v1alpha2.PodSchedulingList)(nil), func(a, b interface{}, scope conversion.Scope) error { if err := s.AddGeneratedConversionFunc((*resource.PodSchedulingContextList)(nil), (*v1alpha2.PodSchedulingContextList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_PodSchedulingList_To_v1alpha2_PodSchedulingList(a.(*resource.PodSchedulingList), b.(*v1alpha2.PodSchedulingList), scope) return Convert_resource_PodSchedulingContextList_To_v1alpha2_PodSchedulingContextList(a.(*resource.PodSchedulingContextList), b.(*v1alpha2.PodSchedulingContextList), scope)
}); err != nil { }); err != nil {
return err return err
} }
if err := s.AddGeneratedConversionFunc((*v1alpha2.PodSchedulingSpec)(nil), (*resource.PodSchedulingSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { if err := s.AddGeneratedConversionFunc((*v1alpha2.PodSchedulingContextSpec)(nil), (*resource.PodSchedulingContextSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha2_PodSchedulingSpec_To_resource_PodSchedulingSpec(a.(*v1alpha2.PodSchedulingSpec), b.(*resource.PodSchedulingSpec), scope) return Convert_v1alpha2_PodSchedulingContextSpec_To_resource_PodSchedulingContextSpec(a.(*v1alpha2.PodSchedulingContextSpec), b.(*resource.PodSchedulingContextSpec), scope)
}); err != nil { }); err != nil {
return err return err
} }
if err := s.AddGeneratedConversionFunc((*resource.PodSchedulingSpec)(nil), (*v1alpha2.PodSchedulingSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { if err := s.AddGeneratedConversionFunc((*resource.PodSchedulingContextSpec)(nil), (*v1alpha2.PodSchedulingContextSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_PodSchedulingSpec_To_v1alpha2_PodSchedulingSpec(a.(*resource.PodSchedulingSpec), b.(*v1alpha2.PodSchedulingSpec), scope) return Convert_resource_PodSchedulingContextSpec_To_v1alpha2_PodSchedulingContextSpec(a.(*resource.PodSchedulingContextSpec), b.(*v1alpha2.PodSchedulingContextSpec), scope)
}); err != nil { }); err != nil {
return err return err
} }
if err := s.AddGeneratedConversionFunc((*v1alpha2.PodSchedulingStatus)(nil), (*resource.PodSchedulingStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { if err := s.AddGeneratedConversionFunc((*v1alpha2.PodSchedulingContextStatus)(nil), (*resource.PodSchedulingContextStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha2_PodSchedulingStatus_To_resource_PodSchedulingStatus(a.(*v1alpha2.PodSchedulingStatus), b.(*resource.PodSchedulingStatus), scope) return Convert_v1alpha2_PodSchedulingContextStatus_To_resource_PodSchedulingContextStatus(a.(*v1alpha2.PodSchedulingContextStatus), b.(*resource.PodSchedulingContextStatus), scope)
}); err != nil { }); err != nil {
return err return err
} }
if err := s.AddGeneratedConversionFunc((*resource.PodSchedulingStatus)(nil), (*v1alpha2.PodSchedulingStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { if err := s.AddGeneratedConversionFunc((*resource.PodSchedulingContextStatus)(nil), (*v1alpha2.PodSchedulingContextStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_PodSchedulingStatus_To_v1alpha2_PodSchedulingStatus(a.(*resource.PodSchedulingStatus), b.(*v1alpha2.PodSchedulingStatus), scope) return Convert_resource_PodSchedulingContextStatus_To_v1alpha2_PodSchedulingContextStatus(a.(*resource.PodSchedulingContextStatus), b.(*v1alpha2.PodSchedulingContextStatus), scope)
}); err != nil { }); err != nil {
return err return err
} }
@ -247,100 +247,100 @@ func Convert_resource_AllocationResult_To_v1alpha2_AllocationResult(in *resource
return autoConvert_resource_AllocationResult_To_v1alpha2_AllocationResult(in, out, s) return autoConvert_resource_AllocationResult_To_v1alpha2_AllocationResult(in, out, s)
} }
func autoConvert_v1alpha2_PodScheduling_To_resource_PodScheduling(in *v1alpha2.PodScheduling, out *resource.PodScheduling, s conversion.Scope) error { func autoConvert_v1alpha2_PodSchedulingContext_To_resource_PodSchedulingContext(in *v1alpha2.PodSchedulingContext, out *resource.PodSchedulingContext, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta out.ObjectMeta = in.ObjectMeta
if err := Convert_v1alpha2_PodSchedulingSpec_To_resource_PodSchedulingSpec(&in.Spec, &out.Spec, s); err != nil { if err := Convert_v1alpha2_PodSchedulingContextSpec_To_resource_PodSchedulingContextSpec(&in.Spec, &out.Spec, s); err != nil {
return err return err
} }
if err := Convert_v1alpha2_PodSchedulingStatus_To_resource_PodSchedulingStatus(&in.Status, &out.Status, s); err != nil { if err := Convert_v1alpha2_PodSchedulingContextStatus_To_resource_PodSchedulingContextStatus(&in.Status, &out.Status, s); err != nil {
return err return err
} }
return nil return nil
} }
// Convert_v1alpha2_PodScheduling_To_resource_PodScheduling is an autogenerated conversion function. // Convert_v1alpha2_PodSchedulingContext_To_resource_PodSchedulingContext is an autogenerated conversion function.
func Convert_v1alpha2_PodScheduling_To_resource_PodScheduling(in *v1alpha2.PodScheduling, out *resource.PodScheduling, s conversion.Scope) error { func Convert_v1alpha2_PodSchedulingContext_To_resource_PodSchedulingContext(in *v1alpha2.PodSchedulingContext, out *resource.PodSchedulingContext, s conversion.Scope) error {
return autoConvert_v1alpha2_PodScheduling_To_resource_PodScheduling(in, out, s) return autoConvert_v1alpha2_PodSchedulingContext_To_resource_PodSchedulingContext(in, out, s)
} }
func autoConvert_resource_PodScheduling_To_v1alpha2_PodScheduling(in *resource.PodScheduling, out *v1alpha2.PodScheduling, s conversion.Scope) error { func autoConvert_resource_PodSchedulingContext_To_v1alpha2_PodSchedulingContext(in *resource.PodSchedulingContext, out *v1alpha2.PodSchedulingContext, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta out.ObjectMeta = in.ObjectMeta
if err := Convert_resource_PodSchedulingSpec_To_v1alpha2_PodSchedulingSpec(&in.Spec, &out.Spec, s); err != nil { if err := Convert_resource_PodSchedulingContextSpec_To_v1alpha2_PodSchedulingContextSpec(&in.Spec, &out.Spec, s); err != nil {
return err return err
} }
if err := Convert_resource_PodSchedulingStatus_To_v1alpha2_PodSchedulingStatus(&in.Status, &out.Status, s); err != nil { if err := Convert_resource_PodSchedulingContextStatus_To_v1alpha2_PodSchedulingContextStatus(&in.Status, &out.Status, s); err != nil {
return err return err
} }
return nil return nil
} }
// Convert_resource_PodScheduling_To_v1alpha2_PodScheduling is an autogenerated conversion function. // Convert_resource_PodSchedulingContext_To_v1alpha2_PodSchedulingContext is an autogenerated conversion function.
func Convert_resource_PodScheduling_To_v1alpha2_PodScheduling(in *resource.PodScheduling, out *v1alpha2.PodScheduling, s conversion.Scope) error { func Convert_resource_PodSchedulingContext_To_v1alpha2_PodSchedulingContext(in *resource.PodSchedulingContext, out *v1alpha2.PodSchedulingContext, s conversion.Scope) error {
return autoConvert_resource_PodScheduling_To_v1alpha2_PodScheduling(in, out, s) return autoConvert_resource_PodSchedulingContext_To_v1alpha2_PodSchedulingContext(in, out, s)
} }
func autoConvert_v1alpha2_PodSchedulingList_To_resource_PodSchedulingList(in *v1alpha2.PodSchedulingList, out *resource.PodSchedulingList, s conversion.Scope) error { func autoConvert_v1alpha2_PodSchedulingContextList_To_resource_PodSchedulingContextList(in *v1alpha2.PodSchedulingContextList, out *resource.PodSchedulingContextList, s conversion.Scope) error {
out.ListMeta = in.ListMeta out.ListMeta = in.ListMeta
out.Items = *(*[]resource.PodScheduling)(unsafe.Pointer(&in.Items)) out.Items = *(*[]resource.PodSchedulingContext)(unsafe.Pointer(&in.Items))
return nil return nil
} }
// Convert_v1alpha2_PodSchedulingList_To_resource_PodSchedulingList is an autogenerated conversion function. // Convert_v1alpha2_PodSchedulingContextList_To_resource_PodSchedulingContextList is an autogenerated conversion function.
func Convert_v1alpha2_PodSchedulingList_To_resource_PodSchedulingList(in *v1alpha2.PodSchedulingList, out *resource.PodSchedulingList, s conversion.Scope) error { func Convert_v1alpha2_PodSchedulingContextList_To_resource_PodSchedulingContextList(in *v1alpha2.PodSchedulingContextList, out *resource.PodSchedulingContextList, s conversion.Scope) error {
return autoConvert_v1alpha2_PodSchedulingList_To_resource_PodSchedulingList(in, out, s) return autoConvert_v1alpha2_PodSchedulingContextList_To_resource_PodSchedulingContextList(in, out, s)
} }
func autoConvert_resource_PodSchedulingList_To_v1alpha2_PodSchedulingList(in *resource.PodSchedulingList, out *v1alpha2.PodSchedulingList, s conversion.Scope) error { func autoConvert_resource_PodSchedulingContextList_To_v1alpha2_PodSchedulingContextList(in *resource.PodSchedulingContextList, out *v1alpha2.PodSchedulingContextList, s conversion.Scope) error {
out.ListMeta = in.ListMeta out.ListMeta = in.ListMeta
out.Items = *(*[]v1alpha2.PodScheduling)(unsafe.Pointer(&in.Items)) out.Items = *(*[]v1alpha2.PodSchedulingContext)(unsafe.Pointer(&in.Items))
return nil return nil
} }
// Convert_resource_PodSchedulingList_To_v1alpha2_PodSchedulingList is an autogenerated conversion function. // Convert_resource_PodSchedulingContextList_To_v1alpha2_PodSchedulingContextList is an autogenerated conversion function.
func Convert_resource_PodSchedulingList_To_v1alpha2_PodSchedulingList(in *resource.PodSchedulingList, out *v1alpha2.PodSchedulingList, s conversion.Scope) error { func Convert_resource_PodSchedulingContextList_To_v1alpha2_PodSchedulingContextList(in *resource.PodSchedulingContextList, out *v1alpha2.PodSchedulingContextList, s conversion.Scope) error {
return autoConvert_resource_PodSchedulingList_To_v1alpha2_PodSchedulingList(in, out, s) return autoConvert_resource_PodSchedulingContextList_To_v1alpha2_PodSchedulingContextList(in, out, s)
} }
func autoConvert_v1alpha2_PodSchedulingSpec_To_resource_PodSchedulingSpec(in *v1alpha2.PodSchedulingSpec, out *resource.PodSchedulingSpec, s conversion.Scope) error { func autoConvert_v1alpha2_PodSchedulingContextSpec_To_resource_PodSchedulingContextSpec(in *v1alpha2.PodSchedulingContextSpec, out *resource.PodSchedulingContextSpec, s conversion.Scope) error {
out.SelectedNode = in.SelectedNode out.SelectedNode = in.SelectedNode
out.PotentialNodes = *(*[]string)(unsafe.Pointer(&in.PotentialNodes)) out.PotentialNodes = *(*[]string)(unsafe.Pointer(&in.PotentialNodes))
return nil return nil
} }
// Convert_v1alpha2_PodSchedulingSpec_To_resource_PodSchedulingSpec is an autogenerated conversion function. // Convert_v1alpha2_PodSchedulingContextSpec_To_resource_PodSchedulingContextSpec is an autogenerated conversion function.
func Convert_v1alpha2_PodSchedulingSpec_To_resource_PodSchedulingSpec(in *v1alpha2.PodSchedulingSpec, out *resource.PodSchedulingSpec, s conversion.Scope) error { func Convert_v1alpha2_PodSchedulingContextSpec_To_resource_PodSchedulingContextSpec(in *v1alpha2.PodSchedulingContextSpec, out *resource.PodSchedulingContextSpec, s conversion.Scope) error {
return autoConvert_v1alpha2_PodSchedulingSpec_To_resource_PodSchedulingSpec(in, out, s) return autoConvert_v1alpha2_PodSchedulingContextSpec_To_resource_PodSchedulingContextSpec(in, out, s)
} }
func autoConvert_resource_PodSchedulingSpec_To_v1alpha2_PodSchedulingSpec(in *resource.PodSchedulingSpec, out *v1alpha2.PodSchedulingSpec, s conversion.Scope) error { func autoConvert_resource_PodSchedulingContextSpec_To_v1alpha2_PodSchedulingContextSpec(in *resource.PodSchedulingContextSpec, out *v1alpha2.PodSchedulingContextSpec, s conversion.Scope) error {
out.SelectedNode = in.SelectedNode out.SelectedNode = in.SelectedNode
out.PotentialNodes = *(*[]string)(unsafe.Pointer(&in.PotentialNodes)) out.PotentialNodes = *(*[]string)(unsafe.Pointer(&in.PotentialNodes))
return nil return nil
} }
// Convert_resource_PodSchedulingSpec_To_v1alpha2_PodSchedulingSpec is an autogenerated conversion function. // Convert_resource_PodSchedulingContextSpec_To_v1alpha2_PodSchedulingContextSpec is an autogenerated conversion function.
func Convert_resource_PodSchedulingSpec_To_v1alpha2_PodSchedulingSpec(in *resource.PodSchedulingSpec, out *v1alpha2.PodSchedulingSpec, s conversion.Scope) error { func Convert_resource_PodSchedulingContextSpec_To_v1alpha2_PodSchedulingContextSpec(in *resource.PodSchedulingContextSpec, out *v1alpha2.PodSchedulingContextSpec, s conversion.Scope) error {
return autoConvert_resource_PodSchedulingSpec_To_v1alpha2_PodSchedulingSpec(in, out, s) return autoConvert_resource_PodSchedulingContextSpec_To_v1alpha2_PodSchedulingContextSpec(in, out, s)
} }
func autoConvert_v1alpha2_PodSchedulingStatus_To_resource_PodSchedulingStatus(in *v1alpha2.PodSchedulingStatus, out *resource.PodSchedulingStatus, s conversion.Scope) error { func autoConvert_v1alpha2_PodSchedulingContextStatus_To_resource_PodSchedulingContextStatus(in *v1alpha2.PodSchedulingContextStatus, out *resource.PodSchedulingContextStatus, s conversion.Scope) error {
out.ResourceClaims = *(*[]resource.ResourceClaimSchedulingStatus)(unsafe.Pointer(&in.ResourceClaims)) out.ResourceClaims = *(*[]resource.ResourceClaimSchedulingStatus)(unsafe.Pointer(&in.ResourceClaims))
return nil return nil
} }
// Convert_v1alpha2_PodSchedulingStatus_To_resource_PodSchedulingStatus is an autogenerated conversion function. // Convert_v1alpha2_PodSchedulingContextStatus_To_resource_PodSchedulingContextStatus is an autogenerated conversion function.
func Convert_v1alpha2_PodSchedulingStatus_To_resource_PodSchedulingStatus(in *v1alpha2.PodSchedulingStatus, out *resource.PodSchedulingStatus, s conversion.Scope) error { func Convert_v1alpha2_PodSchedulingContextStatus_To_resource_PodSchedulingContextStatus(in *v1alpha2.PodSchedulingContextStatus, out *resource.PodSchedulingContextStatus, s conversion.Scope) error {
return autoConvert_v1alpha2_PodSchedulingStatus_To_resource_PodSchedulingStatus(in, out, s) return autoConvert_v1alpha2_PodSchedulingContextStatus_To_resource_PodSchedulingContextStatus(in, out, s)
} }
func autoConvert_resource_PodSchedulingStatus_To_v1alpha2_PodSchedulingStatus(in *resource.PodSchedulingStatus, out *v1alpha2.PodSchedulingStatus, s conversion.Scope) error { func autoConvert_resource_PodSchedulingContextStatus_To_v1alpha2_PodSchedulingContextStatus(in *resource.PodSchedulingContextStatus, out *v1alpha2.PodSchedulingContextStatus, s conversion.Scope) error {
out.ResourceClaims = *(*[]v1alpha2.ResourceClaimSchedulingStatus)(unsafe.Pointer(&in.ResourceClaims)) out.ResourceClaims = *(*[]v1alpha2.ResourceClaimSchedulingStatus)(unsafe.Pointer(&in.ResourceClaims))
return nil return nil
} }
// Convert_resource_PodSchedulingStatus_To_v1alpha2_PodSchedulingStatus is an autogenerated conversion function. // Convert_resource_PodSchedulingContextStatus_To_v1alpha2_PodSchedulingContextStatus is an autogenerated conversion function.
func Convert_resource_PodSchedulingStatus_To_v1alpha2_PodSchedulingStatus(in *resource.PodSchedulingStatus, out *v1alpha2.PodSchedulingStatus, s conversion.Scope) error { func Convert_resource_PodSchedulingContextStatus_To_v1alpha2_PodSchedulingContextStatus(in *resource.PodSchedulingContextStatus, out *v1alpha2.PodSchedulingContextStatus, s conversion.Scope) error {
return autoConvert_resource_PodSchedulingStatus_To_v1alpha2_PodSchedulingStatus(in, out, s) return autoConvert_resource_PodSchedulingContextStatus_To_v1alpha2_PodSchedulingContextStatus(in, out, s)
} }
func autoConvert_v1alpha2_ResourceClaim_To_resource_ResourceClaim(in *v1alpha2.ResourceClaim, out *resource.ResourceClaim, s conversion.Scope) error { func autoConvert_v1alpha2_ResourceClaim_To_resource_ResourceClaim(in *v1alpha2.ResourceClaim, out *resource.ResourceClaim, s conversion.Scope) error {
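
Callers normally reach these generated functions through the scheme rather than invoking them directly. A hedged sketch, assuming RegisterConversions (above) has already been installed on the scheme:

package example

import (
	resourcev1alpha2 "k8s.io/api/resource/v1alpha2"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/kubernetes/pkg/apis/resource"
)

// toInternal converts a versioned PodSchedulingContext into the internal form
// via a scheme on which the generated conversions have been registered.
func toInternal(scheme *runtime.Scheme, in *resourcev1alpha2.PodSchedulingContext) (*resource.PodSchedulingContext, error) {
	out := &resource.PodSchedulingContext{}
	if err := scheme.Convert(in, out, nil); err != nil {
		return nil, err
	}
	return out, nil
}
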

View File

@ -253,33 +253,33 @@ func validateResourceClaimConsumers(consumers []resource.ResourceClaimConsumerRe
return allErrs return allErrs
} }
// ValidatePodScheduling validates a PodScheduling. // ValidatePodSchedulingContexts validates a PodSchedulingContext.
func ValidatePodScheduling(resourceClaim *resource.PodScheduling) field.ErrorList { func ValidatePodSchedulingContexts(schedulingCtx *resource.PodSchedulingContext) field.ErrorList {
allErrs := corevalidation.ValidateObjectMeta(&resourceClaim.ObjectMeta, true, corevalidation.ValidatePodName, field.NewPath("metadata")) allErrs := corevalidation.ValidateObjectMeta(&schedulingCtx.ObjectMeta, true, corevalidation.ValidatePodName, field.NewPath("metadata"))
allErrs = append(allErrs, validatePodSchedulingSpec(&resourceClaim.Spec, field.NewPath("spec"))...) allErrs = append(allErrs, validatePodSchedulingSpec(&schedulingCtx.Spec, field.NewPath("spec"))...)
return allErrs return allErrs
} }
func validatePodSchedulingSpec(spec *resource.PodSchedulingSpec, fldPath *field.Path) field.ErrorList { func validatePodSchedulingSpec(spec *resource.PodSchedulingContextSpec, fldPath *field.Path) field.ErrorList {
allErrs := validateSliceIsASet(spec.PotentialNodes, resource.PodSchedulingNodeListMaxSize, validateNodeName, fldPath.Child("potentialNodes")) allErrs := validateSliceIsASet(spec.PotentialNodes, resource.PodSchedulingNodeListMaxSize, validateNodeName, fldPath.Child("potentialNodes"))
return allErrs return allErrs
} }
// ValidatePodSchedulingUpdate tests if an update to PodScheduling is valid. // ValidatePodSchedulingContextUpdate tests if an update to PodSchedulingContext is valid.
func ValidatePodSchedulingUpdate(resourceClaim, oldPodScheduling *resource.PodScheduling) field.ErrorList { func ValidatePodSchedulingContextUpdate(schedulingCtx, oldSchedulingCtx *resource.PodSchedulingContext) field.ErrorList {
allErrs := corevalidation.ValidateObjectMetaUpdate(&resourceClaim.ObjectMeta, &oldPodScheduling.ObjectMeta, field.NewPath("metadata")) allErrs := corevalidation.ValidateObjectMetaUpdate(&schedulingCtx.ObjectMeta, &oldSchedulingCtx.ObjectMeta, field.NewPath("metadata"))
allErrs = append(allErrs, ValidatePodScheduling(resourceClaim)...) allErrs = append(allErrs, ValidatePodSchedulingContexts(schedulingCtx)...)
return allErrs return allErrs
} }
// ValidatePodSchedulingStatusUpdate tests if an update to the status of a PodScheduling is valid. // ValidatePodSchedulingContextStatusUpdate tests if an update to the status of a PodSchedulingContext is valid.
func ValidatePodSchedulingStatusUpdate(resourceClaim, oldPodScheduling *resource.PodScheduling) field.ErrorList { func ValidatePodSchedulingContextStatusUpdate(schedulingCtx, oldSchedulingCtx *resource.PodSchedulingContext) field.ErrorList {
allErrs := corevalidation.ValidateObjectMetaUpdate(&resourceClaim.ObjectMeta, &oldPodScheduling.ObjectMeta, field.NewPath("metadata")) allErrs := corevalidation.ValidateObjectMetaUpdate(&schedulingCtx.ObjectMeta, &oldSchedulingCtx.ObjectMeta, field.NewPath("metadata"))
allErrs = append(allErrs, validatePodSchedulingStatus(&resourceClaim.Status, field.NewPath("status"))...) allErrs = append(allErrs, validatePodSchedulingStatus(&schedulingCtx.Status, field.NewPath("status"))...)
return allErrs return allErrs
} }
func validatePodSchedulingStatus(status *resource.PodSchedulingStatus, fldPath *field.Path) field.ErrorList { func validatePodSchedulingStatus(status *resource.PodSchedulingContextStatus, fldPath *field.Path) field.ErrorList {
return validatePodSchedulingClaims(status.ResourceClaims, fldPath.Child("claims")) return validatePodSchedulingClaims(status.ResourceClaims, fldPath.Child("claims"))
} }

View File

@ -1,338 +0,0 @@
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package validation
import (
"fmt"
"testing"
"github.com/stretchr/testify/assert"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/validation/field"
"k8s.io/kubernetes/pkg/apis/resource"
"k8s.io/utils/pointer"
)
func testPodScheduling(name, namespace string, spec resource.PodSchedulingSpec) *resource.PodScheduling {
return &resource.PodScheduling{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: namespace,
},
Spec: spec,
}
}
func TestValidatePodScheduling(t *testing.T) {
goodName := "foo"
goodNS := "ns"
goodPodSchedulingSpec := resource.PodSchedulingSpec{}
now := metav1.Now()
badName := "!@#$%^"
badValue := "spaces not allowed"
scenarios := map[string]struct {
scheduling *resource.PodScheduling
wantFailures field.ErrorList
}{
"good-scheduling": {
scheduling: testPodScheduling(goodName, goodNS, goodPodSchedulingSpec),
},
"missing-name": {
wantFailures: field.ErrorList{field.Required(field.NewPath("metadata", "name"), "name or generateName is required")},
scheduling: testPodScheduling("", goodNS, goodPodSchedulingSpec),
},
"bad-name": {
wantFailures: field.ErrorList{field.Invalid(field.NewPath("metadata", "name"), badName, "a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character (e.g. 'example.com', regex used for validation is '[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*')")},
scheduling: testPodScheduling(badName, goodNS, goodPodSchedulingSpec),
},
"missing-namespace": {
wantFailures: field.ErrorList{field.Required(field.NewPath("metadata", "namespace"), "")},
scheduling: testPodScheduling(goodName, "", goodPodSchedulingSpec),
},
"generate-name": {
scheduling: func() *resource.PodScheduling {
scheduling := testPodScheduling(goodName, goodNS, goodPodSchedulingSpec)
scheduling.GenerateName = "pvc-"
return scheduling
}(),
},
"uid": {
scheduling: func() *resource.PodScheduling {
scheduling := testPodScheduling(goodName, goodNS, goodPodSchedulingSpec)
scheduling.UID = "ac051fac-2ead-46d9-b8b4-4e0fbeb7455d"
return scheduling
}(),
},
"resource-version": {
scheduling: func() *resource.PodScheduling {
scheduling := testPodScheduling(goodName, goodNS, goodPodSchedulingSpec)
scheduling.ResourceVersion = "1"
return scheduling
}(),
},
"generation": {
scheduling: func() *resource.PodScheduling {
scheduling := testPodScheduling(goodName, goodNS, goodPodSchedulingSpec)
scheduling.Generation = 100
return scheduling
}(),
},
"creation-timestamp": {
scheduling: func() *resource.PodScheduling {
scheduling := testPodScheduling(goodName, goodNS, goodPodSchedulingSpec)
scheduling.CreationTimestamp = now
return scheduling
}(),
},
"deletion-grace-period-seconds": {
scheduling: func() *resource.PodScheduling {
scheduling := testPodScheduling(goodName, goodNS, goodPodSchedulingSpec)
scheduling.DeletionGracePeriodSeconds = pointer.Int64(10)
return scheduling
}(),
},
"owner-references": {
scheduling: func() *resource.PodScheduling {
scheduling := testPodScheduling(goodName, goodNS, goodPodSchedulingSpec)
scheduling.OwnerReferences = []metav1.OwnerReference{
{
APIVersion: "v1",
Kind: "pod",
Name: "foo",
UID: "ac051fac-2ead-46d9-b8b4-4e0fbeb7455d",
},
}
return scheduling
}(),
},
"finalizers": {
scheduling: func() *resource.PodScheduling {
scheduling := testPodScheduling(goodName, goodNS, goodPodSchedulingSpec)
scheduling.Finalizers = []string{
"example.com/foo",
}
return scheduling
}(),
},
"managed-fields": {
scheduling: func() *resource.PodScheduling {
scheduling := testPodScheduling(goodName, goodNS, goodPodSchedulingSpec)
scheduling.ManagedFields = []metav1.ManagedFieldsEntry{
{
FieldsType: "FieldsV1",
Operation: "Apply",
APIVersion: "apps/v1",
Manager: "foo",
},
}
return scheduling
}(),
},
"good-labels": {
scheduling: func() *resource.PodScheduling {
scheduling := testPodScheduling(goodName, goodNS, goodPodSchedulingSpec)
scheduling.Labels = map[string]string{
"apps.kubernetes.io/name": "test",
}
return scheduling
}(),
},
"bad-labels": {
wantFailures: field.ErrorList{field.Invalid(field.NewPath("metadata", "labels"), badValue, "a valid label must be an empty string or consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character (e.g. 'MyValue', or 'my_value', or '12345', regex used for validation is '(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?')")},
scheduling: func() *resource.PodScheduling {
scheduling := testPodScheduling(goodName, goodNS, goodPodSchedulingSpec)
scheduling.Labels = map[string]string{
"hello-world": badValue,
}
return scheduling
}(),
},
"good-annotations": {
scheduling: func() *resource.PodScheduling {
scheduling := testPodScheduling(goodName, goodNS, goodPodSchedulingSpec)
scheduling.Annotations = map[string]string{
"foo": "bar",
}
return scheduling
}(),
},
"bad-annotations": {
wantFailures: field.ErrorList{field.Invalid(field.NewPath("metadata", "annotations"), badName, "name part must consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character (e.g. 'MyName', or 'my.name', or '123-abc', regex used for validation is '([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]')")},
scheduling: func() *resource.PodScheduling {
scheduling := testPodScheduling(goodName, goodNS, goodPodSchedulingSpec)
scheduling.Annotations = map[string]string{
badName: "hello world",
}
return scheduling
}(),
},
}
for name, scenario := range scenarios {
t.Run(name, func(t *testing.T) {
errs := ValidatePodScheduling(scenario.scheduling)
assert.Equal(t, scenario.wantFailures, errs)
})
}
}
func TestValidatePodSchedulingUpdate(t *testing.T) {
validScheduling := testPodScheduling("foo", "ns", resource.PodSchedulingSpec{})
badName := "!@#$%^"
scenarios := map[string]struct {
oldScheduling *resource.PodScheduling
update func(scheduling *resource.PodScheduling) *resource.PodScheduling
wantFailures field.ErrorList
}{
"valid-no-op-update": {
oldScheduling: validScheduling,
update: func(scheduling *resource.PodScheduling) *resource.PodScheduling { return scheduling },
},
"add-selected-node": {
oldScheduling: validScheduling,
update: func(scheduling *resource.PodScheduling) *resource.PodScheduling {
scheduling.Spec.SelectedNode = "worker1"
return scheduling
},
},
"add-potential-nodes": {
oldScheduling: validScheduling,
update: func(scheduling *resource.PodScheduling) *resource.PodScheduling {
for i := 0; i < resource.PodSchedulingNodeListMaxSize; i++ {
scheduling.Spec.PotentialNodes = append(scheduling.Spec.PotentialNodes, fmt.Sprintf("worker%d", i))
}
return scheduling
},
},
"invalid-potential-nodes-too-long": {
wantFailures: field.ErrorList{field.TooLongMaxLength(field.NewPath("spec", "potentialNodes"), 129, resource.PodSchedulingNodeListMaxSize)},
oldScheduling: validScheduling,
update: func(scheduling *resource.PodScheduling) *resource.PodScheduling {
for i := 0; i < resource.PodSchedulingNodeListMaxSize+1; i++ {
scheduling.Spec.PotentialNodes = append(scheduling.Spec.PotentialNodes, fmt.Sprintf("worker%d", i))
}
return scheduling
},
},
"invalid-potential-nodes-name": {
wantFailures: field.ErrorList{field.Invalid(field.NewPath("spec", "potentialNodes").Index(0), badName, "a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character (e.g. 'example.com', regex used for validation is '[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*')")},
oldScheduling: validScheduling,
update: func(scheduling *resource.PodScheduling) *resource.PodScheduling {
scheduling.Spec.PotentialNodes = append(scheduling.Spec.PotentialNodes, badName)
return scheduling
},
},
}
for name, scenario := range scenarios {
t.Run(name, func(t *testing.T) {
scenario.oldScheduling.ResourceVersion = "1"
errs := ValidatePodSchedulingUpdate(scenario.update(scenario.oldScheduling.DeepCopy()), scenario.oldScheduling)
assert.Equal(t, scenario.wantFailures, errs)
})
}
}
func TestValidatePodSchedulingStatusUpdate(t *testing.T) {
validScheduling := testPodScheduling("foo", "ns", resource.PodSchedulingSpec{})
badName := "!@#$%^"
scenarios := map[string]struct {
oldScheduling *resource.PodScheduling
update func(scheduling *resource.PodScheduling) *resource.PodScheduling
wantFailures field.ErrorList
}{
"valid-no-op-update": {
oldScheduling: validScheduling,
update: func(scheduling *resource.PodScheduling) *resource.PodScheduling { return scheduling },
},
"add-claim-status": {
oldScheduling: validScheduling,
update: func(scheduling *resource.PodScheduling) *resource.PodScheduling {
scheduling.Status.ResourceClaims = append(scheduling.Status.ResourceClaims,
resource.ResourceClaimSchedulingStatus{
Name: "my-claim",
},
)
for i := 0; i < resource.PodSchedulingNodeListMaxSize; i++ {
scheduling.Status.ResourceClaims[0].UnsuitableNodes = append(
scheduling.Status.ResourceClaims[0].UnsuitableNodes,
fmt.Sprintf("worker%d", i),
)
}
return scheduling
},
},
"invalid-duplicated-claim-status": {
wantFailures: field.ErrorList{field.Duplicate(field.NewPath("status", "claims").Index(1), "my-claim")},
oldScheduling: validScheduling,
update: func(scheduling *resource.PodScheduling) *resource.PodScheduling {
for i := 0; i < 2; i++ {
scheduling.Status.ResourceClaims = append(scheduling.Status.ResourceClaims,
resource.ResourceClaimSchedulingStatus{Name: "my-claim"},
)
}
return scheduling
},
},
"invalid-too-long-claim-status": {
wantFailures: field.ErrorList{field.TooLongMaxLength(field.NewPath("status", "claims").Index(0).Child("unsuitableNodes"), 129, resource.PodSchedulingNodeListMaxSize)},
oldScheduling: validScheduling,
update: func(scheduling *resource.PodScheduling) *resource.PodScheduling {
scheduling.Status.ResourceClaims = append(scheduling.Status.ResourceClaims,
resource.ResourceClaimSchedulingStatus{
Name: "my-claim",
},
)
for i := 0; i < resource.PodSchedulingNodeListMaxSize+1; i++ {
scheduling.Status.ResourceClaims[0].UnsuitableNodes = append(
scheduling.Status.ResourceClaims[0].UnsuitableNodes,
fmt.Sprintf("worker%d", i),
)
}
return scheduling
},
},
"invalid-node-name": {
wantFailures: field.ErrorList{field.Invalid(field.NewPath("status", "claims").Index(0).Child("unsuitableNodes").Index(0), badName, "a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character (e.g. 'example.com', regex used for validation is '[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*')")},
oldScheduling: validScheduling,
update: func(scheduling *resource.PodScheduling) *resource.PodScheduling {
scheduling.Status.ResourceClaims = append(scheduling.Status.ResourceClaims,
resource.ResourceClaimSchedulingStatus{
Name: "my-claim",
},
)
scheduling.Status.ResourceClaims[0].UnsuitableNodes = append(
scheduling.Status.ResourceClaims[0].UnsuitableNodes,
badName,
)
return scheduling
},
},
}
for name, scenario := range scenarios {
t.Run(name, func(t *testing.T) {
scenario.oldScheduling.ResourceVersion = "1"
errs := ValidatePodSchedulingStatusUpdate(scenario.update(scenario.oldScheduling.DeepCopy()), scenario.oldScheduling)
assert.Equal(t, scenario.wantFailures, errs)
})
}
}

View File

@ -0,0 +1,342 @@
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package validation
import (
"fmt"
"testing"
"github.com/stretchr/testify/assert"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/validation/field"
"k8s.io/kubernetes/pkg/apis/resource"
"k8s.io/utils/pointer"
)
func testPodSchedulingContexts(name, namespace string, spec resource.PodSchedulingContextSpec) *resource.PodSchedulingContext {
return &resource.PodSchedulingContext{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: namespace,
},
Spec: spec,
}
}
func TestValidatePodSchedulingContexts(t *testing.T) {
goodName := "foo"
goodNS := "ns"
goodPodSchedulingSpec := resource.PodSchedulingContextSpec{}
now := metav1.Now()
badName := "!@#$%^"
badValue := "spaces not allowed"
scenarios := map[string]struct {
schedulingCtx *resource.PodSchedulingContext
wantFailures field.ErrorList
}{
"good-schedulingCtx": {
schedulingCtx: testPodSchedulingContexts(goodName, goodNS, goodPodSchedulingSpec),
},
"missing-name": {
wantFailures: field.ErrorList{field.Required(field.NewPath("metadata", "name"), "name or generateName is required")},
schedulingCtx: testPodSchedulingContexts("", goodNS, goodPodSchedulingSpec),
},
"bad-name": {
wantFailures: field.ErrorList{field.Invalid(field.NewPath("metadata", "name"), badName, "a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character (e.g. 'example.com', regex used for validation is '[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*')")},
schedulingCtx: testPodSchedulingContexts(badName, goodNS, goodPodSchedulingSpec),
},
"missing-namespace": {
wantFailures: field.ErrorList{field.Required(field.NewPath("metadata", "namespace"), "")},
schedulingCtx: testPodSchedulingContexts(goodName, "", goodPodSchedulingSpec),
},
"generate-name": {
schedulingCtx: func() *resource.PodSchedulingContext {
schedulingCtx := testPodSchedulingContexts(goodName, goodNS, goodPodSchedulingSpec)
schedulingCtx.GenerateName = "pvc-"
return schedulingCtx
}(),
},
"uid": {
schedulingCtx: func() *resource.PodSchedulingContext {
schedulingCtx := testPodSchedulingContexts(goodName, goodNS, goodPodSchedulingSpec)
schedulingCtx.UID = "ac051fac-2ead-46d9-b8b4-4e0fbeb7455d"
return schedulingCtx
}(),
},
"resource-version": {
schedulingCtx: func() *resource.PodSchedulingContext {
schedulingCtx := testPodSchedulingContexts(goodName, goodNS, goodPodSchedulingSpec)
schedulingCtx.ResourceVersion = "1"
return schedulingCtx
}(),
},
"generation": {
schedulingCtx: func() *resource.PodSchedulingContext {
schedulingCtx := testPodSchedulingContexts(goodName, goodNS, goodPodSchedulingSpec)
schedulingCtx.Generation = 100
return schedulingCtx
}(),
},
"creation-timestamp": {
schedulingCtx: func() *resource.PodSchedulingContext {
schedulingCtx := testPodSchedulingContexts(goodName, goodNS, goodPodSchedulingSpec)
schedulingCtx.CreationTimestamp = now
return schedulingCtx
}(),
},
"deletion-grace-period-seconds": {
schedulingCtx: func() *resource.PodSchedulingContext {
schedulingCtx := testPodSchedulingContexts(goodName, goodNS, goodPodSchedulingSpec)
schedulingCtx.DeletionGracePeriodSeconds = pointer.Int64(10)
return schedulingCtx
}(),
},
"owner-references": {
schedulingCtx: func() *resource.PodSchedulingContext {
schedulingCtx := testPodSchedulingContexts(goodName, goodNS, goodPodSchedulingSpec)
schedulingCtx.OwnerReferences = []metav1.OwnerReference{
{
APIVersion: "v1",
Kind: "pod",
Name: "foo",
UID: "ac051fac-2ead-46d9-b8b4-4e0fbeb7455d",
},
}
return schedulingCtx
}(),
},
"finalizers": {
schedulingCtx: func() *resource.PodSchedulingContext {
schedulingCtx := testPodSchedulingContexts(goodName, goodNS, goodPodSchedulingSpec)
schedulingCtx.Finalizers = []string{
"example.com/foo",
}
return schedulingCtx
}(),
},
"managed-fields": {
schedulingCtx: func() *resource.PodSchedulingContext {
schedulingCtx := testPodSchedulingContexts(goodName, goodNS, goodPodSchedulingSpec)
schedulingCtx.ManagedFields = []metav1.ManagedFieldsEntry{
{
FieldsType: "FieldsV1",
Operation: "Apply",
APIVersion: "apps/v1",
Manager: "foo",
},
}
return schedulingCtx
}(),
},
"good-labels": {
schedulingCtx: func() *resource.PodSchedulingContext {
schedulingCtx := testPodSchedulingContexts(goodName, goodNS, goodPodSchedulingSpec)
schedulingCtx.Labels = map[string]string{
"apps.kubernetes.io/name": "test",
}
return schedulingCtx
}(),
},
"bad-labels": {
wantFailures: field.ErrorList{field.Invalid(field.NewPath("metadata", "labels"), badValue, "a valid label must be an empty string or consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character (e.g. 'MyValue', or 'my_value', or '12345', regex used for validation is '(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?')")},
schedulingCtx: func() *resource.PodSchedulingContext {
schedulingCtx := testPodSchedulingContexts(goodName, goodNS, goodPodSchedulingSpec)
schedulingCtx.Labels = map[string]string{
"hello-world": badValue,
}
return schedulingCtx
}(),
},
"good-annotations": {
schedulingCtx: func() *resource.PodSchedulingContext {
schedulingCtx := testPodSchedulingContexts(goodName, goodNS, goodPodSchedulingSpec)
schedulingCtx.Annotations = map[string]string{
"foo": "bar",
}
return schedulingCtx
}(),
},
"bad-annotations": {
wantFailures: field.ErrorList{field.Invalid(field.NewPath("metadata", "annotations"), badName, "name part must consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character (e.g. 'MyName', or 'my.name', or '123-abc', regex used for validation is '([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]')")},
schedulingCtx: func() *resource.PodSchedulingContext {
schedulingCtx := testPodSchedulingContexts(goodName, goodNS, goodPodSchedulingSpec)
schedulingCtx.Annotations = map[string]string{
badName: "hello world",
}
return schedulingCtx
}(),
},
}
for name, scenario := range scenarios {
t.Run(name, func(t *testing.T) {
errs := ValidatePodSchedulingContexts(scenario.schedulingCtx)
assert.Equal(t, scenario.wantFailures, errs)
})
}
}
func TestValidatePodSchedulingUpdate(t *testing.T) {
validScheduling := testPodSchedulingContexts("foo", "ns", resource.PodSchedulingContextSpec{})
badName := "!@#$%^"
scenarios := map[string]struct {
oldScheduling *resource.PodSchedulingContext
update func(schedulingCtx *resource.PodSchedulingContext) *resource.PodSchedulingContext
wantFailures field.ErrorList
}{
"valid-no-op-update": {
oldScheduling: validScheduling,
update: func(schedulingCtx *resource.PodSchedulingContext) *resource.PodSchedulingContext {
return schedulingCtx
},
},
"add-selected-node": {
oldScheduling: validScheduling,
update: func(schedulingCtx *resource.PodSchedulingContext) *resource.PodSchedulingContext {
schedulingCtx.Spec.SelectedNode = "worker1"
return schedulingCtx
},
},
"add-potential-nodes": {
oldScheduling: validScheduling,
update: func(schedulingCtx *resource.PodSchedulingContext) *resource.PodSchedulingContext {
for i := 0; i < resource.PodSchedulingNodeListMaxSize; i++ {
schedulingCtx.Spec.PotentialNodes = append(schedulingCtx.Spec.PotentialNodes, fmt.Sprintf("worker%d", i))
}
return schedulingCtx
},
},
"invalid-potential-nodes-too-long": {
wantFailures: field.ErrorList{field.TooLongMaxLength(field.NewPath("spec", "potentialNodes"), 129, resource.PodSchedulingNodeListMaxSize)},
oldScheduling: validScheduling,
update: func(schedulingCtx *resource.PodSchedulingContext) *resource.PodSchedulingContext {
for i := 0; i < resource.PodSchedulingNodeListMaxSize+1; i++ {
schedulingCtx.Spec.PotentialNodes = append(schedulingCtx.Spec.PotentialNodes, fmt.Sprintf("worker%d", i))
}
return schedulingCtx
},
},
"invalid-potential-nodes-name": {
wantFailures: field.ErrorList{field.Invalid(field.NewPath("spec", "potentialNodes").Index(0), badName, "a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character (e.g. 'example.com', regex used for validation is '[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*')")},
oldScheduling: validScheduling,
update: func(schedulingCtx *resource.PodSchedulingContext) *resource.PodSchedulingContext {
schedulingCtx.Spec.PotentialNodes = append(schedulingCtx.Spec.PotentialNodes, badName)
return schedulingCtx
},
},
}
for name, scenario := range scenarios {
t.Run(name, func(t *testing.T) {
scenario.oldScheduling.ResourceVersion = "1"
errs := ValidatePodSchedulingContextUpdate(scenario.update(scenario.oldScheduling.DeepCopy()), scenario.oldScheduling)
assert.Equal(t, scenario.wantFailures, errs)
})
}
}
func TestValidatePodSchedulingStatusUpdate(t *testing.T) {
validScheduling := testPodSchedulingContexts("foo", "ns", resource.PodSchedulingContextSpec{})
badName := "!@#$%^"
scenarios := map[string]struct {
oldScheduling *resource.PodSchedulingContext
update func(schedulingCtx *resource.PodSchedulingContext) *resource.PodSchedulingContext
wantFailures field.ErrorList
}{
"valid-no-op-update": {
oldScheduling: validScheduling,
update: func(schedulingCtx *resource.PodSchedulingContext) *resource.PodSchedulingContext {
return schedulingCtx
},
},
"add-claim-status": {
oldScheduling: validScheduling,
update: func(schedulingCtx *resource.PodSchedulingContext) *resource.PodSchedulingContext {
schedulingCtx.Status.ResourceClaims = append(schedulingCtx.Status.ResourceClaims,
resource.ResourceClaimSchedulingStatus{
Name: "my-claim",
},
)
for i := 0; i < resource.PodSchedulingNodeListMaxSize; i++ {
schedulingCtx.Status.ResourceClaims[0].UnsuitableNodes = append(
schedulingCtx.Status.ResourceClaims[0].UnsuitableNodes,
fmt.Sprintf("worker%d", i),
)
}
return schedulingCtx
},
},
"invalid-duplicated-claim-status": {
wantFailures: field.ErrorList{field.Duplicate(field.NewPath("status", "claims").Index(1), "my-claim")},
oldScheduling: validScheduling,
update: func(schedulingCtx *resource.PodSchedulingContext) *resource.PodSchedulingContext {
for i := 0; i < 2; i++ {
schedulingCtx.Status.ResourceClaims = append(schedulingCtx.Status.ResourceClaims,
resource.ResourceClaimSchedulingStatus{Name: "my-claim"},
)
}
return schedulingCtx
},
},
"invalid-too-long-claim-status": {
wantFailures: field.ErrorList{field.TooLongMaxLength(field.NewPath("status", "claims").Index(0).Child("unsuitableNodes"), 129, resource.PodSchedulingNodeListMaxSize)},
oldScheduling: validScheduling,
update: func(schedulingCtx *resource.PodSchedulingContext) *resource.PodSchedulingContext {
schedulingCtx.Status.ResourceClaims = append(schedulingCtx.Status.ResourceClaims,
resource.ResourceClaimSchedulingStatus{
Name: "my-claim",
},
)
for i := 0; i < resource.PodSchedulingNodeListMaxSize+1; i++ {
schedulingCtx.Status.ResourceClaims[0].UnsuitableNodes = append(
schedulingCtx.Status.ResourceClaims[0].UnsuitableNodes,
fmt.Sprintf("worker%d", i),
)
}
return schedulingCtx
},
},
"invalid-node-name": {
wantFailures: field.ErrorList{field.Invalid(field.NewPath("status", "claims").Index(0).Child("unsuitableNodes").Index(0), badName, "a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character (e.g. 'example.com', regex used for validation is '[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*')")},
oldScheduling: validScheduling,
update: func(schedulingCtx *resource.PodSchedulingContext) *resource.PodSchedulingContext {
schedulingCtx.Status.ResourceClaims = append(schedulingCtx.Status.ResourceClaims,
resource.ResourceClaimSchedulingStatus{
Name: "my-claim",
},
)
schedulingCtx.Status.ResourceClaims[0].UnsuitableNodes = append(
schedulingCtx.Status.ResourceClaims[0].UnsuitableNodes,
badName,
)
return schedulingCtx
},
},
}
for name, scenario := range scenarios {
t.Run(name, func(t *testing.T) {
scenario.oldScheduling.ResourceVersion = "1"
errs := ValidatePodSchedulingContextStatusUpdate(scenario.update(scenario.oldScheduling.DeepCopy()), scenario.oldScheduling)
assert.Equal(t, scenario.wantFailures, errs)
})
}
}
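
The cases above pin down the limits that PodSchedulingContext validation enforces: spec.potentialNodes and each claim's unsuitableNodes list are capped at resource.PodSchedulingNodeListMaxSize entries (the failing cases build 129), node names must be lowercase RFC 1123 subdomains, and claim statuses may not repeat a name. Below is a minimal sketch of an object that trips the potentialNodes cap, written against the published k8s.io/api/resource/v1alpha2 types rather than the internal API used by these tests; the object and node names are made up for illustration.

package main

import (
	"fmt"

	resourcev1alpha2 "k8s.io/api/resource/v1alpha2"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	schedulingCtx := &resourcev1alpha2.PodSchedulingContext{
		ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"},
		Spec:       resourcev1alpha2.PodSchedulingContextSpec{SelectedNode: "worker0"},
	}
	// One entry more than the published limit; the apiserver would reject
	// such a spec with a "Too long" error on spec.potentialNodes.
	for i := 0; i <= resourcev1alpha2.PodSchedulingNodeListMaxSize; i++ {
		schedulingCtx.Spec.PotentialNodes = append(schedulingCtx.Spec.PotentialNodes, fmt.Sprintf("worker%d", i))
	}
	fmt.Println(len(schedulingCtx.Spec.PotentialNodes)) // PodSchedulingNodeListMaxSize + 1
}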

View File

@ -48,7 +48,7 @@ func (in *AllocationResult) DeepCopy() *AllocationResult {
} }
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodScheduling) DeepCopyInto(out *PodScheduling) { func (in *PodSchedulingContext) DeepCopyInto(out *PodSchedulingContext) {
*out = *in *out = *in
out.TypeMeta = in.TypeMeta out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
@ -57,18 +57,18 @@ func (in *PodScheduling) DeepCopyInto(out *PodScheduling) {
return return
} }
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodScheduling. // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSchedulingContext.
func (in *PodScheduling) DeepCopy() *PodScheduling { func (in *PodSchedulingContext) DeepCopy() *PodSchedulingContext {
if in == nil { if in == nil {
return nil return nil
} }
out := new(PodScheduling) out := new(PodSchedulingContext)
in.DeepCopyInto(out) in.DeepCopyInto(out)
return out return out
} }
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *PodScheduling) DeepCopyObject() runtime.Object { func (in *PodSchedulingContext) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil { if c := in.DeepCopy(); c != nil {
return c return c
} }
@ -76,13 +76,13 @@ func (in *PodScheduling) DeepCopyObject() runtime.Object {
} }
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodSchedulingList) DeepCopyInto(out *PodSchedulingList) { func (in *PodSchedulingContextList) DeepCopyInto(out *PodSchedulingContextList) {
*out = *in *out = *in
out.TypeMeta = in.TypeMeta out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta) in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil { if in.Items != nil {
in, out := &in.Items, &out.Items in, out := &in.Items, &out.Items
*out = make([]PodScheduling, len(*in)) *out = make([]PodSchedulingContext, len(*in))
for i := range *in { for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i]) (*in)[i].DeepCopyInto(&(*out)[i])
} }
@ -90,18 +90,18 @@ func (in *PodSchedulingList) DeepCopyInto(out *PodSchedulingList) {
return return
} }
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSchedulingList. // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSchedulingContextList.
func (in *PodSchedulingList) DeepCopy() *PodSchedulingList { func (in *PodSchedulingContextList) DeepCopy() *PodSchedulingContextList {
if in == nil { if in == nil {
return nil return nil
} }
out := new(PodSchedulingList) out := new(PodSchedulingContextList)
in.DeepCopyInto(out) in.DeepCopyInto(out)
return out return out
} }
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *PodSchedulingList) DeepCopyObject() runtime.Object { func (in *PodSchedulingContextList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil { if c := in.DeepCopy(); c != nil {
return c return c
} }
@ -109,7 +109,7 @@ func (in *PodSchedulingList) DeepCopyObject() runtime.Object {
} }
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodSchedulingSpec) DeepCopyInto(out *PodSchedulingSpec) { func (in *PodSchedulingContextSpec) DeepCopyInto(out *PodSchedulingContextSpec) {
*out = *in *out = *in
if in.PotentialNodes != nil { if in.PotentialNodes != nil {
in, out := &in.PotentialNodes, &out.PotentialNodes in, out := &in.PotentialNodes, &out.PotentialNodes
@ -119,18 +119,18 @@ func (in *PodSchedulingSpec) DeepCopyInto(out *PodSchedulingSpec) {
return return
} }
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSchedulingSpec. // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSchedulingContextSpec.
func (in *PodSchedulingSpec) DeepCopy() *PodSchedulingSpec { func (in *PodSchedulingContextSpec) DeepCopy() *PodSchedulingContextSpec {
if in == nil { if in == nil {
return nil return nil
} }
out := new(PodSchedulingSpec) out := new(PodSchedulingContextSpec)
in.DeepCopyInto(out) in.DeepCopyInto(out)
return out return out
} }
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodSchedulingStatus) DeepCopyInto(out *PodSchedulingStatus) { func (in *PodSchedulingContextStatus) DeepCopyInto(out *PodSchedulingContextStatus) {
*out = *in *out = *in
if in.ResourceClaims != nil { if in.ResourceClaims != nil {
in, out := &in.ResourceClaims, &out.ResourceClaims in, out := &in.ResourceClaims, &out.ResourceClaims
@ -142,12 +142,12 @@ func (in *PodSchedulingStatus) DeepCopyInto(out *PodSchedulingStatus) {
return return
} }
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSchedulingStatus. // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSchedulingContextStatus.
func (in *PodSchedulingStatus) DeepCopy() *PodSchedulingStatus { func (in *PodSchedulingContextStatus) DeepCopy() *PodSchedulingContextStatus {
if in == nil { if in == nil {
return nil return nil
} }
out := new(PodSchedulingStatus) out := new(PodSchedulingContextStatus)
in.DeepCopyInto(out) in.DeepCopyInto(out)
return out return out
} }
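
The generated deepcopy helpers are what let callers take a private, mutable copy of an object that came out of a shared informer cache; the scheduler plugin further down relies on exactly this before it edits spec.potentialNodes. A small sketch under that assumption, with a hypothetical helper name:

package example

import resourcev1alpha2 "k8s.io/api/resource/v1alpha2"

// withExtraPotentialNode returns a modified private copy of a cached object.
// Objects handed out by informers and listers are shared, so DeepCopy is
// called before any mutation; the cached original stays untouched.
func withExtraPotentialNode(cached *resourcev1alpha2.PodSchedulingContext, node string) *resourcev1alpha2.PodSchedulingContext {
	updated := cached.DeepCopy() // recursively copies ObjectMeta, Spec and Status
	updated.Spec.PotentialNodes = append(updated.Spec.PotentialNodes, node)
	return updated
}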

View File

@ -825,10 +825,10 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA
"k8s.io/api/rbac/v1beta1.RoleRef": schema_k8sio_api_rbac_v1beta1_RoleRef(ref), "k8s.io/api/rbac/v1beta1.RoleRef": schema_k8sio_api_rbac_v1beta1_RoleRef(ref),
"k8s.io/api/rbac/v1beta1.Subject": schema_k8sio_api_rbac_v1beta1_Subject(ref), "k8s.io/api/rbac/v1beta1.Subject": schema_k8sio_api_rbac_v1beta1_Subject(ref),
"k8s.io/api/resource/v1alpha2.AllocationResult": schema_k8sio_api_resource_v1alpha2_AllocationResult(ref), "k8s.io/api/resource/v1alpha2.AllocationResult": schema_k8sio_api_resource_v1alpha2_AllocationResult(ref),
"k8s.io/api/resource/v1alpha2.PodScheduling": schema_k8sio_api_resource_v1alpha2_PodScheduling(ref), "k8s.io/api/resource/v1alpha2.PodSchedulingContext": schema_k8sio_api_resource_v1alpha2_PodSchedulingContext(ref),
"k8s.io/api/resource/v1alpha2.PodSchedulingList": schema_k8sio_api_resource_v1alpha2_PodSchedulingList(ref), "k8s.io/api/resource/v1alpha2.PodSchedulingContextList": schema_k8sio_api_resource_v1alpha2_PodSchedulingContextList(ref),
"k8s.io/api/resource/v1alpha2.PodSchedulingSpec": schema_k8sio_api_resource_v1alpha2_PodSchedulingSpec(ref), "k8s.io/api/resource/v1alpha2.PodSchedulingContextSpec": schema_k8sio_api_resource_v1alpha2_PodSchedulingContextSpec(ref),
"k8s.io/api/resource/v1alpha2.PodSchedulingStatus": schema_k8sio_api_resource_v1alpha2_PodSchedulingStatus(ref), "k8s.io/api/resource/v1alpha2.PodSchedulingContextStatus": schema_k8sio_api_resource_v1alpha2_PodSchedulingContextStatus(ref),
"k8s.io/api/resource/v1alpha2.ResourceClaim": schema_k8sio_api_resource_v1alpha2_ResourceClaim(ref), "k8s.io/api/resource/v1alpha2.ResourceClaim": schema_k8sio_api_resource_v1alpha2_ResourceClaim(ref),
"k8s.io/api/resource/v1alpha2.ResourceClaimConsumerReference": schema_k8sio_api_resource_v1alpha2_ResourceClaimConsumerReference(ref), "k8s.io/api/resource/v1alpha2.ResourceClaimConsumerReference": schema_k8sio_api_resource_v1alpha2_ResourceClaimConsumerReference(ref),
"k8s.io/api/resource/v1alpha2.ResourceClaimList": schema_k8sio_api_resource_v1alpha2_ResourceClaimList(ref), "k8s.io/api/resource/v1alpha2.ResourceClaimList": schema_k8sio_api_resource_v1alpha2_ResourceClaimList(ref),
@ -41156,11 +41156,11 @@ func schema_k8sio_api_resource_v1alpha2_AllocationResult(ref common.ReferenceCal
} }
} }
func schema_k8sio_api_resource_v1alpha2_PodScheduling(ref common.ReferenceCallback) common.OpenAPIDefinition { func schema_k8sio_api_resource_v1alpha2_PodSchedulingContext(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{ return common.OpenAPIDefinition{
Schema: spec.Schema{ Schema: spec.Schema{
SchemaProps: spec.SchemaProps{ SchemaProps: spec.SchemaProps{
Description: "PodScheduling objects hold information that is needed to schedule a Pod with ResourceClaims that use \"WaitForFirstConsumer\" allocation mode.\n\nThis is an alpha type and requires enabling the DynamicResourceAllocation feature gate.", Description: "PodSchedulingContext objects hold information that is needed to schedule a Pod with ResourceClaims that use \"WaitForFirstConsumer\" allocation mode.\n\nThis is an alpha type and requires enabling the DynamicResourceAllocation feature gate.",
Type: []string{"object"}, Type: []string{"object"},
Properties: map[string]spec.Schema{ Properties: map[string]spec.Schema{
"kind": { "kind": {
@ -41188,14 +41188,14 @@ func schema_k8sio_api_resource_v1alpha2_PodScheduling(ref common.ReferenceCallba
SchemaProps: spec.SchemaProps{ SchemaProps: spec.SchemaProps{
Description: "Spec describes where resources for the Pod are needed.", Description: "Spec describes where resources for the Pod are needed.",
Default: map[string]interface{}{}, Default: map[string]interface{}{},
Ref: ref("k8s.io/api/resource/v1alpha2.PodSchedulingSpec"), Ref: ref("k8s.io/api/resource/v1alpha2.PodSchedulingContextSpec"),
}, },
}, },
"status": { "status": {
SchemaProps: spec.SchemaProps{ SchemaProps: spec.SchemaProps{
Description: "Status describes where resources for the Pod can be allocated.", Description: "Status describes where resources for the Pod can be allocated.",
Default: map[string]interface{}{}, Default: map[string]interface{}{},
Ref: ref("k8s.io/api/resource/v1alpha2.PodSchedulingStatus"), Ref: ref("k8s.io/api/resource/v1alpha2.PodSchedulingContextStatus"),
}, },
}, },
}, },
@ -41203,15 +41203,15 @@ func schema_k8sio_api_resource_v1alpha2_PodScheduling(ref common.ReferenceCallba
}, },
}, },
Dependencies: []string{ Dependencies: []string{
"k8s.io/api/resource/v1alpha2.PodSchedulingSpec", "k8s.io/api/resource/v1alpha2.PodSchedulingStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, "k8s.io/api/resource/v1alpha2.PodSchedulingContextSpec", "k8s.io/api/resource/v1alpha2.PodSchedulingContextStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"},
} }
} }
func schema_k8sio_api_resource_v1alpha2_PodSchedulingList(ref common.ReferenceCallback) common.OpenAPIDefinition { func schema_k8sio_api_resource_v1alpha2_PodSchedulingContextList(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{ return common.OpenAPIDefinition{
Schema: spec.Schema{ Schema: spec.Schema{
SchemaProps: spec.SchemaProps{ SchemaProps: spec.SchemaProps{
Description: "PodSchedulingList is a collection of Pod scheduling objects.", Description: "PodSchedulingContextList is a collection of Pod scheduling objects.",
Type: []string{"object"}, Type: []string{"object"},
Properties: map[string]spec.Schema{ Properties: map[string]spec.Schema{
"kind": { "kind": {
@ -41237,13 +41237,13 @@ func schema_k8sio_api_resource_v1alpha2_PodSchedulingList(ref common.ReferenceCa
}, },
"items": { "items": {
SchemaProps: spec.SchemaProps{ SchemaProps: spec.SchemaProps{
Description: "Items is the list of PodScheduling objects.", Description: "Items is the list of PodSchedulingContext objects.",
Type: []string{"array"}, Type: []string{"array"},
Items: &spec.SchemaOrArray{ Items: &spec.SchemaOrArray{
Schema: &spec.Schema{ Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{ SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{}, Default: map[string]interface{}{},
Ref: ref("k8s.io/api/resource/v1alpha2.PodScheduling"), Ref: ref("k8s.io/api/resource/v1alpha2.PodSchedulingContext"),
}, },
}, },
}, },
@ -41254,15 +41254,15 @@ func schema_k8sio_api_resource_v1alpha2_PodSchedulingList(ref common.ReferenceCa
}, },
}, },
Dependencies: []string{ Dependencies: []string{
"k8s.io/api/resource/v1alpha2.PodScheduling", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, "k8s.io/api/resource/v1alpha2.PodSchedulingContext", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"},
} }
} }
func schema_k8sio_api_resource_v1alpha2_PodSchedulingSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { func schema_k8sio_api_resource_v1alpha2_PodSchedulingContextSpec(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{ return common.OpenAPIDefinition{
Schema: spec.Schema{ Schema: spec.Schema{
SchemaProps: spec.SchemaProps{ SchemaProps: spec.SchemaProps{
Description: "PodSchedulingSpec describes where resources for the Pod are needed.", Description: "PodSchedulingContextSpec describes where resources for the Pod are needed.",
Type: []string{"object"}, Type: []string{"object"},
Properties: map[string]spec.Schema{ Properties: map[string]spec.Schema{
"selectedNode": { "selectedNode": {
@ -41298,11 +41298,11 @@ func schema_k8sio_api_resource_v1alpha2_PodSchedulingSpec(ref common.ReferenceCa
} }
} }
func schema_k8sio_api_resource_v1alpha2_PodSchedulingStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { func schema_k8sio_api_resource_v1alpha2_PodSchedulingContextStatus(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{ return common.OpenAPIDefinition{
Schema: spec.Schema{ Schema: spec.Schema{
SchemaProps: spec.SchemaProps{ SchemaProps: spec.SchemaProps{
Description: "PodSchedulingStatus describes where resources for the Pod can be allocated.", Description: "PodSchedulingContextStatus describes where resources for the Pod can be allocated.",
Type: []string{"object"}, Type: []string{"object"},
Properties: map[string]spec.Schema{ Properties: map[string]spec.Schema{
"resourceClaims": { "resourceClaims": {

View File

@ -638,13 +638,13 @@ func AddHandlers(h printers.PrintHandler) {
_ = h.TableHandler(resourceClaimTemplateColumnDefinitions, printResourceClaimTemplate) _ = h.TableHandler(resourceClaimTemplateColumnDefinitions, printResourceClaimTemplate)
_ = h.TableHandler(resourceClaimTemplateColumnDefinitions, printResourceClaimTemplateList) _ = h.TableHandler(resourceClaimTemplateColumnDefinitions, printResourceClaimTemplateList)
podSchedulingColumnDefinitions := []metav1.TableColumnDefinition{ podSchedulingCtxColumnDefinitions := []metav1.TableColumnDefinition{
{Name: "Name", Type: "string", Format: "name", Description: metav1.ObjectMeta{}.SwaggerDoc()["name"]}, {Name: "Name", Type: "string", Format: "name", Description: metav1.ObjectMeta{}.SwaggerDoc()["name"]},
{Name: "SelectedNode", Type: "string", Description: resourcev1alpha2.PodSchedulingSpec{}.SwaggerDoc()["selectedNode"]}, {Name: "SelectedNode", Type: "string", Description: resourcev1alpha2.PodSchedulingContextSpec{}.SwaggerDoc()["selectedNode"]},
{Name: "Age", Type: "string", Description: metav1.ObjectMeta{}.SwaggerDoc()["creationTimestamp"]}, {Name: "Age", Type: "string", Description: metav1.ObjectMeta{}.SwaggerDoc()["creationTimestamp"]},
} }
_ = h.TableHandler(podSchedulingColumnDefinitions, printPodScheduling) _ = h.TableHandler(podSchedulingCtxColumnDefinitions, printPodSchedulingContext)
_ = h.TableHandler(podSchedulingColumnDefinitions, printPodSchedulingList) _ = h.TableHandler(podSchedulingCtxColumnDefinitions, printPodSchedulingContextList)
} }
// Pass ports=nil for all ports. // Pass ports=nil for all ports.
@ -2870,7 +2870,7 @@ func printResourceClaimTemplateList(list *resource.ResourceClaimTemplateList, op
return rows, nil return rows, nil
} }
func printPodScheduling(obj *resource.PodScheduling, options printers.GenerateOptions) ([]metav1.TableRow, error) { func printPodSchedulingContext(obj *resource.PodSchedulingContext, options printers.GenerateOptions) ([]metav1.TableRow, error) {
row := metav1.TableRow{ row := metav1.TableRow{
Object: runtime.RawExtension{Object: obj}, Object: runtime.RawExtension{Object: obj},
} }
@ -2879,10 +2879,10 @@ func printPodScheduling(obj *resource.PodScheduling, options printers.GenerateOp
return []metav1.TableRow{row}, nil return []metav1.TableRow{row}, nil
} }
func printPodSchedulingList(list *resource.PodSchedulingList, options printers.GenerateOptions) ([]metav1.TableRow, error) { func printPodSchedulingContextList(list *resource.PodSchedulingContextList, options printers.GenerateOptions) ([]metav1.TableRow, error) {
rows := make([]metav1.TableRow, 0, len(list.Items)) rows := make([]metav1.TableRow, 0, len(list.Items))
for i := range list.Items { for i := range list.Items {
r, err := printPodScheduling(&list.Items[i], options) r, err := printPodSchedulingContext(&list.Items[i], options)
if err != nil { if err != nil {
return nil, err return nil, err
} }

View File

@ -28,54 +28,54 @@ import (
"k8s.io/kubernetes/pkg/printers" "k8s.io/kubernetes/pkg/printers"
printersinternal "k8s.io/kubernetes/pkg/printers/internalversion" printersinternal "k8s.io/kubernetes/pkg/printers/internalversion"
printerstorage "k8s.io/kubernetes/pkg/printers/storage" printerstorage "k8s.io/kubernetes/pkg/printers/storage"
"k8s.io/kubernetes/pkg/registry/resource/podscheduling" "k8s.io/kubernetes/pkg/registry/resource/podschedulingcontext"
"sigs.k8s.io/structured-merge-diff/v4/fieldpath" "sigs.k8s.io/structured-merge-diff/v4/fieldpath"
) )
// REST implements a RESTStorage for PodSchedulings. // REST implements a RESTStorage for PodSchedulingContext.
type REST struct { type REST struct {
*genericregistry.Store *genericregistry.Store
} }
// NewREST returns a RESTStorage object that will work against PodSchedulings. // NewREST returns a RESTStorage object that will work against PodSchedulingContext.
func NewREST(optsGetter generic.RESTOptionsGetter) (*REST, *StatusREST, error) { func NewREST(optsGetter generic.RESTOptionsGetter) (*REST, *StatusREST, error) {
store := &genericregistry.Store{ store := &genericregistry.Store{
NewFunc: func() runtime.Object { return &resource.PodScheduling{} }, NewFunc: func() runtime.Object { return &resource.PodSchedulingContext{} },
NewListFunc: func() runtime.Object { return &resource.PodSchedulingList{} }, NewListFunc: func() runtime.Object { return &resource.PodSchedulingContextList{} },
PredicateFunc: podscheduling.Match, PredicateFunc: podschedulingcontext.Match,
DefaultQualifiedResource: resource.Resource("podschedulings"), DefaultQualifiedResource: resource.Resource("podschedulingcontexts"),
SingularQualifiedResource: resource.Resource("podscheduling"), SingularQualifiedResource: resource.Resource("podschedulingcontext"),
CreateStrategy: podscheduling.Strategy, CreateStrategy: podschedulingcontext.Strategy,
UpdateStrategy: podscheduling.Strategy, UpdateStrategy: podschedulingcontext.Strategy,
DeleteStrategy: podscheduling.Strategy, DeleteStrategy: podschedulingcontext.Strategy,
ReturnDeletedObject: true, ReturnDeletedObject: true,
ResetFieldsStrategy: podscheduling.Strategy, ResetFieldsStrategy: podschedulingcontext.Strategy,
TableConvertor: printerstorage.TableConvertor{TableGenerator: printers.NewTableGenerator().With(printersinternal.AddHandlers)}, TableConvertor: printerstorage.TableConvertor{TableGenerator: printers.NewTableGenerator().With(printersinternal.AddHandlers)},
} }
options := &generic.StoreOptions{RESTOptions: optsGetter, AttrFunc: podscheduling.GetAttrs} options := &generic.StoreOptions{RESTOptions: optsGetter, AttrFunc: podschedulingcontext.GetAttrs}
if err := store.CompleteWithOptions(options); err != nil { if err := store.CompleteWithOptions(options); err != nil {
return nil, nil, err return nil, nil, err
} }
statusStore := *store statusStore := *store
statusStore.UpdateStrategy = podscheduling.StatusStrategy statusStore.UpdateStrategy = podschedulingcontext.StatusStrategy
statusStore.ResetFieldsStrategy = podscheduling.StatusStrategy statusStore.ResetFieldsStrategy = podschedulingcontext.StatusStrategy
rest := &REST{store} rest := &REST{store}
return rest, &StatusREST{store: &statusStore}, nil return rest, &StatusREST{store: &statusStore}, nil
} }
// StatusREST implements the REST endpoint for changing the status of a PodScheduling. // StatusREST implements the REST endpoint for changing the status of a PodSchedulingContext.
type StatusREST struct { type StatusREST struct {
store *genericregistry.Store store *genericregistry.Store
} }
// New creates a new PodScheduling object. // New creates a new PodSchedulingContext object.
func (r *StatusREST) New() runtime.Object { func (r *StatusREST) New() runtime.Object {
return &resource.PodScheduling{} return &resource.PodSchedulingContext{}
} }
func (r *StatusREST) Destroy() { func (r *StatusREST) Destroy() {

View File

@ -41,7 +41,7 @@ func newStorage(t *testing.T) (*REST, *StatusREST, *etcd3testing.EtcdTestServer)
StorageConfig: etcdStorage, StorageConfig: etcdStorage,
Decorator: generic.UndecoratedStorage, Decorator: generic.UndecoratedStorage,
DeleteCollectionWorkers: 1, DeleteCollectionWorkers: 1,
ResourcePrefix: "podschedulings", ResourcePrefix: "podschedulingcontexts",
} }
podSchedulingStorage, statusStorage, err := NewREST(restOptions) podSchedulingStorage, statusStorage, err := NewREST(restOptions)
if err != nil { if err != nil {
@ -50,18 +50,18 @@ func newStorage(t *testing.T) (*REST, *StatusREST, *etcd3testing.EtcdTestServer)
return podSchedulingStorage, statusStorage, server return podSchedulingStorage, statusStorage, server
} }
func validNewPodScheduling(name, ns string) *resource.PodScheduling { func validNewPodSchedulingContexts(name, ns string) *resource.PodSchedulingContext {
scheduling := &resource.PodScheduling{ schedulingCtx := &resource.PodSchedulingContext{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: name, Name: name,
Namespace: ns, Namespace: ns,
}, },
Spec: resource.PodSchedulingSpec{ Spec: resource.PodSchedulingContextSpec{
SelectedNode: "worker", SelectedNode: "worker",
}, },
Status: resource.PodSchedulingStatus{}, Status: resource.PodSchedulingContextStatus{},
} }
return scheduling return schedulingCtx
} }
func TestCreate(t *testing.T) { func TestCreate(t *testing.T) {
@ -69,13 +69,13 @@ func TestCreate(t *testing.T) {
defer server.Terminate(t) defer server.Terminate(t)
defer storage.Store.DestroyFunc() defer storage.Store.DestroyFunc()
test := genericregistrytest.New(t, storage.Store) test := genericregistrytest.New(t, storage.Store)
scheduling := validNewPodScheduling("foo", metav1.NamespaceDefault) schedulingCtx := validNewPodSchedulingContexts("foo", metav1.NamespaceDefault)
scheduling.ObjectMeta = metav1.ObjectMeta{} schedulingCtx.ObjectMeta = metav1.ObjectMeta{}
test.TestCreate( test.TestCreate(
// valid // valid
scheduling, schedulingCtx,
// invalid // invalid
&resource.PodScheduling{ &resource.PodSchedulingContext{
ObjectMeta: metav1.ObjectMeta{Name: "*BadName!"}, ObjectMeta: metav1.ObjectMeta{Name: "*BadName!"},
}, },
) )
@ -88,10 +88,10 @@ func TestUpdate(t *testing.T) {
test := genericregistrytest.New(t, storage.Store) test := genericregistrytest.New(t, storage.Store)
test.TestUpdate( test.TestUpdate(
// valid // valid
validNewPodScheduling("foo", metav1.NamespaceDefault), validNewPodSchedulingContexts("foo", metav1.NamespaceDefault),
// updateFunc // updateFunc
func(obj runtime.Object) runtime.Object { func(obj runtime.Object) runtime.Object {
object := obj.(*resource.PodScheduling) object := obj.(*resource.PodSchedulingContext)
if object.Labels == nil { if object.Labels == nil {
object.Labels = map[string]string{} object.Labels = map[string]string{}
} }
@ -106,7 +106,7 @@ func TestDelete(t *testing.T) {
defer server.Terminate(t) defer server.Terminate(t)
defer storage.Store.DestroyFunc() defer storage.Store.DestroyFunc()
test := genericregistrytest.New(t, storage.Store).ReturnDeletedObject() test := genericregistrytest.New(t, storage.Store).ReturnDeletedObject()
test.TestDelete(validNewPodScheduling("foo", metav1.NamespaceDefault)) test.TestDelete(validNewPodSchedulingContexts("foo", metav1.NamespaceDefault))
} }
func TestGet(t *testing.T) { func TestGet(t *testing.T) {
@ -114,7 +114,7 @@ func TestGet(t *testing.T) {
defer server.Terminate(t) defer server.Terminate(t)
defer storage.Store.DestroyFunc() defer storage.Store.DestroyFunc()
test := genericregistrytest.New(t, storage.Store) test := genericregistrytest.New(t, storage.Store)
test.TestGet(validNewPodScheduling("foo", metav1.NamespaceDefault)) test.TestGet(validNewPodSchedulingContexts("foo", metav1.NamespaceDefault))
} }
func TestList(t *testing.T) { func TestList(t *testing.T) {
@ -122,7 +122,7 @@ func TestList(t *testing.T) {
defer server.Terminate(t) defer server.Terminate(t)
defer storage.Store.DestroyFunc() defer storage.Store.DestroyFunc()
test := genericregistrytest.New(t, storage.Store) test := genericregistrytest.New(t, storage.Store)
test.TestList(validNewPodScheduling("foo", metav1.NamespaceDefault)) test.TestList(validNewPodSchedulingContexts("foo", metav1.NamespaceDefault))
} }
func TestWatch(t *testing.T) { func TestWatch(t *testing.T) {
@ -131,7 +131,7 @@ func TestWatch(t *testing.T) {
defer storage.Store.DestroyFunc() defer storage.Store.DestroyFunc()
test := genericregistrytest.New(t, storage.Store) test := genericregistrytest.New(t, storage.Store)
test.TestWatch( test.TestWatch(
validNewPodScheduling("foo", metav1.NamespaceDefault), validNewPodSchedulingContexts("foo", metav1.NamespaceDefault),
// matching labels // matching labels
[]labels.Set{}, []labels.Set{},
// not matching labels // not matching labels
@ -156,19 +156,19 @@ func TestUpdateStatus(t *testing.T) {
ctx := genericapirequest.NewDefaultContext() ctx := genericapirequest.NewDefaultContext()
key, _ := storage.KeyFunc(ctx, "foo") key, _ := storage.KeyFunc(ctx, "foo")
schedulingStart := validNewPodScheduling("foo", metav1.NamespaceDefault) schedulingStart := validNewPodSchedulingContexts("foo", metav1.NamespaceDefault)
err := storage.Storage.Create(ctx, key, schedulingStart, nil, 0, false) err := storage.Storage.Create(ctx, key, schedulingStart, nil, 0, false)
if err != nil { if err != nil {
t.Fatalf("Unexpected error: %v", err) t.Fatalf("Unexpected error: %v", err)
} }
scheduling := schedulingStart.DeepCopy() schedulingCtx := schedulingStart.DeepCopy()
scheduling.Status.ResourceClaims = append(scheduling.Status.ResourceClaims, schedulingCtx.Status.ResourceClaims = append(schedulingCtx.Status.ResourceClaims,
resource.ResourceClaimSchedulingStatus{ resource.ResourceClaimSchedulingStatus{
Name: "my-claim", Name: "my-claim",
}, },
) )
_, _, err = statusStorage.Update(ctx, scheduling.Name, rest.DefaultUpdatedObjectInfo(scheduling), rest.ValidateAllObjectFunc, rest.ValidateAllObjectUpdateFunc, false, &metav1.UpdateOptions{}) _, _, err = statusStorage.Update(ctx, schedulingCtx.Name, rest.DefaultUpdatedObjectInfo(schedulingCtx), rest.ValidateAllObjectFunc, rest.ValidateAllObjectUpdateFunc, false, &metav1.UpdateOptions{})
if err != nil { if err != nil {
t.Fatalf("Unexpected error: %v", err) t.Fatalf("Unexpected error: %v", err)
} }
@ -176,9 +176,9 @@ func TestUpdateStatus(t *testing.T) {
if err != nil { if err != nil {
t.Errorf("unexpected error: %v", err) t.Errorf("unexpected error: %v", err)
} }
schedulingOut := obj.(*resource.PodScheduling) schedulingOut := obj.(*resource.PodSchedulingContext)
// only compare relevant changes b/c of difference in metadata // only compare relevant changes b/c of difference in metadata
if !apiequality.Semantic.DeepEqual(scheduling.Status, schedulingOut.Status) { if !apiequality.Semantic.DeepEqual(schedulingCtx.Status, schedulingOut.Status) {
t.Errorf("unexpected object: %s", diff.ObjectDiff(scheduling.Status, schedulingOut.Status)) t.Errorf("unexpected object: %s", diff.ObjectDiff(schedulingCtx.Status, schedulingOut.Status))
} }
} }
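
The status update exercised here goes through the separate status storage, i.e. the /status subresource served by StatusREST. A rough client-go equivalent is sketched below; it assumes the generated typed client exposes UpdateStatus for this subresource, and the object, namespace, claim and node names are invented for illustration.

package main

import (
	"context"

	resourcev1alpha2 "k8s.io/api/resource/v1alpha2"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	clientset := kubernetes.NewForConfigOrDie(cfg)
	ctx := context.Background()

	// Fetch the object and record per-claim scheduling information, mirroring
	// what the test above does through the status storage.
	schedulingCtx, err := clientset.ResourceV1alpha2().PodSchedulingContexts("default").Get(ctx, "foo", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}
	schedulingCtx.Status.ResourceClaims = append(schedulingCtx.Status.ResourceClaims,
		resourcev1alpha2.ResourceClaimSchedulingStatus{Name: "my-claim", UnsuitableNodes: []string{"worker2"}},
	)
	// UpdateStatus targets the status subresource; spec changes sent this way
	// are discarded by the status strategy.
	if _, err := clientset.ResourceV1alpha2().PodSchedulingContexts("default").UpdateStatus(ctx, schedulingCtx, metav1.UpdateOptions{}); err != nil {
		panic(err)
	}
}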

View File

@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License. limitations under the License.
*/ */
package podscheduling package podschedulingcontext
import ( import (
"context" "context"
@ -33,7 +33,7 @@ import (
"sigs.k8s.io/structured-merge-diff/v4/fieldpath" "sigs.k8s.io/structured-merge-diff/v4/fieldpath"
) )
// podSchedulingStrategy implements behavior for PodScheduling objects // podSchedulingStrategy implements behavior for PodSchedulingContext objects
type podSchedulingStrategy struct { type podSchedulingStrategy struct {
runtime.ObjectTyper runtime.ObjectTyper
names.NameGenerator names.NameGenerator
@ -48,7 +48,7 @@ func (podSchedulingStrategy) NamespaceScoped() bool {
} }
// GetResetFields returns the set of fields that get reset by the strategy and // GetResetFields returns the set of fields that get reset by the strategy and
// should not be modified by the user. For a new PodScheduling that is the // should not be modified by the user. For a new PodSchedulingContext that is the
// status. // status.
func (podSchedulingStrategy) GetResetFields() map[fieldpath.APIVersion]*fieldpath.Set { func (podSchedulingStrategy) GetResetFields() map[fieldpath.APIVersion]*fieldpath.Set {
fields := map[fieldpath.APIVersion]*fieldpath.Set{ fields := map[fieldpath.APIVersion]*fieldpath.Set{
@ -61,14 +61,14 @@ func (podSchedulingStrategy) GetResetFields() map[fieldpath.APIVersion]*fieldpat
} }
func (podSchedulingStrategy) PrepareForCreate(ctx context.Context, obj runtime.Object) { func (podSchedulingStrategy) PrepareForCreate(ctx context.Context, obj runtime.Object) {
scheduling := obj.(*resource.PodScheduling) scheduling := obj.(*resource.PodSchedulingContext)
// Status must not be set by user on create. // Status must not be set by user on create.
scheduling.Status = resource.PodSchedulingStatus{} scheduling.Status = resource.PodSchedulingContextStatus{}
} }
func (podSchedulingStrategy) Validate(ctx context.Context, obj runtime.Object) field.ErrorList { func (podSchedulingStrategy) Validate(ctx context.Context, obj runtime.Object) field.ErrorList {
scheduling := obj.(*resource.PodScheduling) scheduling := obj.(*resource.PodSchedulingContext)
return validation.ValidatePodScheduling(scheduling) return validation.ValidatePodSchedulingContexts(scheduling)
} }
func (podSchedulingStrategy) WarningsOnCreate(ctx context.Context, obj runtime.Object) []string { func (podSchedulingStrategy) WarningsOnCreate(ctx context.Context, obj runtime.Object) []string {
@ -83,16 +83,16 @@ func (podSchedulingStrategy) AllowCreateOnUpdate() bool {
} }
func (podSchedulingStrategy) PrepareForUpdate(ctx context.Context, obj, old runtime.Object) { func (podSchedulingStrategy) PrepareForUpdate(ctx context.Context, obj, old runtime.Object) {
newScheduling := obj.(*resource.PodScheduling) newScheduling := obj.(*resource.PodSchedulingContext)
oldScheduling := old.(*resource.PodScheduling) oldScheduling := old.(*resource.PodSchedulingContext)
newScheduling.Status = oldScheduling.Status newScheduling.Status = oldScheduling.Status
} }
func (podSchedulingStrategy) ValidateUpdate(ctx context.Context, obj, old runtime.Object) field.ErrorList { func (podSchedulingStrategy) ValidateUpdate(ctx context.Context, obj, old runtime.Object) field.ErrorList {
newScheduling := obj.(*resource.PodScheduling) newScheduling := obj.(*resource.PodSchedulingContext)
oldScheduling := old.(*resource.PodScheduling) oldScheduling := old.(*resource.PodSchedulingContext)
errorList := validation.ValidatePodScheduling(newScheduling) errorList := validation.ValidatePodSchedulingContexts(newScheduling)
return append(errorList, validation.ValidatePodSchedulingUpdate(newScheduling, oldScheduling)...) return append(errorList, validation.ValidatePodSchedulingContextUpdate(newScheduling, oldScheduling)...)
} }
func (podSchedulingStrategy) WarningsOnUpdate(ctx context.Context, obj, old runtime.Object) []string { func (podSchedulingStrategy) WarningsOnUpdate(ctx context.Context, obj, old runtime.Object) []string {
@ -122,15 +122,15 @@ func (podSchedulingStatusStrategy) GetResetFields() map[fieldpath.APIVersion]*fi
} }
func (podSchedulingStatusStrategy) PrepareForUpdate(ctx context.Context, obj, old runtime.Object) { func (podSchedulingStatusStrategy) PrepareForUpdate(ctx context.Context, obj, old runtime.Object) {
newScheduling := obj.(*resource.PodScheduling) newScheduling := obj.(*resource.PodSchedulingContext)
oldScheduling := old.(*resource.PodScheduling) oldScheduling := old.(*resource.PodSchedulingContext)
newScheduling.Spec = oldScheduling.Spec newScheduling.Spec = oldScheduling.Spec
} }
func (podSchedulingStatusStrategy) ValidateUpdate(ctx context.Context, obj, old runtime.Object) field.ErrorList { func (podSchedulingStatusStrategy) ValidateUpdate(ctx context.Context, obj, old runtime.Object) field.ErrorList {
newScheduling := obj.(*resource.PodScheduling) newScheduling := obj.(*resource.PodSchedulingContext)
oldScheduling := old.(*resource.PodScheduling) oldScheduling := old.(*resource.PodSchedulingContext)
return validation.ValidatePodSchedulingStatusUpdate(newScheduling, oldScheduling) return validation.ValidatePodSchedulingContextStatusUpdate(newScheduling, oldScheduling)
} }
// WarningsOnUpdate returns warnings for the given update. // WarningsOnUpdate returns warnings for the given update.
@ -149,15 +149,15 @@ func Match(label labels.Selector, field fields.Selector) storage.SelectionPredic
// GetAttrs returns labels and fields of a given object for filtering purposes. // GetAttrs returns labels and fields of a given object for filtering purposes.
func GetAttrs(obj runtime.Object) (labels.Set, fields.Set, error) { func GetAttrs(obj runtime.Object) (labels.Set, fields.Set, error) {
scheduling, ok := obj.(*resource.PodScheduling) scheduling, ok := obj.(*resource.PodSchedulingContext)
if !ok { if !ok {
return nil, nil, errors.New("not a PodScheduling") return nil, nil, errors.New("not a PodSchedulingContext")
} }
return labels.Set(scheduling.Labels), toSelectableFields(scheduling), nil return labels.Set(scheduling.Labels), toSelectableFields(scheduling), nil
} }
// toSelectableFields returns a field set that represents the object // toSelectableFields returns a field set that represents the object
func toSelectableFields(scheduling *resource.PodScheduling) fields.Set { func toSelectableFields(scheduling *resource.PodSchedulingContext) fields.Set {
fields := generic.ObjectMetaFieldsSet(&scheduling.ObjectMeta, true) fields := generic.ObjectMetaFieldsSet(&scheduling.ObjectMeta, true)
return fields return fields
} }
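
Taken together, the two strategies make spec and status writable only through their own endpoints: the create and update strategies overwrite any user-supplied status, and the status strategy overwrites any spec change. A conceptual restatement in terms of the published types (not the registry code itself), with hypothetical function names:

package example

import resourcev1alpha2 "k8s.io/api/resource/v1alpha2"

// prepareSpecUpdate models the main endpoint: user-supplied status is discarded.
func prepareSpecUpdate(newObj, oldObj *resourcev1alpha2.PodSchedulingContext) {
	newObj.Status = oldObj.Status
}

// prepareStatusUpdate models the /status endpoint: spec edits are discarded.
func prepareStatusUpdate(newObj, oldObj *resourcev1alpha2.PodSchedulingContext) {
	newObj.Spec = oldObj.Spec
}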

View File

@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License. limitations under the License.
*/ */
package podscheduling package podschedulingcontext
import ( import (
"testing" "testing"
@ -24,31 +24,31 @@ import (
"k8s.io/kubernetes/pkg/apis/resource" "k8s.io/kubernetes/pkg/apis/resource"
) )
var podScheduling = &resource.PodScheduling{ var schedulingCtx = &resource.PodSchedulingContext{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: "valid-pod", Name: "valid-pod",
Namespace: "default", Namespace: "default",
}, },
Spec: resource.PodSchedulingSpec{ Spec: resource.PodSchedulingContextSpec{
SelectedNode: "worker", SelectedNode: "worker",
}, },
} }
func TestPodSchedulingStrategy(t *testing.T) { func TestPodSchedulingStrategy(t *testing.T) {
if !Strategy.NamespaceScoped() { if !Strategy.NamespaceScoped() {
t.Errorf("PodScheduling must be namespace scoped") t.Errorf("PodSchedulingContext must be namespace scoped")
} }
if Strategy.AllowCreateOnUpdate() { if Strategy.AllowCreateOnUpdate() {
t.Errorf("PodScheduling should not allow create on update") t.Errorf("PodSchedulingContext should not allow create on update")
} }
} }
func TestPodSchedulingStrategyCreate(t *testing.T) { func TestPodSchedulingStrategyCreate(t *testing.T) {
ctx := genericapirequest.NewDefaultContext() ctx := genericapirequest.NewDefaultContext()
podScheduling := podScheduling.DeepCopy() schedulingCtx := schedulingCtx.DeepCopy()
Strategy.PrepareForCreate(ctx, podScheduling) Strategy.PrepareForCreate(ctx, schedulingCtx)
errs := Strategy.Validate(ctx, podScheduling) errs := Strategy.Validate(ctx, schedulingCtx)
if len(errs) != 0 { if len(errs) != 0 {
t.Errorf("unexpected error validating for create %v", errs) t.Errorf("unexpected error validating for create %v", errs)
} }
@ -57,12 +57,12 @@ func TestPodSchedulingStrategyCreate(t *testing.T) {
func TestPodSchedulingStrategyUpdate(t *testing.T) { func TestPodSchedulingStrategyUpdate(t *testing.T) {
t.Run("no-changes-okay", func(t *testing.T) { t.Run("no-changes-okay", func(t *testing.T) {
ctx := genericapirequest.NewDefaultContext() ctx := genericapirequest.NewDefaultContext()
podScheduling := podScheduling.DeepCopy() schedulingCtx := schedulingCtx.DeepCopy()
newPodScheduling := podScheduling.DeepCopy() newSchedulingCtx := schedulingCtx.DeepCopy()
newPodScheduling.ResourceVersion = "4" newSchedulingCtx.ResourceVersion = "4"
Strategy.PrepareForUpdate(ctx, newPodScheduling, podScheduling) Strategy.PrepareForUpdate(ctx, newSchedulingCtx, schedulingCtx)
errs := Strategy.ValidateUpdate(ctx, newPodScheduling, podScheduling) errs := Strategy.ValidateUpdate(ctx, newSchedulingCtx, schedulingCtx)
if len(errs) != 0 { if len(errs) != 0 {
t.Errorf("unexpected validation errors: %v", errs) t.Errorf("unexpected validation errors: %v", errs)
} }
@ -70,13 +70,13 @@ func TestPodSchedulingStrategyUpdate(t *testing.T) {
t.Run("name-change-not-allowed", func(t *testing.T) { t.Run("name-change-not-allowed", func(t *testing.T) {
ctx := genericapirequest.NewDefaultContext() ctx := genericapirequest.NewDefaultContext()
podScheduling := podScheduling.DeepCopy() schedulingCtx := schedulingCtx.DeepCopy()
newPodScheduling := podScheduling.DeepCopy() newSchedulingCtx := schedulingCtx.DeepCopy()
newPodScheduling.Name = "valid-claim-2" newSchedulingCtx.Name = "valid-claim-2"
newPodScheduling.ResourceVersion = "4" newSchedulingCtx.ResourceVersion = "4"
Strategy.PrepareForUpdate(ctx, newPodScheduling, podScheduling) Strategy.PrepareForUpdate(ctx, newSchedulingCtx, schedulingCtx)
errs := Strategy.ValidateUpdate(ctx, newPodScheduling, podScheduling) errs := Strategy.ValidateUpdate(ctx, newSchedulingCtx, schedulingCtx)
if len(errs) == 0 { if len(errs) == 0 {
t.Errorf("expected a validation error") t.Errorf("expected a validation error")
} }

View File

@ -24,7 +24,7 @@ import (
serverstorage "k8s.io/apiserver/pkg/server/storage" serverstorage "k8s.io/apiserver/pkg/server/storage"
"k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/api/legacyscheme"
"k8s.io/kubernetes/pkg/apis/resource" "k8s.io/kubernetes/pkg/apis/resource"
podschedulingstore "k8s.io/kubernetes/pkg/registry/resource/podscheduling/storage" podschedulingcontextsstore "k8s.io/kubernetes/pkg/registry/resource/podschedulingcontext/storage"
resourceclaimstore "k8s.io/kubernetes/pkg/registry/resource/resourceclaim/storage" resourceclaimstore "k8s.io/kubernetes/pkg/registry/resource/resourceclaim/storage"
resourceclaimtemplatestore "k8s.io/kubernetes/pkg/registry/resource/resourceclaimtemplate/storage" resourceclaimtemplatestore "k8s.io/kubernetes/pkg/registry/resource/resourceclaimtemplate/storage"
resourceclassstore "k8s.io/kubernetes/pkg/registry/resource/resourceclass/storage" resourceclassstore "k8s.io/kubernetes/pkg/registry/resource/resourceclass/storage"
@ -74,8 +74,8 @@ func (p RESTStorageProvider) v1alpha2Storage(apiResourceConfigSource serverstora
storage[resource] = resourceClaimTemplateStorage storage[resource] = resourceClaimTemplateStorage
} }
if resource := "podschedulings"; apiResourceConfigSource.ResourceEnabled(resourcev1alpha2.SchemeGroupVersion.WithResource(resource)) { if resource := "podschedulingcontexts"; apiResourceConfigSource.ResourceEnabled(resourcev1alpha2.SchemeGroupVersion.WithResource(resource)) {
podSchedulingStorage, podSchedulingStatusStorage, err := podschedulingstore.NewREST(restOptionsGetter) podSchedulingStorage, podSchedulingStatusStorage, err := podschedulingcontextsstore.NewREST(restOptionsGetter)
if err != nil { if err != nil {
return nil, err return nil, err
} }

View File

@ -378,10 +378,10 @@ func addAllEventHandlers(
informerFactory.Core().V1().PersistentVolumeClaims().Informer().AddEventHandler( informerFactory.Core().V1().PersistentVolumeClaims().Informer().AddEventHandler(
buildEvtResHandler(at, framework.PersistentVolumeClaim, "Pvc"), buildEvtResHandler(at, framework.PersistentVolumeClaim, "Pvc"),
) )
case framework.PodScheduling: case framework.PodSchedulingContext:
if utilfeature.DefaultFeatureGate.Enabled(features.DynamicResourceAllocation) { if utilfeature.DefaultFeatureGate.Enabled(features.DynamicResourceAllocation) {
_, _ = informerFactory.Resource().V1alpha2().PodSchedulings().Informer().AddEventHandler( _, _ = informerFactory.Resource().V1alpha2().PodSchedulingContexts().Informer().AddEventHandler(
buildEvtResHandler(at, framework.PodScheduling, "PodScheduling"), buildEvtResHandler(at, framework.PodSchedulingContext, "PodSchedulingContext"),
) )
} }
case framework.ResourceClaim: case framework.ResourceClaim:
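
Outside the scheduler, the same informer machinery can be wired up directly with client-go. A sketch with a hypothetical helper name; it simply logs the add/update events that buildEvtResHandler above turns into scheduling-queue activity:

package example

import (
	"fmt"
	"time"

	resourcev1alpha2 "k8s.io/api/resource/v1alpha2"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
)

// watchPodSchedulingContexts wires up the same shared informer the scheduler
// uses and logs add/update events until stop is closed.
func watchPodSchedulingContexts(clientset kubernetes.Interface, stop <-chan struct{}) {
	factory := informers.NewSharedInformerFactory(clientset, 10*time.Minute)
	informer := factory.Resource().V1alpha2().PodSchedulingContexts().Informer()

	// AddEventHandler returns a registration handle and an error; both are
	// ignored here, as in addAllEventHandlers above.
	_, _ = informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			schedulingCtx := obj.(*resourcev1alpha2.PodSchedulingContext)
			fmt.Println("added", schedulingCtx.Namespace+"/"+schedulingCtx.Name)
		},
		UpdateFunc: func(_, newObj interface{}) {
			schedulingCtx := newObj.(*resourcev1alpha2.PodSchedulingContext)
			fmt.Println("updated", schedulingCtx.Namespace+"/"+schedulingCtx.Name, "selectedNode:", schedulingCtx.Spec.SelectedNode)
		},
	})

	factory.Start(stop)
	factory.WaitForCacheSync(stop)
	<-stop
}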

View File

@ -74,14 +74,14 @@ type stateData struct {
// protected by the mutex. Used by PostFilter. // protected by the mutex. Used by PostFilter.
unavailableClaims sets.Int unavailableClaims sets.Int
// A pointer to the PodScheduling object for the pod, if one exists. // A pointer to the PodSchedulingContext object for the pod, if one exists.
// Gets set on demand. // Gets set on demand.
// //
// Conceptually, this object belongs into the scheduler framework // Conceptually, this object belongs into the scheduler framework
// where it might get shared by different plugins. But in practice, // where it might get shared by different plugins. But in practice,
// it is currently only used by dynamic provisioning and thus // it is currently only used by dynamic provisioning and thus
// managed entirely here. // managed entirely here.
podScheduling *resourcev1alpha2.PodScheduling schedulingCtx *resourcev1alpha2.PodSchedulingContext
// podSchedulingDirty is true if the current copy was locally modified. // podSchedulingDirty is true if the current copy was locally modified.
podSchedulingDirty bool podSchedulingDirty bool
@ -112,23 +112,23 @@ func (d *stateData) updateClaimStatus(ctx context.Context, clientset kubernetes.
return nil return nil
} }
// initializePodScheduling can be called concurrently. It returns an existing PodScheduling // initializePodSchedulingContexts can be called concurrently. It returns an existing PodSchedulingContext
// object if there is one already, retrieves one if not, or as a last resort creates // object if there is one already, retrieves one if not, or as a last resort creates
// one from scratch. // one from scratch.
func (d *stateData) initializePodScheduling(ctx context.Context, pod *v1.Pod, podSchedulingLister resourcev1alpha2listers.PodSchedulingLister) (*resourcev1alpha2.PodScheduling, error) { func (d *stateData) initializePodSchedulingContexts(ctx context.Context, pod *v1.Pod, podSchedulingContextLister resourcev1alpha2listers.PodSchedulingContextLister) (*resourcev1alpha2.PodSchedulingContext, error) {
// TODO (#113701): check if this mutex locking can be avoided by calling initializePodScheduling during PreFilter. // TODO (#113701): check if this mutex locking can be avoided by calling initializePodSchedulingContexts during PreFilter.
d.mutex.Lock() d.mutex.Lock()
defer d.mutex.Unlock() defer d.mutex.Unlock()
if d.podScheduling != nil { if d.schedulingCtx != nil {
return d.podScheduling, nil return d.schedulingCtx, nil
} }
podScheduling, err := podSchedulingLister.PodSchedulings(pod.Namespace).Get(pod.Name) schedulingCtx, err := podSchedulingContextLister.PodSchedulingContexts(pod.Namespace).Get(pod.Name)
switch { switch {
case apierrors.IsNotFound(err): case apierrors.IsNotFound(err):
controller := true controller := true
podScheduling = &resourcev1alpha2.PodScheduling{ schedulingCtx = &resourcev1alpha2.PodSchedulingContext{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: pod.Name, Name: pod.Name,
Namespace: pod.Namespace, Namespace: pod.Namespace,
@ -148,56 +148,56 @@ func (d *stateData) initializePodScheduling(ctx context.Context, pod *v1.Pod, po
return nil, err return nil, err
default: default:
// We have an object, but it might be obsolete. // We have an object, but it might be obsolete.
if !metav1.IsControlledBy(podScheduling, pod) { if !metav1.IsControlledBy(schedulingCtx, pod) {
return nil, fmt.Errorf("PodScheduling object with UID %s is not owned by Pod %s/%s", podScheduling.UID, pod.Namespace, pod.Name) return nil, fmt.Errorf("PodSchedulingContext object with UID %s is not owned by Pod %s/%s", schedulingCtx.UID, pod.Namespace, pod.Name)
} }
} }
d.podScheduling = podScheduling d.schedulingCtx = schedulingCtx
return podScheduling, err return schedulingCtx, err
} }
// publishPodScheduling creates or updates the PodScheduling object. // publishPodSchedulingContexts creates or updates the PodSchedulingContext object.
func (d *stateData) publishPodScheduling(ctx context.Context, clientset kubernetes.Interface, podScheduling *resourcev1alpha2.PodScheduling) error { func (d *stateData) publishPodSchedulingContexts(ctx context.Context, clientset kubernetes.Interface, schedulingCtx *resourcev1alpha2.PodSchedulingContext) error {
d.mutex.Lock() d.mutex.Lock()
defer d.mutex.Unlock() defer d.mutex.Unlock()
var err error var err error
logger := klog.FromContext(ctx) logger := klog.FromContext(ctx)
msg := "Updating PodScheduling" msg := "Updating PodSchedulingContext"
if podScheduling.UID == "" { if schedulingCtx.UID == "" {
msg = "Creating PodScheduling" msg = "Creating PodSchedulingContext"
} }
if loggerV := logger.V(6); loggerV.Enabled() { if loggerV := logger.V(6); loggerV.Enabled() {
// At a high enough log level, dump the entire object. // At a high enough log level, dump the entire object.
loggerV.Info(msg, "podschedulingDump", podScheduling) loggerV.Info(msg, "podSchedulingCtxDump", schedulingCtx)
} else { } else {
logger.V(5).Info(msg, "podscheduling", klog.KObj(podScheduling)) logger.V(5).Info(msg, "podSchedulingCtx", klog.KObj(schedulingCtx))
} }
if podScheduling.UID == "" { if schedulingCtx.UID == "" {
podScheduling, err = clientset.ResourceV1alpha2().PodSchedulings(podScheduling.Namespace).Create(ctx, podScheduling, metav1.CreateOptions{}) schedulingCtx, err = clientset.ResourceV1alpha2().PodSchedulingContexts(schedulingCtx.Namespace).Create(ctx, schedulingCtx, metav1.CreateOptions{})
} else { } else {
// TODO (#113700): patch here to avoid racing with drivers which update the status. // TODO (#113700): patch here to avoid racing with drivers which update the status.
podScheduling, err = clientset.ResourceV1alpha2().PodSchedulings(podScheduling.Namespace).Update(ctx, podScheduling, metav1.UpdateOptions{}) schedulingCtx, err = clientset.ResourceV1alpha2().PodSchedulingContexts(schedulingCtx.Namespace).Update(ctx, schedulingCtx, metav1.UpdateOptions{})
} }
if err != nil { if err != nil {
return err return err
} }
d.podScheduling = podScheduling d.schedulingCtx = schedulingCtx
d.podSchedulingDirty = false d.podSchedulingDirty = false
return nil return nil
} }
// storePodScheduling replaces the pod scheduling object in the state. // storePodSchedulingContexts replaces the PodSchedulingContext object in the state.
func (d *stateData) storePodScheduling(podScheduling *resourcev1alpha2.PodScheduling) { func (d *stateData) storePodSchedulingContexts(schedulingCtx *resourcev1alpha2.PodSchedulingContext) {
d.mutex.Lock() d.mutex.Lock()
defer d.mutex.Unlock() defer d.mutex.Unlock()
d.podScheduling = podScheduling d.schedulingCtx = schedulingCtx
d.podSchedulingDirty = true d.podSchedulingDirty = true
} }
func statusForClaim(podScheduling *resourcev1alpha2.PodScheduling, podClaimName string) *resourcev1alpha2.ResourceClaimSchedulingStatus { func statusForClaim(schedulingCtx *resourcev1alpha2.PodSchedulingContext, podClaimName string) *resourcev1alpha2.ResourceClaimSchedulingStatus {
for _, status := range podScheduling.Status.ResourceClaims { for _, status := range schedulingCtx.Status.ResourceClaims {
if status.Name == podClaimName { if status.Name == podClaimName {
return &status return &status
} }
@ -207,11 +207,11 @@ func statusForClaim(podScheduling *resourcev1alpha2.PodScheduling, podClaimName
// dynamicResources is a plugin that ensures that ResourceClaims are allocated. // dynamicResources is a plugin that ensures that ResourceClaims are allocated.
type dynamicResources struct { type dynamicResources struct {
enabled bool enabled bool
clientset kubernetes.Interface clientset kubernetes.Interface
claimLister resourcev1alpha2listers.ResourceClaimLister claimLister resourcev1alpha2listers.ResourceClaimLister
classLister resourcev1alpha2listers.ResourceClassLister classLister resourcev1alpha2listers.ResourceClassLister
podSchedulingLister resourcev1alpha2listers.PodSchedulingLister podSchedulingContextLister resourcev1alpha2listers.PodSchedulingContextLister
} }
// New initializes a new plugin and returns it. // New initializes a new plugin and returns it.
@ -222,11 +222,11 @@ func New(plArgs runtime.Object, fh framework.Handle, fts feature.Features) (fram
} }
return &dynamicResources{ return &dynamicResources{
enabled: true, enabled: true,
clientset: fh.ClientSet(), clientset: fh.ClientSet(),
claimLister: fh.SharedInformerFactory().Resource().V1alpha2().ResourceClaims().Lister(), claimLister: fh.SharedInformerFactory().Resource().V1alpha2().ResourceClaims().Lister(),
classLister: fh.SharedInformerFactory().Resource().V1alpha2().ResourceClasses().Lister(), classLister: fh.SharedInformerFactory().Resource().V1alpha2().ResourceClasses().Lister(),
podSchedulingLister: fh.SharedInformerFactory().Resource().V1alpha2().PodSchedulings().Lister(), podSchedulingContextLister: fh.SharedInformerFactory().Resource().V1alpha2().PodSchedulingContexts().Lister(),
}, nil }, nil
} }
@ -257,7 +257,7 @@ func (pl *dynamicResources) EventsToRegister() []framework.ClusterEvent {
// may be schedulable. // may be schedulable.
// TODO (#113702): can we change this so that such an event does not trigger *all* pods? // TODO (#113702): can we change this so that such an event does not trigger *all* pods?
// Yes: https://github.com/kubernetes/kubernetes/blob/abcbaed0784baf5ed2382aae9705a8918f2daa18/pkg/scheduler/eventhandlers.go#L70 // Yes: https://github.com/kubernetes/kubernetes/blob/abcbaed0784baf5ed2382aae9705a8918f2daa18/pkg/scheduler/eventhandlers.go#L70
{Resource: framework.PodScheduling, ActionType: framework.Add | framework.Update}, {Resource: framework.PodSchedulingContext, ActionType: framework.Add | framework.Update},
// A resource might depend on node labels for topology filtering. // A resource might depend on node labels for topology filtering.
// A new or updated node may make pods schedulable. // A new or updated node may make pods schedulable.
{Resource: framework.Node, ActionType: framework.Add | framework.UpdateNodeLabel}, {Resource: framework.Node, ActionType: framework.Add | framework.UpdateNodeLabel},
@ -436,11 +436,11 @@ func (pl *dynamicResources) Filter(ctx context.Context, cs *framework.CycleState
} }
// Now we need information from drivers. // Now we need information from drivers.
podScheduling, err := state.initializePodScheduling(ctx, pod, pl.podSchedulingLister) schedulingCtx, err := state.initializePodSchedulingContexts(ctx, pod, pl.podSchedulingContextLister)
if err != nil { if err != nil {
return statusError(logger, err) return statusError(logger, err)
} }
status := statusForClaim(podScheduling, pod.Spec.ResourceClaims[index].Name) status := statusForClaim(schedulingCtx, pod.Spec.ResourceClaims[index].Name)
if status != nil { if status != nil {
for _, unsuitableNode := range status.UnsuitableNodes { for _, unsuitableNode := range status.UnsuitableNodes {
if node.Name == unsuitableNode { if node.Name == unsuitableNode {
@ -530,7 +530,7 @@ func (pl *dynamicResources) PreScore(ctx context.Context, cs *framework.CycleSta
} }
logger := klog.FromContext(ctx) logger := klog.FromContext(ctx)
podScheduling, err := state.initializePodScheduling(ctx, pod, pl.podSchedulingLister) schedulingCtx, err := state.initializePodSchedulingContexts(ctx, pod, pl.podSchedulingContextLister)
if err != nil { if err != nil {
return statusError(logger, err) return statusError(logger, err)
} }
@ -540,22 +540,22 @@ func (pl *dynamicResources) PreScore(ctx context.Context, cs *framework.CycleSta
pending = true pending = true
} }
} }
if pending && !haveAllNodes(podScheduling.Spec.PotentialNodes, nodes) { if pending && !haveAllNodes(schedulingCtx.Spec.PotentialNodes, nodes) {
// Remember the potential nodes. The object will get created or // Remember the potential nodes. The object will get created or
// updated in Reserve. This is both an optimization and // updated in Reserve. This is both an optimization and
// covers the case that PreScore doesn't get called when there // covers the case that PreScore doesn't get called when there
// is only a single node. // is only a single node.
logger.V(5).Info("remembering potential nodes", "pod", klog.KObj(pod), "potentialnodes", klog.KObjSlice(nodes)) logger.V(5).Info("remembering potential nodes", "pod", klog.KObj(pod), "potentialnodes", klog.KObjSlice(nodes))
podScheduling = podScheduling.DeepCopy() schedulingCtx = schedulingCtx.DeepCopy()
numNodes := len(nodes) numNodes := len(nodes)
if numNodes > resourcev1alpha2.PodSchedulingNodeListMaxSize { if numNodes > resourcev1alpha2.PodSchedulingNodeListMaxSize {
numNodes = resourcev1alpha2.PodSchedulingNodeListMaxSize numNodes = resourcev1alpha2.PodSchedulingNodeListMaxSize
} }
podScheduling.Spec.PotentialNodes = make([]string, 0, numNodes) schedulingCtx.Spec.PotentialNodes = make([]string, 0, numNodes)
if numNodes == len(nodes) { if numNodes == len(nodes) {
// Copy all node names. // Copy all node names.
for _, node := range nodes { for _, node := range nodes {
podScheduling.Spec.PotentialNodes = append(podScheduling.Spec.PotentialNodes, node.Name) schedulingCtx.Spec.PotentialNodes = append(schedulingCtx.Spec.PotentialNodes, node.Name)
} }
} else { } else {
// Select a random subset of the nodes to comply with // Select a random subset of the nodes to comply with
@ -567,14 +567,14 @@ func (pl *dynamicResources) PreScore(ctx context.Context, cs *framework.CycleSta
nodeNames[node.Name] = struct{}{} nodeNames[node.Name] = struct{}{}
} }
for nodeName := range nodeNames { for nodeName := range nodeNames {
if len(podScheduling.Spec.PotentialNodes) >= resourcev1alpha2.PodSchedulingNodeListMaxSize { if len(schedulingCtx.Spec.PotentialNodes) >= resourcev1alpha2.PodSchedulingNodeListMaxSize {
break break
} }
podScheduling.Spec.PotentialNodes = append(podScheduling.Spec.PotentialNodes, nodeName) schedulingCtx.Spec.PotentialNodes = append(schedulingCtx.Spec.PotentialNodes, nodeName)
} }
} }
sort.Strings(podScheduling.Spec.PotentialNodes) sort.Strings(schedulingCtx.Spec.PotentialNodes)
state.storePodScheduling(podScheduling) state.storePodSchedulingContexts(schedulingCtx)
} }
logger.V(5).Info("all potential nodes already set", "pod", klog.KObj(pod), "potentialnodes", nodes) logger.V(5).Info("all potential nodes already set", "pod", klog.KObj(pod), "potentialnodes", nodes)
return nil return nil
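The hunk above caps Spec.PotentialNodes at resourcev1alpha2.PodSchedulingNodeListMaxSize: if every node fits, all names are copied; otherwise an arbitrary subset is taken via map iteration and the result is sorted. A minimal, self-contained sketch of that truncation; the helper name truncatePotentialNodes and the explicit limit parameter are illustrative, not part of the plugin:

package sketch

import (
	"sort"

	v1 "k8s.io/api/core/v1"
)

// truncatePotentialNodes copies every node name when the list fits within
// limit; otherwise it takes an arbitrary subset (Go randomizes map iteration
// order) and finally sorts the result so the published spec stays stable.
func truncatePotentialNodes(nodes []*v1.Node, limit int) []string {
	names := make([]string, 0, limit)
	if len(nodes) <= limit {
		for _, node := range nodes {
			names = append(names, node.Name)
		}
	} else {
		nodeNames := make(map[string]struct{}, len(nodes))
		for _, node := range nodes {
			nodeNames[node.Name] = struct{}{}
		}
		for name := range nodeNames {
			if len(names) >= limit {
				break
			}
			names = append(names, name)
		}
	}
	sort.Strings(names)
	return names
}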
@ -614,7 +614,7 @@ func (pl *dynamicResources) Reserve(ctx context.Context, cs *framework.CycleStat
numDelayedAllocationPending := 0 numDelayedAllocationPending := 0
numClaimsWithStatusInfo := 0 numClaimsWithStatusInfo := 0
logger := klog.FromContext(ctx) logger := klog.FromContext(ctx)
podScheduling, err := state.initializePodScheduling(ctx, pod, pl.podSchedulingLister) schedulingCtx, err := state.initializePodSchedulingContexts(ctx, pod, pl.podSchedulingContextLister)
if err != nil { if err != nil {
return statusError(logger, err) return statusError(logger, err)
} }
@ -639,7 +639,7 @@ func (pl *dynamicResources) Reserve(ctx context.Context, cs *framework.CycleStat
return statusError(logger, err) return statusError(logger, err)
} }
// If we get here, we know that reserving the claim for // If we get here, we know that reserving the claim for
// the pod worked and we can proceed with scheduling // the pod worked and we can proceed with scheduling
// it. // it.
} else { } else {
// Must be delayed allocation. // Must be delayed allocation.
@ -647,7 +647,7 @@ func (pl *dynamicResources) Reserve(ctx context.Context, cs *framework.CycleStat
// Did the driver provide information that steered node // Did the driver provide information that steered node
// selection towards a node that it can support? // selection towards a node that it can support?
if statusForClaim(podScheduling, pod.Spec.ResourceClaims[index].Name) != nil { if statusForClaim(schedulingCtx, pod.Spec.ResourceClaims[index].Name) != nil {
numClaimsWithStatusInfo++ numClaimsWithStatusInfo++
} }
} }
@ -659,13 +659,13 @@ func (pl *dynamicResources) Reserve(ctx context.Context, cs *framework.CycleStat
} }
podSchedulingDirty := state.podSchedulingDirty podSchedulingDirty := state.podSchedulingDirty
if len(podScheduling.Spec.PotentialNodes) == 0 { if len(schedulingCtx.Spec.PotentialNodes) == 0 {
// PreScore was not called, probably because there was // PreScore was not called, probably because there was
// only one candidate. We need to ask whether that // only one candidate. We need to ask whether that
// node is suitable, otherwise the scheduler will pick // node is suitable, otherwise the scheduler will pick
// it forever even when it cannot satisfy the claim. // it forever even when it cannot satisfy the claim.
podScheduling = podScheduling.DeepCopy() schedulingCtx = schedulingCtx.DeepCopy()
podScheduling.Spec.PotentialNodes = []string{nodeName} schedulingCtx.Spec.PotentialNodes = []string{nodeName}
logger.V(5).Info("asking for information about single potential node", "pod", klog.KObj(pod), "node", klog.ObjectRef{Name: nodeName}) logger.V(5).Info("asking for information about single potential node", "pod", klog.KObj(pod), "node", klog.ObjectRef{Name: nodeName})
podSchedulingDirty = true podSchedulingDirty = true
} }
@ -675,16 +675,16 @@ func (pl *dynamicResources) Reserve(ctx context.Context, cs *framework.CycleStat
// the driver yet. Otherwise we wait for information before blindly // the driver yet. Otherwise we wait for information before blindly
// making a decision that might have to be reversed later. // making a decision that might have to be reversed later.
if numDelayedAllocationPending == 1 || numClaimsWithStatusInfo == numDelayedAllocationPending { if numDelayedAllocationPending == 1 || numClaimsWithStatusInfo == numDelayedAllocationPending {
podScheduling = podScheduling.DeepCopy() schedulingCtx = schedulingCtx.DeepCopy()
// TODO: can we increase the chance that the scheduler picks // TODO: can we increase the chance that the scheduler picks
// the same node as before when allocation is on-going, // the same node as before when allocation is on-going,
// assuming that that node still fits the pod? Picking a // assuming that that node still fits the pod? Picking a
// different node may lead to some claims being allocated for // different node may lead to some claims being allocated for
// one node and others for another, which then would have to be // one node and others for another, which then would have to be
// resolved with deallocation. // resolved with deallocation.
podScheduling.Spec.SelectedNode = nodeName schedulingCtx.Spec.SelectedNode = nodeName
logger.V(5).Info("start allocation", "pod", klog.KObj(pod), "node", klog.ObjectRef{Name: nodeName}) logger.V(5).Info("start allocation", "pod", klog.KObj(pod), "node", klog.ObjectRef{Name: nodeName})
if err := state.publishPodScheduling(ctx, pl.clientset, podScheduling); err != nil { if err := state.publishPodSchedulingContexts(ctx, pl.clientset, schedulingCtx); err != nil {
return statusError(logger, err) return statusError(logger, err)
} }
return statusUnschedulable(logger, "waiting for resource driver to allocate resource", "pod", klog.KObj(pod), "node", klog.ObjectRef{Name: nodeName}) return statusUnschedulable(logger, "waiting for resource driver to allocate resource", "pod", klog.KObj(pod), "node", klog.ObjectRef{Name: nodeName})
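The Reserve code above funnels all writes through state.publishPodSchedulingContexts, whose implementation is outside this diff. A rough sketch of what such a publish step could look like against the renamed typed client, assuming a create-on-first-use, update-afterwards policy; the helper name publish and the UID check are illustrative only, and the real helper keeps additional bookkeeping that is omitted here:

package sketch

import (
	"context"

	resourcev1alpha2 "k8s.io/api/resource/v1alpha2"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// publish writes a PodSchedulingContext through the renamed client methods:
// objects that were never stored (empty UID) get created, everything else is
// updated in place.
func publish(ctx context.Context, clientset kubernetes.Interface, schedulingCtx *resourcev1alpha2.PodSchedulingContext) (*resourcev1alpha2.PodSchedulingContext, error) {
	client := clientset.ResourceV1alpha2().PodSchedulingContexts(schedulingCtx.Namespace)
	if schedulingCtx.UID == "" {
		return client.Create(ctx, schedulingCtx, metav1.CreateOptions{})
	}
	return client.Update(ctx, schedulingCtx, metav1.UpdateOptions{})
}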
@ -692,14 +692,14 @@ func (pl *dynamicResources) Reserve(ctx context.Context, cs *framework.CycleStat
// May have been modified earlier in PreScore or above. // May have been modified earlier in PreScore or above.
if podSchedulingDirty { if podSchedulingDirty {
if err := state.publishPodScheduling(ctx, pl.clientset, podScheduling); err != nil { if err := state.publishPodSchedulingContexts(ctx, pl.clientset, schedulingCtx); err != nil {
return statusError(logger, err) return statusError(logger, err)
} }
} }
// More than one pending claim and not enough information about all of them. // More than one pending claim and not enough information about all of them.
// //
// TODO: can or should we ensure that scheduling gets aborted while // TODO: can or should we ensure that scheduling gets aborted while
// waiting for resources *before* triggering delayed volume // waiting for resources *before* triggering delayed volume
// provisioning? On the one hand, volume provisioning is currently // provisioning? On the one hand, volume provisioning is currently
// irreversible, so it had better come last. On the other hand, // irreversible, so it had better come last. On the other hand,
@ -737,7 +737,7 @@ func (pl *dynamicResources) Unreserve(ctx context.Context, cs *framework.CycleSt
claim.Status.ReservedFor = reservedFor claim.Status.ReservedFor = reservedFor
logger.V(5).Info("unreserve", "resourceclaim", klog.KObj(claim)) logger.V(5).Info("unreserve", "resourceclaim", klog.KObj(claim))
if err := state.updateClaimStatus(ctx, pl.clientset, index, claim); err != nil { if err := state.updateClaimStatus(ctx, pl.clientset, index, claim); err != nil {
// We will get here again when pod scheduling // We will get here again when pod scheduling
// is retried. // is retried.
logger.Error(err, "unreserve", "resourceclaim", klog.KObj(claim)) logger.Error(err, "unreserve", "resourceclaim", klog.KObj(claim))
} }
@ -746,7 +746,7 @@ func (pl *dynamicResources) Unreserve(ctx context.Context, cs *framework.CycleSt
} }
// PostBind is called after a pod is successfully bound to a node. Now we are // PostBind is called after a pod is successfully bound to a node. Now we are
// sure that a PodScheduling object, if it exists, is definitely not going to // sure that a PodSchedulingContext object, if it exists, is definitely not going to
// be needed anymore and we can delete it. This is a one-shot thing, there won't // be needed anymore and we can delete it. This is a one-shot thing, there won't
// be any retries. This is okay because it should usually work and in those // be any retries. This is okay because it should usually work and in those
// cases where it doesn't, the garbage collector will eventually clean up. // cases where it doesn't, the garbage collector will eventually clean up.
@ -762,19 +762,19 @@ func (pl *dynamicResources) PostBind(ctx context.Context, cs *framework.CycleSta
return return
} }
// We cannot know for sure whether the PodScheduling object exists. We // We cannot know for sure whether the PodSchedulingContext object exists. We
// might have created it in the previous pod scheduling cycle and not // might have created it in the previous pod scheduling cycle and not
// have it in our informer cache yet. Let's try to delete, just to be // have it in our informer cache yet. Let's try to delete, just to be
// on the safe side. // on the safe side.
logger := klog.FromContext(ctx) logger := klog.FromContext(ctx)
err = pl.clientset.ResourceV1alpha2().PodSchedulings(pod.Namespace).Delete(ctx, pod.Name, metav1.DeleteOptions{}) err = pl.clientset.ResourceV1alpha2().PodSchedulingContexts(pod.Namespace).Delete(ctx, pod.Name, metav1.DeleteOptions{})
switch { switch {
case apierrors.IsNotFound(err): case apierrors.IsNotFound(err):
logger.V(5).Info("no PodScheduling object to delete") logger.V(5).Info("no PodSchedulingContext object to delete")
case err != nil: case err != nil:
logger.Error(err, "delete PodScheduling") logger.Error(err, "delete PodSchedulingContext")
default: default:
logger.V(5).Info("PodScheduling object deleted") logger.V(5).Info("PodSchedulingContext object deleted")
} }
} }
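Taken together, the plugin keeps one PodSchedulingContext per pod, named after the pod: PreScore/Reserve fill Spec.PotentialNodes, Reserve sets Spec.SelectedNode once it commits to a node, and PostBind deletes the object again. A sketch of such an object, with made-up pod and node names:

package sketch

import (
	resourcev1alpha2 "k8s.io/api/resource/v1alpha2"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// examplePodSchedulingContext mirrors what the scheduler publishes for a pod
// with a delayed-allocation claim: candidate nodes plus the tentatively
// selected node that the resource driver should try to allocate for.
var examplePodSchedulingContext = resourcev1alpha2.PodSchedulingContext{
	ObjectMeta: metav1.ObjectMeta{
		Name:      "my-pod", // same name and namespace as the pod
		Namespace: "default",
	},
	Spec: resourcev1alpha2.PodSchedulingContextSpec{
		PotentialNodes: []string{"worker-1", "worker-2"},
		SelectedNode:   "worker-1",
	},
}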

View File

@ -125,16 +125,16 @@ var (
ResourceClassName(className). ResourceClassName(className).
Obj() Obj()
scheduling = st.MakePodScheduling().Name(podName).Namespace(namespace). scheduling = st.MakePodSchedulingContexts().Name(podName).Namespace(namespace).
OwnerReference(podName, podUID, podKind). OwnerReference(podName, podUID, podKind).
Obj() Obj()
schedulingPotential = st.FromPodScheduling(scheduling). schedulingPotential = st.FromPodSchedulingContexts(scheduling).
PotentialNodes(workerNode.Name). PotentialNodes(workerNode.Name).
Obj() Obj()
schedulingSelectedPotential = st.FromPodScheduling(schedulingPotential). schedulingSelectedPotential = st.FromPodSchedulingContexts(schedulingPotential).
SelectedNode(workerNode.Name). SelectedNode(workerNode.Name).
Obj() Obj()
schedulingInfo = st.FromPodScheduling(schedulingPotential). schedulingInfo = st.FromPodSchedulingContexts(schedulingPotential).
ResourceClaims(resourcev1alpha2.ResourceClaimSchedulingStatus{Name: resourceName}, ResourceClaims(resourcev1alpha2.ResourceClaimSchedulingStatus{Name: resourceName},
resourcev1alpha2.ResourceClaimSchedulingStatus{Name: resourceName2}). resourcev1alpha2.ResourceClaimSchedulingStatus{Name: resourceName2}).
Obj() Obj()
@ -160,7 +160,7 @@ type result struct {
// functions will get called for all objects of that type. If they need to // functions will get called for all objects of that type. If they need to
// make changes only to a particular instance, then they must check the name. // make changes only to a particular instance, then they must check the name.
type change struct { type change struct {
scheduling func(*resourcev1alpha2.PodScheduling) *resourcev1alpha2.PodScheduling scheduling func(*resourcev1alpha2.PodSchedulingContext) *resourcev1alpha2.PodSchedulingContext
claim func(*resourcev1alpha2.ResourceClaim) *resourcev1alpha2.ResourceClaim claim func(*resourcev1alpha2.ResourceClaim) *resourcev1alpha2.ResourceClaim
} }
type perNodeResult map[string]result type perNodeResult map[string]result
@ -203,7 +203,7 @@ func TestPlugin(t *testing.T) {
pod *v1.Pod pod *v1.Pod
claims []*resourcev1alpha2.ResourceClaim claims []*resourcev1alpha2.ResourceClaim
classes []*resourcev1alpha2.ResourceClass classes []*resourcev1alpha2.ResourceClass
schedulings []*resourcev1alpha2.PodScheduling schedulings []*resourcev1alpha2.PodSchedulingContext
prepare prepare prepare prepare
want want want want
@ -269,7 +269,7 @@ func TestPlugin(t *testing.T) {
}, },
}, },
"delayed-allocation-scheduling-select-immediately": { "delayed-allocation-scheduling-select-immediately": {
// Create the PodScheduling object, ask for information // Create the PodSchedulingContext object, ask for information
// and select a node. // and select a node.
pod: podWithClaimName, pod: podWithClaimName,
claims: []*resourcev1alpha2.ResourceClaim{pendingDelayedClaim}, claims: []*resourcev1alpha2.ResourceClaim{pendingDelayedClaim},
@ -282,7 +282,7 @@ func TestPlugin(t *testing.T) {
}, },
}, },
"delayed-allocation-scheduling-ask": { "delayed-allocation-scheduling-ask": {
// Create the PodScheduling object, ask for // Create the PodSchedulingContext object, ask for
// information, but do not select a node because // information, but do not select a node because
// there are multiple claims. // there are multiple claims.
pod: podWithTwoClaimNames, pod: podWithTwoClaimNames,
@ -296,18 +296,18 @@ func TestPlugin(t *testing.T) {
}, },
}, },
"delayed-allocation-scheduling-finish": { "delayed-allocation-scheduling-finish": {
// Use the populated PodScheduling object to select a // Use the populated PodSchedulingContext object to select a
// node. // node.
pod: podWithClaimName, pod: podWithClaimName,
claims: []*resourcev1alpha2.ResourceClaim{pendingDelayedClaim}, claims: []*resourcev1alpha2.ResourceClaim{pendingDelayedClaim},
schedulings: []*resourcev1alpha2.PodScheduling{schedulingInfo}, schedulings: []*resourcev1alpha2.PodSchedulingContext{schedulingInfo},
classes: []*resourcev1alpha2.ResourceClass{resourceClass}, classes: []*resourcev1alpha2.ResourceClass{resourceClass},
want: want{ want: want{
reserve: result{ reserve: result{
status: framework.NewStatus(framework.UnschedulableAndUnresolvable, `waiting for resource driver to allocate resource`), status: framework.NewStatus(framework.UnschedulableAndUnresolvable, `waiting for resource driver to allocate resource`),
changes: change{ changes: change{
scheduling: func(in *resourcev1alpha2.PodScheduling) *resourcev1alpha2.PodScheduling { scheduling: func(in *resourcev1alpha2.PodSchedulingContext) *resourcev1alpha2.PodSchedulingContext {
return st.FromPodScheduling(in). return st.FromPodSchedulingContexts(in).
SelectedNode(workerNode.Name). SelectedNode(workerNode.Name).
Obj() Obj()
}, },
@ -316,19 +316,19 @@ func TestPlugin(t *testing.T) {
}, },
}, },
"delayed-allocation-scheduling-finish-concurrent-label-update": { "delayed-allocation-scheduling-finish-concurrent-label-update": {
// Use the populated PodScheduling object to select a // Use the populated PodSchedulingContext object to select a
// node. // node.
pod: podWithClaimName, pod: podWithClaimName,
claims: []*resourcev1alpha2.ResourceClaim{pendingDelayedClaim}, claims: []*resourcev1alpha2.ResourceClaim{pendingDelayedClaim},
schedulings: []*resourcev1alpha2.PodScheduling{schedulingInfo}, schedulings: []*resourcev1alpha2.PodSchedulingContext{schedulingInfo},
classes: []*resourcev1alpha2.ResourceClass{resourceClass}, classes: []*resourcev1alpha2.ResourceClass{resourceClass},
prepare: prepare{ prepare: prepare{
reserve: change{ reserve: change{
scheduling: func(in *resourcev1alpha2.PodScheduling) *resourcev1alpha2.PodScheduling { scheduling: func(in *resourcev1alpha2.PodSchedulingContext) *resourcev1alpha2.PodSchedulingContext {
// This does not actually conflict with setting the // This does not actually conflict with setting the
// selected node, but because the plugin is not using // selected node, but because the plugin is not using
// patching yet, Update nonetheless fails. // patching yet, Update nonetheless fails.
return st.FromPodScheduling(in). return st.FromPodSchedulingContexts(in).
Label("hello", "world"). Label("hello", "world").
Obj() Obj()
}, },
@ -341,10 +341,10 @@ func TestPlugin(t *testing.T) {
}, },
}, },
"delayed-allocation-scheduling-completed": { "delayed-allocation-scheduling-completed": {
// Remove PodScheduling object once the pod is scheduled. // Remove PodSchedulingContext object once the pod is scheduled.
pod: podWithClaimName, pod: podWithClaimName,
claims: []*resourcev1alpha2.ResourceClaim{allocatedClaim}, claims: []*resourcev1alpha2.ResourceClaim{allocatedClaim},
schedulings: []*resourcev1alpha2.PodScheduling{schedulingInfo}, schedulings: []*resourcev1alpha2.PodSchedulingContext{schedulingInfo},
classes: []*resourcev1alpha2.ResourceClass{resourceClass}, classes: []*resourcev1alpha2.ResourceClass{resourceClass},
want: want{ want: want{
reserve: result{ reserve: result{
@ -366,7 +366,7 @@ func TestPlugin(t *testing.T) {
pod: otherPodWithClaimName, pod: otherPodWithClaimName,
claims: []*resourcev1alpha2.ResourceClaim{inUseClaim}, claims: []*resourcev1alpha2.ResourceClaim{inUseClaim},
classes: []*resourcev1alpha2.ResourceClass{}, classes: []*resourcev1alpha2.ResourceClass{},
schedulings: []*resourcev1alpha2.PodScheduling{}, schedulings: []*resourcev1alpha2.PodSchedulingContext{},
prepare: prepare{}, prepare: prepare{},
want: want{ want: want{
prefilter: result{ prefilter: result{
@ -591,7 +591,7 @@ func (tc *testContext) listAll(t *testing.T) (objects []metav1.Object) {
for _, claim := range claims.Items { for _, claim := range claims.Items {
objects = append(objects, &claim) objects = append(objects, &claim)
} }
schedulings, err := tc.client.ResourceV1alpha2().PodSchedulings("").List(tc.ctx, metav1.ListOptions{}) schedulings, err := tc.client.ResourceV1alpha2().PodSchedulingContexts("").List(tc.ctx, metav1.ListOptions{})
require.NoError(t, err, "list pod scheduling") require.NoError(t, err, "list pod scheduling")
for _, scheduling := range schedulings.Items { for _, scheduling := range schedulings.Items {
objects = append(objects, &scheduling) objects = append(objects, &scheduling)
@ -615,8 +615,8 @@ func (tc *testContext) updateAPIServer(t *testing.T, objects []metav1.Object, up
t.Fatalf("unexpected error during prepare update: %v", err) t.Fatalf("unexpected error during prepare update: %v", err)
} }
modified[i] = obj modified[i] = obj
case *resourcev1alpha2.PodScheduling: case *resourcev1alpha2.PodSchedulingContext:
obj, err := tc.client.ResourceV1alpha2().PodSchedulings(obj.Namespace).Update(tc.ctx, obj, metav1.UpdateOptions{}) obj, err := tc.client.ResourceV1alpha2().PodSchedulingContexts(obj.Namespace).Update(tc.ctx, obj, metav1.UpdateOptions{})
if err != nil { if err != nil {
t.Fatalf("unexpected error during prepare update: %v", err) t.Fatalf("unexpected error during prepare update: %v", err)
} }
@ -650,7 +650,7 @@ func update(t *testing.T, objects []metav1.Object, updates change) []metav1.Obje
if updates.claim != nil { if updates.claim != nil {
obj = updates.claim(in) obj = updates.claim(in)
} }
case *resourcev1alpha2.PodScheduling: case *resourcev1alpha2.PodSchedulingContext:
if updates.scheduling != nil { if updates.scheduling != nil {
obj = updates.scheduling(in) obj = updates.scheduling(in)
} }
@ -661,7 +661,7 @@ func update(t *testing.T, objects []metav1.Object, updates change) []metav1.Obje
return updated return updated
} }
func setup(t *testing.T, nodes []*v1.Node, claims []*resourcev1alpha2.ResourceClaim, classes []*resourcev1alpha2.ResourceClass, schedulings []*resourcev1alpha2.PodScheduling) (result *testContext) { func setup(t *testing.T, nodes []*v1.Node, claims []*resourcev1alpha2.ResourceClaim, classes []*resourcev1alpha2.ResourceClass, schedulings []*resourcev1alpha2.PodSchedulingContext) (result *testContext) {
t.Helper() t.Helper()
tc := &testContext{} tc := &testContext{}
@ -702,7 +702,7 @@ func setup(t *testing.T, nodes []*v1.Node, claims []*resourcev1alpha2.ResourceCl
require.NoError(t, err, "create resource class") require.NoError(t, err, "create resource class")
} }
for _, scheduling := range schedulings { for _, scheduling := range schedulings {
_, err := tc.client.ResourceV1alpha2().PodSchedulings(scheduling.Namespace).Create(tc.ctx, scheduling, metav1.CreateOptions{}) _, err := tc.client.ResourceV1alpha2().PodSchedulingContexts(scheduling.Namespace).Create(tc.ctx, scheduling, metav1.CreateOptions{})
require.NoError(t, err, "create pod scheduling") require.NoError(t, err, "create pod scheduling")
} }

View File

@ -69,7 +69,7 @@ const (
Node GVK = "Node" Node GVK = "Node"
PersistentVolume GVK = "PersistentVolume" PersistentVolume GVK = "PersistentVolume"
PersistentVolumeClaim GVK = "PersistentVolumeClaim" PersistentVolumeClaim GVK = "PersistentVolumeClaim"
PodScheduling GVK = "PodScheduling" PodSchedulingContext GVK = "PodSchedulingContext"
ResourceClaim GVK = "ResourceClaim" ResourceClaim GVK = "ResourceClaim"
StorageClass GVK = "storage.k8s.io/StorageClass" StorageClass GVK = "storage.k8s.io/StorageClass"
CSINode GVK = "storage.k8s.io/CSINode" CSINode GVK = "storage.k8s.io/CSINode"

View File

@ -925,22 +925,24 @@ func (wrapper *ResourceClaimWrapper) ReservedFor(consumers ...resourcev1alpha2.R
return wrapper return wrapper
} }
// PodSchedulingWrapper wraps a PodScheduling inside. // PodSchedulingWrapper wraps a PodSchedulingContext inside.
type PodSchedulingWrapper struct{ resourcev1alpha2.PodScheduling } type PodSchedulingWrapper struct {
resourcev1alpha2.PodSchedulingContext
}
// MakePodScheduling creates a PodScheduling wrapper. // MakePodSchedulingContexts creates a PodSchedulingContext wrapper.
func MakePodScheduling() *PodSchedulingWrapper { func MakePodSchedulingContexts() *PodSchedulingWrapper {
return &PodSchedulingWrapper{resourcev1alpha2.PodScheduling{}} return &PodSchedulingWrapper{resourcev1alpha2.PodSchedulingContext{}}
} }
// FromPodScheduling creates a PodScheduling wrapper from some existing object. // FromPodSchedulingContexts creates a PodSchedulingContext wrapper from some existing object.
func FromPodScheduling(other *resourcev1alpha2.PodScheduling) *PodSchedulingWrapper { func FromPodSchedulingContexts(other *resourcev1alpha2.PodSchedulingContext) *PodSchedulingWrapper {
return &PodSchedulingWrapper{*other.DeepCopy()} return &PodSchedulingWrapper{*other.DeepCopy()}
} }
// Obj returns the inner object. // Obj returns the inner object.
func (wrapper *PodSchedulingWrapper) Obj() *resourcev1alpha2.PodScheduling { func (wrapper *PodSchedulingWrapper) Obj() *resourcev1alpha2.PodSchedulingContext {
return &wrapper.PodScheduling return &wrapper.PodSchedulingContext
} }
// Name sets `s` as the name of the inner object. // Name sets `s` as the name of the inner object.
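The renamed builders are used like the fixtures earlier in the test file. A short usage sketch; the import path behind the st alias is an assumption, and the pod and node names are placeholders:

package sketch

import (
	st "k8s.io/kubernetes/pkg/scheduler/testing" // assumed location of the wrappers above
)

// newSchedulingFixture builds a base PodSchedulingContext and then derives a
// variant with potential and selected nodes, the same way the test fixtures do.
func newSchedulingFixture() {
	base := st.MakePodSchedulingContexts().
		Name("my-pod").
		Namespace("default").
		Obj()

	_ = st.FromPodSchedulingContexts(base).
		PotentialNodes("worker-1").
		SelectedNode("worker-1").
		Obj()
}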

View File

@ -575,8 +575,8 @@ func ClusterRoles() []rbacv1.ClusterRole {
kubeSchedulerRules = append(kubeSchedulerRules, kubeSchedulerRules = append(kubeSchedulerRules,
rbacv1helpers.NewRule(Read...).Groups(resourceGroup).Resources("resourceclaims", "resourceclasses").RuleOrDie(), rbacv1helpers.NewRule(Read...).Groups(resourceGroup).Resources("resourceclaims", "resourceclasses").RuleOrDie(),
rbacv1helpers.NewRule(ReadUpdate...).Groups(resourceGroup).Resources("resourceclaims/status").RuleOrDie(), rbacv1helpers.NewRule(ReadUpdate...).Groups(resourceGroup).Resources("resourceclaims/status").RuleOrDie(),
rbacv1helpers.NewRule(ReadWrite...).Groups(resourceGroup).Resources("podschedulings").RuleOrDie(), rbacv1helpers.NewRule(ReadWrite...).Groups(resourceGroup).Resources("podschedulingcontexts").RuleOrDie(),
rbacv1helpers.NewRule(Read...).Groups(resourceGroup).Resources("podschedulings/status").RuleOrDie(), rbacv1helpers.NewRule(Read...).Groups(resourceGroup).Resources("podschedulingcontexts/status").RuleOrDie(),
) )
} }
roles = append(roles, rbacv1.ClusterRole{ roles = append(roles, rbacv1.ClusterRole{
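The two rules above give the scheduler read/write access to podschedulingcontexts and read-only access to the status subresource. Expanded into plain PolicyRule literals they would look roughly like this, assuming the conventional bootstrap-policy verb sets behind the Read and ReadWrite helpers:

package sketch

import rbacv1 "k8s.io/api/rbac/v1"

// Approximate expansion of the two RuleOrDie calls above; the verb lists are
// an assumption about the Read/ReadWrite helpers, not copied from this diff.
var podSchedulingContextRules = []rbacv1.PolicyRule{
	{
		APIGroups: []string{"resource.k8s.io"},
		Resources: []string{"podschedulingcontexts"},
		Verbs:     []string{"get", "list", "watch", "create", "update", "patch", "delete"},
	},
	{
		APIGroups: []string{"resource.k8s.io"},
		Resources: []string{"podschedulingcontexts/status"},
		Verbs:     []string{"get", "list", "watch"},
	},
}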

View File

@ -74,15 +74,15 @@ func (m *AllocationResult) XXX_DiscardUnknown() {
var xxx_messageInfo_AllocationResult proto.InternalMessageInfo var xxx_messageInfo_AllocationResult proto.InternalMessageInfo
func (m *PodScheduling) Reset() { *m = PodScheduling{} } func (m *PodSchedulingContext) Reset() { *m = PodSchedulingContext{} }
func (*PodScheduling) ProtoMessage() {} func (*PodSchedulingContext) ProtoMessage() {}
func (*PodScheduling) Descriptor() ([]byte, []int) { func (*PodSchedulingContext) Descriptor() ([]byte, []int) {
return fileDescriptor_3add37bbd52889e0, []int{1} return fileDescriptor_3add37bbd52889e0, []int{1}
} }
func (m *PodScheduling) XXX_Unmarshal(b []byte) error { func (m *PodSchedulingContext) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b) return m.Unmarshal(b)
} }
func (m *PodScheduling) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { func (m *PodSchedulingContext) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)] b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b) n, err := m.MarshalToSizedBuffer(b)
if err != nil { if err != nil {
@ -90,27 +90,27 @@ func (m *PodScheduling) XXX_Marshal(b []byte, deterministic bool) ([]byte, error
} }
return b[:n], nil return b[:n], nil
} }
func (m *PodScheduling) XXX_Merge(src proto.Message) { func (m *PodSchedulingContext) XXX_Merge(src proto.Message) {
xxx_messageInfo_PodScheduling.Merge(m, src) xxx_messageInfo_PodSchedulingContext.Merge(m, src)
} }
func (m *PodScheduling) XXX_Size() int { func (m *PodSchedulingContext) XXX_Size() int {
return m.Size() return m.Size()
} }
func (m *PodScheduling) XXX_DiscardUnknown() { func (m *PodSchedulingContext) XXX_DiscardUnknown() {
xxx_messageInfo_PodScheduling.DiscardUnknown(m) xxx_messageInfo_PodSchedulingContext.DiscardUnknown(m)
} }
var xxx_messageInfo_PodScheduling proto.InternalMessageInfo var xxx_messageInfo_PodSchedulingContext proto.InternalMessageInfo
func (m *PodSchedulingList) Reset() { *m = PodSchedulingList{} } func (m *PodSchedulingContextList) Reset() { *m = PodSchedulingContextList{} }
func (*PodSchedulingList) ProtoMessage() {} func (*PodSchedulingContextList) ProtoMessage() {}
func (*PodSchedulingList) Descriptor() ([]byte, []int) { func (*PodSchedulingContextList) Descriptor() ([]byte, []int) {
return fileDescriptor_3add37bbd52889e0, []int{2} return fileDescriptor_3add37bbd52889e0, []int{2}
} }
func (m *PodSchedulingList) XXX_Unmarshal(b []byte) error { func (m *PodSchedulingContextList) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b) return m.Unmarshal(b)
} }
func (m *PodSchedulingList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { func (m *PodSchedulingContextList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)] b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b) n, err := m.MarshalToSizedBuffer(b)
if err != nil { if err != nil {
@ -118,27 +118,27 @@ func (m *PodSchedulingList) XXX_Marshal(b []byte, deterministic bool) ([]byte, e
} }
return b[:n], nil return b[:n], nil
} }
func (m *PodSchedulingList) XXX_Merge(src proto.Message) { func (m *PodSchedulingContextList) XXX_Merge(src proto.Message) {
xxx_messageInfo_PodSchedulingList.Merge(m, src) xxx_messageInfo_PodSchedulingContextList.Merge(m, src)
} }
func (m *PodSchedulingList) XXX_Size() int { func (m *PodSchedulingContextList) XXX_Size() int {
return m.Size() return m.Size()
} }
func (m *PodSchedulingList) XXX_DiscardUnknown() { func (m *PodSchedulingContextList) XXX_DiscardUnknown() {
xxx_messageInfo_PodSchedulingList.DiscardUnknown(m) xxx_messageInfo_PodSchedulingContextList.DiscardUnknown(m)
} }
var xxx_messageInfo_PodSchedulingList proto.InternalMessageInfo var xxx_messageInfo_PodSchedulingContextList proto.InternalMessageInfo
func (m *PodSchedulingSpec) Reset() { *m = PodSchedulingSpec{} } func (m *PodSchedulingContextSpec) Reset() { *m = PodSchedulingContextSpec{} }
func (*PodSchedulingSpec) ProtoMessage() {} func (*PodSchedulingContextSpec) ProtoMessage() {}
func (*PodSchedulingSpec) Descriptor() ([]byte, []int) { func (*PodSchedulingContextSpec) Descriptor() ([]byte, []int) {
return fileDescriptor_3add37bbd52889e0, []int{3} return fileDescriptor_3add37bbd52889e0, []int{3}
} }
func (m *PodSchedulingSpec) XXX_Unmarshal(b []byte) error { func (m *PodSchedulingContextSpec) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b) return m.Unmarshal(b)
} }
func (m *PodSchedulingSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { func (m *PodSchedulingContextSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)] b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b) n, err := m.MarshalToSizedBuffer(b)
if err != nil { if err != nil {
@ -146,27 +146,27 @@ func (m *PodSchedulingSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, e
} }
return b[:n], nil return b[:n], nil
} }
func (m *PodSchedulingSpec) XXX_Merge(src proto.Message) { func (m *PodSchedulingContextSpec) XXX_Merge(src proto.Message) {
xxx_messageInfo_PodSchedulingSpec.Merge(m, src) xxx_messageInfo_PodSchedulingContextSpec.Merge(m, src)
} }
func (m *PodSchedulingSpec) XXX_Size() int { func (m *PodSchedulingContextSpec) XXX_Size() int {
return m.Size() return m.Size()
} }
func (m *PodSchedulingSpec) XXX_DiscardUnknown() { func (m *PodSchedulingContextSpec) XXX_DiscardUnknown() {
xxx_messageInfo_PodSchedulingSpec.DiscardUnknown(m) xxx_messageInfo_PodSchedulingContextSpec.DiscardUnknown(m)
} }
var xxx_messageInfo_PodSchedulingSpec proto.InternalMessageInfo var xxx_messageInfo_PodSchedulingContextSpec proto.InternalMessageInfo
func (m *PodSchedulingStatus) Reset() { *m = PodSchedulingStatus{} } func (m *PodSchedulingContextStatus) Reset() { *m = PodSchedulingContextStatus{} }
func (*PodSchedulingStatus) ProtoMessage() {} func (*PodSchedulingContextStatus) ProtoMessage() {}
func (*PodSchedulingStatus) Descriptor() ([]byte, []int) { func (*PodSchedulingContextStatus) Descriptor() ([]byte, []int) {
return fileDescriptor_3add37bbd52889e0, []int{4} return fileDescriptor_3add37bbd52889e0, []int{4}
} }
func (m *PodSchedulingStatus) XXX_Unmarshal(b []byte) error { func (m *PodSchedulingContextStatus) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b) return m.Unmarshal(b)
} }
func (m *PodSchedulingStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { func (m *PodSchedulingContextStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)] b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b) n, err := m.MarshalToSizedBuffer(b)
if err != nil { if err != nil {
@ -174,17 +174,17 @@ func (m *PodSchedulingStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte,
} }
return b[:n], nil return b[:n], nil
} }
func (m *PodSchedulingStatus) XXX_Merge(src proto.Message) { func (m *PodSchedulingContextStatus) XXX_Merge(src proto.Message) {
xxx_messageInfo_PodSchedulingStatus.Merge(m, src) xxx_messageInfo_PodSchedulingContextStatus.Merge(m, src)
} }
func (m *PodSchedulingStatus) XXX_Size() int { func (m *PodSchedulingContextStatus) XXX_Size() int {
return m.Size() return m.Size()
} }
func (m *PodSchedulingStatus) XXX_DiscardUnknown() { func (m *PodSchedulingContextStatus) XXX_DiscardUnknown() {
xxx_messageInfo_PodSchedulingStatus.DiscardUnknown(m) xxx_messageInfo_PodSchedulingContextStatus.DiscardUnknown(m)
} }
var xxx_messageInfo_PodSchedulingStatus proto.InternalMessageInfo var xxx_messageInfo_PodSchedulingContextStatus proto.InternalMessageInfo
func (m *ResourceClaim) Reset() { *m = ResourceClaim{} } func (m *ResourceClaim) Reset() { *m = ResourceClaim{} }
func (*ResourceClaim) ProtoMessage() {} func (*ResourceClaim) ProtoMessage() {}
@ -552,10 +552,10 @@ var xxx_messageInfo_ResourceClassParametersReference proto.InternalMessageInfo
func init() { func init() {
proto.RegisterType((*AllocationResult)(nil), "k8s.io.api.resource.v1alpha2.AllocationResult") proto.RegisterType((*AllocationResult)(nil), "k8s.io.api.resource.v1alpha2.AllocationResult")
proto.RegisterType((*PodScheduling)(nil), "k8s.io.api.resource.v1alpha2.PodScheduling") proto.RegisterType((*PodSchedulingContext)(nil), "k8s.io.api.resource.v1alpha2.PodSchedulingContext")
proto.RegisterType((*PodSchedulingList)(nil), "k8s.io.api.resource.v1alpha2.PodSchedulingList") proto.RegisterType((*PodSchedulingContextList)(nil), "k8s.io.api.resource.v1alpha2.PodSchedulingContextList")
proto.RegisterType((*PodSchedulingSpec)(nil), "k8s.io.api.resource.v1alpha2.PodSchedulingSpec") proto.RegisterType((*PodSchedulingContextSpec)(nil), "k8s.io.api.resource.v1alpha2.PodSchedulingContextSpec")
proto.RegisterType((*PodSchedulingStatus)(nil), "k8s.io.api.resource.v1alpha2.PodSchedulingStatus") proto.RegisterType((*PodSchedulingContextStatus)(nil), "k8s.io.api.resource.v1alpha2.PodSchedulingContextStatus")
proto.RegisterType((*ResourceClaim)(nil), "k8s.io.api.resource.v1alpha2.ResourceClaim") proto.RegisterType((*ResourceClaim)(nil), "k8s.io.api.resource.v1alpha2.ResourceClaim")
proto.RegisterType((*ResourceClaimConsumerReference)(nil), "k8s.io.api.resource.v1alpha2.ResourceClaimConsumerReference") proto.RegisterType((*ResourceClaimConsumerReference)(nil), "k8s.io.api.resource.v1alpha2.ResourceClaimConsumerReference")
proto.RegisterType((*ResourceClaimList)(nil), "k8s.io.api.resource.v1alpha2.ResourceClaimList") proto.RegisterType((*ResourceClaimList)(nil), "k8s.io.api.resource.v1alpha2.ResourceClaimList")
@ -576,81 +576,83 @@ func init() {
} }
var fileDescriptor_3add37bbd52889e0 = []byte{ var fileDescriptor_3add37bbd52889e0 = []byte{
// 1174 bytes of a gzipped FileDescriptorProto // 1209 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x58, 0xcd, 0x6f, 0x1b, 0x45, 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x58, 0xcf, 0x6e, 0xe3, 0xd4,
0x14, 0xcf, 0xc6, 0x6e, 0x95, 0x8c, 0x1b, 0x37, 0xd9, 0xb4, 0xc8, 0xad, 0x5a, 0xdb, 0xec, 0xc9, 0x17, 0xae, 0x93, 0x74, 0xd4, 0xde, 0xb4, 0x99, 0xd6, 0x6d, 0x7f, 0xca, 0x54, 0x33, 0x49, 0x7e,
0x12, 0xb0, 0xdb, 0x18, 0x04, 0x15, 0x1f, 0x95, 0xb2, 0x0d, 0x94, 0x08, 0x9a, 0x9a, 0x31, 0x91, 0x5e, 0x45, 0x02, 0xec, 0x69, 0x40, 0x43, 0xc5, 0x9f, 0x91, 0xea, 0x16, 0x86, 0x0a, 0xa6, 0x13,
0x08, 0x42, 0x88, 0xf1, 0xee, 0xab, 0xbd, 0x64, 0xbf, 0xd8, 0xd9, 0x35, 0xaa, 0xb8, 0xf4, 0xca, 0x6e, 0xa8, 0x98, 0x22, 0x84, 0xe6, 0xc6, 0x3e, 0x93, 0x98, 0xfa, 0x1f, 0xbe, 0xd7, 0x81, 0x11,
0x0d, 0x21, 0xee, 0x1c, 0xf9, 0x43, 0x10, 0x52, 0x8e, 0x91, 0xe0, 0xd0, 0x93, 0x45, 0xcc, 0x81, 0x9b, 0x79, 0x84, 0x59, 0xb0, 0x61, 0xc5, 0x92, 0x17, 0xe0, 0x0d, 0x10, 0x52, 0x97, 0x45, 0xb0,
0x3f, 0x80, 0x13, 0x3d, 0xa1, 0x19, 0xef, 0xae, 0x77, 0xd6, 0x1f, 0xc4, 0x11, 0x8a, 0xc2, 0x29, 0x98, 0x55, 0x44, 0xc3, 0x82, 0x07, 0x60, 0xc5, 0xac, 0x90, 0x1d, 0xdb, 0xb1, 0x9d, 0x38, 0x34,
0x99, 0x79, 0xbf, 0xf7, 0x9b, 0xf7, 0x31, 0xef, 0xcd, 0x5b, 0xa3, 0x77, 0x0f, 0xef, 0x52, 0xd5, 0x5d, 0x44, 0xb0, 0x9a, 0xf1, 0x3d, 0xdf, 0xf9, 0xee, 0xb9, 0xdf, 0xb9, 0xe7, 0xdc, 0x93, 0xa2,
0xf2, 0xb4, 0xc3, 0xa8, 0x03, 0x81, 0x0b, 0x21, 0x50, 0xad, 0x0f, 0xae, 0xe9, 0x05, 0x5a, 0x2c, 0x77, 0x4e, 0x77, 0xa9, 0xa8, 0x59, 0xd2, 0xa9, 0xdb, 0x06, 0xc7, 0x04, 0x06, 0x54, 0xea, 0x81,
0x20, 0xbe, 0xa5, 0x05, 0x40, 0xbd, 0x28, 0x30, 0x40, 0xeb, 0x6f, 0x11, 0xdb, 0xef, 0x91, 0xa6, 0xa9, 0x5a, 0x8e, 0x14, 0x18, 0x88, 0xad, 0x49, 0x0e, 0x50, 0xcb, 0x75, 0x14, 0x90, 0x7a, 0x3b,
0xd6, 0x05, 0x17, 0x02, 0x12, 0x82, 0xa9, 0xfa, 0x81, 0x17, 0x7a, 0xf2, 0xad, 0x11, 0x5a, 0x25, 0x44, 0xb7, 0xbb, 0xa4, 0x21, 0x75, 0xc0, 0x04, 0x87, 0x30, 0x50, 0x45, 0xdb, 0xb1, 0x98, 0xc5,
0xbe, 0xa5, 0x26, 0x68, 0x35, 0x41, 0xdf, 0x7c, 0xa5, 0x6b, 0x85, 0xbd, 0xa8, 0xa3, 0x1a, 0x9e, 0xdf, 0x1c, 0xa2, 0x45, 0x62, 0x6b, 0x62, 0x88, 0x16, 0x43, 0xf4, 0xf6, 0x2b, 0x1d, 0x8d, 0x75,
0xa3, 0x75, 0xbd, 0xae, 0xa7, 0x71, 0xa5, 0x4e, 0xf4, 0x98, 0xaf, 0xf8, 0x82, 0xff, 0x37, 0x22, 0xdd, 0xb6, 0xa8, 0x58, 0x86, 0xd4, 0xb1, 0x3a, 0x96, 0xe4, 0x3b, 0xb5, 0xdd, 0xc7, 0xfe, 0x97,
0xbb, 0xa9, 0x64, 0x8e, 0x36, 0xbc, 0x80, 0x1d, 0x9b, 0x3f, 0xf0, 0xe6, 0x6b, 0x63, 0x8c, 0x43, 0xff, 0xe1, 0xff, 0x6f, 0x48, 0xb6, 0x2d, 0xc4, 0xb6, 0x56, 0x2c, 0xc7, 0xdb, 0x36, 0xbd, 0xe1,
0x8c, 0x9e, 0xe5, 0x42, 0xf0, 0x44, 0xf3, 0x0f, 0xbb, 0x6c, 0x83, 0x6a, 0x0e, 0x84, 0x64, 0x9a, 0xf6, 0x6b, 0x23, 0x8c, 0x41, 0x94, 0xae, 0x66, 0x82, 0xf3, 0x44, 0xb2, 0x4f, 0x3b, 0xde, 0x02,
0x96, 0x36, 0x4b, 0x2b, 0x88, 0xdc, 0xd0, 0x72, 0x60, 0x42, 0xe1, 0xf5, 0x7f, 0x53, 0xa0, 0x46, 0x95, 0x0c, 0x60, 0x64, 0x92, 0x97, 0x94, 0xe5, 0xe5, 0xb8, 0x26, 0xd3, 0x0c, 0x18, 0x73, 0xb8,
0x0f, 0x1c, 0x92, 0xd7, 0x53, 0xfe, 0x94, 0xd0, 0xfa, 0xb6, 0x6d, 0x7b, 0x06, 0x09, 0x2d, 0xcf, 0xf3, 0x4f, 0x0e, 0x54, 0xe9, 0x82, 0x41, 0xd2, 0x7e, 0xc2, 0x1f, 0x1c, 0x5a, 0xdb, 0xd3, 0x75,
0xc5, 0x40, 0x23, 0x3b, 0x94, 0xef, 0xa1, 0x72, 0x12, 0x9b, 0xf7, 0x89, 0x6b, 0xda, 0x50, 0x91, 0x4b, 0x21, 0x4c, 0xb3, 0x4c, 0x0c, 0xd4, 0xd5, 0x19, 0x7f, 0x17, 0x95, 0x42, 0x6d, 0xde, 0x23,
0xea, 0x52, 0x63, 0x55, 0x7f, 0xe1, 0x68, 0x50, 0x5b, 0x1a, 0x0e, 0x6a, 0x65, 0x2c, 0x48, 0x71, 0xa6, 0xaa, 0x43, 0x99, 0xab, 0x71, 0xf5, 0x65, 0xf9, 0x7f, 0x67, 0xfd, 0xea, 0xc2, 0xa0, 0x5f,
0x0e, 0x2d, 0x77, 0xd0, 0x3a, 0xe9, 0x13, 0xcb, 0x26, 0x1d, 0x1b, 0x1e, 0xb9, 0x7b, 0x9e, 0x09, 0x2d, 0xe1, 0x84, 0x15, 0xa7, 0xd0, 0x7c, 0x1b, 0xad, 0x91, 0x1e, 0xd1, 0x74, 0xd2, 0xd6, 0xe1,
0xb4, 0xb2, 0x5c, 0x97, 0x1a, 0xa5, 0x66, 0x5d, 0xcd, 0xc4, 0x9f, 0x85, 0x4c, 0xed, 0x6f, 0xa9, 0x81, 0x79, 0x64, 0xa9, 0x40, 0xcb, 0xb9, 0x1a, 0x57, 0x2f, 0x36, 0x6a, 0x62, 0x4c, 0x7f, 0x4f,
0x0c, 0xd0, 0x06, 0x1b, 0x8c, 0xd0, 0x0b, 0xf4, 0x6b, 0xc3, 0x41, 0x6d, 0x7d, 0x3b, 0xa7, 0x8d, 0x32, 0xb1, 0xb7, 0x23, 0x7a, 0x80, 0x16, 0xe8, 0xa0, 0x30, 0xcb, 0x91, 0x37, 0x07, 0xfd, 0xea,
0x27, 0xf8, 0x64, 0x0d, 0xad, 0xd2, 0x1e, 0x09, 0x80, 0xed, 0x55, 0x0a, 0x75, 0xa9, 0xb1, 0xa2, 0xda, 0x5e, 0xca, 0x1b, 0x8f, 0xf1, 0xf1, 0x12, 0x5a, 0xa6, 0x5d, 0xe2, 0x80, 0xb7, 0x56, 0xce,
0x6f, 0xc4, 0xe6, 0xad, 0xb6, 0x13, 0x01, 0x1e, 0x63, 0x94, 0x1f, 0x97, 0xd1, 0x5a, 0xcb, 0x33, 0xd7, 0xb8, 0xfa, 0x92, 0xbc, 0x1e, 0x84, 0xb7, 0xdc, 0x0a, 0x0d, 0x78, 0x84, 0x11, 0x7e, 0xc8,
0xdb, 0x46, 0x0f, 0xcc, 0xc8, 0xb6, 0xdc, 0xae, 0xfc, 0x05, 0x5a, 0x61, 0xf1, 0x37, 0x49, 0x48, 0xa1, 0xcd, 0xa6, 0xa5, 0xb6, 0x94, 0x2e, 0xa8, 0xae, 0xae, 0x99, 0x9d, 0x7d, 0xcb, 0x64, 0xf0,
0xb8, 0x83, 0xa5, 0xe6, 0x9d, 0x8c, 0x79, 0x69, 0x18, 0x55, 0xff, 0xb0, 0xcb, 0x36, 0xa8, 0xca, 0x15, 0xe3, 0x1f, 0xa1, 0x25, 0x2f, 0x0d, 0x2a, 0x61, 0xc4, 0x3f, 0x67, 0xb1, 0x71, 0x3b, 0x16,
0xd0, 0xcc, 0xe0, 0x47, 0x9d, 0x2f, 0xc1, 0x08, 0x1f, 0x42, 0x48, 0x74, 0x39, 0x3e, 0x13, 0x8d, 0x65, 0xa4, 0xa6, 0x68, 0x9f, 0x76, 0xbc, 0x05, 0x2a, 0x7a, 0x68, 0x2f, 0xee, 0x07, 0xed, 0xcf,
0xf7, 0x70, 0xca, 0x2a, 0x7f, 0x84, 0x8a, 0xd4, 0x07, 0x23, 0x76, 0x5e, 0x53, 0xe7, 0x5d, 0x3e, 0x41, 0x61, 0xf7, 0x81, 0x11, 0x99, 0x0f, 0xb6, 0x46, 0xa3, 0x35, 0x1c, 0xb1, 0xf2, 0x0f, 0x51,
0x55, 0x30, 0xae, 0xed, 0x83, 0xa1, 0x5f, 0x89, 0xc9, 0x8b, 0x6c, 0x85, 0x39, 0x95, 0x7c, 0x80, 0x81, 0xda, 0xa0, 0x04, 0x1a, 0xdc, 0x11, 0xa7, 0xdd, 0x41, 0x71, 0x52, 0x8c, 0x2d, 0x1b, 0x14,
0x2e, 0xd3, 0x90, 0x84, 0x11, 0xe5, 0x4e, 0x97, 0x9a, 0x5b, 0x8b, 0x90, 0x72, 0x45, 0xbd, 0x1c, 0x79, 0x25, 0xd8, 0xa3, 0xe0, 0x7d, 0x61, 0x9f, 0x91, 0x7f, 0x84, 0xae, 0x51, 0x46, 0x98, 0x4b,
0xd3, 0x5e, 0x1e, 0xad, 0x71, 0x4c, 0xa8, 0xfc, 0x2c, 0xa1, 0x0d, 0x01, 0xff, 0xa1, 0x45, 0x43, 0x7d, 0x09, 0x8a, 0x8d, 0xdd, 0x2b, 0x70, 0xfb, 0xfe, 0x72, 0x29, 0x60, 0xbf, 0x36, 0xfc, 0xc6,
0xf9, 0xb3, 0x89, 0x28, 0xa9, 0xa7, 0x8b, 0x12, 0xd3, 0xe6, 0x31, 0x5a, 0x8f, 0xcf, 0x5b, 0x49, 0x01, 0xaf, 0xf0, 0x33, 0x87, 0xca, 0x93, 0xdc, 0x3e, 0xd0, 0x28, 0xe3, 0x3f, 0x1d, 0x93, 0x4e,
0x76, 0x32, 0x11, 0x6a, 0xa1, 0x4b, 0x56, 0x08, 0x0e, 0xbb, 0x1f, 0x85, 0x46, 0xa9, 0xf9, 0xd2, 0xbc, 0x9c, 0x74, 0x9e, 0xb7, 0x2f, 0xdc, 0x5a, 0xb0, 0xed, 0x52, 0xb8, 0x12, 0x93, 0xed, 0x63,
0x02, 0xde, 0xe8, 0x6b, 0x31, 0xef, 0xa5, 0x5d, 0xc6, 0x80, 0x47, 0x44, 0xca, 0xb7, 0x79, 0x2f, 0xb4, 0xa8, 0x31, 0x30, 0xbc, 0xbb, 0x93, 0xaf, 0x17, 0x1b, 0x8d, 0xd9, 0xcf, 0x26, 0xaf, 0x06,
0x58, 0xf0, 0xe4, 0xbb, 0xe8, 0x0a, 0xe5, 0x57, 0x0c, 0x4c, 0x76, 0x7f, 0xe2, 0x0b, 0x7d, 0x2d, 0xf4, 0x8b, 0x87, 0x1e, 0x11, 0x1e, 0xf2, 0x09, 0xcf, 0x32, 0xce, 0xe4, 0x09, 0xcb, 0xef, 0xa2,
0x66, 0xb8, 0xd2, 0xce, 0xc8, 0xb0, 0x80, 0x94, 0xdf, 0x44, 0x65, 0xdf, 0x0b, 0xc1, 0x0d, 0x2d, 0x15, 0xea, 0x5f, 0x46, 0x50, 0xbd, 0x9b, 0x16, 0x5c, 0xfd, 0xcd, 0x80, 0x68, 0xa5, 0x15, 0xb3,
0x62, 0x27, 0x57, 0xb9, 0xd0, 0x58, 0xd5, 0x65, 0x56, 0x08, 0x2d, 0x41, 0x82, 0x73, 0x48, 0xe5, 0xe1, 0x04, 0x92, 0x7f, 0x03, 0x95, 0x6c, 0x8b, 0x81, 0xc9, 0x34, 0xa2, 0x87, 0x97, 0x3e, 0x5f,
0x7b, 0x09, 0x6d, 0x4e, 0xc9, 0x80, 0xfc, 0xcd, 0xb8, 0xc0, 0xee, 0xdb, 0xc4, 0x72, 0x68, 0x45, 0x5f, 0x96, 0x79, 0xaf, 0x64, 0x9a, 0x09, 0x0b, 0x4e, 0x21, 0x85, 0x6f, 0x39, 0xb4, 0x9d, 0x9d,
0xe2, 0xee, 0xbf, 0x35, 0xdf, 0x7d, 0x9c, 0xd5, 0x99, 0x48, 0xeb, 0x44, 0x75, 0x8e, 0xa8, 0x71, 0x1d, 0xfe, 0xeb, 0x51, 0x45, 0xee, 0xeb, 0x44, 0x33, 0x68, 0x99, 0xf3, 0x35, 0x79, 0x73, 0xba,
0xee, 0x28, 0x5e, 0x08, 0x02, 0xe4, 0xa2, 0x15, 0x82, 0xe8, 0xe6, 0x7f, 0x54, 0x08, 0x22, 0xe9, 0x26, 0x38, 0xee, 0x33, 0xe2, 0x0e, 0x52, 0x3e, 0x56, 0xce, 0x43, 0x6a, 0x9c, 0xda, 0x4a, 0xf8,
0xfc, 0x42, 0x18, 0x4a, 0xa8, 0x2a, 0xe0, 0xef, 0x7b, 0x2e, 0x8d, 0x1c, 0x08, 0x30, 0x3c, 0x86, 0x2e, 0x87, 0x56, 0x13, 0x90, 0x39, 0x94, 0xcc, 0x87, 0x89, 0x92, 0x91, 0x66, 0x39, 0x66, 0x56,
0x00, 0x5c, 0x03, 0xe4, 0x97, 0xd1, 0x0a, 0xf1, 0xad, 0x07, 0x81, 0x17, 0xf9, 0xf1, 0x5d, 0x4a, 0xad, 0x9c, 0xa4, 0x6a, 0x65, 0x67, 0x16, 0xd2, 0xe9, 0x45, 0x32, 0xe0, 0x50, 0x25, 0x81, 0xdf,
0x6f, 0xf9, 0x76, 0x6b, 0x97, 0xef, 0xe3, 0x14, 0xc1, 0xd0, 0x89, 0x45, 0xdc, 0xda, 0x0c, 0x3a, 0xb7, 0x4c, 0xea, 0x1a, 0xe0, 0x60, 0x78, 0x0c, 0x0e, 0x98, 0x0a, 0xf0, 0x2f, 0xa3, 0x25, 0x62,
0x39, 0x07, 0xa7, 0x08, 0xb9, 0x8e, 0x8a, 0x2e, 0x71, 0xa0, 0x52, 0xe4, 0xc8, 0xd4, 0xf7, 0x3d, 0x6b, 0xf7, 0x1c, 0xcb, 0xb5, 0x83, 0x2b, 0x15, 0x5d, 0xfd, 0xbd, 0xe6, 0xa1, 0xbf, 0x8e, 0x23,
0xe2, 0x00, 0xe6, 0x12, 0x59, 0x47, 0x85, 0xc8, 0x32, 0x2b, 0x97, 0x38, 0xe0, 0x4e, 0x0c, 0x28, 0x84, 0x87, 0x0e, 0x23, 0xf2, 0xa3, 0x8d, 0xa1, 0xc3, 0x7d, 0x70, 0x84, 0xe0, 0x6b, 0xa8, 0x60,
0xec, 0xef, 0xee, 0x3c, 0x1f, 0xd4, 0x5e, 0x9c, 0xf5, 0x12, 0x84, 0x4f, 0x7c, 0xa0, 0xea, 0xfe, 0x12, 0x03, 0xca, 0x05, 0x1f, 0x19, 0x9d, 0xfd, 0x88, 0x18, 0x80, 0x7d, 0x0b, 0x2f, 0xa3, 0xbc,
0xee, 0x0e, 0x66, 0xca, 0xbc, 0xda, 0x05, 0x27, 0x2f, 0x5c, 0xb5, 0x0b, 0xd6, 0xcd, 0xa8, 0xf6, 0xab, 0xa9, 0xe5, 0x45, 0x1f, 0x70, 0x3b, 0x00, 0xe4, 0x8f, 0x0f, 0x0f, 0x5e, 0xf4, 0xab, 0xff,
0x1f, 0x24, 0x54, 0x17, 0x70, 0x2d, 0x12, 0x10, 0x07, 0x42, 0x08, 0xe8, 0x59, 0x93, 0x55, 0x47, 0xcf, 0x7a, 0x3a, 0xd8, 0x13, 0x1b, 0xa8, 0x78, 0x7c, 0x78, 0x80, 0x3d, 0x67, 0xe1, 0x47, 0x0e,
0xc5, 0x43, 0xcb, 0x35, 0xf9, 0x5d, 0xcd, 0x84, 0xff, 0x03, 0xcb, 0x35, 0x31, 0x97, 0xa4, 0x09, 0xad, 0x27, 0x0e, 0x39, 0x87, 0x16, 0xd0, 0x4c, 0xb6, 0x80, 0x97, 0x66, 0x48, 0x59, 0x46, 0xed,
0x2a, 0xcc, 0x4a, 0x90, 0xf2, 0x54, 0x42, 0xb7, 0xe7, 0x56, 0x6b, 0xca, 0x21, 0xcd, 0x4c, 0xf2, 0x7f, 0xc3, 0xa1, 0x5a, 0x02, 0xd7, 0x24, 0x0e, 0x31, 0x80, 0x81, 0x43, 0xaf, 0x9a, 0xac, 0x1a,
0x3b, 0xe8, 0x6a, 0xe4, 0xd2, 0xc8, 0x0a, 0xd9, 0xf3, 0x95, 0xed, 0x3c, 0x9b, 0xc3, 0x41, 0xed, 0x2a, 0x9c, 0x6a, 0xa6, 0xea, 0xdf, 0xd5, 0x98, 0xfc, 0xef, 0x6b, 0xa6, 0x8a, 0x7d, 0x4b, 0x94,
0xea, 0xbe, 0x28, 0xc2, 0x79, 0xac, 0xf2, 0xd3, 0x72, 0x2e, 0xbf, 0xbc, 0x0f, 0x3e, 0x40, 0x1b, 0xa0, 0x7c, 0x56, 0x82, 0x84, 0xa7, 0x1c, 0xba, 0x35, 0xb5, 0x5a, 0x23, 0x0e, 0x2e, 0x33, 0xc9,
0x99, 0x76, 0x40, 0xe9, 0xde, 0xd8, 0x86, 0x1b, 0xb1, 0x0d, 0x59, 0xad, 0x11, 0x00, 0x4f, 0xea, 0x6f, 0xa3, 0xeb, 0xae, 0x49, 0x5d, 0x8d, 0x79, 0xef, 0x5d, 0xbc, 0x01, 0x6d, 0x0c, 0xfa, 0xd5,
0xc8, 0x5f, 0xa3, 0x35, 0x3f, 0x1b, 0xea, 0xb8, 0xb4, 0xef, 0x2d, 0x90, 0xd2, 0x29, 0xa9, 0xd2, 0xeb, 0xc7, 0x49, 0x13, 0x4e, 0x63, 0x85, 0xef, 0x73, 0xa9, 0xfc, 0xfa, 0xed, 0xf0, 0x1e, 0x5a,
0x37, 0x86, 0x83, 0xda, 0x9a, 0x20, 0xc0, 0xe2, 0x39, 0x72, 0x0b, 0x95, 0x49, 0x3a, 0xb0, 0x3c, 0x8f, 0xb5, 0x03, 0x4a, 0x8f, 0x46, 0x31, 0xdc, 0x08, 0x62, 0x88, 0x7b, 0x0d, 0x01, 0x78, 0xdc,
0x64, 0xbd, 0x7c, 0x94, 0x86, 0x46, 0xd2, 0xfe, 0xb6, 0x05, 0xe9, 0xf3, 0x89, 0x1d, 0x9c, 0xd3, 0x87, 0xff, 0x12, 0xad, 0xda, 0x71, 0xa9, 0x83, 0xd2, 0xbe, 0x3b, 0x43, 0x4a, 0x27, 0xa4, 0x4a,
0x57, 0xfe, 0x5a, 0x46, 0x9b, 0x53, 0xda, 0x83, 0xdc, 0x44, 0xc8, 0x0c, 0xac, 0x3e, 0x04, 0x99, 0x5e, 0x1f, 0xf4, 0xab, 0xab, 0x09, 0x03, 0x4e, 0xee, 0xc3, 0x37, 0x51, 0x89, 0x44, 0x13, 0xce,
0x20, 0xa5, 0x6d, 0x6e, 0x27, 0x95, 0xe0, 0x0c, 0x4a, 0xfe, 0x1c, 0xa1, 0x31, 0x7b, 0x1c, 0x13, 0x7d, 0xaf, 0xa5, 0x0f, 0xd3, 0x50, 0x0f, 0xdb, 0xdf, 0x5e, 0xc2, 0xfa, 0x62, 0x6c, 0x05, 0xa7,
0x75, 0x7e, 0x4c, 0xf2, 0xe3, 0x97, 0x5e, 0x66, 0xfc, 0x99, 0xdd, 0x0c, 0xa3, 0x4c, 0x51, 0x29, 0xfc, 0x85, 0x3f, 0x73, 0x68, 0x63, 0x42, 0x7b, 0xe0, 0x1b, 0x08, 0xa9, 0x8e, 0xd6, 0x03, 0x27,
0x00, 0x0a, 0x41, 0x1f, 0xcc, 0xf7, 0xbc, 0xa0, 0x52, 0xe0, 0x75, 0xf4, 0xf6, 0x02, 0x41, 0x9f, 0x26, 0x52, 0xd4, 0xe6, 0x0e, 0x22, 0x0b, 0x8e, 0xa1, 0xf8, 0xcf, 0x10, 0x1a, 0xb1, 0x07, 0x9a,
0x68, 0x65, 0xfa, 0x66, 0xec, 0x52, 0x09, 0x8f, 0x89, 0x71, 0xf6, 0x14, 0xb9, 0x8d, 0xae, 0x9b, 0x88, 0xd3, 0x35, 0x49, 0xcf, 0x6b, 0x72, 0xc9, 0xe3, 0x8f, 0xad, 0xc6, 0x18, 0x79, 0x8a, 0x8a,
0x40, 0x32, 0x66, 0x7e, 0x15, 0x01, 0x0d, 0xc1, 0xe4, 0x1d, 0x6a, 0x45, 0xbf, 0x1d, 0x13, 0x5c, 0x0e, 0x50, 0x70, 0x7a, 0xa0, 0xbe, 0x6b, 0x39, 0xe5, 0xbc, 0x5f, 0x47, 0x6f, 0xcd, 0x20, 0xfa,
0xdf, 0x99, 0x06, 0xc2, 0xd3, 0x75, 0x95, 0xdf, 0x24, 0x74, 0x5d, 0xb0, 0xec, 0x63, 0x70, 0x7c, 0x58, 0x2b, 0x93, 0x37, 0x82, 0x23, 0x15, 0xf1, 0x88, 0x18, 0xc7, 0x77, 0xe1, 0x5b, 0x68, 0x4b,
0x9b, 0x84, 0x70, 0x0e, 0xcf, 0xd1, 0x81, 0xf0, 0x1c, 0xbd, 0xb1, 0x40, 0xf8, 0x12, 0x23, 0x67, 0x05, 0x12, 0x0b, 0xf3, 0x0b, 0x17, 0x28, 0x03, 0xd5, 0xef, 0x50, 0x4b, 0xf2, 0xad, 0x80, 0x60,
0x3d, 0x4b, 0xca, 0xaf, 0x12, 0xba, 0x31, 0x55, 0xe3, 0x1c, 0xda, 0xeb, 0x27, 0x62, 0x7b, 0x7d, 0xeb, 0x60, 0x12, 0x08, 0x4f, 0xf6, 0x15, 0x7e, 0xe5, 0xd0, 0x56, 0x22, 0xb2, 0x8f, 0xc0, 0xb0,
0xf5, 0x0c, 0x7e, 0xcd, 0x68, 0xb3, 0xc7, 0xb3, 0xbc, 0xe2, 0x4d, 0xe5, 0xff, 0x38, 0x3f, 0x28, 0x75, 0xc2, 0x60, 0x0e, 0xcf, 0xd1, 0x49, 0xe2, 0x39, 0x7a, 0x7d, 0x06, 0xf9, 0xc2, 0x20, 0xb3,
0x7f, 0x8b, 0x63, 0x10, 0xa5, 0xe7, 0xe0, 0x86, 0xd8, 0x51, 0x96, 0x4f, 0xd5, 0x51, 0x26, 0x1a, 0x9e, 0x25, 0xe1, 0x17, 0x0e, 0xdd, 0x98, 0xe8, 0x31, 0x87, 0xf6, 0xfa, 0x30, 0xd9, 0x5e, 0x5f,
0x6d, 0x61, 0xc1, 0x46, 0x4b, 0xe9, 0xd9, 0x1a, 0xed, 0x01, 0x5a, 0x13, 0x5f, 0x9f, 0xe2, 0x29, 0xbd, 0xc2, 0xb9, 0x32, 0xda, 0xec, 0x79, 0xd6, 0xa9, 0x5a, 0xc3, 0xb1, 0xf5, 0xbf, 0x37, 0x3f,
0x3f, 0xe1, 0x38, 0x75, 0x5b, 0x78, 0x9d, 0x44, 0xa6, 0xfc, 0xec, 0x41, 0xe9, 0x45, 0x9e, 0x3d, 0x08, 0x7f, 0x25, 0xc7, 0x20, 0x4a, 0xe7, 0x70, 0x8c, 0x64, 0x47, 0xc9, 0x5d, 0xaa, 0xa3, 0x8c,
0x28, 0x9d, 0x51, 0x14, 0xbf, 0x88, 0xb3, 0xc7, 0xd4, 0x38, 0x9f, 0xff, 0xec, 0xc1, 0xbe, 0x8c, 0x35, 0xda, 0xfc, 0x8c, 0x8d, 0x96, 0xd2, 0xab, 0x35, 0xda, 0x13, 0xb4, 0x9a, 0x7c, 0x7d, 0x0a,
0xd9, 0x5f, 0xea, 0x13, 0x23, 0x99, 0x21, 0xd3, 0x2f, 0xe3, 0xbd, 0x44, 0x80, 0xc7, 0x18, 0x5d, 0x97, 0xfc, 0xcd, 0xe7, 0x53, 0xb7, 0x12, 0xaf, 0x53, 0x92, 0x29, 0x3d, 0x7b, 0x50, 0xfa, 0x6f,
0x3f, 0x3a, 0xa9, 0x2e, 0x1d, 0x9f, 0x54, 0x97, 0x9e, 0x9d, 0x54, 0x97, 0x9e, 0x0e, 0xab, 0xd2, 0x9e, 0x3d, 0x28, 0xcd, 0x28, 0x8a, 0x9f, 0x92, 0xb3, 0xc7, 0x44, 0x9d, 0xe7, 0x3f, 0x7b, 0x78,
0xd1, 0xb0, 0x2a, 0x1d, 0x0f, 0xab, 0xd2, 0xb3, 0x61, 0x55, 0xfa, 0x7d, 0x58, 0x95, 0xbe, 0xfb, 0x3f, 0xa5, 0xbd, 0x7f, 0xa9, 0x4d, 0x94, 0x70, 0x86, 0x8c, 0x7e, 0x4a, 0x1f, 0x85, 0x06, 0x3c,
0xa3, 0xba, 0xf4, 0xe9, 0xad, 0x79, 0xbf, 0xb3, 0xfc, 0x13, 0x00, 0x00, 0xff, 0xff, 0x93, 0xfa, 0xc2, 0xc8, 0xf2, 0xd9, 0x45, 0x65, 0xe1, 0xfc, 0xa2, 0xb2, 0xf0, 0xfc, 0xa2, 0xb2, 0xf0, 0x74,
0x8c, 0x28, 0x9f, 0x11, 0x00, 0x00, 0x50, 0xe1, 0xce, 0x06, 0x15, 0xee, 0x7c, 0x50, 0xe1, 0x9e, 0x0f, 0x2a, 0xdc, 0x6f, 0x83, 0x0a,
0xf7, 0xec, 0xf7, 0xca, 0xc2, 0x27, 0x37, 0xa7, 0xfd, 0x61, 0xe6, 0xef, 0x00, 0x00, 0x00, 0xff,
0xff, 0x94, 0x38, 0x0b, 0x13, 0xd0, 0x11, 0x00, 0x00,
} }
func (m *AllocationResult) Marshal() (dAtA []byte, err error) { func (m *AllocationResult) Marshal() (dAtA []byte, err error) {
@ -701,7 +703,7 @@ func (m *AllocationResult) MarshalToSizedBuffer(dAtA []byte) (int, error) {
return len(dAtA) - i, nil return len(dAtA) - i, nil
} }
func (m *PodScheduling) Marshal() (dAtA []byte, err error) { func (m *PodSchedulingContext) Marshal() (dAtA []byte, err error) {
size := m.Size() size := m.Size()
dAtA = make([]byte, size) dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size]) n, err := m.MarshalToSizedBuffer(dAtA[:size])
@ -711,12 +713,12 @@ func (m *PodScheduling) Marshal() (dAtA []byte, err error) {
return dAtA[:n], nil return dAtA[:n], nil
} }
func (m *PodScheduling) MarshalTo(dAtA []byte) (int, error) { func (m *PodSchedulingContext) MarshalTo(dAtA []byte) (int, error) {
size := m.Size() size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size]) return m.MarshalToSizedBuffer(dAtA[:size])
} }
func (m *PodScheduling) MarshalToSizedBuffer(dAtA []byte) (int, error) { func (m *PodSchedulingContext) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA) i := len(dAtA)
_ = i _ = i
var l int var l int
@ -754,7 +756,7 @@ func (m *PodScheduling) MarshalToSizedBuffer(dAtA []byte) (int, error) {
return len(dAtA) - i, nil return len(dAtA) - i, nil
} }
func (m *PodSchedulingList) Marshal() (dAtA []byte, err error) { func (m *PodSchedulingContextList) Marshal() (dAtA []byte, err error) {
size := m.Size() size := m.Size()
dAtA = make([]byte, size) dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size]) n, err := m.MarshalToSizedBuffer(dAtA[:size])
@ -764,12 +766,12 @@ func (m *PodSchedulingList) Marshal() (dAtA []byte, err error) {
return dAtA[:n], nil return dAtA[:n], nil
} }
func (m *PodSchedulingList) MarshalTo(dAtA []byte) (int, error) { func (m *PodSchedulingContextList) MarshalTo(dAtA []byte) (int, error) {
size := m.Size() size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size]) return m.MarshalToSizedBuffer(dAtA[:size])
} }
func (m *PodSchedulingList) MarshalToSizedBuffer(dAtA []byte) (int, error) { func (m *PodSchedulingContextList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA) i := len(dAtA)
_ = i _ = i
var l int var l int
@ -801,7 +803,7 @@ func (m *PodSchedulingList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
return len(dAtA) - i, nil return len(dAtA) - i, nil
} }
func (m *PodSchedulingSpec) Marshal() (dAtA []byte, err error) { func (m *PodSchedulingContextSpec) Marshal() (dAtA []byte, err error) {
size := m.Size() size := m.Size()
dAtA = make([]byte, size) dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size]) n, err := m.MarshalToSizedBuffer(dAtA[:size])
@ -811,12 +813,12 @@ func (m *PodSchedulingSpec) Marshal() (dAtA []byte, err error) {
return dAtA[:n], nil return dAtA[:n], nil
} }
func (m *PodSchedulingSpec) MarshalTo(dAtA []byte) (int, error) { func (m *PodSchedulingContextSpec) MarshalTo(dAtA []byte) (int, error) {
size := m.Size() size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size]) return m.MarshalToSizedBuffer(dAtA[:size])
} }
func (m *PodSchedulingSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { func (m *PodSchedulingContextSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA) i := len(dAtA)
_ = i _ = i
var l int var l int
@ -838,7 +840,7 @@ func (m *PodSchedulingSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
return len(dAtA) - i, nil return len(dAtA) - i, nil
} }
func (m *PodSchedulingStatus) Marshal() (dAtA []byte, err error) { func (m *PodSchedulingContextStatus) Marshal() (dAtA []byte, err error) {
size := m.Size() size := m.Size()
dAtA = make([]byte, size) dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size]) n, err := m.MarshalToSizedBuffer(dAtA[:size])
@ -848,12 +850,12 @@ func (m *PodSchedulingStatus) Marshal() (dAtA []byte, err error) {
return dAtA[:n], nil return dAtA[:n], nil
} }
func (m *PodSchedulingStatus) MarshalTo(dAtA []byte) (int, error) { func (m *PodSchedulingContextStatus) MarshalTo(dAtA []byte) (int, error) {
size := m.Size() size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size]) return m.MarshalToSizedBuffer(dAtA[:size])
} }
func (m *PodSchedulingStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { func (m *PodSchedulingContextStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA) i := len(dAtA)
_ = i _ = i
var l int var l int
@ -1512,7 +1514,7 @@ func (m *AllocationResult) Size() (n int) {
return n return n
} }
func (m *PodScheduling) Size() (n int) { func (m *PodSchedulingContext) Size() (n int) {
if m == nil { if m == nil {
return 0 return 0
} }
@ -1527,7 +1529,7 @@ func (m *PodScheduling) Size() (n int) {
return n return n
} }
func (m *PodSchedulingList) Size() (n int) { func (m *PodSchedulingContextList) Size() (n int) {
if m == nil { if m == nil {
return 0 return 0
} }
@ -1544,7 +1546,7 @@ func (m *PodSchedulingList) Size() (n int) {
return n return n
} }
func (m *PodSchedulingSpec) Size() (n int) { func (m *PodSchedulingContextSpec) Size() (n int) {
if m == nil { if m == nil {
return 0 return 0
} }
@ -1561,7 +1563,7 @@ func (m *PodSchedulingSpec) Size() (n int) {
return n return n
} }
func (m *PodSchedulingStatus) Size() (n int) { func (m *PodSchedulingContextStatus) Size() (n int) {
if m == nil { if m == nil {
return 0 return 0
} }
@ -1812,46 +1814,46 @@ func (this *AllocationResult) String() string {
}, "") }, "")
return s return s
} }
func (this *PodScheduling) String() string { func (this *PodSchedulingContext) String() string {
if this == nil { if this == nil {
return "nil" return "nil"
} }
s := strings.Join([]string{`&PodScheduling{`, s := strings.Join([]string{`&PodSchedulingContext{`,
`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`, `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`,
`Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodSchedulingSpec", "PodSchedulingSpec", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodSchedulingContextSpec", "PodSchedulingContextSpec", 1), `&`, ``, 1) + `,`,
`Status:` + strings.Replace(strings.Replace(this.Status.String(), "PodSchedulingStatus", "PodSchedulingStatus", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "PodSchedulingContextStatus", "PodSchedulingContextStatus", 1), `&`, ``, 1) + `,`,
`}`, `}`,
}, "") }, "")
return s return s
} }
func (this *PodSchedulingList) String() string { func (this *PodSchedulingContextList) String() string {
if this == nil { if this == nil {
return "nil" return "nil"
} }
repeatedStringForItems := "[]PodScheduling{" repeatedStringForItems := "[]PodSchedulingContext{"
for _, f := range this.Items { for _, f := range this.Items {
repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PodScheduling", "PodScheduling", 1), `&`, ``, 1) + "," repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PodSchedulingContext", "PodSchedulingContext", 1), `&`, ``, 1) + ","
} }
repeatedStringForItems += "}" repeatedStringForItems += "}"
s := strings.Join([]string{`&PodSchedulingList{`, s := strings.Join([]string{`&PodSchedulingContextList{`,
`ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`, `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`,
`Items:` + repeatedStringForItems + `,`, `Items:` + repeatedStringForItems + `,`,
`}`, `}`,
}, "") }, "")
return s return s
} }
func (this *PodSchedulingSpec) String() string { func (this *PodSchedulingContextSpec) String() string {
if this == nil { if this == nil {
return "nil" return "nil"
} }
s := strings.Join([]string{`&PodSchedulingSpec{`, s := strings.Join([]string{`&PodSchedulingContextSpec{`,
`SelectedNode:` + fmt.Sprintf("%v", this.SelectedNode) + `,`, `SelectedNode:` + fmt.Sprintf("%v", this.SelectedNode) + `,`,
`PotentialNodes:` + fmt.Sprintf("%v", this.PotentialNodes) + `,`, `PotentialNodes:` + fmt.Sprintf("%v", this.PotentialNodes) + `,`,
`}`, `}`,
}, "") }, "")
return s return s
} }
func (this *PodSchedulingStatus) String() string { func (this *PodSchedulingContextStatus) String() string {
if this == nil { if this == nil {
return "nil" return "nil"
} }
@ -1860,7 +1862,7 @@ func (this *PodSchedulingStatus) String() string {
repeatedStringForResourceClaims += strings.Replace(strings.Replace(f.String(), "ResourceClaimSchedulingStatus", "ResourceClaimSchedulingStatus", 1), `&`, ``, 1) + "," repeatedStringForResourceClaims += strings.Replace(strings.Replace(f.String(), "ResourceClaimSchedulingStatus", "ResourceClaimSchedulingStatus", 1), `&`, ``, 1) + ","
} }
repeatedStringForResourceClaims += "}" repeatedStringForResourceClaims += "}"
s := strings.Join([]string{`&PodSchedulingStatus{`, s := strings.Join([]string{`&PodSchedulingContextStatus{`,
`ResourceClaims:` + repeatedStringForResourceClaims + `,`, `ResourceClaims:` + repeatedStringForResourceClaims + `,`,
`}`, `}`,
}, "") }, "")
@ -2186,7 +2188,7 @@ func (m *AllocationResult) Unmarshal(dAtA []byte) error {
} }
return nil return nil
} }
func (m *PodScheduling) Unmarshal(dAtA []byte) error { func (m *PodSchedulingContext) Unmarshal(dAtA []byte) error {
l := len(dAtA) l := len(dAtA)
iNdEx := 0 iNdEx := 0
for iNdEx < l { for iNdEx < l {
@ -2209,10 +2211,10 @@ func (m *PodScheduling) Unmarshal(dAtA []byte) error {
fieldNum := int32(wire >> 3) fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7) wireType := int(wire & 0x7)
if wireType == 4 { if wireType == 4 {
return fmt.Errorf("proto: PodScheduling: wiretype end group for non-group") return fmt.Errorf("proto: PodSchedulingContext: wiretype end group for non-group")
} }
if fieldNum <= 0 { if fieldNum <= 0 {
return fmt.Errorf("proto: PodScheduling: illegal tag %d (wire type %d)", fieldNum, wire) return fmt.Errorf("proto: PodSchedulingContext: illegal tag %d (wire type %d)", fieldNum, wire)
} }
switch fieldNum { switch fieldNum {
case 1: case 1:
@ -2335,7 +2337,7 @@ func (m *PodScheduling) Unmarshal(dAtA []byte) error {
} }
return nil return nil
} }
func (m *PodSchedulingList) Unmarshal(dAtA []byte) error { func (m *PodSchedulingContextList) Unmarshal(dAtA []byte) error {
l := len(dAtA) l := len(dAtA)
iNdEx := 0 iNdEx := 0
for iNdEx < l { for iNdEx < l {
@ -2358,10 +2360,10 @@ func (m *PodSchedulingList) Unmarshal(dAtA []byte) error {
fieldNum := int32(wire >> 3) fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7) wireType := int(wire & 0x7)
if wireType == 4 { if wireType == 4 {
return fmt.Errorf("proto: PodSchedulingList: wiretype end group for non-group") return fmt.Errorf("proto: PodSchedulingContextList: wiretype end group for non-group")
} }
if fieldNum <= 0 { if fieldNum <= 0 {
return fmt.Errorf("proto: PodSchedulingList: illegal tag %d (wire type %d)", fieldNum, wire) return fmt.Errorf("proto: PodSchedulingContextList: illegal tag %d (wire type %d)", fieldNum, wire)
} }
switch fieldNum { switch fieldNum {
case 1: case 1:
@ -2426,7 +2428,7 @@ func (m *PodSchedulingList) Unmarshal(dAtA []byte) error {
if postIndex > l { if postIndex > l {
return io.ErrUnexpectedEOF return io.ErrUnexpectedEOF
} }
m.Items = append(m.Items, PodScheduling{}) m.Items = append(m.Items, PodSchedulingContext{})
if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err return err
} }
@ -2452,7 +2454,7 @@ func (m *PodSchedulingList) Unmarshal(dAtA []byte) error {
} }
return nil return nil
} }
func (m *PodSchedulingSpec) Unmarshal(dAtA []byte) error { func (m *PodSchedulingContextSpec) Unmarshal(dAtA []byte) error {
l := len(dAtA) l := len(dAtA)
iNdEx := 0 iNdEx := 0
for iNdEx < l { for iNdEx < l {
@ -2475,10 +2477,10 @@ func (m *PodSchedulingSpec) Unmarshal(dAtA []byte) error {
fieldNum := int32(wire >> 3) fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7) wireType := int(wire & 0x7)
if wireType == 4 { if wireType == 4 {
return fmt.Errorf("proto: PodSchedulingSpec: wiretype end group for non-group") return fmt.Errorf("proto: PodSchedulingContextSpec: wiretype end group for non-group")
} }
if fieldNum <= 0 { if fieldNum <= 0 {
return fmt.Errorf("proto: PodSchedulingSpec: illegal tag %d (wire type %d)", fieldNum, wire) return fmt.Errorf("proto: PodSchedulingContextSpec: illegal tag %d (wire type %d)", fieldNum, wire)
} }
switch fieldNum { switch fieldNum {
case 1: case 1:
@ -2566,7 +2568,7 @@ func (m *PodSchedulingSpec) Unmarshal(dAtA []byte) error {
} }
return nil return nil
} }
func (m *PodSchedulingStatus) Unmarshal(dAtA []byte) error { func (m *PodSchedulingContextStatus) Unmarshal(dAtA []byte) error {
l := len(dAtA) l := len(dAtA)
iNdEx := 0 iNdEx := 0
for iNdEx < l { for iNdEx < l {
@ -2589,10 +2591,10 @@ func (m *PodSchedulingStatus) Unmarshal(dAtA []byte) error {
fieldNum := int32(wire >> 3) fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7) wireType := int(wire & 0x7)
if wireType == 4 { if wireType == 4 {
return fmt.Errorf("proto: PodSchedulingStatus: wiretype end group for non-group") return fmt.Errorf("proto: PodSchedulingContextStatus: wiretype end group for non-group")
} }
if fieldNum <= 0 { if fieldNum <= 0 {
return fmt.Errorf("proto: PodSchedulingStatus: illegal tag %d (wire type %d)", fieldNum, wire) return fmt.Errorf("proto: PodSchedulingContextStatus: illegal tag %d (wire type %d)", fieldNum, wire)
} }
switch fieldNum { switch fieldNum {
case 1: case 1:

View File

@ -56,37 +56,37 @@ message AllocationResult {
optional bool shareable = 3; optional bool shareable = 3;
} }
// PodScheduling objects hold information that is needed to schedule // PodSchedulingContext objects hold information that is needed to schedule
// a Pod with ResourceClaims that use "WaitForFirstConsumer" allocation // a Pod with ResourceClaims that use "WaitForFirstConsumer" allocation
// mode. // mode.
// //
// This is an alpha type and requires enabling the DynamicResourceAllocation // This is an alpha type and requires enabling the DynamicResourceAllocation
// feature gate. // feature gate.
message PodScheduling { message PodSchedulingContext {
// Standard object metadata // Standard object metadata
// +optional // +optional
optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
// Spec describes where resources for the Pod are needed. // Spec describes where resources for the Pod are needed.
optional PodSchedulingSpec spec = 2; optional PodSchedulingContextSpec spec = 2;
// Status describes where resources for the Pod can be allocated. // Status describes where resources for the Pod can be allocated.
// +optional // +optional
optional PodSchedulingStatus status = 3; optional PodSchedulingContextStatus status = 3;
} }
// PodSchedulingList is a collection of Pod scheduling objects. // PodSchedulingContextList is a collection of Pod scheduling objects.
message PodSchedulingList { message PodSchedulingContextList {
// Standard list metadata // Standard list metadata
// +optional // +optional
optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
// Items is the list of PodScheduling objects. // Items is the list of PodSchedulingContext objects.
repeated PodScheduling items = 2; repeated PodSchedulingContext items = 2;
} }
// PodSchedulingSpec describes where resources for the Pod are needed. // PodSchedulingContextSpec describes where resources for the Pod are needed.
message PodSchedulingSpec { message PodSchedulingContextSpec {
// SelectedNode is the node for which allocation of ResourceClaims that // SelectedNode is the node for which allocation of ResourceClaims that
// are referenced by the Pod and that use "WaitForFirstConsumer" // are referenced by the Pod and that use "WaitForFirstConsumer"
// allocation is to be attempted. // allocation is to be attempted.
@ -105,8 +105,8 @@ message PodSchedulingSpec {
repeated string potentialNodes = 2; repeated string potentialNodes = 2;
} }
// PodSchedulingStatus describes where resources for the Pod can be allocated. // PodSchedulingContextStatus describes where resources for the Pod can be allocated.
message PodSchedulingStatus { message PodSchedulingContextStatus {
// ResourceClaims describes resource availability for each // ResourceClaims describes resource availability for each
// pod.spec.resourceClaim entry where the corresponding ResourceClaim // pod.spec.resourceClaim entry where the corresponding ResourceClaim
// uses "WaitForFirstConsumer" allocation mode. // uses "WaitForFirstConsumer" allocation mode.

View File

@ -50,8 +50,8 @@ func addKnownTypes(scheme *runtime.Scheme) error {
&ResourceClaimList{}, &ResourceClaimList{},
&ResourceClaimTemplate{}, &ResourceClaimTemplate{},
&ResourceClaimTemplateList{}, &ResourceClaimTemplateList{},
&PodScheduling{}, &PodSchedulingContext{},
&PodSchedulingList{}, &PodSchedulingContextList{},
) )
// Add common types // Add common types
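For illustration, a minimal sketch of what the rename means for scheme registration. It assumes the published k8s.io/api/resource/v1alpha2 package and its generated AddToScheme helper (which wraps the addKnownTypes shown above); the lookup now reports the new kind name:

package main

import (
	"fmt"

	resourcev1alpha2 "k8s.io/api/resource/v1alpha2"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	scheme := runtime.NewScheme()
	// AddToScheme runs addKnownTypes for resource.k8s.io/v1alpha2.
	if err := resourcev1alpha2.AddToScheme(scheme); err != nil {
		panic(err)
	}
	// The scheme now resolves the Go type to the renamed kind.
	gvks, _, err := scheme.ObjectKinds(&resourcev1alpha2.PodSchedulingContext{})
	if err != nil {
		panic(err)
	}
	fmt.Println(gvks[0].Kind) // PodSchedulingContext
}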

View File

@ -181,28 +181,28 @@ type ResourceClaimList struct {
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +k8s:prerelease-lifecycle-gen:introduced=1.26 // +k8s:prerelease-lifecycle-gen:introduced=1.26
// PodScheduling objects hold information that is needed to schedule // PodSchedulingContext objects hold information that is needed to schedule
// a Pod with ResourceClaims that use "WaitForFirstConsumer" allocation // a Pod with ResourceClaims that use "WaitForFirstConsumer" allocation
// mode. // mode.
// //
// This is an alpha type and requires enabling the DynamicResourceAllocation // This is an alpha type and requires enabling the DynamicResourceAllocation
// feature gate. // feature gate.
type PodScheduling struct { type PodSchedulingContext struct {
metav1.TypeMeta `json:",inline"` metav1.TypeMeta `json:",inline"`
// Standard object metadata // Standard object metadata
// +optional // +optional
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Spec describes where resources for the Pod are needed. // Spec describes where resources for the Pod are needed.
Spec PodSchedulingSpec `json:"spec" protobuf:"bytes,2,name=spec"` Spec PodSchedulingContextSpec `json:"spec" protobuf:"bytes,2,name=spec"`
// Status describes where resources for the Pod can be allocated. // Status describes where resources for the Pod can be allocated.
// +optional // +optional
Status PodSchedulingStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` Status PodSchedulingContextStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
} }
// PodSchedulingSpec describes where resources for the Pod are needed. // PodSchedulingContextSpec describes where resources for the Pod are needed.
type PodSchedulingSpec struct { type PodSchedulingContextSpec struct {
// SelectedNode is the node for which allocation of ResourceClaims that // SelectedNode is the node for which allocation of ResourceClaims that
// are referenced by the Pod and that use "WaitForFirstConsumer" // are referenced by the Pod and that use "WaitForFirstConsumer"
// allocation is to be attempted. // allocation is to be attempted.
@ -221,8 +221,8 @@ type PodSchedulingSpec struct {
PotentialNodes []string `json:"potentialNodes,omitempty" protobuf:"bytes,2,opt,name=potentialNodes"` PotentialNodes []string `json:"potentialNodes,omitempty" protobuf:"bytes,2,opt,name=potentialNodes"`
} }
// PodSchedulingStatus describes where resources for the Pod can be allocated. // PodSchedulingContextStatus describes where resources for the Pod can be allocated.
type PodSchedulingStatus struct { type PodSchedulingContextStatus struct {
// ResourceClaims describes resource availability for each // ResourceClaims describes resource availability for each
// pod.spec.resourceClaim entry where the corresponding ResourceClaim // pod.spec.resourceClaim entry where the corresponding ResourceClaim
// uses "WaitForFirstConsumer" allocation mode. // uses "WaitForFirstConsumer" allocation mode.
@ -257,22 +257,22 @@ type ResourceClaimSchedulingStatus struct {
} }
// PodSchedulingNodeListMaxSize defines the maximum number of entries in the // PodSchedulingNodeListMaxSize defines the maximum number of entries in the
// node lists that are stored in PodScheduling objects. This limit is part // node lists that are stored in PodSchedulingContext objects. This limit is part
// of the API. // of the API.
const PodSchedulingNodeListMaxSize = 128 const PodSchedulingNodeListMaxSize = 128
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +k8s:prerelease-lifecycle-gen:introduced=1.26 // +k8s:prerelease-lifecycle-gen:introduced=1.26
// PodSchedulingList is a collection of Pod scheduling objects. // PodSchedulingContextList is a collection of Pod scheduling objects.
type PodSchedulingList struct { type PodSchedulingContextList struct {
metav1.TypeMeta `json:",inline"` metav1.TypeMeta `json:",inline"`
// Standard list metadata // Standard list metadata
// +optional // +optional
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Items is the list of PodScheduling objects. // Items is the list of PodSchedulingContext objects.
Items []PodScheduling `json:"items" protobuf:"bytes,2,rep,name=items"` Items []PodSchedulingContext `json:"items" protobuf:"bytes,2,rep,name=items"`
} }
// +genclient // +genclient
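A usage sketch of the renamed Go types above; the field values are placeholders, and the import path assumes the published k8s.io/api/resource/v1alpha2 package:

package main

import (
	"fmt"

	resourcev1alpha2 "k8s.io/api/resource/v1alpha2"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Scheduler-side view of where the Pod's "WaitForFirstConsumer" claims
	// could be satisfied; the node names are placeholders.
	psc := resourcev1alpha2.PodSchedulingContext{
		ObjectMeta: metav1.ObjectMeta{Name: "my-pod", Namespace: "default"},
		Spec: resourcev1alpha2.PodSchedulingContextSpec{
			SelectedNode:   "node-1",
			PotentialNodes: []string{"node-1", "node-2"},
		},
	}
	// The node list stays within the API limit defined above.
	fmt.Println(len(psc.Spec.PotentialNodes) <= resourcev1alpha2.PodSchedulingNodeListMaxSize)
}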

View File

@ -38,44 +38,44 @@ func (AllocationResult) SwaggerDoc() map[string]string {
return map_AllocationResult return map_AllocationResult
} }
var map_PodScheduling = map[string]string{ var map_PodSchedulingContext = map[string]string{
"": "PodScheduling objects hold information that is needed to schedule a Pod with ResourceClaims that use \"WaitForFirstConsumer\" allocation mode.\n\nThis is an alpha type and requires enabling the DynamicResourceAllocation feature gate.", "": "PodSchedulingContext objects hold information that is needed to schedule a Pod with ResourceClaims that use \"WaitForFirstConsumer\" allocation mode.\n\nThis is an alpha type and requires enabling the DynamicResourceAllocation feature gate.",
"metadata": "Standard object metadata", "metadata": "Standard object metadata",
"spec": "Spec describes where resources for the Pod are needed.", "spec": "Spec describes where resources for the Pod are needed.",
"status": "Status describes where resources for the Pod can be allocated.", "status": "Status describes where resources for the Pod can be allocated.",
} }
func (PodScheduling) SwaggerDoc() map[string]string { func (PodSchedulingContext) SwaggerDoc() map[string]string {
return map_PodScheduling return map_PodSchedulingContext
} }
var map_PodSchedulingList = map[string]string{ var map_PodSchedulingContextList = map[string]string{
"": "PodSchedulingList is a collection of Pod scheduling objects.", "": "PodSchedulingContextList is a collection of Pod scheduling objects.",
"metadata": "Standard list metadata", "metadata": "Standard list metadata",
"items": "Items is the list of PodScheduling objects.", "items": "Items is the list of PodSchedulingContext objects.",
} }
func (PodSchedulingList) SwaggerDoc() map[string]string { func (PodSchedulingContextList) SwaggerDoc() map[string]string {
return map_PodSchedulingList return map_PodSchedulingContextList
} }
var map_PodSchedulingSpec = map[string]string{ var map_PodSchedulingContextSpec = map[string]string{
"": "PodSchedulingSpec describes where resources for the Pod are needed.", "": "PodSchedulingContextSpec describes where resources for the Pod are needed.",
"selectedNode": "SelectedNode is the node for which allocation of ResourceClaims that are referenced by the Pod and that use \"WaitForFirstConsumer\" allocation is to be attempted.", "selectedNode": "SelectedNode is the node for which allocation of ResourceClaims that are referenced by the Pod and that use \"WaitForFirstConsumer\" allocation is to be attempted.",
"potentialNodes": "PotentialNodes lists nodes where the Pod might be able to run.\n\nThe size of this field is limited to 128. This is large enough for many clusters. Larger clusters may need more attempts to find a node that suits all pending resources. This may get increased in the future, but not reduced.", "potentialNodes": "PotentialNodes lists nodes where the Pod might be able to run.\n\nThe size of this field is limited to 128. This is large enough for many clusters. Larger clusters may need more attempts to find a node that suits all pending resources. This may get increased in the future, but not reduced.",
} }
func (PodSchedulingSpec) SwaggerDoc() map[string]string { func (PodSchedulingContextSpec) SwaggerDoc() map[string]string {
return map_PodSchedulingSpec return map_PodSchedulingContextSpec
} }
var map_PodSchedulingStatus = map[string]string{ var map_PodSchedulingContextStatus = map[string]string{
"": "PodSchedulingStatus describes where resources for the Pod can be allocated.", "": "PodSchedulingContextStatus describes where resources for the Pod can be allocated.",
"resourceClaims": "ResourceClaims describes resource availability for each pod.spec.resourceClaim entry where the corresponding ResourceClaim uses \"WaitForFirstConsumer\" allocation mode.", "resourceClaims": "ResourceClaims describes resource availability for each pod.spec.resourceClaim entry where the corresponding ResourceClaim uses \"WaitForFirstConsumer\" allocation mode.",
} }
func (PodSchedulingStatus) SwaggerDoc() map[string]string { func (PodSchedulingContextStatus) SwaggerDoc() map[string]string {
return map_PodSchedulingStatus return map_PodSchedulingContextStatus
} }
var map_ResourceClaim = map[string]string{ var map_ResourceClaim = map[string]string{

View File

@ -48,7 +48,7 @@ func (in *AllocationResult) DeepCopy() *AllocationResult {
} }
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodScheduling) DeepCopyInto(out *PodScheduling) { func (in *PodSchedulingContext) DeepCopyInto(out *PodSchedulingContext) {
*out = *in *out = *in
out.TypeMeta = in.TypeMeta out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
@ -57,18 +57,18 @@ func (in *PodScheduling) DeepCopyInto(out *PodScheduling) {
return return
} }
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodScheduling. // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSchedulingContext.
func (in *PodScheduling) DeepCopy() *PodScheduling { func (in *PodSchedulingContext) DeepCopy() *PodSchedulingContext {
if in == nil { if in == nil {
return nil return nil
} }
out := new(PodScheduling) out := new(PodSchedulingContext)
in.DeepCopyInto(out) in.DeepCopyInto(out)
return out return out
} }
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *PodScheduling) DeepCopyObject() runtime.Object { func (in *PodSchedulingContext) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil { if c := in.DeepCopy(); c != nil {
return c return c
} }
@ -76,13 +76,13 @@ func (in *PodScheduling) DeepCopyObject() runtime.Object {
} }
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodSchedulingList) DeepCopyInto(out *PodSchedulingList) { func (in *PodSchedulingContextList) DeepCopyInto(out *PodSchedulingContextList) {
*out = *in *out = *in
out.TypeMeta = in.TypeMeta out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta) in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil { if in.Items != nil {
in, out := &in.Items, &out.Items in, out := &in.Items, &out.Items
*out = make([]PodScheduling, len(*in)) *out = make([]PodSchedulingContext, len(*in))
for i := range *in { for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i]) (*in)[i].DeepCopyInto(&(*out)[i])
} }
@ -90,18 +90,18 @@ func (in *PodSchedulingList) DeepCopyInto(out *PodSchedulingList) {
return return
} }
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSchedulingList. // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSchedulingContextList.
func (in *PodSchedulingList) DeepCopy() *PodSchedulingList { func (in *PodSchedulingContextList) DeepCopy() *PodSchedulingContextList {
if in == nil { if in == nil {
return nil return nil
} }
out := new(PodSchedulingList) out := new(PodSchedulingContextList)
in.DeepCopyInto(out) in.DeepCopyInto(out)
return out return out
} }
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *PodSchedulingList) DeepCopyObject() runtime.Object { func (in *PodSchedulingContextList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil { if c := in.DeepCopy(); c != nil {
return c return c
} }
@ -109,7 +109,7 @@ func (in *PodSchedulingList) DeepCopyObject() runtime.Object {
} }
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodSchedulingSpec) DeepCopyInto(out *PodSchedulingSpec) { func (in *PodSchedulingContextSpec) DeepCopyInto(out *PodSchedulingContextSpec) {
*out = *in *out = *in
if in.PotentialNodes != nil { if in.PotentialNodes != nil {
in, out := &in.PotentialNodes, &out.PotentialNodes in, out := &in.PotentialNodes, &out.PotentialNodes
@ -119,18 +119,18 @@ func (in *PodSchedulingSpec) DeepCopyInto(out *PodSchedulingSpec) {
return return
} }
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSchedulingSpec. // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSchedulingContextSpec.
func (in *PodSchedulingSpec) DeepCopy() *PodSchedulingSpec { func (in *PodSchedulingContextSpec) DeepCopy() *PodSchedulingContextSpec {
if in == nil { if in == nil {
return nil return nil
} }
out := new(PodSchedulingSpec) out := new(PodSchedulingContextSpec)
in.DeepCopyInto(out) in.DeepCopyInto(out)
return out return out
} }
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodSchedulingStatus) DeepCopyInto(out *PodSchedulingStatus) { func (in *PodSchedulingContextStatus) DeepCopyInto(out *PodSchedulingContextStatus) {
*out = *in *out = *in
if in.ResourceClaims != nil { if in.ResourceClaims != nil {
in, out := &in.ResourceClaims, &out.ResourceClaims in, out := &in.ResourceClaims, &out.ResourceClaims
@ -142,12 +142,12 @@ func (in *PodSchedulingStatus) DeepCopyInto(out *PodSchedulingStatus) {
return return
} }
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSchedulingStatus. // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSchedulingContextStatus.
func (in *PodSchedulingStatus) DeepCopy() *PodSchedulingStatus { func (in *PodSchedulingContextStatus) DeepCopy() *PodSchedulingContextStatus {
if in == nil { if in == nil {
return nil return nil
} }
out := new(PodSchedulingStatus) out := new(PodSchedulingContextStatus)
in.DeepCopyInto(out) in.DeepCopyInto(out)
return out return out
} }
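These generated helpers behave as before under the new names; a small sketch showing that mutating a DeepCopy does not touch the original (for example an object read from an informer cache):

package main

import (
	"fmt"

	resourcev1alpha2 "k8s.io/api/resource/v1alpha2"
)

func main() {
	orig := &resourcev1alpha2.PodSchedulingContext{
		Spec: resourcev1alpha2.PodSchedulingContextSpec{SelectedNode: "node-1"},
	}
	// DeepCopy allocates a fresh object graph, so edits to the clone
	// never leak back into the original.
	clone := orig.DeepCopy()
	clone.Spec.SelectedNode = "node-2"
	fmt.Println(orig.Spec.SelectedNode, clone.Spec.SelectedNode) // node-1 node-2
}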

View File

@ -1,5 +1,5 @@
{ {
"kind": "PodScheduling", "kind": "PodSchedulingContext",
"apiVersion": "resource.k8s.io/v1alpha2", "apiVersion": "resource.k8s.io/v1alpha2",
"metadata": { "metadata": {
"name": "nameValue", "name": "nameValue",

View File

@ -1,5 +1,5 @@
apiVersion: resource.k8s.io/v1alpha2 apiVersion: resource.k8s.io/v1alpha2
kind: PodScheduling kind: PodSchedulingContext
metadata: metadata:
annotations: annotations:
annotationsKey: annotationsValue annotationsKey: annotationsValue

View File

@ -11376,7 +11376,7 @@ var schemaYAML = typed.YAMLObject(`types:
- name: shareable - name: shareable
type: type:
scalar: boolean scalar: boolean
- name: io.k8s.api.resource.v1alpha2.PodScheduling - name: io.k8s.api.resource.v1alpha2.PodSchedulingContext
map: map:
fields: fields:
- name: apiVersion - name: apiVersion
@ -11391,13 +11391,13 @@ var schemaYAML = typed.YAMLObject(`types:
default: {} default: {}
- name: spec - name: spec
type: type:
namedType: io.k8s.api.resource.v1alpha2.PodSchedulingSpec namedType: io.k8s.api.resource.v1alpha2.PodSchedulingContextSpec
default: {} default: {}
- name: status - name: status
type: type:
namedType: io.k8s.api.resource.v1alpha2.PodSchedulingStatus namedType: io.k8s.api.resource.v1alpha2.PodSchedulingContextStatus
default: {} default: {}
- name: io.k8s.api.resource.v1alpha2.PodSchedulingSpec - name: io.k8s.api.resource.v1alpha2.PodSchedulingContextSpec
map: map:
fields: fields:
- name: potentialNodes - name: potentialNodes
@ -11409,7 +11409,7 @@ var schemaYAML = typed.YAMLObject(`types:
- name: selectedNode - name: selectedNode
type: type:
scalar: string scalar: string
- name: io.k8s.api.resource.v1alpha2.PodSchedulingStatus - name: io.k8s.api.resource.v1alpha2.PodSchedulingContextStatus
map: map:
fields: fields:
- name: resourceClaims - name: resourceClaims

View File

@ -27,58 +27,58 @@ import (
v1 "k8s.io/client-go/applyconfigurations/meta/v1" v1 "k8s.io/client-go/applyconfigurations/meta/v1"
) )
// PodSchedulingApplyConfiguration represents a declarative configuration of the PodScheduling type for use // PodSchedulingContextApplyConfiguration represents a declarative configuration of the PodSchedulingContext type for use
// with apply. // with apply.
type PodSchedulingApplyConfiguration struct { type PodSchedulingContextApplyConfiguration struct {
v1.TypeMetaApplyConfiguration `json:",inline"` v1.TypeMetaApplyConfiguration `json:",inline"`
*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
Spec *PodSchedulingSpecApplyConfiguration `json:"spec,omitempty"` Spec *PodSchedulingContextSpecApplyConfiguration `json:"spec,omitempty"`
Status *PodSchedulingStatusApplyConfiguration `json:"status,omitempty"` Status *PodSchedulingContextStatusApplyConfiguration `json:"status,omitempty"`
} }
// PodScheduling constructs a declarative configuration of the PodScheduling type for use with // PodSchedulingContext constructs a declarative configuration of the PodSchedulingContext type for use with
// apply. // apply.
func PodScheduling(name, namespace string) *PodSchedulingApplyConfiguration { func PodSchedulingContext(name, namespace string) *PodSchedulingContextApplyConfiguration {
b := &PodSchedulingApplyConfiguration{} b := &PodSchedulingContextApplyConfiguration{}
b.WithName(name) b.WithName(name)
b.WithNamespace(namespace) b.WithNamespace(namespace)
b.WithKind("PodScheduling") b.WithKind("PodSchedulingContext")
b.WithAPIVersion("resource.k8s.io/v1alpha2") b.WithAPIVersion("resource.k8s.io/v1alpha2")
return b return b
} }
// ExtractPodScheduling extracts the applied configuration owned by fieldManager from // ExtractPodSchedulingContext extracts the applied configuration owned by fieldManager from
// podScheduling. If no managedFields are found in podScheduling for fieldManager, a // podSchedulingContext. If no managedFields are found in podSchedulingContext for fieldManager, a
// PodSchedulingApplyConfiguration is returned with only the Name, Namespace (if applicable), // PodSchedulingContextApplyConfiguration is returned with only the Name, Namespace (if applicable),
// APIVersion and Kind populated. It is possible that no managed fields were found because other // APIVersion and Kind populated. It is possible that no managed fields were found because other
// field managers have taken ownership of all the fields previously owned by fieldManager, or because // field managers have taken ownership of all the fields previously owned by fieldManager, or because
// the fieldManager never owned any fields. // the fieldManager never owned any fields.
// podScheduling must be an unmodified PodScheduling API object that was retrieved from the Kubernetes API. // podSchedulingContext must be an unmodified PodSchedulingContext API object that was retrieved from the Kubernetes API.
// ExtractPodScheduling provides a way to perform an extract/modify-in-place/apply workflow. // ExtractPodSchedulingContext provides a way to perform an extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields. // applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental! // Experimental!
func ExtractPodScheduling(podScheduling *resourcev1alpha2.PodScheduling, fieldManager string) (*PodSchedulingApplyConfiguration, error) { func ExtractPodSchedulingContext(podSchedulingContext *resourcev1alpha2.PodSchedulingContext, fieldManager string) (*PodSchedulingContextApplyConfiguration, error) {
return extractPodScheduling(podScheduling, fieldManager, "") return extractPodSchedulingContext(podSchedulingContext, fieldManager, "")
} }
// ExtractPodSchedulingStatus is the same as ExtractPodScheduling except // ExtractPodSchedulingContextStatus is the same as ExtractPodSchedulingContext except
// that it extracts the status subresource applied configuration. // that it extracts the status subresource applied configuration.
// Experimental! // Experimental!
func ExtractPodSchedulingStatus(podScheduling *resourcev1alpha2.PodScheduling, fieldManager string) (*PodSchedulingApplyConfiguration, error) { func ExtractPodSchedulingContextStatus(podSchedulingContext *resourcev1alpha2.PodSchedulingContext, fieldManager string) (*PodSchedulingContextApplyConfiguration, error) {
return extractPodScheduling(podScheduling, fieldManager, "status") return extractPodSchedulingContext(podSchedulingContext, fieldManager, "status")
} }
func extractPodScheduling(podScheduling *resourcev1alpha2.PodScheduling, fieldManager string, subresource string) (*PodSchedulingApplyConfiguration, error) { func extractPodSchedulingContext(podSchedulingContext *resourcev1alpha2.PodSchedulingContext, fieldManager string, subresource string) (*PodSchedulingContextApplyConfiguration, error) {
b := &PodSchedulingApplyConfiguration{} b := &PodSchedulingContextApplyConfiguration{}
err := managedfields.ExtractInto(podScheduling, internal.Parser().Type("io.k8s.api.resource.v1alpha2.PodScheduling"), fieldManager, b, subresource) err := managedfields.ExtractInto(podSchedulingContext, internal.Parser().Type("io.k8s.api.resource.v1alpha2.PodSchedulingContext"), fieldManager, b, subresource)
if err != nil { if err != nil {
return nil, err return nil, err
} }
b.WithName(podScheduling.Name) b.WithName(podSchedulingContext.Name)
b.WithNamespace(podScheduling.Namespace) b.WithNamespace(podSchedulingContext.Namespace)
b.WithKind("PodScheduling") b.WithKind("PodSchedulingContext")
b.WithAPIVersion("resource.k8s.io/v1alpha2") b.WithAPIVersion("resource.k8s.io/v1alpha2")
return b, nil return b, nil
} }
@ -86,7 +86,7 @@ func extractPodScheduling(podScheduling *resourcev1alpha2.PodScheduling, fieldMa
// WithKind sets the Kind field in the declarative configuration to the given value // WithKind sets the Kind field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations. // and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call. // If called multiple times, the Kind field is set to the value of the last call.
func (b *PodSchedulingApplyConfiguration) WithKind(value string) *PodSchedulingApplyConfiguration { func (b *PodSchedulingContextApplyConfiguration) WithKind(value string) *PodSchedulingContextApplyConfiguration {
b.Kind = &value b.Kind = &value
return b return b
} }
@ -94,7 +94,7 @@ func (b *PodSchedulingApplyConfiguration) WithKind(value string) *PodSchedulingA
// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value // WithAPIVersion sets the APIVersion field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations. // and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call. // If called multiple times, the APIVersion field is set to the value of the last call.
func (b *PodSchedulingApplyConfiguration) WithAPIVersion(value string) *PodSchedulingApplyConfiguration { func (b *PodSchedulingContextApplyConfiguration) WithAPIVersion(value string) *PodSchedulingContextApplyConfiguration {
b.APIVersion = &value b.APIVersion = &value
return b return b
} }
@ -102,7 +102,7 @@ func (b *PodSchedulingApplyConfiguration) WithAPIVersion(value string) *PodSched
// WithName sets the Name field in the declarative configuration to the given value // WithName sets the Name field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations. // and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Name field is set to the value of the last call. // If called multiple times, the Name field is set to the value of the last call.
func (b *PodSchedulingApplyConfiguration) WithName(value string) *PodSchedulingApplyConfiguration { func (b *PodSchedulingContextApplyConfiguration) WithName(value string) *PodSchedulingContextApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists() b.ensureObjectMetaApplyConfigurationExists()
b.Name = &value b.Name = &value
return b return b
@ -111,7 +111,7 @@ func (b *PodSchedulingApplyConfiguration) WithName(value string) *PodSchedulingA
// WithGenerateName sets the GenerateName field in the declarative configuration to the given value // WithGenerateName sets the GenerateName field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations. // and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the GenerateName field is set to the value of the last call. // If called multiple times, the GenerateName field is set to the value of the last call.
func (b *PodSchedulingApplyConfiguration) WithGenerateName(value string) *PodSchedulingApplyConfiguration { func (b *PodSchedulingContextApplyConfiguration) WithGenerateName(value string) *PodSchedulingContextApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists() b.ensureObjectMetaApplyConfigurationExists()
b.GenerateName = &value b.GenerateName = &value
return b return b
@ -120,7 +120,7 @@ func (b *PodSchedulingApplyConfiguration) WithGenerateName(value string) *PodSch
// WithNamespace sets the Namespace field in the declarative configuration to the given value // WithNamespace sets the Namespace field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations. // and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Namespace field is set to the value of the last call. // If called multiple times, the Namespace field is set to the value of the last call.
func (b *PodSchedulingApplyConfiguration) WithNamespace(value string) *PodSchedulingApplyConfiguration { func (b *PodSchedulingContextApplyConfiguration) WithNamespace(value string) *PodSchedulingContextApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists() b.ensureObjectMetaApplyConfigurationExists()
b.Namespace = &value b.Namespace = &value
return b return b
@ -129,7 +129,7 @@ func (b *PodSchedulingApplyConfiguration) WithNamespace(value string) *PodSchedu
// WithUID sets the UID field in the declarative configuration to the given value // WithUID sets the UID field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations. // and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the UID field is set to the value of the last call. // If called multiple times, the UID field is set to the value of the last call.
func (b *PodSchedulingApplyConfiguration) WithUID(value types.UID) *PodSchedulingApplyConfiguration { func (b *PodSchedulingContextApplyConfiguration) WithUID(value types.UID) *PodSchedulingContextApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists() b.ensureObjectMetaApplyConfigurationExists()
b.UID = &value b.UID = &value
return b return b
@ -138,7 +138,7 @@ func (b *PodSchedulingApplyConfiguration) WithUID(value types.UID) *PodSchedulin
// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value // WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations. // and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the ResourceVersion field is set to the value of the last call. // If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *PodSchedulingApplyConfiguration) WithResourceVersion(value string) *PodSchedulingApplyConfiguration { func (b *PodSchedulingContextApplyConfiguration) WithResourceVersion(value string) *PodSchedulingContextApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists() b.ensureObjectMetaApplyConfigurationExists()
b.ResourceVersion = &value b.ResourceVersion = &value
return b return b
@ -147,7 +147,7 @@ func (b *PodSchedulingApplyConfiguration) WithResourceVersion(value string) *Pod
// WithGeneration sets the Generation field in the declarative configuration to the given value // WithGeneration sets the Generation field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations. // and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Generation field is set to the value of the last call. // If called multiple times, the Generation field is set to the value of the last call.
func (b *PodSchedulingApplyConfiguration) WithGeneration(value int64) *PodSchedulingApplyConfiguration { func (b *PodSchedulingContextApplyConfiguration) WithGeneration(value int64) *PodSchedulingContextApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists() b.ensureObjectMetaApplyConfigurationExists()
b.Generation = &value b.Generation = &value
return b return b
@ -156,7 +156,7 @@ func (b *PodSchedulingApplyConfiguration) WithGeneration(value int64) *PodSchedu
// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations. // and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the CreationTimestamp field is set to the value of the last call. // If called multiple times, the CreationTimestamp field is set to the value of the last call.
func (b *PodSchedulingApplyConfiguration) WithCreationTimestamp(value metav1.Time) *PodSchedulingApplyConfiguration { func (b *PodSchedulingContextApplyConfiguration) WithCreationTimestamp(value metav1.Time) *PodSchedulingContextApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists() b.ensureObjectMetaApplyConfigurationExists()
b.CreationTimestamp = &value b.CreationTimestamp = &value
return b return b
@ -165,7 +165,7 @@ func (b *PodSchedulingApplyConfiguration) WithCreationTimestamp(value metav1.Tim
// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations. // and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the DeletionTimestamp field is set to the value of the last call. // If called multiple times, the DeletionTimestamp field is set to the value of the last call.
func (b *PodSchedulingApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *PodSchedulingApplyConfiguration { func (b *PodSchedulingContextApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *PodSchedulingContextApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists() b.ensureObjectMetaApplyConfigurationExists()
b.DeletionTimestamp = &value b.DeletionTimestamp = &value
return b return b
@ -174,7 +174,7 @@ func (b *PodSchedulingApplyConfiguration) WithDeletionTimestamp(value metav1.Tim
// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value // WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations. // and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *PodSchedulingApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *PodSchedulingApplyConfiguration { func (b *PodSchedulingContextApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *PodSchedulingContextApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists() b.ensureObjectMetaApplyConfigurationExists()
b.DeletionGracePeriodSeconds = &value b.DeletionGracePeriodSeconds = &value
return b return b
@ -184,7 +184,7 @@ func (b *PodSchedulingApplyConfiguration) WithDeletionGracePeriodSeconds(value i
// and returns the receiver, so that objects can be built by chaining "With" function invocations. // and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the entries provided by each call will be put on the Labels field, // If called multiple times, the entries provided by each call will be put on the Labels field,
// overwriting existing map entries in the Labels field with the same key. // overwriting existing map entries in the Labels field with the same key.
func (b *PodSchedulingApplyConfiguration) WithLabels(entries map[string]string) *PodSchedulingApplyConfiguration { func (b *PodSchedulingContextApplyConfiguration) WithLabels(entries map[string]string) *PodSchedulingContextApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists() b.ensureObjectMetaApplyConfigurationExists()
if b.Labels == nil && len(entries) > 0 { if b.Labels == nil && len(entries) > 0 {
b.Labels = make(map[string]string, len(entries)) b.Labels = make(map[string]string, len(entries))
@ -199,7 +199,7 @@ func (b *PodSchedulingApplyConfiguration) WithLabels(entries map[string]string)
// and returns the receiver, so that objects can be built by chaining "With" function invocations. // and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the entries provided by each call will be put on the Annotations field, // If called multiple times, the entries provided by each call will be put on the Annotations field,
// overwriting existing map entries in the Annotations field with the same key. // overwriting existing map entries in the Annotations field with the same key.
func (b *PodSchedulingApplyConfiguration) WithAnnotations(entries map[string]string) *PodSchedulingApplyConfiguration { func (b *PodSchedulingContextApplyConfiguration) WithAnnotations(entries map[string]string) *PodSchedulingContextApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists() b.ensureObjectMetaApplyConfigurationExists()
if b.Annotations == nil && len(entries) > 0 { if b.Annotations == nil && len(entries) > 0 {
b.Annotations = make(map[string]string, len(entries)) b.Annotations = make(map[string]string, len(entries))
@ -213,7 +213,7 @@ func (b *PodSchedulingApplyConfiguration) WithAnnotations(entries map[string]str
// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
// and returns the receiver, so that objects can be built by chaining "With" function invocations. // and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the OwnerReferences field. // If called multiple times, values provided by each call will be appended to the OwnerReferences field.
func (b *PodSchedulingApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *PodSchedulingApplyConfiguration { func (b *PodSchedulingContextApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *PodSchedulingContextApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists() b.ensureObjectMetaApplyConfigurationExists()
for i := range values { for i := range values {
if values[i] == nil { if values[i] == nil {
@ -227,7 +227,7 @@ func (b *PodSchedulingApplyConfiguration) WithOwnerReferences(values ...*v1.Owne
// WithFinalizers adds the given value to the Finalizers field in the declarative configuration // WithFinalizers adds the given value to the Finalizers field in the declarative configuration
// and returns the receiver, so that objects can be built by chaining "With" function invocations. // and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the Finalizers field. // If called multiple times, values provided by each call will be appended to the Finalizers field.
func (b *PodSchedulingApplyConfiguration) WithFinalizers(values ...string) *PodSchedulingApplyConfiguration { func (b *PodSchedulingContextApplyConfiguration) WithFinalizers(values ...string) *PodSchedulingContextApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists() b.ensureObjectMetaApplyConfigurationExists()
for i := range values { for i := range values {
b.Finalizers = append(b.Finalizers, values[i]) b.Finalizers = append(b.Finalizers, values[i])
@ -235,7 +235,7 @@ func (b *PodSchedulingApplyConfiguration) WithFinalizers(values ...string) *PodS
return b return b
} }
func (b *PodSchedulingApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { func (b *PodSchedulingContextApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
if b.ObjectMetaApplyConfiguration == nil { if b.ObjectMetaApplyConfiguration == nil {
b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
} }
@ -244,7 +244,7 @@ func (b *PodSchedulingApplyConfiguration) ensureObjectMetaApplyConfigurationExis
// WithSpec sets the Spec field in the declarative configuration to the given value // WithSpec sets the Spec field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations. // and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Spec field is set to the value of the last call. // If called multiple times, the Spec field is set to the value of the last call.
func (b *PodSchedulingApplyConfiguration) WithSpec(value *PodSchedulingSpecApplyConfiguration) *PodSchedulingApplyConfiguration { func (b *PodSchedulingContextApplyConfiguration) WithSpec(value *PodSchedulingContextSpecApplyConfiguration) *PodSchedulingContextApplyConfiguration {
b.Spec = value b.Spec = value
return b return b
} }
@ -252,7 +252,7 @@ func (b *PodSchedulingApplyConfiguration) WithSpec(value *PodSchedulingSpecApply
// WithStatus sets the Status field in the declarative configuration to the given value // WithStatus sets the Status field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations. // and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Status field is set to the value of the last call. // If called multiple times, the Status field is set to the value of the last call.
func (b *PodSchedulingApplyConfiguration) WithStatus(value *PodSchedulingStatusApplyConfiguration) *PodSchedulingApplyConfiguration { func (b *PodSchedulingContextApplyConfiguration) WithStatus(value *PodSchedulingContextStatusApplyConfiguration) *PodSchedulingContextApplyConfiguration {
b.Status = value b.Status = value
return b return b
} }
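A sketch of the server-side-apply workflow these builders target. The typed client accessors (ResourceV1alpha2().PodSchedulingContexts(...).Apply) follow the usual generated pattern but are not part of this hunk, so treat them as assumptions:

package main

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	resourceapply "k8s.io/client-go/applyconfigurations/resource/v1alpha2"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	client, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	// Declare only the fields this field manager owns.
	psc := resourceapply.PodSchedulingContext("my-pod", "default").
		WithSpec(resourceapply.PodSchedulingContextSpec().
			WithPotentialNodes("node-1", "node-2"))

	_, err = client.ResourceV1alpha2().PodSchedulingContexts("default").
		Apply(context.TODO(), psc, metav1.ApplyOptions{FieldManager: "example-scheduler-plugin"})
	if err != nil {
		panic(err)
	}
}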

View File

@ -18,23 +18,23 @@ limitations under the License.
package v1alpha2 package v1alpha2
// PodSchedulingSpecApplyConfiguration represents a declarative configuration of the PodSchedulingSpec type for use // PodSchedulingContextSpecApplyConfiguration represents a declarative configuration of the PodSchedulingContextSpec type for use
// with apply. // with apply.
type PodSchedulingSpecApplyConfiguration struct { type PodSchedulingContextSpecApplyConfiguration struct {
SelectedNode *string `json:"selectedNode,omitempty"` SelectedNode *string `json:"selectedNode,omitempty"`
PotentialNodes []string `json:"potentialNodes,omitempty"` PotentialNodes []string `json:"potentialNodes,omitempty"`
} }
// PodSchedulingSpecApplyConfiguration constructs a declarative configuration of the PodSchedulingSpec type for use with // PodSchedulingContextSpecApplyConfiguration constructs a declarative configuration of the PodSchedulingContextSpec type for use with
// apply. // apply.
func PodSchedulingSpec() *PodSchedulingSpecApplyConfiguration { func PodSchedulingContextSpec() *PodSchedulingContextSpecApplyConfiguration {
return &PodSchedulingSpecApplyConfiguration{} return &PodSchedulingContextSpecApplyConfiguration{}
} }
// WithSelectedNode sets the SelectedNode field in the declarative configuration to the given value // WithSelectedNode sets the SelectedNode field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations. // and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the SelectedNode field is set to the value of the last call. // If called multiple times, the SelectedNode field is set to the value of the last call.
func (b *PodSchedulingSpecApplyConfiguration) WithSelectedNode(value string) *PodSchedulingSpecApplyConfiguration { func (b *PodSchedulingContextSpecApplyConfiguration) WithSelectedNode(value string) *PodSchedulingContextSpecApplyConfiguration {
b.SelectedNode = &value b.SelectedNode = &value
return b return b
} }
@ -42,7 +42,7 @@ func (b *PodSchedulingSpecApplyConfiguration) WithSelectedNode(value string) *Po
// WithPotentialNodes adds the given value to the PotentialNodes field in the declarative configuration // WithPotentialNodes adds the given value to the PotentialNodes field in the declarative configuration
// and returns the receiver, so that objects can be built by chaining "With" function invocations. // and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the PotentialNodes field. // If called multiple times, values provided by each call will be appended to the PotentialNodes field.
func (b *PodSchedulingSpecApplyConfiguration) WithPotentialNodes(values ...string) *PodSchedulingSpecApplyConfiguration { func (b *PodSchedulingContextSpecApplyConfiguration) WithPotentialNodes(values ...string) *PodSchedulingContextSpecApplyConfiguration {
for i := range values { for i := range values {
b.PotentialNodes = append(b.PotentialNodes, values[i]) b.PotentialNodes = append(b.PotentialNodes, values[i])
} }
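A tiny sketch of the semantics documented above: repeated WithPotentialNodes calls append, while WithSelectedNode overwrites:

package main

import (
	"fmt"

	resourceapply "k8s.io/client-go/applyconfigurations/resource/v1alpha2"
)

func main() {
	spec := resourceapply.PodSchedulingContextSpec().
		WithSelectedNode("node-1").
		WithPotentialNodes("node-1").
		WithPotentialNodes("node-2", "node-3") // appended, not replaced

	fmt.Println(*spec.SelectedNode)       // node-1
	fmt.Println(len(spec.PotentialNodes)) // 3
}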

View File

@ -18,22 +18,22 @@ limitations under the License.
package v1alpha2 package v1alpha2
// PodSchedulingStatusApplyConfiguration represents a declarative configuration of the PodSchedulingStatus type for use // PodSchedulingContextStatusApplyConfiguration represents a declarative configuration of the PodSchedulingContextStatus type for use
// with apply. // with apply.
type PodSchedulingStatusApplyConfiguration struct { type PodSchedulingContextStatusApplyConfiguration struct {
ResourceClaims []ResourceClaimSchedulingStatusApplyConfiguration `json:"resourceClaims,omitempty"` ResourceClaims []ResourceClaimSchedulingStatusApplyConfiguration `json:"resourceClaims,omitempty"`
} }
// PodSchedulingStatusApplyConfiguration constructs a declarative configuration of the PodSchedulingStatus type for use with // PodSchedulingContextStatusApplyConfiguration constructs a declarative configuration of the PodSchedulingContextStatus type for use with
// apply. // apply.
func PodSchedulingStatus() *PodSchedulingStatusApplyConfiguration { func PodSchedulingContextStatus() *PodSchedulingContextStatusApplyConfiguration {
return &PodSchedulingStatusApplyConfiguration{} return &PodSchedulingContextStatusApplyConfiguration{}
} }
// WithResourceClaims adds the given value to the ResourceClaims field in the declarative configuration // WithResourceClaims adds the given value to the ResourceClaims field in the declarative configuration
// and returns the receiver, so that objects can be built by chaining "With" function invocations. // and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the ResourceClaims field. // If called multiple times, values provided by each call will be appended to the ResourceClaims field.
func (b *PodSchedulingStatusApplyConfiguration) WithResourceClaims(values ...*ResourceClaimSchedulingStatusApplyConfiguration) *PodSchedulingStatusApplyConfiguration { func (b *PodSchedulingContextStatusApplyConfiguration) WithResourceClaims(values ...*ResourceClaimSchedulingStatusApplyConfiguration) *PodSchedulingContextStatusApplyConfiguration {
for i := range values { for i := range values {
if values[i] == nil { if values[i] == nil {
panic("nil value passed to WithResourceClaims") panic("nil value passed to WithResourceClaims")

View File

@ -1451,12 +1451,12 @@ func ForKind(kind schema.GroupVersionKind) interface{} {
// Group=resource.k8s.io, Version=v1alpha2 // Group=resource.k8s.io, Version=v1alpha2
case v1alpha2.SchemeGroupVersion.WithKind("AllocationResult"): case v1alpha2.SchemeGroupVersion.WithKind("AllocationResult"):
return &resourcev1alpha2.AllocationResultApplyConfiguration{} return &resourcev1alpha2.AllocationResultApplyConfiguration{}
case v1alpha2.SchemeGroupVersion.WithKind("PodScheduling"): case v1alpha2.SchemeGroupVersion.WithKind("PodSchedulingContext"):
return &resourcev1alpha2.PodSchedulingApplyConfiguration{} return &resourcev1alpha2.PodSchedulingContextApplyConfiguration{}
case v1alpha2.SchemeGroupVersion.WithKind("PodSchedulingSpec"): case v1alpha2.SchemeGroupVersion.WithKind("PodSchedulingContextSpec"):
return &resourcev1alpha2.PodSchedulingSpecApplyConfiguration{} return &resourcev1alpha2.PodSchedulingContextSpecApplyConfiguration{}
case v1alpha2.SchemeGroupVersion.WithKind("PodSchedulingStatus"): case v1alpha2.SchemeGroupVersion.WithKind("PodSchedulingContextStatus"):
return &resourcev1alpha2.PodSchedulingStatusApplyConfiguration{} return &resourcev1alpha2.PodSchedulingContextStatusApplyConfiguration{}
case v1alpha2.SchemeGroupVersion.WithKind("ResourceClaim"): case v1alpha2.SchemeGroupVersion.WithKind("ResourceClaim"):
return &resourcev1alpha2.ResourceClaimApplyConfiguration{} return &resourcev1alpha2.ResourceClaimApplyConfiguration{}
case v1alpha2.SchemeGroupVersion.WithKind("ResourceClaimConsumerReference"): case v1alpha2.SchemeGroupVersion.WithKind("ResourceClaimConsumerReference"):
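The switch above is normally reached through the exported ForKind helper; a sketch, with the package aliases and the type assertion being illustrative:

package main

import (
	"fmt"

	resourcev1alpha2 "k8s.io/api/resource/v1alpha2"
	"k8s.io/client-go/applyconfigurations"
	resourceapply "k8s.io/client-go/applyconfigurations/resource/v1alpha2"
)

func main() {
	obj := applyconfigurations.ForKind(resourcev1alpha2.SchemeGroupVersion.WithKind("PodSchedulingContext"))
	// ForKind returns interface{}; assert to the concrete builder to use it.
	if psc, ok := obj.(*resourceapply.PodSchedulingContextApplyConfiguration); ok {
		psc.WithName("my-pod").WithNamespace("default")
		fmt.Println(*psc.Name, *psc.Namespace)
	}
}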

View File

@ -349,8 +349,8 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource
return &genericInformer{resource: resource.GroupResource(), informer: f.Rbac().V1beta1().RoleBindings().Informer()}, nil return &genericInformer{resource: resource.GroupResource(), informer: f.Rbac().V1beta1().RoleBindings().Informer()}, nil
// Group=resource.k8s.io, Version=v1alpha2 // Group=resource.k8s.io, Version=v1alpha2
case v1alpha2.SchemeGroupVersion.WithResource("podschedulings"): case v1alpha2.SchemeGroupVersion.WithResource("podschedulingcontexts"):
return &genericInformer{resource: resource.GroupResource(), informer: f.Resource().V1alpha2().PodSchedulings().Informer()}, nil return &genericInformer{resource: resource.GroupResource(), informer: f.Resource().V1alpha2().PodSchedulingContexts().Informer()}, nil
case v1alpha2.SchemeGroupVersion.WithResource("resourceclaims"): case v1alpha2.SchemeGroupVersion.WithResource("resourceclaims"):
return &genericInformer{resource: resource.GroupResource(), informer: f.Resource().V1alpha2().ResourceClaims().Informer()}, nil return &genericInformer{resource: resource.GroupResource(), informer: f.Resource().V1alpha2().ResourceClaims().Informer()}, nil
case v1alpha2.SchemeGroupVersion.WithResource("resourceclaimtemplates"): case v1alpha2.SchemeGroupVersion.WithResource("resourceclaimtemplates"):


@ -24,8 +24,8 @@ import (
// Interface provides access to all the informers in this group version. // Interface provides access to all the informers in this group version.
type Interface interface { type Interface interface {
// PodSchedulings returns a PodSchedulingInformer. // PodSchedulingContexts returns a PodSchedulingContextInformer.
PodSchedulings() PodSchedulingInformer PodSchedulingContexts() PodSchedulingContextInformer
// ResourceClaims returns a ResourceClaimInformer. // ResourceClaims returns a ResourceClaimInformer.
ResourceClaims() ResourceClaimInformer ResourceClaims() ResourceClaimInformer
// ResourceClaimTemplates returns a ResourceClaimTemplateInformer. // ResourceClaimTemplates returns a ResourceClaimTemplateInformer.
@ -45,9 +45,9 @@ func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakList
return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
} }
// PodSchedulings returns a PodSchedulingInformer. // PodSchedulingContexts returns a PodSchedulingContextInformer.
func (v *version) PodSchedulings() PodSchedulingInformer { func (v *version) PodSchedulingContexts() PodSchedulingContextInformer {
return &podSchedulingInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} return &podSchedulingContextInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
} }
// ResourceClaims returns a ResourceClaimInformer. // ResourceClaims returns a ResourceClaimInformer.


@ -32,59 +32,59 @@ import (
cache "k8s.io/client-go/tools/cache" cache "k8s.io/client-go/tools/cache"
) )
// PodSchedulingInformer provides access to a shared informer and lister for // PodSchedulingContextInformer provides access to a shared informer and lister for
// PodSchedulings. // PodSchedulingContexts.
type PodSchedulingInformer interface { type PodSchedulingContextInformer interface {
Informer() cache.SharedIndexInformer Informer() cache.SharedIndexInformer
Lister() v1alpha2.PodSchedulingLister Lister() v1alpha2.PodSchedulingContextLister
} }
type podSchedulingInformer struct { type podSchedulingContextInformer struct {
factory internalinterfaces.SharedInformerFactory factory internalinterfaces.SharedInformerFactory
tweakListOptions internalinterfaces.TweakListOptionsFunc tweakListOptions internalinterfaces.TweakListOptionsFunc
namespace string namespace string
} }
// NewPodSchedulingInformer constructs a new informer for PodScheduling type. // NewPodSchedulingContextInformer constructs a new informer for PodSchedulingContext type.
// Always prefer using an informer factory to get a shared informer instead of getting an independent // Always prefer using an informer factory to get a shared informer instead of getting an independent
// one. This reduces memory footprint and number of connections to the server. // one. This reduces memory footprint and number of connections to the server.
func NewPodSchedulingInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { func NewPodSchedulingContextInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
return NewFilteredPodSchedulingInformer(client, namespace, resyncPeriod, indexers, nil) return NewFilteredPodSchedulingContextInformer(client, namespace, resyncPeriod, indexers, nil)
} }
// NewFilteredPodSchedulingInformer constructs a new informer for PodScheduling type. // NewFilteredPodSchedulingContextInformer constructs a new informer for PodSchedulingContext type.
// Always prefer using an informer factory to get a shared informer instead of getting an independent // Always prefer using an informer factory to get a shared informer instead of getting an independent
// one. This reduces memory footprint and number of connections to the server. // one. This reduces memory footprint and number of connections to the server.
func NewFilteredPodSchedulingInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { func NewFilteredPodSchedulingContextInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer( return cache.NewSharedIndexInformer(
&cache.ListWatch{ &cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) { ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil { if tweakListOptions != nil {
tweakListOptions(&options) tweakListOptions(&options)
} }
return client.ResourceV1alpha2().PodSchedulings(namespace).List(context.TODO(), options) return client.ResourceV1alpha2().PodSchedulingContexts(namespace).List(context.TODO(), options)
}, },
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
if tweakListOptions != nil { if tweakListOptions != nil {
tweakListOptions(&options) tweakListOptions(&options)
} }
return client.ResourceV1alpha2().PodSchedulings(namespace).Watch(context.TODO(), options) return client.ResourceV1alpha2().PodSchedulingContexts(namespace).Watch(context.TODO(), options)
}, },
}, },
&resourcev1alpha2.PodScheduling{}, &resourcev1alpha2.PodSchedulingContext{},
resyncPeriod, resyncPeriod,
indexers, indexers,
) )
} }
func (f *podSchedulingInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { func (f *podSchedulingContextInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
return NewFilteredPodSchedulingInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) return NewFilteredPodSchedulingContextInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
} }
func (f *podSchedulingInformer) Informer() cache.SharedIndexInformer { func (f *podSchedulingContextInformer) Informer() cache.SharedIndexInformer {
return f.factory.InformerFor(&resourcev1alpha2.PodScheduling{}, f.defaultInformer) return f.factory.InformerFor(&resourcev1alpha2.PodSchedulingContext{}, f.defaultInformer)
} }
func (f *podSchedulingInformer) Lister() v1alpha2.PodSchedulingLister { func (f *podSchedulingContextInformer) Lister() v1alpha2.PodSchedulingContextLister {
return v1alpha2.NewPodSchedulingLister(f.Informer().GetIndexer()) return v1alpha2.NewPodSchedulingContextLister(f.Informer().GetIndexer())
} }


@ -1,189 +0,0 @@
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package fake
import (
"context"
json "encoding/json"
"fmt"
v1alpha2 "k8s.io/api/resource/v1alpha2"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
labels "k8s.io/apimachinery/pkg/labels"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
resourcev1alpha2 "k8s.io/client-go/applyconfigurations/resource/v1alpha2"
testing "k8s.io/client-go/testing"
)
// FakePodSchedulings implements PodSchedulingInterface
type FakePodSchedulings struct {
Fake *FakeResourceV1alpha2
ns string
}
var podschedulingsResource = v1alpha2.SchemeGroupVersion.WithResource("podschedulings")
var podschedulingsKind = v1alpha2.SchemeGroupVersion.WithKind("PodScheduling")
// Get takes name of the podScheduling, and returns the corresponding podScheduling object, and an error if there is any.
func (c *FakePodSchedulings) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha2.PodScheduling, err error) {
obj, err := c.Fake.
Invokes(testing.NewGetAction(podschedulingsResource, c.ns, name), &v1alpha2.PodScheduling{})
if obj == nil {
return nil, err
}
return obj.(*v1alpha2.PodScheduling), err
}
// List takes label and field selectors, and returns the list of PodSchedulings that match those selectors.
func (c *FakePodSchedulings) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha2.PodSchedulingList, err error) {
obj, err := c.Fake.
Invokes(testing.NewListAction(podschedulingsResource, podschedulingsKind, c.ns, opts), &v1alpha2.PodSchedulingList{})
if obj == nil {
return nil, err
}
label, _, _ := testing.ExtractFromListOptions(opts)
if label == nil {
label = labels.Everything()
}
list := &v1alpha2.PodSchedulingList{ListMeta: obj.(*v1alpha2.PodSchedulingList).ListMeta}
for _, item := range obj.(*v1alpha2.PodSchedulingList).Items {
if label.Matches(labels.Set(item.Labels)) {
list.Items = append(list.Items, item)
}
}
return list, err
}
// Watch returns a watch.Interface that watches the requested podSchedulings.
func (c *FakePodSchedulings) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
return c.Fake.
InvokesWatch(testing.NewWatchAction(podschedulingsResource, c.ns, opts))
}
// Create takes the representation of a podScheduling and creates it. Returns the server's representation of the podScheduling, and an error, if there is any.
func (c *FakePodSchedulings) Create(ctx context.Context, podScheduling *v1alpha2.PodScheduling, opts v1.CreateOptions) (result *v1alpha2.PodScheduling, err error) {
obj, err := c.Fake.
Invokes(testing.NewCreateAction(podschedulingsResource, c.ns, podScheduling), &v1alpha2.PodScheduling{})
if obj == nil {
return nil, err
}
return obj.(*v1alpha2.PodScheduling), err
}
// Update takes the representation of a podScheduling and updates it. Returns the server's representation of the podScheduling, and an error, if there is any.
func (c *FakePodSchedulings) Update(ctx context.Context, podScheduling *v1alpha2.PodScheduling, opts v1.UpdateOptions) (result *v1alpha2.PodScheduling, err error) {
obj, err := c.Fake.
Invokes(testing.NewUpdateAction(podschedulingsResource, c.ns, podScheduling), &v1alpha2.PodScheduling{})
if obj == nil {
return nil, err
}
return obj.(*v1alpha2.PodScheduling), err
}
// UpdateStatus was generated because the type contains a Status member.
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
func (c *FakePodSchedulings) UpdateStatus(ctx context.Context, podScheduling *v1alpha2.PodScheduling, opts v1.UpdateOptions) (*v1alpha2.PodScheduling, error) {
obj, err := c.Fake.
Invokes(testing.NewUpdateSubresourceAction(podschedulingsResource, "status", c.ns, podScheduling), &v1alpha2.PodScheduling{})
if obj == nil {
return nil, err
}
return obj.(*v1alpha2.PodScheduling), err
}
// Delete takes name of the podScheduling and deletes it. Returns an error if one occurs.
func (c *FakePodSchedulings) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
_, err := c.Fake.
Invokes(testing.NewDeleteActionWithOptions(podschedulingsResource, c.ns, name, opts), &v1alpha2.PodScheduling{})
return err
}
// DeleteCollection deletes a collection of objects.
func (c *FakePodSchedulings) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
action := testing.NewDeleteCollectionAction(podschedulingsResource, c.ns, listOpts)
_, err := c.Fake.Invokes(action, &v1alpha2.PodSchedulingList{})
return err
}
// Patch applies the patch and returns the patched podScheduling.
func (c *FakePodSchedulings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.PodScheduling, err error) {
obj, err := c.Fake.
Invokes(testing.NewPatchSubresourceAction(podschedulingsResource, c.ns, name, pt, data, subresources...), &v1alpha2.PodScheduling{})
if obj == nil {
return nil, err
}
return obj.(*v1alpha2.PodScheduling), err
}
// Apply takes the given apply declarative configuration, applies it and returns the applied podScheduling.
func (c *FakePodSchedulings) Apply(ctx context.Context, podScheduling *resourcev1alpha2.PodSchedulingApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.PodScheduling, err error) {
if podScheduling == nil {
return nil, fmt.Errorf("podScheduling provided to Apply must not be nil")
}
data, err := json.Marshal(podScheduling)
if err != nil {
return nil, err
}
name := podScheduling.Name
if name == nil {
return nil, fmt.Errorf("podScheduling.Name must be provided to Apply")
}
obj, err := c.Fake.
Invokes(testing.NewPatchSubresourceAction(podschedulingsResource, c.ns, *name, types.ApplyPatchType, data), &v1alpha2.PodScheduling{})
if obj == nil {
return nil, err
}
return obj.(*v1alpha2.PodScheduling), err
}
// ApplyStatus was generated because the type contains a Status member.
// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
func (c *FakePodSchedulings) ApplyStatus(ctx context.Context, podScheduling *resourcev1alpha2.PodSchedulingApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.PodScheduling, err error) {
if podScheduling == nil {
return nil, fmt.Errorf("podScheduling provided to Apply must not be nil")
}
data, err := json.Marshal(podScheduling)
if err != nil {
return nil, err
}
name := podScheduling.Name
if name == nil {
return nil, fmt.Errorf("podScheduling.Name must be provided to Apply")
}
obj, err := c.Fake.
Invokes(testing.NewPatchSubresourceAction(podschedulingsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1alpha2.PodScheduling{})
if obj == nil {
return nil, err
}
return obj.(*v1alpha2.PodScheduling), err
}


@ -0,0 +1,189 @@
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package fake
import (
"context"
json "encoding/json"
"fmt"
v1alpha2 "k8s.io/api/resource/v1alpha2"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
labels "k8s.io/apimachinery/pkg/labels"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
resourcev1alpha2 "k8s.io/client-go/applyconfigurations/resource/v1alpha2"
testing "k8s.io/client-go/testing"
)
// FakePodSchedulingContexts implements PodSchedulingContextInterface
type FakePodSchedulingContexts struct {
Fake *FakeResourceV1alpha2
ns string
}
var podschedulingcontextsResource = v1alpha2.SchemeGroupVersion.WithResource("podschedulingcontexts")
var podschedulingcontextsKind = v1alpha2.SchemeGroupVersion.WithKind("PodSchedulingContext")
// Get takes name of the podSchedulingContext, and returns the corresponding podSchedulingContext object, and an error if there is any.
func (c *FakePodSchedulingContexts) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha2.PodSchedulingContext, err error) {
obj, err := c.Fake.
Invokes(testing.NewGetAction(podschedulingcontextsResource, c.ns, name), &v1alpha2.PodSchedulingContext{})
if obj == nil {
return nil, err
}
return obj.(*v1alpha2.PodSchedulingContext), err
}
// List takes label and field selectors, and returns the list of PodSchedulingContexts that match those selectors.
func (c *FakePodSchedulingContexts) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha2.PodSchedulingContextList, err error) {
obj, err := c.Fake.
Invokes(testing.NewListAction(podschedulingcontextsResource, podschedulingcontextsKind, c.ns, opts), &v1alpha2.PodSchedulingContextList{})
if obj == nil {
return nil, err
}
label, _, _ := testing.ExtractFromListOptions(opts)
if label == nil {
label = labels.Everything()
}
list := &v1alpha2.PodSchedulingContextList{ListMeta: obj.(*v1alpha2.PodSchedulingContextList).ListMeta}
for _, item := range obj.(*v1alpha2.PodSchedulingContextList).Items {
if label.Matches(labels.Set(item.Labels)) {
list.Items = append(list.Items, item)
}
}
return list, err
}
// Watch returns a watch.Interface that watches the requested podSchedulingContexts.
func (c *FakePodSchedulingContexts) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
return c.Fake.
InvokesWatch(testing.NewWatchAction(podschedulingcontextsResource, c.ns, opts))
}
// Create takes the representation of a podSchedulingContext and creates it. Returns the server's representation of the podSchedulingContext, and an error, if there is any.
func (c *FakePodSchedulingContexts) Create(ctx context.Context, podSchedulingContext *v1alpha2.PodSchedulingContext, opts v1.CreateOptions) (result *v1alpha2.PodSchedulingContext, err error) {
obj, err := c.Fake.
Invokes(testing.NewCreateAction(podschedulingcontextsResource, c.ns, podSchedulingContext), &v1alpha2.PodSchedulingContext{})
if obj == nil {
return nil, err
}
return obj.(*v1alpha2.PodSchedulingContext), err
}
// Update takes the representation of a podSchedulingContext and updates it. Returns the server's representation of the podSchedulingContext, and an error, if there is any.
func (c *FakePodSchedulingContexts) Update(ctx context.Context, podSchedulingContext *v1alpha2.PodSchedulingContext, opts v1.UpdateOptions) (result *v1alpha2.PodSchedulingContext, err error) {
obj, err := c.Fake.
Invokes(testing.NewUpdateAction(podschedulingcontextsResource, c.ns, podSchedulingContext), &v1alpha2.PodSchedulingContext{})
if obj == nil {
return nil, err
}
return obj.(*v1alpha2.PodSchedulingContext), err
}
// UpdateStatus was generated because the type contains a Status member.
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
func (c *FakePodSchedulingContexts) UpdateStatus(ctx context.Context, podSchedulingContext *v1alpha2.PodSchedulingContext, opts v1.UpdateOptions) (*v1alpha2.PodSchedulingContext, error) {
obj, err := c.Fake.
Invokes(testing.NewUpdateSubresourceAction(podschedulingcontextsResource, "status", c.ns, podSchedulingContext), &v1alpha2.PodSchedulingContext{})
if obj == nil {
return nil, err
}
return obj.(*v1alpha2.PodSchedulingContext), err
}
// Delete takes name of the podSchedulingContext and deletes it. Returns an error if one occurs.
func (c *FakePodSchedulingContexts) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
_, err := c.Fake.
Invokes(testing.NewDeleteActionWithOptions(podschedulingcontextsResource, c.ns, name, opts), &v1alpha2.PodSchedulingContext{})
return err
}
// DeleteCollection deletes a collection of objects.
func (c *FakePodSchedulingContexts) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
action := testing.NewDeleteCollectionAction(podschedulingcontextsResource, c.ns, listOpts)
_, err := c.Fake.Invokes(action, &v1alpha2.PodSchedulingContextList{})
return err
}
// Patch applies the patch and returns the patched podSchedulingContext.
func (c *FakePodSchedulingContexts) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.PodSchedulingContext, err error) {
obj, err := c.Fake.
Invokes(testing.NewPatchSubresourceAction(podschedulingcontextsResource, c.ns, name, pt, data, subresources...), &v1alpha2.PodSchedulingContext{})
if obj == nil {
return nil, err
}
return obj.(*v1alpha2.PodSchedulingContext), err
}
// Apply takes the given apply declarative configuration, applies it and returns the applied podSchedulingContext.
func (c *FakePodSchedulingContexts) Apply(ctx context.Context, podSchedulingContext *resourcev1alpha2.PodSchedulingContextApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.PodSchedulingContext, err error) {
if podSchedulingContext == nil {
return nil, fmt.Errorf("podSchedulingContext provided to Apply must not be nil")
}
data, err := json.Marshal(podSchedulingContext)
if err != nil {
return nil, err
}
name := podSchedulingContext.Name
if name == nil {
return nil, fmt.Errorf("podSchedulingContext.Name must be provided to Apply")
}
obj, err := c.Fake.
Invokes(testing.NewPatchSubresourceAction(podschedulingcontextsResource, c.ns, *name, types.ApplyPatchType, data), &v1alpha2.PodSchedulingContext{})
if obj == nil {
return nil, err
}
return obj.(*v1alpha2.PodSchedulingContext), err
}
// ApplyStatus was generated because the type contains a Status member.
// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
func (c *FakePodSchedulingContexts) ApplyStatus(ctx context.Context, podSchedulingContext *resourcev1alpha2.PodSchedulingContextApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.PodSchedulingContext, err error) {
if podSchedulingContext == nil {
return nil, fmt.Errorf("podSchedulingContext provided to Apply must not be nil")
}
data, err := json.Marshal(podSchedulingContext)
if err != nil {
return nil, err
}
name := podSchedulingContext.Name
if name == nil {
return nil, fmt.Errorf("podSchedulingContext.Name must be provided to Apply")
}
obj, err := c.Fake.
Invokes(testing.NewPatchSubresourceAction(podschedulingcontextsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1alpha2.PodSchedulingContext{})
if obj == nil {
return nil, err
}
return obj.(*v1alpha2.PodSchedulingContext), err
}
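A sketch of how test code exercises the fake client after the rename; the Spec.SelectedNode field is assumed from the v1alpha2 API and does not appear in this diff.

package main

import (
    "context"
    "fmt"

    v1alpha2 "k8s.io/api/resource/v1alpha2"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes/fake"
)

func main() {
    ctx := context.Background()
    client := fake.NewSimpleClientset()

    // Create and then list PodSchedulingContexts through the fake typed client,
    // exactly as test code would after the rename.
    _, err := client.ResourceV1alpha2().PodSchedulingContexts("default").Create(ctx,
        &v1alpha2.PodSchedulingContext{
            ObjectMeta: metav1.ObjectMeta{Name: "my-pod"},
            Spec:       v1alpha2.PodSchedulingContextSpec{SelectedNode: "node-a"},
        }, metav1.CreateOptions{})
    if err != nil {
        panic(err)
    }

    list, err := client.ResourceV1alpha2().PodSchedulingContexts("default").List(ctx, metav1.ListOptions{})
    if err != nil {
        panic(err)
    }
    fmt.Printf("fake store now holds %d object(s)\n", len(list.Items))
}
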


@ -28,8 +28,8 @@ type FakeResourceV1alpha2 struct {
*testing.Fake *testing.Fake
} }
func (c *FakeResourceV1alpha2) PodSchedulings(namespace string) v1alpha2.PodSchedulingInterface { func (c *FakeResourceV1alpha2) PodSchedulingContexts(namespace string) v1alpha2.PodSchedulingContextInterface {
return &FakePodSchedulings{c, namespace} return &FakePodSchedulingContexts{c, namespace}
} }
func (c *FakeResourceV1alpha2) ResourceClaims(namespace string) v1alpha2.ResourceClaimInterface { func (c *FakeResourceV1alpha2) ResourceClaims(namespace string) v1alpha2.ResourceClaimInterface {


@ -18,7 +18,7 @@ limitations under the License.
package v1alpha2 package v1alpha2
type PodSchedulingExpansion interface{} type PodSchedulingContextExpansion interface{}
type ResourceClaimExpansion interface{} type ResourceClaimExpansion interface{}


@ -1,256 +0,0 @@
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package v1alpha2
import (
"context"
json "encoding/json"
"fmt"
"time"
v1alpha2 "k8s.io/api/resource/v1alpha2"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
resourcev1alpha2 "k8s.io/client-go/applyconfigurations/resource/v1alpha2"
scheme "k8s.io/client-go/kubernetes/scheme"
rest "k8s.io/client-go/rest"
)
// PodSchedulingsGetter has a method to return a PodSchedulingInterface.
// A group's client should implement this interface.
type PodSchedulingsGetter interface {
PodSchedulings(namespace string) PodSchedulingInterface
}
// PodSchedulingInterface has methods to work with PodScheduling resources.
type PodSchedulingInterface interface {
Create(ctx context.Context, podScheduling *v1alpha2.PodScheduling, opts v1.CreateOptions) (*v1alpha2.PodScheduling, error)
Update(ctx context.Context, podScheduling *v1alpha2.PodScheduling, opts v1.UpdateOptions) (*v1alpha2.PodScheduling, error)
UpdateStatus(ctx context.Context, podScheduling *v1alpha2.PodScheduling, opts v1.UpdateOptions) (*v1alpha2.PodScheduling, error)
Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha2.PodScheduling, error)
List(ctx context.Context, opts v1.ListOptions) (*v1alpha2.PodSchedulingList, error)
Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.PodScheduling, err error)
Apply(ctx context.Context, podScheduling *resourcev1alpha2.PodSchedulingApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.PodScheduling, err error)
ApplyStatus(ctx context.Context, podScheduling *resourcev1alpha2.PodSchedulingApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.PodScheduling, err error)
PodSchedulingExpansion
}
// podSchedulings implements PodSchedulingInterface
type podSchedulings struct {
client rest.Interface
ns string
}
// newPodSchedulings returns a PodSchedulings
func newPodSchedulings(c *ResourceV1alpha2Client, namespace string) *podSchedulings {
return &podSchedulings{
client: c.RESTClient(),
ns: namespace,
}
}
// Get takes name of the podScheduling, and returns the corresponding podScheduling object, and an error if there is any.
func (c *podSchedulings) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha2.PodScheduling, err error) {
result = &v1alpha2.PodScheduling{}
err = c.client.Get().
Namespace(c.ns).
Resource("podschedulings").
Name(name).
VersionedParams(&options, scheme.ParameterCodec).
Do(ctx).
Into(result)
return
}
// List takes label and field selectors, and returns the list of PodSchedulings that match those selectors.
func (c *podSchedulings) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha2.PodSchedulingList, err error) {
var timeout time.Duration
if opts.TimeoutSeconds != nil {
timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
}
result = &v1alpha2.PodSchedulingList{}
err = c.client.Get().
Namespace(c.ns).
Resource("podschedulings").
VersionedParams(&opts, scheme.ParameterCodec).
Timeout(timeout).
Do(ctx).
Into(result)
return
}
// Watch returns a watch.Interface that watches the requested podSchedulings.
func (c *podSchedulings) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
var timeout time.Duration
if opts.TimeoutSeconds != nil {
timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
}
opts.Watch = true
return c.client.Get().
Namespace(c.ns).
Resource("podschedulings").
VersionedParams(&opts, scheme.ParameterCodec).
Timeout(timeout).
Watch(ctx)
}
// Create takes the representation of a podScheduling and creates it. Returns the server's representation of the podScheduling, and an error, if there is any.
func (c *podSchedulings) Create(ctx context.Context, podScheduling *v1alpha2.PodScheduling, opts v1.CreateOptions) (result *v1alpha2.PodScheduling, err error) {
result = &v1alpha2.PodScheduling{}
err = c.client.Post().
Namespace(c.ns).
Resource("podschedulings").
VersionedParams(&opts, scheme.ParameterCodec).
Body(podScheduling).
Do(ctx).
Into(result)
return
}
// Update takes the representation of a podScheduling and updates it. Returns the server's representation of the podScheduling, and an error, if there is any.
func (c *podSchedulings) Update(ctx context.Context, podScheduling *v1alpha2.PodScheduling, opts v1.UpdateOptions) (result *v1alpha2.PodScheduling, err error) {
result = &v1alpha2.PodScheduling{}
err = c.client.Put().
Namespace(c.ns).
Resource("podschedulings").
Name(podScheduling.Name).
VersionedParams(&opts, scheme.ParameterCodec).
Body(podScheduling).
Do(ctx).
Into(result)
return
}
// UpdateStatus was generated because the type contains a Status member.
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
func (c *podSchedulings) UpdateStatus(ctx context.Context, podScheduling *v1alpha2.PodScheduling, opts v1.UpdateOptions) (result *v1alpha2.PodScheduling, err error) {
result = &v1alpha2.PodScheduling{}
err = c.client.Put().
Namespace(c.ns).
Resource("podschedulings").
Name(podScheduling.Name).
SubResource("status").
VersionedParams(&opts, scheme.ParameterCodec).
Body(podScheduling).
Do(ctx).
Into(result)
return
}
// Delete takes name of the podScheduling and deletes it. Returns an error if one occurs.
func (c *podSchedulings) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
return c.client.Delete().
Namespace(c.ns).
Resource("podschedulings").
Name(name).
Body(&opts).
Do(ctx).
Error()
}
// DeleteCollection deletes a collection of objects.
func (c *podSchedulings) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
var timeout time.Duration
if listOpts.TimeoutSeconds != nil {
timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
}
return c.client.Delete().
Namespace(c.ns).
Resource("podschedulings").
VersionedParams(&listOpts, scheme.ParameterCodec).
Timeout(timeout).
Body(&opts).
Do(ctx).
Error()
}
// Patch applies the patch and returns the patched podScheduling.
func (c *podSchedulings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.PodScheduling, err error) {
result = &v1alpha2.PodScheduling{}
err = c.client.Patch(pt).
Namespace(c.ns).
Resource("podschedulings").
Name(name).
SubResource(subresources...).
VersionedParams(&opts, scheme.ParameterCodec).
Body(data).
Do(ctx).
Into(result)
return
}
// Apply takes the given apply declarative configuration, applies it and returns the applied podScheduling.
func (c *podSchedulings) Apply(ctx context.Context, podScheduling *resourcev1alpha2.PodSchedulingApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.PodScheduling, err error) {
if podScheduling == nil {
return nil, fmt.Errorf("podScheduling provided to Apply must not be nil")
}
patchOpts := opts.ToPatchOptions()
data, err := json.Marshal(podScheduling)
if err != nil {
return nil, err
}
name := podScheduling.Name
if name == nil {
return nil, fmt.Errorf("podScheduling.Name must be provided to Apply")
}
result = &v1alpha2.PodScheduling{}
err = c.client.Patch(types.ApplyPatchType).
Namespace(c.ns).
Resource("podschedulings").
Name(*name).
VersionedParams(&patchOpts, scheme.ParameterCodec).
Body(data).
Do(ctx).
Into(result)
return
}
// ApplyStatus was generated because the type contains a Status member.
// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
func (c *podSchedulings) ApplyStatus(ctx context.Context, podScheduling *resourcev1alpha2.PodSchedulingApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.PodScheduling, err error) {
if podScheduling == nil {
return nil, fmt.Errorf("podScheduling provided to Apply must not be nil")
}
patchOpts := opts.ToPatchOptions()
data, err := json.Marshal(podScheduling)
if err != nil {
return nil, err
}
name := podScheduling.Name
if name == nil {
return nil, fmt.Errorf("podScheduling.Name must be provided to Apply")
}
result = &v1alpha2.PodScheduling{}
err = c.client.Patch(types.ApplyPatchType).
Namespace(c.ns).
Resource("podschedulings").
Name(*name).
SubResource("status").
VersionedParams(&patchOpts, scheme.ParameterCodec).
Body(data).
Do(ctx).
Into(result)
return
}


@ -0,0 +1,256 @@
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package v1alpha2
import (
"context"
json "encoding/json"
"fmt"
"time"
v1alpha2 "k8s.io/api/resource/v1alpha2"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
resourcev1alpha2 "k8s.io/client-go/applyconfigurations/resource/v1alpha2"
scheme "k8s.io/client-go/kubernetes/scheme"
rest "k8s.io/client-go/rest"
)
// PodSchedulingContextsGetter has a method to return a PodSchedulingContextInterface.
// A group's client should implement this interface.
type PodSchedulingContextsGetter interface {
PodSchedulingContexts(namespace string) PodSchedulingContextInterface
}
// PodSchedulingContextInterface has methods to work with PodSchedulingContext resources.
type PodSchedulingContextInterface interface {
Create(ctx context.Context, podSchedulingContext *v1alpha2.PodSchedulingContext, opts v1.CreateOptions) (*v1alpha2.PodSchedulingContext, error)
Update(ctx context.Context, podSchedulingContext *v1alpha2.PodSchedulingContext, opts v1.UpdateOptions) (*v1alpha2.PodSchedulingContext, error)
UpdateStatus(ctx context.Context, podSchedulingContext *v1alpha2.PodSchedulingContext, opts v1.UpdateOptions) (*v1alpha2.PodSchedulingContext, error)
Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha2.PodSchedulingContext, error)
List(ctx context.Context, opts v1.ListOptions) (*v1alpha2.PodSchedulingContextList, error)
Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.PodSchedulingContext, err error)
Apply(ctx context.Context, podSchedulingContext *resourcev1alpha2.PodSchedulingContextApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.PodSchedulingContext, err error)
ApplyStatus(ctx context.Context, podSchedulingContext *resourcev1alpha2.PodSchedulingContextApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.PodSchedulingContext, err error)
PodSchedulingContextExpansion
}
// podSchedulingContexts implements PodSchedulingContextInterface
type podSchedulingContexts struct {
client rest.Interface
ns string
}
// newPodSchedulingContexts returns a PodSchedulingContexts
func newPodSchedulingContexts(c *ResourceV1alpha2Client, namespace string) *podSchedulingContexts {
return &podSchedulingContexts{
client: c.RESTClient(),
ns: namespace,
}
}
// Get takes name of the podSchedulingContext, and returns the corresponding podSchedulingContext object, and an error if there is any.
func (c *podSchedulingContexts) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha2.PodSchedulingContext, err error) {
result = &v1alpha2.PodSchedulingContext{}
err = c.client.Get().
Namespace(c.ns).
Resource("podschedulingcontexts").
Name(name).
VersionedParams(&options, scheme.ParameterCodec).
Do(ctx).
Into(result)
return
}
// List takes label and field selectors, and returns the list of PodSchedulingContexts that match those selectors.
func (c *podSchedulingContexts) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha2.PodSchedulingContextList, err error) {
var timeout time.Duration
if opts.TimeoutSeconds != nil {
timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
}
result = &v1alpha2.PodSchedulingContextList{}
err = c.client.Get().
Namespace(c.ns).
Resource("podschedulingcontexts").
VersionedParams(&opts, scheme.ParameterCodec).
Timeout(timeout).
Do(ctx).
Into(result)
return
}
// Watch returns a watch.Interface that watches the requested podSchedulingContexts.
func (c *podSchedulingContexts) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
var timeout time.Duration
if opts.TimeoutSeconds != nil {
timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
}
opts.Watch = true
return c.client.Get().
Namespace(c.ns).
Resource("podschedulingcontexts").
VersionedParams(&opts, scheme.ParameterCodec).
Timeout(timeout).
Watch(ctx)
}
// Create takes the representation of a podSchedulingContext and creates it. Returns the server's representation of the podSchedulingContext, and an error, if there is any.
func (c *podSchedulingContexts) Create(ctx context.Context, podSchedulingContext *v1alpha2.PodSchedulingContext, opts v1.CreateOptions) (result *v1alpha2.PodSchedulingContext, err error) {
result = &v1alpha2.PodSchedulingContext{}
err = c.client.Post().
Namespace(c.ns).
Resource("podschedulingcontexts").
VersionedParams(&opts, scheme.ParameterCodec).
Body(podSchedulingContext).
Do(ctx).
Into(result)
return
}
// Update takes the representation of a podSchedulingContext and updates it. Returns the server's representation of the podSchedulingContext, and an error, if there is any.
func (c *podSchedulingContexts) Update(ctx context.Context, podSchedulingContext *v1alpha2.PodSchedulingContext, opts v1.UpdateOptions) (result *v1alpha2.PodSchedulingContext, err error) {
result = &v1alpha2.PodSchedulingContext{}
err = c.client.Put().
Namespace(c.ns).
Resource("podschedulingcontexts").
Name(podSchedulingContext.Name).
VersionedParams(&opts, scheme.ParameterCodec).
Body(podSchedulingContext).
Do(ctx).
Into(result)
return
}
// UpdateStatus was generated because the type contains a Status member.
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
func (c *podSchedulingContexts) UpdateStatus(ctx context.Context, podSchedulingContext *v1alpha2.PodSchedulingContext, opts v1.UpdateOptions) (result *v1alpha2.PodSchedulingContext, err error) {
result = &v1alpha2.PodSchedulingContext{}
err = c.client.Put().
Namespace(c.ns).
Resource("podschedulingcontexts").
Name(podSchedulingContext.Name).
SubResource("status").
VersionedParams(&opts, scheme.ParameterCodec).
Body(podSchedulingContext).
Do(ctx).
Into(result)
return
}
// Delete takes name of the podSchedulingContext and deletes it. Returns an error if one occurs.
func (c *podSchedulingContexts) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
return c.client.Delete().
Namespace(c.ns).
Resource("podschedulingcontexts").
Name(name).
Body(&opts).
Do(ctx).
Error()
}
// DeleteCollection deletes a collection of objects.
func (c *podSchedulingContexts) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
var timeout time.Duration
if listOpts.TimeoutSeconds != nil {
timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
}
return c.client.Delete().
Namespace(c.ns).
Resource("podschedulingcontexts").
VersionedParams(&listOpts, scheme.ParameterCodec).
Timeout(timeout).
Body(&opts).
Do(ctx).
Error()
}
// Patch applies the patch and returns the patched podSchedulingContext.
func (c *podSchedulingContexts) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.PodSchedulingContext, err error) {
result = &v1alpha2.PodSchedulingContext{}
err = c.client.Patch(pt).
Namespace(c.ns).
Resource("podschedulingcontexts").
Name(name).
SubResource(subresources...).
VersionedParams(&opts, scheme.ParameterCodec).
Body(data).
Do(ctx).
Into(result)
return
}
// Apply takes the given apply declarative configuration, applies it and returns the applied podSchedulingContext.
func (c *podSchedulingContexts) Apply(ctx context.Context, podSchedulingContext *resourcev1alpha2.PodSchedulingContextApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.PodSchedulingContext, err error) {
if podSchedulingContext == nil {
return nil, fmt.Errorf("podSchedulingContext provided to Apply must not be nil")
}
patchOpts := opts.ToPatchOptions()
data, err := json.Marshal(podSchedulingContext)
if err != nil {
return nil, err
}
name := podSchedulingContext.Name
if name == nil {
return nil, fmt.Errorf("podSchedulingContext.Name must be provided to Apply")
}
result = &v1alpha2.PodSchedulingContext{}
err = c.client.Patch(types.ApplyPatchType).
Namespace(c.ns).
Resource("podschedulingcontexts").
Name(*name).
VersionedParams(&patchOpts, scheme.ParameterCodec).
Body(data).
Do(ctx).
Into(result)
return
}
// ApplyStatus was generated because the type contains a Status member.
// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
func (c *podSchedulingContexts) ApplyStatus(ctx context.Context, podSchedulingContext *resourcev1alpha2.PodSchedulingContextApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.PodSchedulingContext, err error) {
if podSchedulingContext == nil {
return nil, fmt.Errorf("podSchedulingContext provided to Apply must not be nil")
}
patchOpts := opts.ToPatchOptions()
data, err := json.Marshal(podSchedulingContext)
if err != nil {
return nil, err
}
name := podSchedulingContext.Name
if name == nil {
return nil, fmt.Errorf("podSchedulingContext.Name must be provided to Apply")
}
result = &v1alpha2.PodSchedulingContext{}
err = c.client.Patch(types.ApplyPatchType).
Namespace(c.ns).
Resource("podschedulingcontexts").
Name(*name).
SubResource("status").
VersionedParams(&patchOpts, scheme.ParameterCodec).
Body(data).
Do(ctx).
Into(result)
return
}
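A sketch of building the declarative intent that the generated Apply above marshals and sends as an apply patch; the PodSchedulingContext(name, namespace) constructor and the WithSelectedNode/WithPotentialNodes setters are assumed to follow the usual client-gen pattern and are not shown in this diff.

package main

import (
    "encoding/json"
    "fmt"

    resourcev1alpha2 "k8s.io/client-go/applyconfigurations/resource/v1alpha2"
)

func main() {
    // Declare the desired state; Apply on the typed client marshals exactly
    // this structure and submits it with types.ApplyPatchType (see above).
    cfg := resourcev1alpha2.PodSchedulingContext("my-pod", "default").
        WithSpec(resourcev1alpha2.PodSchedulingContextSpec().
            WithSelectedNode("node-a").
            WithPotentialNodes("node-a", "node-b"))

    data, err := json.Marshal(cfg)
    if err != nil {
        panic(err)
    }
    fmt.Println(string(data))
    // A real call would then be roughly:
    //   client.ResourceV1alpha2().PodSchedulingContexts("default").
    //       Apply(ctx, cfg, metav1.ApplyOptions{FieldManager: "my-controller"})
}
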


@ -28,7 +28,7 @@ import (
type ResourceV1alpha2Interface interface { type ResourceV1alpha2Interface interface {
RESTClient() rest.Interface RESTClient() rest.Interface
PodSchedulingsGetter PodSchedulingContextsGetter
ResourceClaimsGetter ResourceClaimsGetter
ResourceClaimTemplatesGetter ResourceClaimTemplatesGetter
ResourceClassesGetter ResourceClassesGetter
@ -39,8 +39,8 @@ type ResourceV1alpha2Client struct {
restClient rest.Interface restClient rest.Interface
} }
func (c *ResourceV1alpha2Client) PodSchedulings(namespace string) PodSchedulingInterface { func (c *ResourceV1alpha2Client) PodSchedulingContexts(namespace string) PodSchedulingContextInterface {
return newPodSchedulings(c, namespace) return newPodSchedulingContexts(c, namespace)
} }
func (c *ResourceV1alpha2Client) ResourceClaims(namespace string) ResourceClaimInterface { func (c *ResourceV1alpha2Client) ResourceClaims(namespace string) ResourceClaimInterface {
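A sketch of depending on the narrow PodSchedulingContextsGetter rather than the whole clientset, which keeps callers testable against the fake client shown earlier; the import alias is an assumption.

package main

import (
    "context"
    "fmt"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes/fake"
    resourcev1alpha2client "k8s.io/client-go/kubernetes/typed/resource/v1alpha2"
)

// countSchedulingContexts depends only on the getter interface declared above,
// so it accepts either the real ResourceV1alpha2Client or the fake one.
func countSchedulingContexts(ctx context.Context, getter resourcev1alpha2client.PodSchedulingContextsGetter, namespace string) (int, error) {
    list, err := getter.PodSchedulingContexts(namespace).List(ctx, metav1.ListOptions{})
    if err != nil {
        return 0, err
    }
    return len(list.Items), nil
}

func main() {
    client := fake.NewSimpleClientset()
    n, err := countSchedulingContexts(context.Background(), client.ResourceV1alpha2(), "default")
    if err != nil {
        panic(err)
    }
    fmt.Println("PodSchedulingContexts:", n)
}
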


@ -18,13 +18,13 @@ limitations under the License.
package v1alpha2 package v1alpha2
// PodSchedulingListerExpansion allows custom methods to be added to // PodSchedulingContextListerExpansion allows custom methods to be added to
// PodSchedulingLister. // PodSchedulingContextLister.
type PodSchedulingListerExpansion interface{} type PodSchedulingContextListerExpansion interface{}
// PodSchedulingNamespaceListerExpansion allows custom methods to be added to // PodSchedulingContextNamespaceListerExpansion allows custom methods to be added to
// PodSchedulingNamespaceLister. // PodSchedulingContextNamespaceLister.
type PodSchedulingNamespaceListerExpansion interface{} type PodSchedulingContextNamespaceListerExpansion interface{}
// ResourceClaimListerExpansion allows custom methods to be added to // ResourceClaimListerExpansion allows custom methods to be added to
// ResourceClaimLister. // ResourceClaimLister.


@ -1,99 +0,0 @@
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by lister-gen. DO NOT EDIT.
package v1alpha2
import (
v1alpha2 "k8s.io/api/resource/v1alpha2"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/client-go/tools/cache"
)
// PodSchedulingLister helps list PodSchedulings.
// All objects returned here must be treated as read-only.
type PodSchedulingLister interface {
// List lists all PodSchedulings in the indexer.
// Objects returned here must be treated as read-only.
List(selector labels.Selector) (ret []*v1alpha2.PodScheduling, err error)
// PodSchedulings returns an object that can list and get PodSchedulings.
PodSchedulings(namespace string) PodSchedulingNamespaceLister
PodSchedulingListerExpansion
}
// podSchedulingLister implements the PodSchedulingLister interface.
type podSchedulingLister struct {
indexer cache.Indexer
}
// NewPodSchedulingLister returns a new PodSchedulingLister.
func NewPodSchedulingLister(indexer cache.Indexer) PodSchedulingLister {
return &podSchedulingLister{indexer: indexer}
}
// List lists all PodSchedulings in the indexer.
func (s *podSchedulingLister) List(selector labels.Selector) (ret []*v1alpha2.PodScheduling, err error) {
err = cache.ListAll(s.indexer, selector, func(m interface{}) {
ret = append(ret, m.(*v1alpha2.PodScheduling))
})
return ret, err
}
// PodSchedulings returns an object that can list and get PodSchedulings.
func (s *podSchedulingLister) PodSchedulings(namespace string) PodSchedulingNamespaceLister {
return podSchedulingNamespaceLister{indexer: s.indexer, namespace: namespace}
}
// PodSchedulingNamespaceLister helps list and get PodSchedulings.
// All objects returned here must be treated as read-only.
type PodSchedulingNamespaceLister interface {
// List lists all PodSchedulings in the indexer for a given namespace.
// Objects returned here must be treated as read-only.
List(selector labels.Selector) (ret []*v1alpha2.PodScheduling, err error)
// Get retrieves the PodScheduling from the indexer for a given namespace and name.
// Objects returned here must be treated as read-only.
Get(name string) (*v1alpha2.PodScheduling, error)
PodSchedulingNamespaceListerExpansion
}
// podSchedulingNamespaceLister implements the PodSchedulingNamespaceLister
// interface.
type podSchedulingNamespaceLister struct {
indexer cache.Indexer
namespace string
}
// List lists all PodSchedulings in the indexer for a given namespace.
func (s podSchedulingNamespaceLister) List(selector labels.Selector) (ret []*v1alpha2.PodScheduling, err error) {
err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {
ret = append(ret, m.(*v1alpha2.PodScheduling))
})
return ret, err
}
// Get retrieves the PodScheduling from the indexer for a given namespace and name.
func (s podSchedulingNamespaceLister) Get(name string) (*v1alpha2.PodScheduling, error) {
obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name)
if err != nil {
return nil, err
}
if !exists {
return nil, errors.NewNotFound(v1alpha2.Resource("podscheduling"), name)
}
return obj.(*v1alpha2.PodScheduling), nil
}


@ -0,0 +1,99 @@
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by lister-gen. DO NOT EDIT.
package v1alpha2
import (
v1alpha2 "k8s.io/api/resource/v1alpha2"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/client-go/tools/cache"
)
// PodSchedulingContextLister helps list PodSchedulingContexts.
// All objects returned here must be treated as read-only.
type PodSchedulingContextLister interface {
// List lists all PodSchedulingContexts in the indexer.
// Objects returned here must be treated as read-only.
List(selector labels.Selector) (ret []*v1alpha2.PodSchedulingContext, err error)
// PodSchedulingContexts returns an object that can list and get PodSchedulingContexts.
PodSchedulingContexts(namespace string) PodSchedulingContextNamespaceLister
PodSchedulingContextListerExpansion
}
// podSchedulingContextLister implements the PodSchedulingContextLister interface.
type podSchedulingContextLister struct {
indexer cache.Indexer
}
// NewPodSchedulingContextLister returns a new PodSchedulingContextLister.
func NewPodSchedulingContextLister(indexer cache.Indexer) PodSchedulingContextLister {
return &podSchedulingContextLister{indexer: indexer}
}
// List lists all PodSchedulingContexts in the indexer.
func (s *podSchedulingContextLister) List(selector labels.Selector) (ret []*v1alpha2.PodSchedulingContext, err error) {
err = cache.ListAll(s.indexer, selector, func(m interface{}) {
ret = append(ret, m.(*v1alpha2.PodSchedulingContext))
})
return ret, err
}
// PodSchedulingContexts returns an object that can list and get PodSchedulingContexts.
func (s *podSchedulingContextLister) PodSchedulingContexts(namespace string) PodSchedulingContextNamespaceLister {
return podSchedulingContextNamespaceLister{indexer: s.indexer, namespace: namespace}
}
// PodSchedulingContextNamespaceLister helps list and get PodSchedulingContexts.
// All objects returned here must be treated as read-only.
type PodSchedulingContextNamespaceLister interface {
// List lists all PodSchedulingContexts in the indexer for a given namespace.
// Objects returned here must be treated as read-only.
List(selector labels.Selector) (ret []*v1alpha2.PodSchedulingContext, err error)
// Get retrieves the PodSchedulingContext from the indexer for a given namespace and name.
// Objects returned here must be treated as read-only.
Get(name string) (*v1alpha2.PodSchedulingContext, error)
PodSchedulingContextNamespaceListerExpansion
}
// podSchedulingContextNamespaceLister implements the PodSchedulingContextNamespaceLister
// interface.
type podSchedulingContextNamespaceLister struct {
indexer cache.Indexer
namespace string
}
// List lists all PodSchedulingContexts in the indexer for a given namespace.
func (s podSchedulingContextNamespaceLister) List(selector labels.Selector) (ret []*v1alpha2.PodSchedulingContext, err error) {
err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {
ret = append(ret, m.(*v1alpha2.PodSchedulingContext))
})
return ret, err
}
// Get retrieves the PodSchedulingContext from the indexer for a given namespace and name.
func (s podSchedulingContextNamespaceLister) Get(name string) (*v1alpha2.PodSchedulingContext, error) {
obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name)
if err != nil {
return nil, err
}
if !exists {
return nil, errors.NewNotFound(v1alpha2.Resource("podschedulingcontext"), name)
}
return obj.(*v1alpha2.PodSchedulingContext), nil
}
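A sketch of using the new lister directly against a hand-built namespace-indexed store; in real code the indexer comes from the informer shown earlier.

package main

import (
    "fmt"

    resourcev1alpha2 "k8s.io/api/resource/v1alpha2"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/labels"
    resourcev1alpha2listers "k8s.io/client-go/listers/resource/v1alpha2"
    "k8s.io/client-go/tools/cache"
)

func main() {
    // Back the lister with a namespace-indexed store, the same way the informer
    // wires it up, and seed one object by hand.
    indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc,
        cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
    _ = indexer.Add(&resourcev1alpha2.PodSchedulingContext{
        ObjectMeta: metav1.ObjectMeta{Name: "my-pod", Namespace: "default"},
    })

    lister := resourcev1alpha2listers.NewPodSchedulingContextLister(indexer)
    objs, err := lister.PodSchedulingContexts("default").List(labels.Everything())
    if err != nil {
        panic(err)
    }
    fmt.Printf("found %d PodSchedulingContext(s) in default\n", len(objs))
}
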


@ -139,9 +139,9 @@ type controller struct {
rcLister resourcev1alpha2listers.ResourceClassLister rcLister resourcev1alpha2listers.ResourceClassLister
rcSynced cache.InformerSynced rcSynced cache.InformerSynced
claimCache cache.MutationCache claimCache cache.MutationCache
podSchedulingLister resourcev1alpha2listers.PodSchedulingLister schedulingCtxLister resourcev1alpha2listers.PodSchedulingContextLister
claimSynced cache.InformerSynced claimSynced cache.InformerSynced
podSchedulingSynced cache.InformerSynced schedulingCtxSynced cache.InformerSynced
} }
// TODO: make it configurable // TODO: make it configurable
@ -157,7 +157,7 @@ func New(
logger := klog.LoggerWithName(klog.FromContext(ctx), "resource controller") logger := klog.LoggerWithName(klog.FromContext(ctx), "resource controller")
rcInformer := informerFactory.Resource().V1alpha2().ResourceClasses() rcInformer := informerFactory.Resource().V1alpha2().ResourceClasses()
claimInformer := informerFactory.Resource().V1alpha2().ResourceClaims() claimInformer := informerFactory.Resource().V1alpha2().ResourceClaims()
podSchedulingInformer := informerFactory.Resource().V1alpha2().PodSchedulings() schedulingCtxInformer := informerFactory.Resource().V1alpha2().PodSchedulingContexts()
eventBroadcaster := record.NewBroadcaster() eventBroadcaster := record.NewBroadcaster()
go func() { go func() {
@ -177,7 +177,7 @@ func New(
eventRecorder := eventBroadcaster.NewRecorder(scheme.Scheme, eventRecorder := eventBroadcaster.NewRecorder(scheme.Scheme,
v1.EventSource{Component: fmt.Sprintf("resource driver %s", name)}) v1.EventSource{Component: fmt.Sprintf("resource driver %s", name)})
// The work queue contains either keys for claims or PodScheduling objects. // The work queue contains either keys for claims or PodSchedulingContext objects.
queue := workqueue.NewNamedRateLimitingQueue( queue := workqueue.NewNamedRateLimitingQueue(
workqueue.DefaultControllerRateLimiter(), fmt.Sprintf("%s-queue", name)) workqueue.DefaultControllerRateLimiter(), fmt.Sprintf("%s-queue", name))
@ -199,8 +199,8 @@ func New(
rcSynced: rcInformer.Informer().HasSynced, rcSynced: rcInformer.Informer().HasSynced,
claimCache: claimCache, claimCache: claimCache,
claimSynced: claimInformer.Informer().HasSynced, claimSynced: claimInformer.Informer().HasSynced,
podSchedulingLister: podSchedulingInformer.Lister(), schedulingCtxLister: schedulingCtxInformer.Lister(),
podSchedulingSynced: podSchedulingInformer.Informer().HasSynced, schedulingCtxSynced: schedulingCtxInformer.Informer().HasSynced,
queue: queue, queue: queue,
eventRecorder: eventRecorder, eventRecorder: eventRecorder,
} }
@ -209,11 +209,11 @@ func New(
if loggerV6.Enabled() { if loggerV6.Enabled() {
resourceClaimLogger := klog.LoggerWithValues(loggerV6, "type", "ResourceClaim") resourceClaimLogger := klog.LoggerWithValues(loggerV6, "type", "ResourceClaim")
_, _ = claimInformer.Informer().AddEventHandler(resourceEventHandlerFuncs(&resourceClaimLogger, ctrl)) _, _ = claimInformer.Informer().AddEventHandler(resourceEventHandlerFuncs(&resourceClaimLogger, ctrl))
podSchedulingLogger := klog.LoggerWithValues(loggerV6, "type", "PodScheduling") schedulingCtxLogger := klog.LoggerWithValues(loggerV6, "type", "PodSchedulingContext")
_, _ = podSchedulingInformer.Informer().AddEventHandler(resourceEventHandlerFuncs(&podSchedulingLogger, ctrl)) _, _ = schedulingCtxInformer.Informer().AddEventHandler(resourceEventHandlerFuncs(&schedulingCtxLogger, ctrl))
} else { } else {
_, _ = claimInformer.Informer().AddEventHandler(resourceEventHandlerFuncs(nil, ctrl)) _, _ = claimInformer.Informer().AddEventHandler(resourceEventHandlerFuncs(nil, ctrl))
_, _ = podSchedulingInformer.Informer().AddEventHandler(resourceEventHandlerFuncs(nil, ctrl)) _, _ = schedulingCtxInformer.Informer().AddEventHandler(resourceEventHandlerFuncs(nil, ctrl))
} }
return ctrl return ctrl
@ -233,7 +233,7 @@ func resourceEventHandlerFuncs(logger *klog.Logger, ctrl *controller) cache.Reso
const ( const (
claimKeyPrefix = "claim:" claimKeyPrefix = "claim:"
podSchedulingKeyPrefix = "podscheduling:" schedulingCtxKeyPrefix = "schedulingCtx:"
) )
func (ctrl *controller) add(logger *klog.Logger, obj interface{}) { func (ctrl *controller) add(logger *klog.Logger, obj interface{}) {
@ -279,8 +279,8 @@ func getKey(obj interface{}) (string, error) {
switch obj.(type) { switch obj.(type) {
case *resourcev1alpha2.ResourceClaim: case *resourcev1alpha2.ResourceClaim:
prefix = claimKeyPrefix prefix = claimKeyPrefix
case *resourcev1alpha2.PodScheduling: case *resourcev1alpha2.PodSchedulingContext:
prefix = podSchedulingKeyPrefix prefix = schedulingCtxKeyPrefix
default: default:
return "", fmt.Errorf("unexpected object: %T", obj) return "", fmt.Errorf("unexpected object: %T", obj)
} }
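
The rename also reaches the shared work queue: ResourceClaim and PodSchedulingContext objects go into a single queue, distinguished by the claimKeyPrefix/schedulingCtxKeyPrefix constants above. Below is a minimal sketch of that keying pattern, using stand-in types rather than the real API structs (claimStub, schedulingCtxStub and the metaKey helper are illustrative assumptions, not the controller's code):

package main

import "fmt"

type claimStub struct{ namespace, name string }
type schedulingCtxStub struct{ namespace, name string }

const (
	claimKeyPrefix         = "claim:"
	schedulingCtxKeyPrefix = "schedulingCtx:"
)

// metaKey mimics the usual namespace/name key format.
func metaKey(namespace, name string) string { return namespace + "/" + name }

// getKey prefixes the namespace/name key with the object type so that a
// single work queue can carry both kinds of objects.
func getKey(obj interface{}) (string, error) {
	switch o := obj.(type) {
	case *claimStub:
		return claimKeyPrefix + metaKey(o.namespace, o.name), nil
	case *schedulingCtxStub:
		return schedulingCtxKeyPrefix + metaKey(o.namespace, o.name), nil
	default:
		return "", fmt.Errorf("unexpected object: %T", obj)
	}
}

func main() {
	key, _ := getKey(&schedulingCtxStub{namespace: "default", name: "pod"})
	fmt.Println(key) // schedulingCtx:default/pod
}
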
@ -297,7 +297,7 @@ func (ctrl *controller) Run(workers int) {
stopCh := ctrl.ctx.Done() stopCh := ctrl.ctx.Done()
if !cache.WaitForCacheSync(stopCh, ctrl.rcSynced, ctrl.claimSynced, ctrl.podSchedulingSynced) { if !cache.WaitForCacheSync(stopCh, ctrl.rcSynced, ctrl.claimSynced, ctrl.schedulingCtxSynced) {
ctrl.logger.Error(nil, "Cannot sync caches") ctrl.logger.Error(nil, "Cannot sync caches")
return return
} }
@ -370,16 +370,16 @@ func (ctrl *controller) syncKey(ctx context.Context, key string) (obj runtime.Ob
return nil, err return nil, err
} }
obj, finalErr = claim, ctrl.syncClaim(ctx, claim) obj, finalErr = claim, ctrl.syncClaim(ctx, claim)
case podSchedulingKeyPrefix: case schedulingCtxKeyPrefix:
podScheduling, err := ctrl.podSchedulingLister.PodSchedulings(namespace).Get(name) schedulingCtx, err := ctrl.schedulingCtxLister.PodSchedulingContexts(namespace).Get(name)
if err != nil { if err != nil {
if k8serrors.IsNotFound(err) { if k8serrors.IsNotFound(err) {
klog.FromContext(ctx).V(5).Info("PodScheduling was deleted, no need to process it") klog.FromContext(ctx).V(5).Info("PodSchedulingContext was deleted, no need to process it")
return nil, nil return nil, nil
} }
return nil, err return nil, err
} }
obj, finalErr = podScheduling, ctrl.syncPodScheduling(ctx, podScheduling) obj, finalErr = schedulingCtx, ctrl.syncPodSchedulingContexts(ctx, schedulingCtx)
} }
return return
} }
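
syncKey reverses that encoding: it strips the type prefix, resolves namespace and name through the matching lister, and dispatches to syncClaim or syncPodSchedulingContexts. A rough sketch of just the key-parsing step (splitKey is an assumed helper for illustration, not the controller's actual implementation):

package main

import (
	"fmt"
	"strings"
)

// splitKey takes a queue key such as "schedulingCtx:default/pod" and returns
// the type prefix plus the namespace and name of the object.
func splitKey(key string) (prefix, namespace, name string, err error) {
	sep := strings.Index(key, ":")
	if sep < 0 {
		return "", "", "", fmt.Errorf("key %q has no type prefix", key)
	}
	prefix = key[:sep+1]
	parts := strings.SplitN(key[sep+1:], "/", 2)
	if len(parts) != 2 {
		return "", "", "", fmt.Errorf("key %q has no namespace/name part", key)
	}
	return prefix, parts[0], parts[1], nil
}

func main() {
	prefix, namespace, name, err := splitKey("schedulingCtx:default/pod")
	fmt.Println(prefix, namespace, name, err) // schedulingCtx: default pod <nil>
}
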
@ -525,9 +525,9 @@ func (ctrl *controller) allocateClaim(ctx context.Context,
logger := klog.FromContext(ctx) logger := klog.FromContext(ctx)
if claim.Status.Allocation != nil { if claim.Status.Allocation != nil {
// This can happen when two PodScheduling objects trigger // This can happen when two PodSchedulingContext objects trigger
// allocation attempts (first one wins) or when we see the // allocation attempts (first one wins) or when we see the
// update of the PodScheduling object. // update of the PodSchedulingContext object.
logger.V(5).Info("Claim already allocated, nothing to do") logger.V(5).Info("Claim already allocated, nothing to do")
return nil return nil
} }
@ -601,19 +601,19 @@ func (ctrl *controller) checkPodClaim(ctx context.Context, pod *v1.Pod, podClaim
}, nil }, nil
} }
// syncClaim determines which next action may be needed for a PodScheduling object // syncPodSchedulingContexts determines which next action may be needed for a PodSchedulingContext object
// and does it. // and does it.
func (ctrl *controller) syncPodScheduling(ctx context.Context, podScheduling *resourcev1alpha2.PodScheduling) error { func (ctrl *controller) syncPodSchedulingContexts(ctx context.Context, schedulingCtx *resourcev1alpha2.PodSchedulingContext) error {
logger := klog.FromContext(ctx) logger := klog.FromContext(ctx)
// Ignore deleted objects. // Ignore deleted objects.
if podScheduling.DeletionTimestamp != nil { if schedulingCtx.DeletionTimestamp != nil {
logger.V(5).Info("PodScheduling marked for deletion") logger.V(5).Info("PodSchedulingContext marked for deletion")
return nil return nil
} }
if podScheduling.Spec.SelectedNode == "" && if schedulingCtx.Spec.SelectedNode == "" &&
len(podScheduling.Spec.PotentialNodes) == 0 { len(schedulingCtx.Spec.PotentialNodes) == 0 {
// Nothing to do? Shouldn't occur. // Nothing to do? Shouldn't occur.
logger.V(5).Info("Waiting for scheduler to set fields") logger.V(5).Info("Waiting for scheduler to set fields")
return nil return nil
@ -621,8 +621,8 @@ func (ctrl *controller) syncPodScheduling(ctx context.Context, podScheduling *re
// Check pod. // Check pod.
// TODO (?): use an informer - only useful when many (most?) pods have claims // TODO (?): use an informer - only useful when many (most?) pods have claims
// TODO (?): let the scheduler copy all claim names + UIDs into PodScheduling - then we don't need the pod // TODO (?): let the scheduler copy all claim names + UIDs into PodSchedulingContext - then we don't need the pod
pod, err := ctrl.kubeClient.CoreV1().Pods(podScheduling.Namespace).Get(ctx, podScheduling.Name, metav1.GetOptions{}) pod, err := ctrl.kubeClient.CoreV1().Pods(schedulingCtx.Namespace).Get(ctx, schedulingCtx.Name, metav1.GetOptions{})
if err != nil { if err != nil {
return err return err
} }
@ -632,16 +632,16 @@ func (ctrl *controller) syncPodScheduling(ctx context.Context, podScheduling *re
} }
// Still the owner? // Still the owner?
if !metav1.IsControlledBy(podScheduling, pod) { if !metav1.IsControlledBy(schedulingCtx, pod) {
// Must be obsolete object, do nothing for it. // Must be obsolete object, do nothing for it.
logger.V(5).Info("Pod not owner, PodScheduling is obsolete") logger.V(5).Info("Pod not owner, PodSchedulingContext is obsolete")
return nil return nil
} }
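
The ownership check above relies on the PodSchedulingContext carrying a controller owner reference to its Pod; metav1.IsControlledBy matches on the controller reference's UID. A small self-contained illustration of that check, with made-up names and UIDs:

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
)

func main() {
	isController := true
	pod := &metav1.ObjectMeta{Name: "pod", Namespace: "default", UID: types.UID("pod-uid")}
	schedulingCtx := &metav1.ObjectMeta{
		Name:      "pod",
		Namespace: "default",
		OwnerReferences: []metav1.OwnerReference{{
			APIVersion: "v1",
			Kind:       "Pod",
			Name:       "pod",
			UID:        types.UID("pod-uid"),
			Controller: &isController,
		}},
	}
	// True because the controller owner reference's UID matches the pod's UID.
	fmt.Println(metav1.IsControlledBy(schedulingCtx, pod))
}
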
// Find all pending claims that are owned by us. We bail out if any of the pre-requisites // Find all pending claims that are owned by us. We bail out if any of the pre-requisites
// for pod scheduling (claims exist, classes exist, parameters exist) are not met. // for pod scheduling (claims exist, classes exist, parameters exist) are not met.
// The scheduler will do the same, except for checking parameters, so usually // The scheduler will do the same, except for checking parameters, so usually
// everything should be ready once the PodScheduling object exists. // everything should be ready once the PodSchedulingContext object exists.
var claims claimAllocations var claims claimAllocations
for _, podClaim := range pod.Spec.ResourceClaims { for _, podClaim := range pod.Spec.ResourceClaims {
delayed, err := ctrl.checkPodClaim(ctx, pod, podClaim) delayed, err := ctrl.checkPodClaim(ctx, pod, podClaim)
@ -665,12 +665,12 @@ func (ctrl *controller) syncPodScheduling(ctx context.Context, podScheduling *re
// and shouldn't, because those allocations might have to be undone to // and shouldn't, because those allocations might have to be undone to
// pick a better node. If we don't need to allocate now, then we'll // pick a better node. If we don't need to allocate now, then we'll
// simply report back the gathered information. // simply report back the gathered information.
if len(podScheduling.Spec.PotentialNodes) > 0 { if len(schedulingCtx.Spec.PotentialNodes) > 0 {
if err := ctrl.driver.UnsuitableNodes(ctx, pod, claims, podScheduling.Spec.PotentialNodes); err != nil { if err := ctrl.driver.UnsuitableNodes(ctx, pod, claims, schedulingCtx.Spec.PotentialNodes); err != nil {
return fmt.Errorf("checking potential nodes: %v", err) return fmt.Errorf("checking potential nodes: %v", err)
} }
} }
selectedNode := podScheduling.Spec.SelectedNode selectedNode := schedulingCtx.Spec.SelectedNode
logger.V(5).Info("pending pod claims", "claims", claims, "selectedNode", selectedNode) logger.V(5).Info("pending pod claims", "claims", claims, "selectedNode", selectedNode)
if selectedNode != "" { if selectedNode != "" {
unsuitable := false unsuitable := false
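
Once the driver has reported unsuitable nodes, the selected node can only be used if none of the pending claims list it as unsuitable. A hedged sketch of that decision, using plain string slices instead of the real ResourceClaimSchedulingStatus type (nodeIsUnsuitable is an illustrative helper, not the controller's code):

package main

import "fmt"

// nodeIsUnsuitable reports whether the selected node appears in any claim's
// list of unsuitable nodes.
func nodeIsUnsuitable(selectedNode string, unsuitableNodesPerClaim map[string][]string) bool {
	for _, nodes := range unsuitableNodesPerClaim {
		for _, node := range nodes {
			if node == selectedNode {
				return true
			}
		}
	}
	return false
}

func main() {
	unsuitable := map[string][]string{"my-pod-claim": {"worker-2"}}
	fmt.Println(nodeIsUnsuitable("worker", unsuitable))   // false: allocation can proceed on "worker"
	fmt.Println(nodeIsUnsuitable("worker-2", unsuitable)) // true: the scheduler must pick another node
}
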
@ -703,26 +703,26 @@ func (ctrl *controller) syncPodScheduling(ctx context.Context, podScheduling *re
// TODO: replace with patching the array. We can do that without race conditions // TODO: replace with patching the array. We can do that without race conditions
// because each driver is responsible for its own entries. // because each driver is responsible for its own entries.
modified := false modified := false
podScheduling = podScheduling.DeepCopy() schedulingCtx = schedulingCtx.DeepCopy()
for _, delayed := range claims { for _, delayed := range claims {
i := findClaim(podScheduling.Status.ResourceClaims, delayed.PodClaimName) i := findClaim(schedulingCtx.Status.ResourceClaims, delayed.PodClaimName)
if i < 0 { if i < 0 {
// Add new entry. // Add new entry.
podScheduling.Status.ResourceClaims = append(podScheduling.Status.ResourceClaims, schedulingCtx.Status.ResourceClaims = append(schedulingCtx.Status.ResourceClaims,
resourcev1alpha2.ResourceClaimSchedulingStatus{ resourcev1alpha2.ResourceClaimSchedulingStatus{
Name: delayed.PodClaimName, Name: delayed.PodClaimName,
UnsuitableNodes: delayed.UnsuitableNodes, UnsuitableNodes: delayed.UnsuitableNodes,
}) })
modified = true modified = true
} else if stringsDiffer(podScheduling.Status.ResourceClaims[i].UnsuitableNodes, delayed.UnsuitableNodes) { } else if stringsDiffer(schedulingCtx.Status.ResourceClaims[i].UnsuitableNodes, delayed.UnsuitableNodes) {
// Update existing entry. // Update existing entry.
podScheduling.Status.ResourceClaims[i].UnsuitableNodes = delayed.UnsuitableNodes schedulingCtx.Status.ResourceClaims[i].UnsuitableNodes = delayed.UnsuitableNodes
modified = true modified = true
} }
} }
if modified { if modified {
logger.V(6).Info("Updating pod scheduling with modified unsuitable nodes", "podScheduling", podScheduling) logger.V(6).Info("Updating pod scheduling with modified unsuitable nodes", "podSchedulingCtx", schedulingCtx)
if _, err := ctrl.kubeClient.ResourceV1alpha2().PodSchedulings(podScheduling.Namespace).UpdateStatus(ctx, podScheduling, metav1.UpdateOptions{}); err != nil { if _, err := ctrl.kubeClient.ResourceV1alpha2().PodSchedulingContexts(schedulingCtx.Namespace).UpdateStatus(ctx, schedulingCtx, metav1.UpdateOptions{}); err != nil {
return fmt.Errorf("update unsuitable node status: %v", err) return fmt.Errorf("update unsuitable node status: %v", err)
} }
} }
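
The status update above follows an append-or-update pattern: each driver only touches the Status.ResourceClaims entries it is responsible for, and UpdateStatus is called only when something actually changed. A minimal sketch of that merge logic with a stand-in status type (statusEntry, mergeUnsuitableNodes and this simplified stringsDiffer are illustrative, not the controller's exported API):

package main

import "fmt"

type statusEntry struct {
	Name            string
	UnsuitableNodes []string
}

// stringsDiffer is an order-sensitive comparison; good enough for the sketch.
func stringsDiffer(a, b []string) bool {
	if len(a) != len(b) {
		return true
	}
	for i := range a {
		if a[i] != b[i] {
			return true
		}
	}
	return false
}

// mergeUnsuitableNodes adds or updates the entry for claimName and reports
// whether the status slice was modified.
func mergeUnsuitableNodes(entries []statusEntry, claimName string, nodes []string) ([]statusEntry, bool) {
	for i := range entries {
		if entries[i].Name != claimName {
			continue
		}
		if stringsDiffer(entries[i].UnsuitableNodes, nodes) {
			entries[i].UnsuitableNodes = nodes
			return entries, true
		}
		return entries, false
	}
	return append(entries, statusEntry{Name: claimName, UnsuitableNodes: nodes}), true
}

func main() {
	entries, modified := mergeUnsuitableNodes(nil, "my-pod-claim", []string{"worker-2"})
	fmt.Println(entries, modified) // [{my-pod-claim [worker-2]}] true
	_, modified = mergeUnsuitableNodes(entries, "my-pod-claim", []string{"worker-2"})
	fmt.Println(modified) // false: nothing changed, so UpdateStatus can be skipped
}
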

View File

@ -55,10 +55,10 @@ func TestController(t *testing.T) {
delayedClaim := claim.DeepCopy() delayedClaim := claim.DeepCopy()
delayedClaim.Spec.AllocationMode = resourcev1alpha2.AllocationModeWaitForFirstConsumer delayedClaim.Spec.AllocationMode = resourcev1alpha2.AllocationModeWaitForFirstConsumer
podName := "pod" podName := "pod"
podKey := "podscheduling:default/pod" podKey := "schedulingCtx:default/pod"
pod := createPod(podName, claimNamespace, nil) pod := createPod(podName, claimNamespace, nil)
podClaimName := "my-pod-claim" podClaimName := "my-pod-claim"
podScheduling := createPodScheduling(pod) podSchedulingCtx := createPodSchedulingContexts(pod)
podWithClaim := createPod(podName, claimNamespace, map[string]string{podClaimName: claimName}) podWithClaim := createPod(podName, claimNamespace, map[string]string{podClaimName: claimName})
nodeName := "worker" nodeName := "worker"
otherNodeName := "worker-2" otherNodeName := "worker-2"
@ -96,22 +96,22 @@ func TestController(t *testing.T) {
claim.Status.DeallocationRequested = true claim.Status.DeallocationRequested = true
return claim return claim
} }
withSelectedNode := func(podScheduling *resourcev1alpha2.PodScheduling) *resourcev1alpha2.PodScheduling { withSelectedNode := func(podSchedulingCtx *resourcev1alpha2.PodSchedulingContext) *resourcev1alpha2.PodSchedulingContext {
podScheduling = podScheduling.DeepCopy() podSchedulingCtx = podSchedulingCtx.DeepCopy()
podScheduling.Spec.SelectedNode = nodeName podSchedulingCtx.Spec.SelectedNode = nodeName
return podScheduling return podSchedulingCtx
} }
withUnsuitableNodes := func(podScheduling *resourcev1alpha2.PodScheduling) *resourcev1alpha2.PodScheduling { withUnsuitableNodes := func(podSchedulingCtx *resourcev1alpha2.PodSchedulingContext) *resourcev1alpha2.PodSchedulingContext {
podScheduling = podScheduling.DeepCopy() podSchedulingCtx = podSchedulingCtx.DeepCopy()
podScheduling.Status.ResourceClaims = append(podScheduling.Status.ResourceClaims, podSchedulingCtx.Status.ResourceClaims = append(podSchedulingCtx.Status.ResourceClaims,
resourcev1alpha2.ResourceClaimSchedulingStatus{Name: podClaimName, UnsuitableNodes: unsuitableNodes}, resourcev1alpha2.ResourceClaimSchedulingStatus{Name: podClaimName, UnsuitableNodes: unsuitableNodes},
) )
return podScheduling return podSchedulingCtx
} }
withPotentialNodes := func(podScheduling *resourcev1alpha2.PodScheduling) *resourcev1alpha2.PodScheduling { withPotentialNodes := func(podSchedulingCtx *resourcev1alpha2.PodSchedulingContext) *resourcev1alpha2.PodSchedulingContext {
podScheduling = podScheduling.DeepCopy() podSchedulingCtx = podSchedulingCtx.DeepCopy()
podScheduling.Spec.PotentialNodes = potentialNodes podSchedulingCtx.Spec.PotentialNodes = potentialNodes
return podScheduling return podSchedulingCtx
} }
var m mockDriver var m mockDriver
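
The with* helpers above all follow the same shape: deep-copy the shared fixture, mutate one field, return the copy, so test cases can be composed without aliasing the base object. A generic sketch of that pattern with a made-up fixture type (fixture, deepCopy and these helper names are assumptions for illustration):

package main

import "fmt"

type fixture struct {
	SelectedNode   string
	PotentialNodes []string
}

// deepCopy returns an independent copy, including the slice backing array.
func (f *fixture) deepCopy() *fixture {
	c := *f
	c.PotentialNodes = append([]string(nil), f.PotentialNodes...)
	return &c
}

func withSelectedNode(f *fixture, node string) *fixture {
	f = f.deepCopy()
	f.SelectedNode = node
	return f
}

func withPotentialNodes(f *fixture, nodes ...string) *fixture {
	f = f.deepCopy()
	f.PotentialNodes = nodes
	return f
}

func main() {
	base := &fixture{}
	composed := withSelectedNode(withPotentialNodes(base, "worker", "worker-2"), "worker")
	fmt.Println(base, composed) // base stays untouched; composed has both fields set
}
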
@ -121,7 +121,7 @@ func TestController(t *testing.T) {
driver mockDriver driver mockDriver
classes []*resourcev1alpha2.ResourceClass classes []*resourcev1alpha2.ResourceClass
pod *corev1.Pod pod *corev1.Pod
podScheduling, expectedPodScheduling *resourcev1alpha2.PodScheduling schedulingCtx, expectedSchedulingCtx *resourcev1alpha2.PodSchedulingContext
claim, expectedClaim *resourcev1alpha2.ResourceClaim claim, expectedClaim *resourcev1alpha2.ResourceClaim
expectedError string expectedError string
}{ }{
@ -308,8 +308,8 @@ func TestController(t *testing.T) {
"pod-nop": { "pod-nop": {
key: podKey, key: podKey,
pod: pod, pod: pod,
podScheduling: withSelectedNode(podScheduling), schedulingCtx: withSelectedNode(podSchedulingCtx),
expectedPodScheduling: withSelectedNode(podScheduling), expectedSchedulingCtx: withSelectedNode(podSchedulingCtx),
expectedError: errPeriodic.Error(), expectedError: errPeriodic.Error(),
}, },
@ -319,8 +319,8 @@ func TestController(t *testing.T) {
claim: claim, claim: claim,
expectedClaim: claim, expectedClaim: claim,
pod: podWithClaim, pod: podWithClaim,
podScheduling: withSelectedNode(podScheduling), schedulingCtx: withSelectedNode(podSchedulingCtx),
expectedPodScheduling: withSelectedNode(podScheduling), expectedSchedulingCtx: withSelectedNode(podSchedulingCtx),
expectedError: errPeriodic.Error(), expectedError: errPeriodic.Error(),
}, },
@ -331,8 +331,8 @@ func TestController(t *testing.T) {
claim: delayedClaim, claim: delayedClaim,
expectedClaim: delayedClaim, expectedClaim: delayedClaim,
pod: podWithClaim, pod: podWithClaim,
podScheduling: podScheduling, schedulingCtx: podSchedulingCtx,
expectedPodScheduling: podScheduling, expectedSchedulingCtx: podSchedulingCtx,
}, },
// pod with delayed allocation, potential nodes -> provide unsuitable nodes // pod with delayed allocation, potential nodes -> provide unsuitable nodes
@ -342,11 +342,11 @@ func TestController(t *testing.T) {
claim: delayedClaim, claim: delayedClaim,
expectedClaim: delayedClaim, expectedClaim: delayedClaim,
pod: podWithClaim, pod: podWithClaim,
podScheduling: withPotentialNodes(podScheduling), schedulingCtx: withPotentialNodes(podSchedulingCtx),
driver: m.expectClassParameters(map[string]interface{}{className: 1}). driver: m.expectClassParameters(map[string]interface{}{className: 1}).
expectClaimParameters(map[string]interface{}{claimName: 2}). expectClaimParameters(map[string]interface{}{claimName: 2}).
expectUnsuitableNodes(map[string][]string{podClaimName: unsuitableNodes}, nil), expectUnsuitableNodes(map[string][]string{podClaimName: unsuitableNodes}, nil),
expectedPodScheduling: withUnsuitableNodes(withPotentialNodes(podScheduling)), expectedSchedulingCtx: withUnsuitableNodes(withPotentialNodes(podSchedulingCtx)),
expectedError: errPeriodic.Error(), expectedError: errPeriodic.Error(),
}, },
@ -356,8 +356,8 @@ func TestController(t *testing.T) {
claim: delayedClaim, claim: delayedClaim,
expectedClaim: delayedClaim, expectedClaim: delayedClaim,
pod: podWithClaim, pod: podWithClaim,
podScheduling: withSelectedNode(withPotentialNodes(podScheduling)), schedulingCtx: withSelectedNode(withPotentialNodes(podSchedulingCtx)),
expectedPodScheduling: withSelectedNode(withPotentialNodes(podScheduling)), expectedSchedulingCtx: withSelectedNode(withPotentialNodes(podSchedulingCtx)),
expectedError: `pod claim my-pod-claim: resourceclass.resource.k8s.io "mock-class" not found`, expectedError: `pod claim my-pod-claim: resourceclass.resource.k8s.io "mock-class" not found`,
}, },
@ -368,12 +368,12 @@ func TestController(t *testing.T) {
claim: delayedClaim, claim: delayedClaim,
expectedClaim: withReservedFor(withAllocate(delayedClaim), pod), expectedClaim: withReservedFor(withAllocate(delayedClaim), pod),
pod: podWithClaim, pod: podWithClaim,
podScheduling: withSelectedNode(withPotentialNodes(podScheduling)), schedulingCtx: withSelectedNode(withPotentialNodes(podSchedulingCtx)),
driver: m.expectClassParameters(map[string]interface{}{className: 1}). driver: m.expectClassParameters(map[string]interface{}{className: 1}).
expectClaimParameters(map[string]interface{}{claimName: 2}). expectClaimParameters(map[string]interface{}{claimName: 2}).
expectUnsuitableNodes(map[string][]string{podClaimName: unsuitableNodes}, nil). expectUnsuitableNodes(map[string][]string{podClaimName: unsuitableNodes}, nil).
expectAllocate(map[string]allocate{claimName: {allocResult: &allocation, selectedNode: nodeName, allocErr: nil}}), expectAllocate(map[string]allocate{claimName: {allocResult: &allocation, selectedNode: nodeName, allocErr: nil}}),
expectedPodScheduling: withUnsuitableNodes(withSelectedNode(withPotentialNodes(podScheduling))), expectedSchedulingCtx: withUnsuitableNodes(withSelectedNode(withPotentialNodes(podSchedulingCtx))),
expectedError: errPeriodic.Error(), expectedError: errPeriodic.Error(),
}, },
} { } {
@ -388,8 +388,8 @@ func TestController(t *testing.T) {
if test.pod != nil { if test.pod != nil {
initialObjects = append(initialObjects, test.pod) initialObjects = append(initialObjects, test.pod)
} }
if test.podScheduling != nil { if test.schedulingCtx != nil {
initialObjects = append(initialObjects, test.podScheduling) initialObjects = append(initialObjects, test.schedulingCtx)
} }
if test.claim != nil { if test.claim != nil {
initialObjects = append(initialObjects, test.claim) initialObjects = append(initialObjects, test.claim)
@ -398,7 +398,7 @@ func TestController(t *testing.T) {
rcInformer := informerFactory.Resource().V1alpha2().ResourceClasses() rcInformer := informerFactory.Resource().V1alpha2().ResourceClasses()
claimInformer := informerFactory.Resource().V1alpha2().ResourceClaims() claimInformer := informerFactory.Resource().V1alpha2().ResourceClaims()
podInformer := informerFactory.Core().V1().Pods() podInformer := informerFactory.Core().V1().Pods()
podSchedulingInformer := informerFactory.Resource().V1alpha2().PodSchedulings() podSchedulingInformer := informerFactory.Resource().V1alpha2().PodSchedulingContexts()
// Order is important: on function exit, we first must // Order is important: on function exit, we first must
// cancel, then wait (last-in-first-out). // cancel, then wait (last-in-first-out).
defer informerFactory.Shutdown() defer informerFactory.Shutdown()
@ -412,7 +412,7 @@ func TestController(t *testing.T) {
require.NoError(t, claimInformer.Informer().GetStore().Add(obj), "add resource claim") require.NoError(t, claimInformer.Informer().GetStore().Add(obj), "add resource claim")
case *corev1.Pod: case *corev1.Pod:
require.NoError(t, podInformer.Informer().GetStore().Add(obj), "add pod") require.NoError(t, podInformer.Informer().GetStore().Add(obj), "add pod")
case *resourcev1alpha2.PodScheduling: case *resourcev1alpha2.PodSchedulingContext:
require.NoError(t, podSchedulingInformer.Informer().GetStore().Add(obj), "add pod scheduling") require.NoError(t, podSchedulingInformer.Informer().GetStore().Add(obj), "add pod scheduling")
default: default:
t.Fatalf("unknown initialObject type: %+v", obj) t.Fatalf("unknown initialObject type: %+v", obj)
@ -427,7 +427,7 @@ func TestController(t *testing.T) {
if !cache.WaitForCacheSync(ctx.Done(), if !cache.WaitForCacheSync(ctx.Done(),
informerFactory.Resource().V1alpha2().ResourceClasses().Informer().HasSynced, informerFactory.Resource().V1alpha2().ResourceClasses().Informer().HasSynced,
informerFactory.Resource().V1alpha2().ResourceClaims().Informer().HasSynced, informerFactory.Resource().V1alpha2().ResourceClaims().Informer().HasSynced,
informerFactory.Resource().V1alpha2().PodSchedulings().Informer().HasSynced, informerFactory.Resource().V1alpha2().PodSchedulingContexts().Informer().HasSynced,
) { ) {
t.Fatal("could not sync caches") t.Fatal("could not sync caches")
} }
@ -449,11 +449,11 @@ func TestController(t *testing.T) {
} }
assert.Equal(t, expectedClaims, claims.Items) assert.Equal(t, expectedClaims, claims.Items)
podSchedulings, err := kubeClient.ResourceV1alpha2().PodSchedulings("").List(ctx, metav1.ListOptions{}) podSchedulings, err := kubeClient.ResourceV1alpha2().PodSchedulingContexts("").List(ctx, metav1.ListOptions{})
require.NoError(t, err, "list pod schedulings") require.NoError(t, err, "list pod schedulings")
var expectedPodSchedulings []resourcev1alpha2.PodScheduling var expectedPodSchedulings []resourcev1alpha2.PodSchedulingContext
if test.expectedPodScheduling != nil { if test.expectedSchedulingCtx != nil {
expectedPodSchedulings = append(expectedPodSchedulings, *test.expectedPodScheduling) expectedPodSchedulings = append(expectedPodSchedulings, *test.expectedSchedulingCtx)
} }
assert.Equal(t, expectedPodSchedulings, podSchedulings.Items) assert.Equal(t, expectedPodSchedulings, podSchedulings.Items)
@ -620,9 +620,9 @@ func createPod(podName, podNamespace string, claims map[string]string) *corev1.P
return pod return pod
} }
func createPodScheduling(pod *corev1.Pod) *resourcev1alpha2.PodScheduling { func createPodSchedulingContexts(pod *corev1.Pod) *resourcev1alpha2.PodSchedulingContext {
controller := true controller := true
return &resourcev1alpha2.PodScheduling{ return &resourcev1alpha2.PodSchedulingContext{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: pod.Name, Name: pod.Name,
Namespace: pod.Namespace, Namespace: pod.Namespace,

View File

@ -64,7 +64,7 @@ var resetFieldsStatusData = map[schema.GroupVersionResource]string{
gvr("storage.k8s.io", "v1", "volumeattachments"): `{"status": {"attached": false}}`, gvr("storage.k8s.io", "v1", "volumeattachments"): `{"status": {"attached": false}}`,
gvr("policy", "v1", "poddisruptionbudgets"): `{"status": {"currentHealthy": 25}}`, gvr("policy", "v1", "poddisruptionbudgets"): `{"status": {"currentHealthy": 25}}`,
gvr("policy", "v1beta1", "poddisruptionbudgets"): `{"status": {"currentHealthy": 25}}`, gvr("policy", "v1beta1", "poddisruptionbudgets"): `{"status": {"currentHealthy": 25}}`,
gvr("resource.k8s.io", "v1alpha2", "podschedulings"): `{"status": {"resourceClaims": [{"name": "my-claim", "unsuitableNodes": ["node2"]}]}}`, // Not really a conflict with status_test.go: Apply just stores both nodes. Conflict testing therefore gets disabled for podschedulings. gvr("resource.k8s.io", "v1alpha2", "podschedulingcontexts"): `{"status": {"resourceClaims": [{"name": "my-claim", "unsuitableNodes": ["node2"]}]}}`, // Not really a conflict with status_test.go: Apply just stores both nodes. Conflict testing therefore gets disabled for podschedulingcontexts.
gvr("resource.k8s.io", "v1alpha2", "resourceclaims"): `{"status": {"driverName": "other.example.com"}}`, gvr("resource.k8s.io", "v1alpha2", "resourceclaims"): `{"status": {"driverName": "other.example.com"}}`,
gvr("internal.apiserver.k8s.io", "v1alpha1", "storageversions"): `{"status": {"commonEncodingVersion":"v1","storageVersions":[{"apiServerID":"1","decodableVersions":["v1","v2"],"encodingVersion":"v1"}],"conditions":[{"type":"AllEncodingVersionsEqual","status":"False","lastTransitionTime":"2020-01-01T00:00:00Z","reason":"allEncodingVersionsEqual","message":"all encoding versions are set to v1"}]}}`, gvr("internal.apiserver.k8s.io", "v1alpha1", "storageversions"): `{"status": {"commonEncodingVersion":"v1","storageVersions":[{"apiServerID":"1","decodableVersions":["v1","v2"],"encodingVersion":"v1"}],"conditions":[{"type":"AllEncodingVersionsEqual","status":"False","lastTransitionTime":"2020-01-01T00:00:00Z","reason":"allEncodingVersionsEqual","message":"all encoding versions are set to v1"}]}}`,
// standard for []metav1.Condition // standard for []metav1.Condition
@ -88,10 +88,10 @@ var noConflicts = map[string]struct{}{
// namespaces only have a spec.finalizers field which is also skipped, // namespaces only have a spec.finalizers field which is also skipped,
// thus it will never have a conflict. // thus it will never have a conflict.
"namespaces": {}, "namespaces": {},
// podschedulings.status only has a list which contains items with a list, // podschedulingcontexts.status only has a list which contains items with a list,
// therefore apply works because it simply merges either the outer or // therefore apply works because it simply merges either the outer or
// the inner list. // the inner list.
"podschedulings": {}, "podschedulingcontexts": {},
} }
var image2 = image.GetE2EImage(image.Etcd) var image2 = image.GetE2EImage(image.Etcd)
@ -148,7 +148,7 @@ var resetFieldsSpecData = map[schema.GroupVersionResource]string{
gvr("awesome.bears.com", "v3", "pandas"): `{"spec": {"replicas": 302}}`, gvr("awesome.bears.com", "v3", "pandas"): `{"spec": {"replicas": 302}}`,
gvr("apiregistration.k8s.io", "v1beta1", "apiservices"): `{"metadata": {"labels": {"a":"c"}}, "spec": {"group": "foo2.com"}}`, gvr("apiregistration.k8s.io", "v1beta1", "apiservices"): `{"metadata": {"labels": {"a":"c"}}, "spec": {"group": "foo2.com"}}`,
gvr("apiregistration.k8s.io", "v1", "apiservices"): `{"metadata": {"labels": {"a":"c"}}, "spec": {"group": "foo2.com"}}`, gvr("apiregistration.k8s.io", "v1", "apiservices"): `{"metadata": {"labels": {"a":"c"}}, "spec": {"group": "foo2.com"}}`,
gvr("resource.k8s.io", "v1alpha2", "podschedulings"): `{"spec": {"selectedNode": "node2name"}}`, gvr("resource.k8s.io", "v1alpha2", "podschedulingcontexts"): `{"spec": {"selectedNode": "node2name"}}`,
gvr("resource.k8s.io", "v1alpha2", "resourceclasses"): `{"driverName": "other.example.com"}`, gvr("resource.k8s.io", "v1alpha2", "resourceclasses"): `{"driverName": "other.example.com"}`,
gvr("resource.k8s.io", "v1alpha2", "resourceclaims"): `{"spec": {"resourceClassName": "class2name"}}`, // ResourceClassName is immutable, but that doesn't matter for the test. gvr("resource.k8s.io", "v1alpha2", "resourceclaims"): `{"spec": {"resourceClassName": "class2name"}}`, // ResourceClassName is immutable, but that doesn't matter for the test.
gvr("resource.k8s.io", "v1alpha2", "resourceclaimtemplates"): `{"spec": {"spec": {"resourceClassName": "class2name"}}}`, gvr("resource.k8s.io", "v1alpha2", "resourceclaimtemplates"): `{"spec": {"spec": {"resourceClassName": "class2name"}}}`,

View File

@ -54,7 +54,7 @@ var statusData = map[schema.GroupVersionResource]string{
gvr("storage.k8s.io", "v1", "volumeattachments"): `{"status": {"attached": true}}`, gvr("storage.k8s.io", "v1", "volumeattachments"): `{"status": {"attached": true}}`,
gvr("policy", "v1", "poddisruptionbudgets"): `{"status": {"currentHealthy": 5}}`, gvr("policy", "v1", "poddisruptionbudgets"): `{"status": {"currentHealthy": 5}}`,
gvr("policy", "v1beta1", "poddisruptionbudgets"): `{"status": {"currentHealthy": 5}}`, gvr("policy", "v1beta1", "poddisruptionbudgets"): `{"status": {"currentHealthy": 5}}`,
gvr("resource.k8s.io", "v1alpha2", "podschedulings"): `{"status": {"resourceClaims": [{"name": "my-claim", "unsuitableNodes": ["node1"]}]}}`, gvr("resource.k8s.io", "v1alpha2", "podschedulingcontexts"): `{"status": {"resourceClaims": [{"name": "my-claim", "unsuitableNodes": ["node1"]}]}}`,
gvr("resource.k8s.io", "v1alpha2", "resourceclaims"): `{"status": {"driverName": "example.com"}}`, gvr("resource.k8s.io", "v1alpha2", "resourceclaims"): `{"status": {"driverName": "example.com"}}`,
gvr("internal.apiserver.k8s.io", "v1alpha1", "storageversions"): `{"status": {"commonEncodingVersion":"v1","storageVersions":[{"apiServerID":"1","decodableVersions":["v1","v2"],"encodingVersion":"v1"}],"conditions":[{"type":"AllEncodingVersionsEqual","status":"True","lastTransitionTime":"2020-01-01T00:00:00Z","reason":"allEncodingVersionsEqual","message":"all encoding versions are set to v1"}]}}`, gvr("internal.apiserver.k8s.io", "v1alpha1", "storageversions"): `{"status": {"commonEncodingVersion":"v1","storageVersions":[{"apiServerID":"1","decodableVersions":["v1","v2"],"encodingVersion":"v1"}],"conditions":[{"type":"AllEncodingVersionsEqual","status":"True","lastTransitionTime":"2020-01-01T00:00:00Z","reason":"allEncodingVersionsEqual","message":"all encoding versions are set to v1"}]}}`,
// standard for []metav1.Condition // standard for []metav1.Condition

View File

@ -469,9 +469,9 @@ func GetEtcdStorageDataForNamespace(namespace string) map[schema.GroupVersionRes
Stub: `{"metadata": {"name": "claimtemplate1name"}, "spec": {"spec": {"resourceClassName": "class1name", "allocationMode": "WaitForFirstConsumer"}}}`, Stub: `{"metadata": {"name": "claimtemplate1name"}, "spec": {"spec": {"resourceClassName": "class1name", "allocationMode": "WaitForFirstConsumer"}}}`,
ExpectedEtcdPath: "/registry/resourceclaimtemplates/" + namespace + "/claimtemplate1name", ExpectedEtcdPath: "/registry/resourceclaimtemplates/" + namespace + "/claimtemplate1name",
}, },
gvr("resource.k8s.io", "v1alpha2", "podschedulings"): { gvr("resource.k8s.io", "v1alpha2", "podschedulingcontexts"): {
Stub: `{"metadata": {"name": "pod1name"}, "spec": {"selectedNode": "node1name", "potentialNodes": ["node1name", "node2name"]}}`, Stub: `{"metadata": {"name": "pod1name"}, "spec": {"selectedNode": "node1name", "potentialNodes": ["node1name", "node2name"]}}`,
ExpectedEtcdPath: "/registry/podschedulings/" + namespace + "/pod1name", ExpectedEtcdPath: "/registry/podschedulingcontexts/" + namespace + "/pod1name",
}, },
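
The expected etcd path mirrors the storage layout: the registry prefix, then the resource's storage path (now podschedulingcontexts), then the test namespace and the object name. A tiny illustration of how such a path is assembled (expectedEtcdPath and the placeholder namespace are assumptions, not the test framework's code):

package main

import (
	"fmt"
	"path"
)

func expectedEtcdPath(resource, namespace, name string) string {
	return path.Join("/registry", resource, namespace, name)
}

func main() {
	fmt.Println(expectedEtcdPath("podschedulingcontexts", "some-namespace", "pod1name"))
	// /registry/podschedulingcontexts/some-namespace/pod1name
}
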
// -- // --