Merge pull request #111023 from pohly/dynamic-resource-allocation

dynamic resource allocation
Kubernetes Prow Robot 2022-11-11 16:21:56 -08:00 committed by GitHub
commit d1c0171aed
329 changed files with 47972 additions and 1230 deletions

File diff suppressed because it is too large.

View File

@ -691,6 +691,20 @@
],
"type": "object"
},
"io.k8s.api.core.v1.ClaimSource": {
"description": "ClaimSource describes a reference to a ResourceClaim.\n\nExactly one of these fields should be set. Consumers of this type must treat an empty object as if it has an unknown value.",
"properties": {
"resourceClaimName": {
"description": "ResourceClaimName is the name of a ResourceClaim object in the same namespace as this pod.",
"type": "string"
},
"resourceClaimTemplateName": {
"description": "ResourceClaimTemplateName is the name of a ResourceClaimTemplate object in the same namespace as this pod.\n\nThe template will be used to create a new ResourceClaim, which will be bound to this pod. When this pod is deleted, the ResourceClaim will also be deleted. The name of the ResourceClaim will be <pod name>-<resource name>, where <resource name> is the PodResourceClaim.Name. Pod validation will reject the pod if the concatenated name is not valid for a ResourceClaim (e.g. too long).\n\nAn existing ResourceClaim with that name that is not owned by the pod will not be used for the pod to avoid using an unrelated resource by mistake. Scheduling and pod startup are then blocked until the unrelated ResourceClaim is removed.\n\nThis field is immutable and no changes will be made to the corresponding ResourceClaim by the control plane after creating the ResourceClaim.",
"type": "string"
}
},
"type": "object"
},
"io.k8s.api.core.v1.ClientIPConfig": {
"description": "ClientIPConfig represents the configurations of Client IP based session affinity.",
"properties": {
@ -4869,6 +4883,29 @@
],
"type": "object"
},
"io.k8s.api.core.v1.PodResourceClaim": {
"description": "PodResourceClaim references exactly one ResourceClaim through a ClaimSource. It adds a name to it that uniquely identifies the ResourceClaim inside the Pod. Containers that need access to the ResourceClaim reference it with this name.",
"properties": {
"name": {
"default": "",
"description": "Name uniquely identifies this resource claim inside the pod. This must be a DNS_LABEL.",
"type": "string"
},
"source": {
"allOf": [
{
"$ref": "#/components/schemas/io.k8s.api.core.v1.ClaimSource"
}
],
"default": {},
"description": "Source describes where to find the ResourceClaim."
}
},
"required": [
"name"
],
"type": "object"
},
"io.k8s.api.core.v1.PodSchedulingGate": {
"description": "PodSchedulingGate is associated to a Pod to guard its scheduling.",
"properties": {
@ -5141,6 +5178,24 @@
},
"type": "array"
},
"resourceClaims": {
"description": "ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name.\n\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\n\nThis field is immutable.",
"items": {
"allOf": [
{
"$ref": "#/components/schemas/io.k8s.api.core.v1.PodResourceClaim"
}
],
"default": {}
},
"type": "array",
"x-kubernetes-list-map-keys": [
"name"
],
"x-kubernetes-list-type": "map",
"x-kubernetes-patch-merge-key": "name",
"x-kubernetes-patch-strategy": "merge,retainKeys"
},
"restartPolicy": {
"description": "Restart policy for all containers within the pod. One of Always, OnFailure, Never. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy\n\n",
"type": "string"
@ -5981,6 +6036,20 @@
],
"type": "object"
},
"io.k8s.api.core.v1.ResourceClaim": {
"description": "ResourceClaim references one entry in PodSpec.ResourceClaims.",
"properties": {
"name": {
"default": "",
"description": "Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.",
"type": "string"
}
},
"required": [
"name"
],
"type": "object"
},
"io.k8s.api.core.v1.ResourceFieldSelector": {
"description": "ResourceFieldSelector represents container resources (cpu, memory) and their output format",
"properties": {
@ -6169,6 +6238,19 @@
"io.k8s.api.core.v1.ResourceRequirements": {
"description": "ResourceRequirements describes the compute resource requirements.",
"properties": {
"claims": {
"description": "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container.\n\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\n\nThis field is immutable.",
"items": {
"allOf": [
{
"$ref": "#/components/schemas/io.k8s.api.core.v1.ResourceClaim"
}
],
"default": {}
},
"type": "array",
"x-kubernetes-list-type": "set"
},
"limits": {
"additionalProperties": {
"allOf": [
@ -8313,6 +8395,11 @@
"kind": "DeleteOptions",
"version": "v1beta1"
},
{
"group": "resource.k8s.io",
"kind": "DeleteOptions",
"version": "v1alpha1"
},
{
"group": "scheduling.k8s.io",
"kind": "DeleteOptions",
@ -8695,6 +8782,11 @@
"group": "",
"kind": "Status",
"version": "v1"
},
{
"group": "resource.k8s.io",
"kind": "Status",
"version": "v1alpha1"
}
]
},
@ -9033,6 +9125,11 @@
"kind": "WatchEvent",
"version": "v1beta1"
},
{
"group": "resource.k8s.io",
"kind": "WatchEvent",
"version": "v1alpha1"
},
{
"group": "scheduling.k8s.io",
"kind": "WatchEvent",

View File

@ -843,6 +843,11 @@
"kind": "DeleteOptions",
"version": "v1beta1"
},
{
"group": "resource.k8s.io",
"kind": "DeleteOptions",
"version": "v1alpha1"
},
{
"group": "scheduling.k8s.io",
"kind": "DeleteOptions",
@ -1220,6 +1225,11 @@
"group": "",
"kind": "Status",
"version": "v1"
},
{
"group": "resource.k8s.io",
"kind": "Status",
"version": "v1alpha1"
}
]
},
@ -1558,6 +1568,11 @@
"kind": "WatchEvent",
"version": "v1beta1"
},
{
"group": "resource.k8s.io",
"kind": "WatchEvent",
"version": "v1alpha1"
},
{
"group": "scheduling.k8s.io",
"kind": "WatchEvent",

View File

@ -796,6 +796,11 @@
"kind": "DeleteOptions",
"version": "v1beta1"
},
{
"group": "resource.k8s.io",
"kind": "DeleteOptions",
"version": "v1alpha1"
},
{
"group": "scheduling.k8s.io",
"kind": "DeleteOptions",
@ -1173,6 +1178,11 @@
"group": "",
"kind": "Status",
"version": "v1"
},
{
"group": "resource.k8s.io",
"kind": "Status",
"version": "v1alpha1"
}
]
},
@ -1511,6 +1521,11 @@
"kind": "WatchEvent",
"version": "v1beta1"
},
{
"group": "resource.k8s.io",
"kind": "WatchEvent",
"version": "v1alpha1"
},
{
"group": "scheduling.k8s.io",
"kind": "WatchEvent",

View File

@ -1228,6 +1228,11 @@
"kind": "DeleteOptions",
"version": "v1beta1"
},
{
"group": "resource.k8s.io",
"kind": "DeleteOptions",
"version": "v1alpha1"
},
{
"group": "scheduling.k8s.io",
"kind": "DeleteOptions",
@ -1548,6 +1553,11 @@
"group": "",
"kind": "Status",
"version": "v1"
},
{
"group": "resource.k8s.io",
"kind": "Status",
"version": "v1alpha1"
}
]
},
@ -1886,6 +1896,11 @@
"kind": "WatchEvent",
"version": "v1beta1"
},
{
"group": "resource.k8s.io",
"kind": "WatchEvent",
"version": "v1alpha1"
},
{
"group": "scheduling.k8s.io",
"kind": "WatchEvent",

View File

@ -1571,6 +1571,20 @@
],
"type": "object"
},
"io.k8s.api.core.v1.ClaimSource": {
"description": "ClaimSource describes a reference to a ResourceClaim.\n\nExactly one of these fields should be set. Consumers of this type must treat an empty object as if it has an unknown value.",
"properties": {
"resourceClaimName": {
"description": "ResourceClaimName is the name of a ResourceClaim object in the same namespace as this pod.",
"type": "string"
},
"resourceClaimTemplateName": {
"description": "ResourceClaimTemplateName is the name of a ResourceClaimTemplate object in the same namespace as this pod.\n\nThe template will be used to create a new ResourceClaim, which will be bound to this pod. When this pod is deleted, the ResourceClaim will also be deleted. The name of the ResourceClaim will be <pod name>-<resource name>, where <resource name> is the PodResourceClaim.Name. Pod validation will reject the pod if the concatenated name is not valid for a ResourceClaim (e.g. too long).\n\nAn existing ResourceClaim with that name that is not owned by the pod will not be used for the pod to avoid using an unrelated resource by mistake. Scheduling and pod startup are then blocked until the unrelated ResourceClaim is removed.\n\nThis field is immutable and no changes will be made to the corresponding ResourceClaim by the control plane after creating the ResourceClaim.",
"type": "string"
}
},
"type": "object"
},
"io.k8s.api.core.v1.ConfigMapEnvSource": {
"description": "ConfigMapEnvSource selects a ConfigMap to populate the environment variables with.\n\nThe contents of the target ConfigMap's Data field will represent the key-value pairs as environment variables.",
"properties": {
@ -3311,6 +3325,29 @@
],
"type": "object"
},
"io.k8s.api.core.v1.PodResourceClaim": {
"description": "PodResourceClaim references exactly one ResourceClaim through a ClaimSource. It adds a name to it that uniquely identifies the ResourceClaim inside the Pod. Containers that need access to the ResourceClaim reference it with this name.",
"properties": {
"name": {
"default": "",
"description": "Name uniquely identifies this resource claim inside the pod. This must be a DNS_LABEL.",
"type": "string"
},
"source": {
"allOf": [
{
"$ref": "#/components/schemas/io.k8s.api.core.v1.ClaimSource"
}
],
"default": {},
"description": "Source describes where to find the ResourceClaim."
}
},
"required": [
"name"
],
"type": "object"
},
"io.k8s.api.core.v1.PodSchedulingGate": {
"description": "PodSchedulingGate is associated to a Pod to guard its scheduling.",
"properties": {
@ -3583,6 +3620,24 @@
},
"type": "array"
},
"resourceClaims": {
"description": "ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name.\n\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\n\nThis field is immutable.",
"items": {
"allOf": [
{
"$ref": "#/components/schemas/io.k8s.api.core.v1.PodResourceClaim"
}
],
"default": {}
},
"type": "array",
"x-kubernetes-list-map-keys": [
"name"
],
"x-kubernetes-list-type": "map",
"x-kubernetes-patch-merge-key": "name",
"x-kubernetes-patch-strategy": "merge,retainKeys"
},
"restartPolicy": {
"description": "Restart policy for all containers within the pod. One of Always, OnFailure, Never. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy\n\n",
"type": "string"
@ -3946,6 +4001,20 @@
],
"type": "object"
},
"io.k8s.api.core.v1.ResourceClaim": {
"description": "ResourceClaim references one entry in PodSpec.ResourceClaims.",
"properties": {
"name": {
"default": "",
"description": "Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.",
"type": "string"
}
},
"required": [
"name"
],
"type": "object"
},
"io.k8s.api.core.v1.ResourceFieldSelector": {
"description": "ResourceFieldSelector represents container resources (cpu, memory) and their output format",
"properties": {
@ -3977,6 +4046,19 @@
"io.k8s.api.core.v1.ResourceRequirements": {
"description": "ResourceRequirements describes the compute resource requirements.",
"properties": {
"claims": {
"description": "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container.\n\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\n\nThis field is immutable.",
"items": {
"allOf": [
{
"$ref": "#/components/schemas/io.k8s.api.core.v1.ResourceClaim"
}
],
"default": {}
},
"type": "array",
"x-kubernetes-list-type": "set"
},
"limits": {
"additionalProperties": {
"allOf": [
@ -5330,6 +5412,11 @@
"kind": "DeleteOptions",
"version": "v1beta1"
},
{
"group": "resource.k8s.io",
"kind": "DeleteOptions",
"version": "v1alpha1"
},
{
"group": "scheduling.k8s.io",
"kind": "DeleteOptions",
@ -5707,6 +5794,11 @@
"group": "",
"kind": "Status",
"version": "v1"
},
{
"group": "resource.k8s.io",
"kind": "Status",
"version": "v1alpha1"
}
]
},
@ -6045,6 +6137,11 @@
"kind": "WatchEvent",
"version": "v1beta1"
},
{
"group": "resource.k8s.io",
"kind": "WatchEvent",
"version": "v1alpha1"
},
{
"group": "scheduling.k8s.io",
"kind": "WatchEvent",

View File

@ -598,6 +598,11 @@
"kind": "DeleteOptions",
"version": "v1beta1"
},
{
"group": "resource.k8s.io",
"kind": "DeleteOptions",
"version": "v1alpha1"
},
{
"group": "scheduling.k8s.io",
"kind": "DeleteOptions",
@ -918,6 +923,11 @@
"group": "",
"kind": "Status",
"version": "v1"
},
{
"group": "resource.k8s.io",
"kind": "Status",
"version": "v1alpha1"
}
]
},
@ -1256,6 +1266,11 @@
"kind": "WatchEvent",
"version": "v1beta1"
},
{
"group": "resource.k8s.io",
"kind": "WatchEvent",
"version": "v1alpha1"
},
{
"group": "scheduling.k8s.io",
"kind": "WatchEvent",

View File

@ -1251,6 +1251,11 @@
"kind": "DeleteOptions",
"version": "v1beta1"
},
{
"group": "resource.k8s.io",
"kind": "DeleteOptions",
"version": "v1alpha1"
},
{
"group": "scheduling.k8s.io",
"kind": "DeleteOptions",
@ -1628,6 +1633,11 @@
"group": "",
"kind": "Status",
"version": "v1"
},
{
"group": "resource.k8s.io",
"kind": "Status",
"version": "v1alpha1"
}
]
},
@ -1966,6 +1976,11 @@
"kind": "WatchEvent",
"version": "v1beta1"
},
{
"group": "resource.k8s.io",
"kind": "WatchEvent",
"version": "v1alpha1"
},
{
"group": "scheduling.k8s.io",
"kind": "WatchEvent",

View File

@ -862,6 +862,20 @@
],
"type": "object"
},
"io.k8s.api.core.v1.ClaimSource": {
"description": "ClaimSource describes a reference to a ResourceClaim.\n\nExactly one of these fields should be set. Consumers of this type must treat an empty object as if it has an unknown value.",
"properties": {
"resourceClaimName": {
"description": "ResourceClaimName is the name of a ResourceClaim object in the same namespace as this pod.",
"type": "string"
},
"resourceClaimTemplateName": {
"description": "ResourceClaimTemplateName is the name of a ResourceClaimTemplate object in the same namespace as this pod.\n\nThe template will be used to create a new ResourceClaim, which will be bound to this pod. When this pod is deleted, the ResourceClaim will also be deleted. The name of the ResourceClaim will be <pod name>-<resource name>, where <resource name> is the PodResourceClaim.Name. Pod validation will reject the pod if the concatenated name is not valid for a ResourceClaim (e.g. too long).\n\nAn existing ResourceClaim with that name that is not owned by the pod will not be used for the pod to avoid using an unrelated resource by mistake. Scheduling and pod startup are then blocked until the unrelated ResourceClaim is removed.\n\nThis field is immutable and no changes will be made to the corresponding ResourceClaim by the control plane after creating the ResourceClaim.",
"type": "string"
}
},
"type": "object"
},
"io.k8s.api.core.v1.ConfigMapEnvSource": {
"description": "ConfigMapEnvSource selects a ConfigMap to populate the environment variables with.\n\nThe contents of the target ConfigMap's Data field will represent the key-value pairs as environment variables.",
"properties": {
@ -2485,6 +2499,29 @@
],
"type": "object"
},
"io.k8s.api.core.v1.PodResourceClaim": {
"description": "PodResourceClaim references exactly one ResourceClaim through a ClaimSource. It adds a name to it that uniquely identifies the ResourceClaim inside the Pod. Containers that need access to the ResourceClaim reference it with this name.",
"properties": {
"name": {
"default": "",
"description": "Name uniquely identifies this resource claim inside the pod. This must be a DNS_LABEL.",
"type": "string"
},
"source": {
"allOf": [
{
"$ref": "#/components/schemas/io.k8s.api.core.v1.ClaimSource"
}
],
"default": {},
"description": "Source describes where to find the ResourceClaim."
}
},
"required": [
"name"
],
"type": "object"
},
"io.k8s.api.core.v1.PodSchedulingGate": {
"description": "PodSchedulingGate is associated to a Pod to guard its scheduling.",
"properties": {
@ -2757,6 +2794,24 @@
},
"type": "array"
},
"resourceClaims": {
"description": "ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name.\n\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\n\nThis field is immutable.",
"items": {
"allOf": [
{
"$ref": "#/components/schemas/io.k8s.api.core.v1.PodResourceClaim"
}
],
"default": {}
},
"type": "array",
"x-kubernetes-list-map-keys": [
"name"
],
"x-kubernetes-list-type": "map",
"x-kubernetes-patch-merge-key": "name",
"x-kubernetes-patch-strategy": "merge,retainKeys"
},
"restartPolicy": {
"description": "Restart policy for all containers within the pod. One of Always, OnFailure, Never. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy\n\n",
"type": "string"
@ -3120,6 +3175,20 @@
],
"type": "object"
},
"io.k8s.api.core.v1.ResourceClaim": {
"description": "ResourceClaim references one entry in PodSpec.ResourceClaims.",
"properties": {
"name": {
"default": "",
"description": "Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.",
"type": "string"
}
},
"required": [
"name"
],
"type": "object"
},
"io.k8s.api.core.v1.ResourceFieldSelector": {
"description": "ResourceFieldSelector represents container resources (cpu, memory) and their output format",
"properties": {
@ -3151,6 +3220,19 @@
"io.k8s.api.core.v1.ResourceRequirements": {
"description": "ResourceRequirements describes the compute resource requirements.",
"properties": {
"claims": {
"description": "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container.\n\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\n\nThis field is immutable.",
"items": {
"allOf": [
{
"$ref": "#/components/schemas/io.k8s.api.core.v1.ResourceClaim"
}
],
"default": {}
},
"type": "array",
"x-kubernetes-list-type": "set"
},
"limits": {
"additionalProperties": {
"allOf": [
@ -4504,6 +4586,11 @@
"kind": "DeleteOptions",
"version": "v1beta1"
},
{
"group": "resource.k8s.io",
"kind": "DeleteOptions",
"version": "v1alpha1"
},
{
"group": "scheduling.k8s.io",
"kind": "DeleteOptions",
@ -4881,6 +4968,11 @@
"group": "",
"kind": "Status",
"version": "v1"
},
{
"group": "resource.k8s.io",
"kind": "Status",
"version": "v1alpha1"
}
]
},
@ -5219,6 +5311,11 @@
"kind": "WatchEvent",
"version": "v1beta1"
},
{
"group": "resource.k8s.io",
"kind": "WatchEvent",
"version": "v1alpha1"
},
{
"group": "scheduling.k8s.io",
"kind": "WatchEvent",

View File

@ -636,6 +636,11 @@
"kind": "DeleteOptions",
"version": "v1beta1"
},
{
"group": "resource.k8s.io",
"kind": "DeleteOptions",
"version": "v1alpha1"
},
{
"group": "scheduling.k8s.io",
"kind": "DeleteOptions",
@ -956,6 +961,11 @@
"group": "",
"kind": "Status",
"version": "v1"
},
{
"group": "resource.k8s.io",
"kind": "Status",
"version": "v1alpha1"
}
]
},
@ -1294,6 +1304,11 @@
"kind": "WatchEvent",
"version": "v1beta1"
},
{
"group": "resource.k8s.io",
"kind": "WatchEvent",
"version": "v1alpha1"
},
{
"group": "scheduling.k8s.io",
"kind": "WatchEvent",

View File

@ -525,6 +525,11 @@
"kind": "DeleteOptions",
"version": "v1beta1"
},
{
"group": "resource.k8s.io",
"kind": "DeleteOptions",
"version": "v1alpha1"
},
{
"group": "scheduling.k8s.io",
"kind": "DeleteOptions",
@ -850,6 +855,11 @@
"group": "",
"kind": "Status",
"version": "v1"
},
{
"group": "resource.k8s.io",
"kind": "Status",
"version": "v1alpha1"
}
]
},
@ -1188,6 +1198,11 @@
"kind": "WatchEvent",
"version": "v1beta1"
},
{
"group": "resource.k8s.io",
"kind": "WatchEvent",
"version": "v1alpha1"
},
{
"group": "scheduling.k8s.io",
"kind": "WatchEvent",

View File

@ -688,6 +688,11 @@
"kind": "DeleteOptions",
"version": "v1beta1"
},
{
"group": "resource.k8s.io",
"kind": "DeleteOptions",
"version": "v1alpha1"
},
{
"group": "scheduling.k8s.io",
"kind": "DeleteOptions",
@ -1008,6 +1013,11 @@
"group": "",
"kind": "Status",
"version": "v1"
},
{
"group": "resource.k8s.io",
"kind": "Status",
"version": "v1alpha1"
}
]
},
@ -1346,6 +1356,11 @@
"kind": "WatchEvent",
"version": "v1beta1"
},
{
"group": "resource.k8s.io",
"kind": "WatchEvent",
"version": "v1alpha1"
},
{
"group": "scheduling.k8s.io",
"kind": "WatchEvent",

View File

@ -647,6 +647,11 @@
"kind": "DeleteOptions",
"version": "v1beta1"
},
{
"group": "resource.k8s.io",
"kind": "DeleteOptions",
"version": "v1alpha1"
},
{
"group": "scheduling.k8s.io",
"kind": "DeleteOptions",
@ -972,6 +977,11 @@
"group": "",
"kind": "Status",
"version": "v1"
},
{
"group": "resource.k8s.io",
"kind": "Status",
"version": "v1alpha1"
}
]
},
@ -1310,6 +1320,11 @@
"kind": "WatchEvent",
"version": "v1beta1"
},
{
"group": "resource.k8s.io",
"kind": "WatchEvent",
"version": "v1alpha1"
},
{
"group": "scheduling.k8s.io",
"kind": "WatchEvent",

View File

@ -1108,6 +1108,11 @@
"kind": "DeleteOptions",
"version": "v1beta1"
},
{
"group": "resource.k8s.io",
"kind": "DeleteOptions",
"version": "v1alpha1"
},
{
"group": "scheduling.k8s.io",
"kind": "DeleteOptions",
@ -1428,6 +1433,11 @@
"group": "",
"kind": "Status",
"version": "v1"
},
{
"group": "resource.k8s.io",
"kind": "Status",
"version": "v1alpha1"
}
]
},
@ -1766,6 +1776,11 @@
"kind": "WatchEvent",
"version": "v1beta1"
},
{
"group": "resource.k8s.io",
"kind": "WatchEvent",
"version": "v1alpha1"
},
{
"group": "scheduling.k8s.io",
"kind": "WatchEvent",

View File

@ -1112,6 +1112,11 @@
"kind": "DeleteOptions",
"version": "v1beta1"
},
{
"group": "resource.k8s.io",
"kind": "DeleteOptions",
"version": "v1alpha1"
},
{
"group": "scheduling.k8s.io",
"kind": "DeleteOptions",
@ -1432,6 +1437,11 @@
"group": "",
"kind": "Status",
"version": "v1"
},
{
"group": "resource.k8s.io",
"kind": "Status",
"version": "v1alpha1"
}
]
},
@ -1770,6 +1780,11 @@
"kind": "WatchEvent",
"version": "v1beta1"
},
{
"group": "resource.k8s.io",
"kind": "WatchEvent",
"version": "v1alpha1"
},
{
"group": "scheduling.k8s.io",
"kind": "WatchEvent",

View File

@ -615,6 +615,11 @@
"kind": "DeleteOptions",
"version": "v1beta1"
},
{
"group": "resource.k8s.io",
"kind": "DeleteOptions",
"version": "v1alpha1"
},
{
"group": "scheduling.k8s.io",
"kind": "DeleteOptions",
@ -935,6 +940,11 @@
"group": "",
"kind": "Status",
"version": "v1"
},
{
"group": "resource.k8s.io",
"kind": "Status",
"version": "v1alpha1"
}
]
},
@ -1273,6 +1283,11 @@
"kind": "WatchEvent",
"version": "v1beta1"
},
{
"group": "resource.k8s.io",
"kind": "WatchEvent",
"version": "v1alpha1"
},
{
"group": "scheduling.k8s.io",
"kind": "WatchEvent",

View File

@ -1276,6 +1276,11 @@
"kind": "DeleteOptions",
"version": "v1beta1"
},
{
"group": "resource.k8s.io",
"kind": "DeleteOptions",
"version": "v1alpha1"
},
{
"group": "scheduling.k8s.io",
"kind": "DeleteOptions",
@ -1653,6 +1658,11 @@
"group": "",
"kind": "Status",
"version": "v1"
},
{
"group": "resource.k8s.io",
"kind": "Status",
"version": "v1alpha1"
}
]
},
@ -1991,6 +2001,11 @@
"kind": "WatchEvent",
"version": "v1beta1"
},
{
"group": "resource.k8s.io",
"kind": "WatchEvent",
"version": "v1alpha1"
},
{
"group": "scheduling.k8s.io",
"kind": "WatchEvent",

View File

@ -603,6 +603,11 @@
"kind": "DeleteOptions",
"version": "v1beta1"
},
{
"group": "resource.k8s.io",
"kind": "DeleteOptions",
"version": "v1alpha1"
},
{
"group": "scheduling.k8s.io",
"kind": "DeleteOptions",
@ -923,6 +928,11 @@
"group": "",
"kind": "Status",
"version": "v1"
},
{
"group": "resource.k8s.io",
"kind": "Status",
"version": "v1alpha1"
}
]
},
@ -1261,6 +1271,11 @@
"kind": "WatchEvent",
"version": "v1beta1"
},
{
"group": "resource.k8s.io",
"kind": "WatchEvent",
"version": "v1alpha1"
},
{
"group": "scheduling.k8s.io",
"kind": "WatchEvent",

View File

@ -588,6 +588,11 @@
"kind": "DeleteOptions",
"version": "v1beta1"
},
{
"group": "resource.k8s.io",
"kind": "DeleteOptions",
"version": "v1alpha1"
},
{
"group": "scheduling.k8s.io",
"kind": "DeleteOptions",
@ -908,6 +913,11 @@
"group": "",
"kind": "Status",
"version": "v1"
},
{
"group": "resource.k8s.io",
"kind": "Status",
"version": "v1alpha1"
}
]
},
@ -1246,6 +1256,11 @@
"kind": "WatchEvent",
"version": "v1beta1"
},
{
"group": "resource.k8s.io",
"kind": "WatchEvent",
"version": "v1alpha1"
},
{
"group": "scheduling.k8s.io",
"kind": "WatchEvent",

View File

@ -651,6 +651,11 @@
"kind": "DeleteOptions",
"version": "v1beta1"
},
{
"group": "resource.k8s.io",
"kind": "DeleteOptions",
"version": "v1alpha1"
},
{
"group": "scheduling.k8s.io",
"kind": "DeleteOptions",
@ -1028,6 +1033,11 @@
"group": "",
"kind": "Status",
"version": "v1"
},
{
"group": "resource.k8s.io",
"kind": "Status",
"version": "v1alpha1"
}
]
},
@ -1366,6 +1376,11 @@
"kind": "WatchEvent",
"version": "v1beta1"
},
{
"group": "resource.k8s.io",
"kind": "WatchEvent",
"version": "v1alpha1"
},
{
"group": "scheduling.k8s.io",
"kind": "WatchEvent",

View File

@ -908,6 +908,11 @@
"kind": "DeleteOptions",
"version": "v1beta1"
},
{
"group": "resource.k8s.io",
"kind": "DeleteOptions",
"version": "v1alpha1"
},
{
"group": "scheduling.k8s.io",
"kind": "DeleteOptions",
@ -1285,6 +1290,11 @@
"group": "",
"kind": "Status",
"version": "v1"
},
{
"group": "resource.k8s.io",
"kind": "Status",
"version": "v1alpha1"
}
]
},
@ -1623,6 +1633,11 @@
"kind": "WatchEvent",
"version": "v1beta1"
},
{
"group": "resource.k8s.io",
"kind": "WatchEvent",
"version": "v1alpha1"
},
{
"group": "scheduling.k8s.io",
"kind": "WatchEvent",

File diff suppressed because it is too large.

View File

@ -0,0 +1,158 @@
{
"components": {
"schemas": {
"io.k8s.apimachinery.pkg.apis.meta.v1.APIGroup": {
"description": "APIGroup contains the name, the supported versions, and the preferred version of a group.",
"properties": {
"apiVersion": {
"description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
"type": "string"
},
"kind": {
"description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
"type": "string"
},
"name": {
"default": "",
"description": "name is the name of the group.",
"type": "string"
},
"preferredVersion": {
"allOf": [
{
"$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.GroupVersionForDiscovery"
}
],
"default": {},
"description": "preferredVersion is the version preferred by the API server, which probably is the storage version."
},
"serverAddressByClientCIDRs": {
"description": "a map of client CIDR to server address that is serving this group. This is to help clients reach servers in the most network-efficient way possible. Clients can use the appropriate server address as per the CIDR that they match. In case of multiple matches, clients should use the longest matching CIDR. The server returns only those CIDRs that it thinks that the client can match. For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP. Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP.",
"items": {
"allOf": [
{
"$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.ServerAddressByClientCIDR"
}
],
"default": {}
},
"type": "array"
},
"versions": {
"description": "versions are the versions supported in this group.",
"items": {
"allOf": [
{
"$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.GroupVersionForDiscovery"
}
],
"default": {}
},
"type": "array"
}
},
"required": [
"name",
"versions"
],
"type": "object",
"x-kubernetes-group-version-kind": [
{
"group": "",
"kind": "APIGroup",
"version": "v1"
}
]
},
"io.k8s.apimachinery.pkg.apis.meta.v1.GroupVersionForDiscovery": {
"description": "GroupVersion contains the \"group/version\" and \"version\" string of a version. It is made a struct to keep extensibility.",
"properties": {
"groupVersion": {
"default": "",
"description": "groupVersion specifies the API group and version in the form \"group/version\"",
"type": "string"
},
"version": {
"default": "",
"description": "version specifies the version in the form of \"version\". This is to save the clients the trouble of splitting the GroupVersion.",
"type": "string"
}
},
"required": [
"groupVersion",
"version"
],
"type": "object"
},
"io.k8s.apimachinery.pkg.apis.meta.v1.ServerAddressByClientCIDR": {
"description": "ServerAddressByClientCIDR helps the client to determine the server address that they should use, depending on the clientCIDR that they match.",
"properties": {
"clientCIDR": {
"default": "",
"description": "The CIDR with which clients can match their IP to figure out the server address that they should use.",
"type": "string"
},
"serverAddress": {
"default": "",
"description": "Address of this server, suitable for a client that matches the above CIDR. This can be a hostname, hostname:port, IP or IP:port.",
"type": "string"
}
},
"required": [
"clientCIDR",
"serverAddress"
],
"type": "object"
}
},
"securitySchemes": {
"BearerToken": {
"description": "Bearer Token authentication",
"in": "header",
"name": "authorization",
"type": "apiKey"
}
}
},
"info": {
"title": "Kubernetes",
"version": "unversioned"
},
"openapi": "3.0.0",
"paths": {
"/apis/resource.k8s.io/": {
"get": {
"description": "get information of a group",
"operationId": "getResourceAPIGroup",
"responses": {
"200": {
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.APIGroup"
}
},
"application/vnd.kubernetes.protobuf": {
"schema": {
"$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.APIGroup"
}
},
"application/yaml": {
"schema": {
"$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.APIGroup"
}
}
},
"description": "OK"
},
"401": {
"description": "Unauthorized"
}
},
"tags": [
"resource"
]
}
}
}
}
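
This new spec only covers group discovery for resource.k8s.io. A hedged sketch of querying that endpoint with client-go; standard kubeconfig loading and an enabled v1alpha1 API are assumptions, not guaranteed by this file:

// Sketch: list API groups and look for resource.k8s.io, which appears once
// the v1alpha1 API is enabled on the server.
package main

import (
	"fmt"

	"k8s.io/client-go/discovery"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	dc, err := discovery.NewDiscoveryClientForConfig(config)
	if err != nil {
		panic(err)
	}
	groups, err := dc.ServerGroups()
	if err != nil {
		panic(err)
	}
	for _, g := range groups.Groups {
		if g.Name == "resource.k8s.io" {
			// Matches the GET /apis/resource.k8s.io/ response described above.
			fmt.Println(g.Name, g.PreferredVersion.GroupVersion)
		}
	}
}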

View File

@ -501,6 +501,11 @@
"kind": "DeleteOptions",
"version": "v1beta1"
},
{
"group": "resource.k8s.io",
"kind": "DeleteOptions",
"version": "v1alpha1"
},
{
"group": "scheduling.k8s.io",
"kind": "DeleteOptions",
@ -821,6 +826,11 @@
"group": "",
"kind": "Status",
"version": "v1"
},
{
"group": "resource.k8s.io",
"kind": "Status",
"version": "v1alpha1"
}
]
},
@ -1159,6 +1169,11 @@
"kind": "WatchEvent",
"version": "v1beta1"
},
{
"group": "resource.k8s.io",
"kind": "WatchEvent",
"version": "v1alpha1"
},
{
"group": "scheduling.k8s.io",
"kind": "WatchEvent",

View File

@ -2340,6 +2340,11 @@
"kind": "DeleteOptions",
"version": "v1beta1"
},
{
"group": "resource.k8s.io",
"kind": "DeleteOptions",
"version": "v1alpha1"
},
{
"group": "scheduling.k8s.io",
"kind": "DeleteOptions",
@ -2717,6 +2722,11 @@
"group": "",
"kind": "Status",
"version": "v1"
},
{
"group": "resource.k8s.io",
"kind": "Status",
"version": "v1alpha1"
}
]
},
@ -3055,6 +3065,11 @@
"kind": "WatchEvent",
"version": "v1beta1"
},
{
"group": "resource.k8s.io",
"kind": "WatchEvent",
"version": "v1alpha1"
},
{
"group": "scheduling.k8s.io",
"kind": "WatchEvent",

View File

@ -527,6 +527,11 @@
"kind": "DeleteOptions",
"version": "v1beta1"
},
{
"group": "resource.k8s.io",
"kind": "DeleteOptions",
"version": "v1alpha1"
},
{
"group": "scheduling.k8s.io",
"kind": "DeleteOptions",
@ -904,6 +909,11 @@
"group": "",
"kind": "Status",
"version": "v1"
},
{
"group": "resource.k8s.io",
"kind": "Status",
"version": "v1alpha1"
}
]
},
@ -1242,6 +1252,11 @@
"kind": "WatchEvent",
"version": "v1beta1"
},
{
"group": "resource.k8s.io",
"kind": "WatchEvent",
"version": "v1alpha1"
},
{
"group": "scheduling.k8s.io",
"kind": "WatchEvent",

View File

@ -283,6 +283,7 @@ var apiVersionPriorities = map[schema.GroupVersion]priority{
{Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta1"}: {group: 16100, version: 12},
{Group: "flowcontrol.apiserver.k8s.io", Version: "v1alpha1"}: {group: 16100, version: 9},
{Group: "internal.apiserver.k8s.io", Version: "v1alpha1"}: {group: 16000, version: 9},
{Group: "resource.k8s.io", Version: "v1alpha1"}: {group: 15900, version: 9},
// Append a new group to the end of the list if unsure.
// You can use min(existing group)-100 as the initial value for a group.
// Version can be set to 9 (to have space around) for a new group.
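
For intuition on what these numbers imply, here is a self-contained sketch of the resulting ordering (the sort is illustrative, not the aggregator's code): higher group priority sorts first and version priority breaks ties within a group, so resource.k8s.io (15900) lands just after internal.apiserver.k8s.io (16000).

package main

import (
	"fmt"
	"sort"
)

// priority mirrors the unexported struct used in the table above.
type priority struct{ group, version int32 }

func main() {
	gvs := map[string]priority{
		"internal.apiserver.k8s.io/v1alpha1": {group: 16000, version: 9},
		"resource.k8s.io/v1alpha1":           {group: 15900, version: 9},
	}
	names := make([]string, 0, len(gvs))
	for name := range gvs {
		names = append(names, name)
	}
	sort.Slice(names, func(i, j int) bool {
		a, b := gvs[names[i]], gvs[names[j]]
		if a.group != b.group {
			return a.group > b.group // higher group priority first
		}
		return a.version > b.version // then higher version priority
	})
	fmt.Println(names)
}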

View File

@ -475,6 +475,9 @@ func NewControllerInitializers(loopMode ControllerLoopMode) map[string]InitFunc
utilfeature.DefaultFeatureGate.Enabled(genericfeatures.StorageVersionAPI) {
register("storage-version-gc", startStorageVersionGCController)
}
if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.DynamicResourceAllocation) {
controllers["resource-claim-controller"] = startResourceClaimController
}
return controllers
}
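
The registration above follows the controller-manager's InitFunc-map pattern: controllers keyed by name, added only when their feature gate is enabled. A simplified, self-contained sketch of that pattern with stand-in types (not the real ControllerContext):

package main

import "fmt"

type InitFunc func() error

func newControllerInitializers(draEnabled bool) map[string]InitFunc {
	controllers := map[string]InitFunc{}
	if draEnabled { // stands in for the DynamicResourceAllocation gate check
		controllers["resource-claim-controller"] = func() error {
			fmt.Println("starting resource claim controller")
			return nil
		}
	}
	return controllers
}

func main() {
	for name, start := range newControllerInitializers(true) {
		fmt.Println(name, start())
	}
}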

View File

@ -54,6 +54,7 @@ import (
lifecyclecontroller "k8s.io/kubernetes/pkg/controller/nodelifecycle"
"k8s.io/kubernetes/pkg/controller/podgc"
replicationcontroller "k8s.io/kubernetes/pkg/controller/replication"
"k8s.io/kubernetes/pkg/controller/resourceclaim"
resourcequotacontroller "k8s.io/kubernetes/pkg/controller/resourcequota"
serviceaccountcontroller "k8s.io/kubernetes/pkg/controller/serviceaccount"
"k8s.io/kubernetes/pkg/controller/storageversiongc"
@ -357,6 +358,21 @@ func startEphemeralVolumeController(ctx context.Context, controllerContext Contr
return nil, true, nil
}
const defaultResourceClaimControllerWorkers = 10
func startResourceClaimController(ctx context.Context, controllerContext ControllerContext) (controller.Interface, bool, error) {
claimController, err := resourceclaim.NewController(
controllerContext.ClientBuilder.ClientOrDie("resource-claim-controller"),
controllerContext.InformerFactory.Core().V1().Pods(),
controllerContext.InformerFactory.Resource().V1alpha1().ResourceClaims(),
controllerContext.InformerFactory.Resource().V1alpha1().ResourceClaimTemplates())
if err != nil {
return nil, true, fmt.Errorf("failed to start resource claim controller: %v", err)
}
go claimController.Run(ctx, defaultResourceClaimControllerWorkers)
return nil, true, nil
}
func startEndpointController(ctx context.Context, controllerCtx ControllerContext) (controller.Interface, bool, error) {
go endpointcontroller.NewEndpointController(
controllerCtx.InformerFactory.Core().V1().Pods(),

View File

@ -33,6 +33,7 @@ import (
_ "k8s.io/kubernetes/pkg/apis/extensions/install"
_ "k8s.io/kubernetes/pkg/apis/policy/install"
_ "k8s.io/kubernetes/pkg/apis/rbac/install"
_ "k8s.io/kubernetes/pkg/apis/resource/install"
_ "k8s.io/kubernetes/pkg/apis/scheduling/install"
_ "k8s.io/kubernetes/pkg/apis/storage/install"
)

View File

@ -41,7 +41,7 @@ import (
cadvisorapi "github.com/google/cadvisor/info/v1"
"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
otelsdkresource "go.opentelemetry.io/otel/sdk/resource"
"go.opentelemetry.io/otel/semconv/v1.12.0"
semconv "go.opentelemetry.io/otel/semconv/v1.12.0"
oteltrace "go.opentelemetry.io/otel/trace"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
@ -756,7 +756,9 @@ func run(ctx context.Context, s *options.KubeletServer, kubeDeps *kubelet.Depend
ExperimentalTopologyManagerPolicyOptions: topologyManagerPolicyOptions,
},
s.FailSwapOn,
kubeDeps.Recorder)
kubeDeps.Recorder,
kubeDeps.KubeClient,
)
if err != nil {
return err

go.mod
View File

@ -107,6 +107,7 @@ require (
k8s.io/controller-manager v0.0.0
k8s.io/cri-api v0.0.0
k8s.io/csi-translation-lib v0.0.0
k8s.io/dynamic-resource-allocation v0.0.0
k8s.io/gengo v0.0.0-20220902162205-c0856e24416d
k8s.io/klog/v2 v2.80.1
k8s.io/kms v0.0.0
@ -261,6 +262,7 @@ replace (
k8s.io/controller-manager => ./staging/src/k8s.io/controller-manager
k8s.io/cri-api => ./staging/src/k8s.io/cri-api
k8s.io/csi-translation-lib => ./staging/src/k8s.io/csi-translation-lib
k8s.io/dynamic-resource-allocation => ./staging/src/k8s.io/dynamic-resource-allocation
k8s.io/kms => ./staging/src/k8s.io/kms
k8s.io/kube-aggregator => ./staging/src/k8s.io/kube-aggregator
k8s.io/kube-controller-manager => ./staging/src/k8s.io/kube-controller-manager

View File

@ -89,6 +89,7 @@ coordination.k8s.io/v1beta1 \
coordination.k8s.io/v1 \
discovery.k8s.io/v1 \
discovery.k8s.io/v1beta1 \
resource.k8s.io/v1alpha1 \
extensions/v1beta1 \
events.k8s.io/v1 \
events.k8s.io/v1beta1 \

View File

@ -25,6 +25,8 @@ structured k8s.io/kubernetes/pkg/scheduler/.*
# Packages matched here do not have to be listed above because
# "contextual" implies "structured".
# TODO next: contextual k8s.io/kubernetes/pkg/scheduler/.*
contextual k8s.io/kubernetes/test/e2e/dra/.*
contextual k8s.io/dynamic-resource-allocation/.*
# As long as contextual logging is alpha or beta, all WithName, WithValues,
# NewContext calls have to go through klog. Once it is GA, we can lift

View File

@ -40,6 +40,7 @@ BASH_TARGETS="
update-codegen
update-generated-runtime
update-generated-device-plugin
update-generated-dynamic-resource-allocation
update-generated-api-compatibility-data
update-generated-docs
update-generated-swagger-docs

View File

@ -9,6 +9,8 @@
"github.com/go-openapi/strfmt": "use k8s.io/kube-openapi/pkg/validation/strfmt instead",
"github.com/go-openapi/validate": "use k8s.io/kube-openapi/pkg/validation/validate instead",
"github.com/hashicorp/consul": "MPL license not in CNCF allowlist",
"github.com/hashicorp/errwrap": "MPL license not in CNCF allowlist",
"github.com/hashicorp/go-multierror": "MPL license not in CNCF allowlist",
"github.com/hashicorp/golang-lru": "MPL license not in CNCF allowlist",
"github.com/hashicorp/hcl": "MPL license not in CNCF allowlist",
"github.com/influxdata/influxdb1-client": "",
@ -17,7 +19,10 @@
"github.com/onsi/ginkgo": "Ginkgo has been migrated to V2, refer to #109111",
"github.com/spf13/viper": "refer to #102598",
"go.mongodb.org/mongo-driver": "",
"gopkg.in/fsnotify.v1": "obsolete, use github.com/fsnotify/fsnotify",
"k8s.io/klog": "we have switched to klog v2, so avoid klog v1",
"github.com/mndrix/tap-go": "unmaintained",
"github.com/xeipuuv/gojsonschema": "unmaintained",
"rsc.io/quote": "refer to #102833",
"rsc.io/sampler": "refer to #102833"
}

View File

@ -0,0 +1,29 @@
#!/usr/bin/env bash
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script generates `*/api.pb.go` from the protobuf file `*/api.proto`.
# Example:
# kube::protoc::generate_proto "${DYNAMIC_RESOURCE_ALLOCATION_V1ALPHA1}"
set -o errexit
set -o nounset
set -o pipefail
KUBE_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/../" && pwd -P)"
DYNAMIC_RESOURCE_ALLOCATION_V1ALPHA1="${KUBE_ROOT}/staging/src/k8s.io/kubelet/pkg/apis/dra/v1alpha1/"
source "${KUBE_ROOT}/hack/lib/protoc.sh"
kube::protoc::generate_proto "${DYNAMIC_RESOURCE_ALLOCATION_V1ALPHA1}"

View File

@ -0,0 +1,27 @@
#!/usr/bin/env bash
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
# NOTE: All output from this script needs to be copied back to the calling
# source tree. This is managed in kube::build::copy_output in build/common.sh.
# If the output set is changed update that function.
"${KUBE_ROOT}/build/run.sh" hack/update-generated-dynamic-resource-allocation-dockerized.sh "$@"

View File

@ -0,0 +1,44 @@
#!/usr/bin/env bash
# Copyright 2022 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script checks whether the generated dynamic resource allocation kubelet
# plugin API is up to date. We should run
# `hack/update-generated-dynamic-resource-allocation.sh` if it is out of date.
# Usage: `hack/verify-generated-dynamic-resource-allocation.sh`.
set -o errexit
set -o nounset
set -o pipefail
KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
ERROR="Dynamic resource allocation kubelet plugin api is out of date. Please run hack/update-generated-dynamic-resource-allocation.sh"
DYNAMIC_RESOURCE_ALLOCATION_V1ALPHA1="${KUBE_ROOT}/staging/src/k8s.io/kubelet/pkg/apis/dra/v1alpha1/"
source "${KUBE_ROOT}/hack/lib/protoc.sh"
kube::golang::setup_env
function cleanup {
rm -rf "${DYNAMIC_RESOURCE_ALLOCATION_V1ALPHA1}/_tmp/"
}
trap cleanup EXIT
mkdir -p "${DYNAMIC_RESOURCE_ALLOCATION_V1ALPHA1}/_tmp"
cp "${DYNAMIC_RESOURCE_ALLOCATION_V1ALPHA1}/api.pb.go" "${DYNAMIC_RESOURCE_ALLOCATION_V1ALPHA1}/_tmp/"
KUBE_VERBOSE=3 "${KUBE_ROOT}/hack/update-generated-dynamic-resource-allocation.sh"
kube::protoc::diff "${DYNAMIC_RESOURCE_ALLOCATION_V1ALPHA1}/api.pb.go" "${DYNAMIC_RESOURCE_ALLOCATION_V1ALPHA1}/_tmp/api.pb.go" "${ERROR}"
echo "Generated dynamic resource allocation kubelet plugin alpha api is up to date."

View File

@ -550,6 +550,42 @@ func dropDisabledFields(
dropDisabledTopologySpreadConstraintsFields(podSpec, oldPodSpec)
dropDisabledNodeInclusionPolicyFields(podSpec, oldPodSpec)
dropDisabledMatchLabelKeysField(podSpec, oldPodSpec)
dropDisabledDynamicResourceAllocationFields(podSpec, oldPodSpec)
}
// dropDisabledDynamicResourceAllocationFields removes pod claim references from
// container specs and pod-level resource claims unless they are already used
// by the old pod spec.
func dropDisabledDynamicResourceAllocationFields(podSpec, oldPodSpec *api.PodSpec) {
if !utilfeature.DefaultFeatureGate.Enabled(features.DynamicResourceAllocation) && !dynamicResourceAllocationInUse(oldPodSpec) {
dropResourceClaimRequests(podSpec.Containers)
dropResourceClaimRequests(podSpec.InitContainers)
dropEphemeralResourceClaimRequests(podSpec.EphemeralContainers)
podSpec.ResourceClaims = nil
}
}
func dynamicResourceAllocationInUse(podSpec *api.PodSpec) bool {
if podSpec == nil {
return false
}
// We only need to check this field because the containers cannot have
// resource requirements entries for claims without a corresponding
// entry at the pod spec level.
return len(podSpec.ResourceClaims) > 0
}
func dropResourceClaimRequests(containers []api.Container) {
for i := range containers {
containers[i].Resources.Claims = nil
}
}
func dropEphemeralResourceClaimRequests(containers []api.EphemeralContainer) {
for i := range containers {
containers[i].Resources.Claims = nil
}
}
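
The drop logic above implements the usual alpha-field rule: on create, or on update where the old object never used the field, disabled alpha fields are cleared so they cannot be persisted. A self-contained sketch of that rule with simplified stand-in types (not the real api.PodSpec):

package main

import "fmt"

type PodSpec struct {
	ResourceClaims []string
}

func dropDisabled(newSpec, oldSpec *PodSpec, gateEnabled bool) {
	inUse := oldSpec != nil && len(oldSpec.ResourceClaims) > 0
	if !gateEnabled && !inUse {
		newSpec.ResourceClaims = nil // clear the disabled alpha field
	}
}

func main() {
	s := &PodSpec{ResourceClaims: []string{"gpu"}}
	dropDisabled(s, nil, false)          // create with the gate off
	fmt.Println(s.ResourceClaims == nil) // true: field dropped
}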
// dropDisabledTopologySpreadConstraintsFields removes disabled fields from PodSpec related

View File

@ -784,6 +784,165 @@ func TestDropAppArmor(t *testing.T) {
}
}
func TestDropDynamicResourceAllocation(t *testing.T) {
resourceClaimName := "external-claim"
podWithClaims := &api.Pod{
Spec: api.PodSpec{
Containers: []api.Container{
{
Resources: api.ResourceRequirements{
Claims: []api.ResourceClaim{{Name: "my-claim"}},
},
},
},
InitContainers: []api.Container{
{
Resources: api.ResourceRequirements{
Claims: []api.ResourceClaim{{Name: "my-claim"}},
},
},
},
EphemeralContainers: []api.EphemeralContainer{
{
EphemeralContainerCommon: api.EphemeralContainerCommon{
Resources: api.ResourceRequirements{
Claims: []api.ResourceClaim{{Name: "my-claim"}},
},
},
},
},
ResourceClaims: []api.PodResourceClaim{
{
Name: "my-claim",
Source: api.ClaimSource{
ResourceClaimName: &resourceClaimName,
},
},
},
},
}
podWithoutClaims := &api.Pod{
Spec: api.PodSpec{
Containers: []api.Container{{}},
InitContainers: []api.Container{{}},
EphemeralContainers: []api.EphemeralContainer{{}},
},
}
var noPod *api.Pod
testcases := []struct {
description string
enabled bool
oldPod *api.Pod
newPod *api.Pod
wantPod *api.Pod
}{
{
description: "old with claims / new with claims / disabled",
oldPod: podWithClaims,
newPod: podWithClaims,
wantPod: podWithClaims,
},
{
description: "old without claims / new with claims / disabled",
oldPod: podWithoutClaims,
newPod: podWithClaims,
wantPod: podWithoutClaims,
},
{
description: "no old pod / new with claims / disabled",
oldPod: noPod,
newPod: podWithClaims,
wantPod: podWithoutClaims,
},
{
description: "old with claims / new without claims / disabled",
oldPod: podWithClaims,
newPod: podWithoutClaims,
wantPod: podWithoutClaims,
},
{
description: "old without claims / new without claims / disabled",
oldPod: podWithoutClaims,
newPod: podWithoutClaims,
wantPod: podWithoutClaims,
},
{
description: "no old pod / new without claims / disabled",
oldPod: noPod,
newPod: podWithoutClaims,
wantPod: podWithoutClaims,
},
{
description: "old with claims / new with claims / enabled",
enabled: true,
oldPod: podWithClaims,
newPod: podWithClaims,
wantPod: podWithClaims,
},
{
description: "old without claims / new with claims / enabled",
enabled: true,
oldPod: podWithoutClaims,
newPod: podWithClaims,
wantPod: podWithClaims,
},
{
description: "no old pod / new with claims / enabled",
enabled: true,
oldPod: noPod,
newPod: podWithClaims,
wantPod: podWithClaims,
},
{
description: "old with claims / new without claims / enabled",
enabled: true,
oldPod: podWithClaims,
newPod: podWithoutClaims,
wantPod: podWithoutClaims,
},
{
description: "old without claims / new without claims / enabled",
enabled: true,
oldPod: podWithoutClaims,
newPod: podWithoutClaims,
wantPod: podWithoutClaims,
},
{
description: "no old pod / new without claims / enabled",
enabled: true,
oldPod: noPod,
newPod: podWithoutClaims,
wantPod: podWithoutClaims,
},
}
for _, tc := range testcases {
t.Run(tc.description, func(t *testing.T) {
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DynamicResourceAllocation, tc.enabled)()
oldPod := tc.oldPod.DeepCopy()
newPod := tc.newPod.DeepCopy()
wantPod := tc.wantPod
DropDisabledPodFields(newPod, oldPod)
// old pod should never be changed
if diff := cmp.Diff(oldPod, tc.oldPod); diff != "" {
t.Errorf("old pod changed: %s", diff)
}
if diff := cmp.Diff(wantPod, newPod); diff != "" {
t.Errorf("new pod changed (- want, + got): %s", diff)
}
})
}
}
func TestDropProbeGracePeriod(t *testing.T) {
podWithProbeGracePeriod := func() *api.Pod {
livenessGracePeriod := int64(10)

View File

@ -139,6 +139,10 @@ func TestDefaulting(t *testing.T) {
{Group: "rbac.authorization.k8s.io", Version: "v1", Kind: "ClusterRoleBindingList"}: {},
{Group: "rbac.authorization.k8s.io", Version: "v1", Kind: "RoleBinding"}: {},
{Group: "rbac.authorization.k8s.io", Version: "v1", Kind: "RoleBindingList"}: {},
{Group: "resource.k8s.io", Version: "v1alpha1", Kind: "ResourceClaim"}: {},
{Group: "resource.k8s.io", Version: "v1alpha1", Kind: "ResourceClaimList"}: {},
{Group: "resource.k8s.io", Version: "v1alpha1", Kind: "ResourceClaimTemplate"}: {},
{Group: "resource.k8s.io", Version: "v1alpha1", Kind: "ResourceClaimTemplateList"}: {},
{Group: "admissionregistration.k8s.io", Version: "v1alpha1", Kind: "ValidatingAdmissionPolicy"}: {},
{Group: "admissionregistration.k8s.io", Version: "v1alpha1", Kind: "ValidatingAdmissionPolicyList"}: {},
{Group: "admissionregistration.k8s.io", Version: "v1alpha1", Kind: "ValidatingAdmissionPolicyBinding"}: {},

View File

@ -42,6 +42,7 @@ import (
networkingfuzzer "k8s.io/kubernetes/pkg/apis/networking/fuzzer"
policyfuzzer "k8s.io/kubernetes/pkg/apis/policy/fuzzer"
rbacfuzzer "k8s.io/kubernetes/pkg/apis/rbac/fuzzer"
resourcefuzzer "k8s.io/kubernetes/pkg/apis/resource/fuzzer"
schedulingfuzzer "k8s.io/kubernetes/pkg/apis/scheduling/fuzzer"
storagefuzzer "k8s.io/kubernetes/pkg/apis/storage/fuzzer"
)
@ -101,6 +102,7 @@ var FuzzerFuncs = fuzzer.MergeFuzzerFuncs(
autoscalingfuzzer.Funcs,
rbacfuzzer.Funcs,
policyfuzzer.Funcs,
resourcefuzzer.Funcs,
certificatesfuzzer.Funcs,
admissionregistrationfuzzer.Funcs,
storagefuzzer.Funcs,

View File

@ -37,6 +37,7 @@ import (
_ "k8s.io/kubernetes/pkg/apis/node/install"
_ "k8s.io/kubernetes/pkg/apis/policy/install"
_ "k8s.io/kubernetes/pkg/apis/rbac/install"
_ "k8s.io/kubernetes/pkg/apis/resource/install"
_ "k8s.io/kubernetes/pkg/apis/scheduling/install"
_ "k8s.io/kubernetes/pkg/apis/storage/install"
)

View File

@ -2185,6 +2185,25 @@ type ResourceRequirements struct {
// otherwise to an implementation-defined value
// +optional
Requests ResourceList
// Claims lists the names of resources, defined in spec.resourceClaims,
// that are used by this container.
//
// This is an alpha field and requires enabling the
// DynamicResourceAllocation feature gate.
//
// This field is immutable.
//
// +featureGate=DynamicResourceAllocation
// +optional
Claims []ResourceClaim
}
// ResourceClaim references one entry in PodSpec.ResourceClaims.
type ResourceClaim struct {
// Name must match the name of one entry in pod.spec.resourceClaims of
// the Pod where this field is used. It makes that resource available
// inside a container.
Name string
}
// Container represents a single container that is expected to be run on the host.
@ -3024,12 +3043,68 @@ type PodSpec struct {
// - spec.containers[*].securityContext.runAsGroup
// +optional
OS *PodOS
// SchedulingGates is an opaque list of values that if specified will block scheduling the pod.
// More info: https://git.k8s.io/enhancements/keps/sig-scheduling/3521-pod-scheduling-readiness.
//
// This is an alpha-level feature enabled by PodSchedulingReadiness feature gate.
// +optional
SchedulingGates []PodSchedulingGate
// ResourceClaims defines which ResourceClaims must be allocated
// and reserved before the Pod is allowed to start. The resources
// will be made available to those containers which consume them
// by name.
//
// This is an alpha field and requires enabling the
// DynamicResourceAllocation feature gate.
//
// This field is immutable.
//
// +featureGate=DynamicResourceAllocation
// +optional
ResourceClaims []PodResourceClaim
}
// PodResourceClaim references exactly one ResourceClaim through a ClaimSource.
// It adds a name to it that uniquely identifies the ResourceClaim inside the Pod.
// Containers that need access to the ResourceClaim reference it with this name.
type PodResourceClaim struct {
// Name uniquely identifies this resource claim inside the pod.
// This must be a DNS_LABEL.
Name string
// Source describes where to find the ResourceClaim.
Source ClaimSource
}
// ClaimSource describes a reference to a ResourceClaim.
//
// Exactly one of these fields should be set. Consumers of this type must
// treat an empty object as if it has an unknown value.
type ClaimSource struct {
// ResourceClaimName is the name of a ResourceClaim object in the same
// namespace as this pod.
ResourceClaimName *string
// ResourceClaimTemplateName is the name of a ResourceClaimTemplate
// object in the same namespace as this pod.
//
// The template will be used to create a new ResourceClaim, which will
// be bound to this pod. When this pod is deleted, the ResourceClaim
// will also be deleted. The name of the ResourceClaim will be <pod
// name>-<resource name>, where <resource name> is the
// PodResourceClaim.Name. Pod validation will reject the pod if the
// concatenated name is not valid for a ResourceClaim (e.g. too long).
//
// An existing ResourceClaim with that name that is not owned by the
// pod will not be used for the pod to avoid using an unrelated
// resource by mistake. Scheduling and pod startup are then blocked
// until the unrelated ResourceClaim is removed.
//
// This field is immutable and no changes will be made to the
// corresponding ResourceClaim by the control plane after creating the
// ResourceClaim.
ResourceClaimTemplateName *string
}
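
The naming rule in the comment above is easy to check by hand. A small sketch of the documented <pod name>-<resource name> concatenation; the helper is hypothetical, not this PR's code:

package main

import "fmt"

// generatedClaimName mirrors the documented rule; validation elsewhere
// rejects results that are not valid ResourceClaim names (e.g. too long).
func generatedClaimName(podName, podClaimName string) string {
	return podName + "-" + podClaimName
}

func main() {
	fmt.Println(generatedClaimName("my-pod", "gpu")) // my-pod-gpu
}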
// OSName is the set of OS'es that can be used in OS.

View File

@ -192,6 +192,16 @@ func RegisterConversions(s *runtime.Scheme) error {
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1.ClaimSource)(nil), (*core.ClaimSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ClaimSource_To_core_ClaimSource(a.(*v1.ClaimSource), b.(*core.ClaimSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.ClaimSource)(nil), (*v1.ClaimSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_ClaimSource_To_v1_ClaimSource(a.(*core.ClaimSource), b.(*v1.ClaimSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1.ClientIPConfig)(nil), (*core.ClientIPConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ClientIPConfig_To_core_ClientIPConfig(a.(*v1.ClientIPConfig), b.(*core.ClientIPConfig), scope)
}); err != nil {
@ -1352,6 +1362,16 @@ func RegisterConversions(s *runtime.Scheme) error {
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1.PodResourceClaim)(nil), (*core.PodResourceClaim)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_PodResourceClaim_To_core_PodResourceClaim(a.(*v1.PodResourceClaim), b.(*core.PodResourceClaim), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.PodResourceClaim)(nil), (*v1.PodResourceClaim)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_PodResourceClaim_To_v1_PodResourceClaim(a.(*core.PodResourceClaim), b.(*v1.PodResourceClaim), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1.PodSchedulingGate)(nil), (*core.PodSchedulingGate)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_PodSchedulingGate_To_core_PodSchedulingGate(a.(*v1.PodSchedulingGate), b.(*core.PodSchedulingGate), scope)
}); err != nil {
@ -1572,6 +1592,16 @@ func RegisterConversions(s *runtime.Scheme) error {
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1.ResourceClaim)(nil), (*core.ResourceClaim)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ResourceClaim_To_core_ResourceClaim(a.(*v1.ResourceClaim), b.(*core.ResourceClaim), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.ResourceClaim)(nil), (*v1.ResourceClaim)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_ResourceClaim_To_v1_ResourceClaim(a.(*core.ResourceClaim), b.(*v1.ResourceClaim), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1.ResourceFieldSelector)(nil), (*core.ResourceFieldSelector)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ResourceFieldSelector_To_core_ResourceFieldSelector(a.(*v1.ResourceFieldSelector), b.(*core.ResourceFieldSelector), scope)
}); err != nil {
@ -2633,6 +2663,28 @@ func Convert_core_CinderVolumeSource_To_v1_CinderVolumeSource(in *core.CinderVol
return autoConvert_core_CinderVolumeSource_To_v1_CinderVolumeSource(in, out, s)
}
func autoConvert_v1_ClaimSource_To_core_ClaimSource(in *v1.ClaimSource, out *core.ClaimSource, s conversion.Scope) error {
out.ResourceClaimName = (*string)(unsafe.Pointer(in.ResourceClaimName))
out.ResourceClaimTemplateName = (*string)(unsafe.Pointer(in.ResourceClaimTemplateName))
return nil
}
// Convert_v1_ClaimSource_To_core_ClaimSource is an autogenerated conversion function.
func Convert_v1_ClaimSource_To_core_ClaimSource(in *v1.ClaimSource, out *core.ClaimSource, s conversion.Scope) error {
return autoConvert_v1_ClaimSource_To_core_ClaimSource(in, out, s)
}
func autoConvert_core_ClaimSource_To_v1_ClaimSource(in *core.ClaimSource, out *v1.ClaimSource, s conversion.Scope) error {
out.ResourceClaimName = (*string)(unsafe.Pointer(in.ResourceClaimName))
out.ResourceClaimTemplateName = (*string)(unsafe.Pointer(in.ResourceClaimTemplateName))
return nil
}
// Convert_core_ClaimSource_To_v1_ClaimSource is an autogenerated conversion function.
func Convert_core_ClaimSource_To_v1_ClaimSource(in *core.ClaimSource, out *v1.ClaimSource, s conversion.Scope) error {
return autoConvert_core_ClaimSource_To_v1_ClaimSource(in, out, s)
}
func autoConvert_v1_ClientIPConfig_To_core_ClientIPConfig(in *v1.ClientIPConfig, out *core.ClientIPConfig, s conversion.Scope) error {
out.TimeoutSeconds = (*int32)(unsafe.Pointer(in.TimeoutSeconds))
return nil
@ -6089,6 +6141,32 @@ func Convert_core_PodReadinessGate_To_v1_PodReadinessGate(in *core.PodReadinessG
return autoConvert_core_PodReadinessGate_To_v1_PodReadinessGate(in, out, s)
}
func autoConvert_v1_PodResourceClaim_To_core_PodResourceClaim(in *v1.PodResourceClaim, out *core.PodResourceClaim, s conversion.Scope) error {
out.Name = in.Name
if err := Convert_v1_ClaimSource_To_core_ClaimSource(&in.Source, &out.Source, s); err != nil {
return err
}
return nil
}
// Convert_v1_PodResourceClaim_To_core_PodResourceClaim is an autogenerated conversion function.
func Convert_v1_PodResourceClaim_To_core_PodResourceClaim(in *v1.PodResourceClaim, out *core.PodResourceClaim, s conversion.Scope) error {
return autoConvert_v1_PodResourceClaim_To_core_PodResourceClaim(in, out, s)
}
func autoConvert_core_PodResourceClaim_To_v1_PodResourceClaim(in *core.PodResourceClaim, out *v1.PodResourceClaim, s conversion.Scope) error {
out.Name = in.Name
if err := Convert_core_ClaimSource_To_v1_ClaimSource(&in.Source, &out.Source, s); err != nil {
return err
}
return nil
}
// Convert_core_PodResourceClaim_To_v1_PodResourceClaim is an autogenerated conversion function.
func Convert_core_PodResourceClaim_To_v1_PodResourceClaim(in *core.PodResourceClaim, out *v1.PodResourceClaim, s conversion.Scope) error {
return autoConvert_core_PodResourceClaim_To_v1_PodResourceClaim(in, out, s)
}
func autoConvert_v1_PodSchedulingGate_To_core_PodSchedulingGate(in *v1.PodSchedulingGate, out *core.PodSchedulingGate, s conversion.Scope) error {
out.Name = in.Name
return nil
@ -6229,6 +6307,7 @@ func autoConvert_v1_PodSpec_To_core_PodSpec(in *v1.PodSpec, out *core.PodSpec, s
out.OS = (*core.PodOS)(unsafe.Pointer(in.OS))
// INFO: in.HostUsers opted out of conversion generation
out.SchedulingGates = *(*[]core.PodSchedulingGate)(unsafe.Pointer(&in.SchedulingGates))
out.ResourceClaims = *(*[]core.PodResourceClaim)(unsafe.Pointer(&in.ResourceClaims))
return nil
}
@ -6283,6 +6362,7 @@ func autoConvert_core_PodSpec_To_v1_PodSpec(in *core.PodSpec, out *v1.PodSpec, s
out.TopologySpreadConstraints = *(*[]v1.TopologySpreadConstraint)(unsafe.Pointer(&in.TopologySpreadConstraints))
out.OS = (*v1.PodOS)(unsafe.Pointer(in.OS))
out.SchedulingGates = *(*[]v1.PodSchedulingGate)(unsafe.Pointer(&in.SchedulingGates))
out.ResourceClaims = *(*[]v1.PodResourceClaim)(unsafe.Pointer(&in.ResourceClaims))
return nil
}
@ -6947,6 +7027,26 @@ func Convert_core_ReplicationControllerStatus_To_v1_ReplicationControllerStatus(
return autoConvert_core_ReplicationControllerStatus_To_v1_ReplicationControllerStatus(in, out, s)
}
func autoConvert_v1_ResourceClaim_To_core_ResourceClaim(in *v1.ResourceClaim, out *core.ResourceClaim, s conversion.Scope) error {
out.Name = in.Name
return nil
}
// Convert_v1_ResourceClaim_To_core_ResourceClaim is an autogenerated conversion function.
func Convert_v1_ResourceClaim_To_core_ResourceClaim(in *v1.ResourceClaim, out *core.ResourceClaim, s conversion.Scope) error {
return autoConvert_v1_ResourceClaim_To_core_ResourceClaim(in, out, s)
}
func autoConvert_core_ResourceClaim_To_v1_ResourceClaim(in *core.ResourceClaim, out *v1.ResourceClaim, s conversion.Scope) error {
out.Name = in.Name
return nil
}
// Convert_core_ResourceClaim_To_v1_ResourceClaim is an autogenerated conversion function.
func Convert_core_ResourceClaim_To_v1_ResourceClaim(in *core.ResourceClaim, out *v1.ResourceClaim, s conversion.Scope) error {
return autoConvert_core_ResourceClaim_To_v1_ResourceClaim(in, out, s)
}
func autoConvert_v1_ResourceFieldSelector_To_core_ResourceFieldSelector(in *v1.ResourceFieldSelector, out *core.ResourceFieldSelector, s conversion.Scope) error {
out.ContainerName = in.ContainerName
out.Resource = in.Resource
@ -7074,6 +7174,7 @@ func Convert_core_ResourceQuotaStatus_To_v1_ResourceQuotaStatus(in *core.Resourc
func autoConvert_v1_ResourceRequirements_To_core_ResourceRequirements(in *v1.ResourceRequirements, out *core.ResourceRequirements, s conversion.Scope) error {
out.Limits = *(*core.ResourceList)(unsafe.Pointer(&in.Limits))
out.Requests = *(*core.ResourceList)(unsafe.Pointer(&in.Requests))
out.Claims = *(*[]core.ResourceClaim)(unsafe.Pointer(&in.Claims))
return nil
}
@ -7085,6 +7186,7 @@ func Convert_v1_ResourceRequirements_To_core_ResourceRequirements(in *v1.Resourc
func autoConvert_core_ResourceRequirements_To_v1_ResourceRequirements(in *core.ResourceRequirements, out *v1.ResourceRequirements, s conversion.Scope) error {
out.Limits = *(*v1.ResourceList)(unsafe.Pointer(&in.Limits))
out.Requests = *(*v1.ResourceList)(unsafe.Pointer(&in.Requests))
out.Claims = *(*[]v1.ResourceClaim)(unsafe.Pointer(&in.Claims))
return nil
}


@ -309,7 +309,7 @@ func ValidateRuntimeClassName(name string, fldPath *field.Path) field.ErrorList
// validateOverhead can be used to check whether the given Overhead is valid.
func validateOverhead(overhead core.ResourceList, fldPath *field.Path, opts PodValidationOptions) field.ErrorList {
// reuse the ResourceRequirements validation logic
return ValidateResourceRequirements(&core.ResourceRequirements{Limits: overhead}, fldPath, opts)
return ValidateResourceRequirements(&core.ResourceRequirements{Limits: overhead}, nil, fldPath, opts)
}
// Validates that given value is not negative.
@ -1621,12 +1621,12 @@ func validateEphemeralVolumeSource(ephemeral *core.EphemeralVolumeSource, fldPat
// ValidatePersistentVolumeClaimTemplate verifies that the embedded object meta and spec are valid.
// Checking of the object data is very minimal because only labels and annotations are used.
func ValidatePersistentVolumeClaimTemplate(claimTemplate *core.PersistentVolumeClaimTemplate, fldPath *field.Path, opts PersistentVolumeClaimSpecValidationOptions) field.ErrorList {
allErrs := validatePersistentVolumeClaimTemplateObjectMeta(&claimTemplate.ObjectMeta, fldPath.Child("metadata"))
allErrs := ValidateTemplateObjectMeta(&claimTemplate.ObjectMeta, fldPath.Child("metadata"))
allErrs = append(allErrs, ValidatePersistentVolumeClaimSpec(&claimTemplate.Spec, fldPath.Child("spec"), opts)...)
return allErrs
}
func validatePersistentVolumeClaimTemplateObjectMeta(objMeta *metav1.ObjectMeta, fldPath *field.Path) field.ErrorList {
func ValidateTemplateObjectMeta(objMeta *metav1.ObjectMeta, fldPath *field.Path) field.ErrorList {
allErrs := apimachineryvalidation.ValidateAnnotations(objMeta.Annotations, fldPath.Child("annotations"))
allErrs = append(allErrs, unversionedvalidation.ValidateLabels(objMeta.Labels, fldPath.Child("labels"))...)
// All other fields are not supported and thus must not be set
@ -1634,11 +1634,11 @@ func validatePersistentVolumeClaimTemplateObjectMeta(objMeta *metav1.ObjectMeta,
// but then adding a new one to ObjectMeta wouldn't be checked
// unless this code gets updated. Instead, we ensure that
// only allowed fields are set via reflection.
allErrs = append(allErrs, validateFieldAllowList(*objMeta, allowedPVCTemplateObjectMetaFields, "cannot be set for an ephemeral volume", fldPath)...)
allErrs = append(allErrs, validateFieldAllowList(*objMeta, allowedTemplateObjectMetaFields, "cannot be set", fldPath)...)
return allErrs
}
var allowedPVCTemplateObjectMetaFields = map[string]bool{
var allowedTemplateObjectMetaFields = map[string]bool{
"Annotations": true,
"Labels": true,
}
@ -2768,6 +2768,54 @@ func ValidateVolumeDevices(devices []core.VolumeDevice, volmounts map[string]str
return allErrs
}
func validatePodResourceClaims(claims []core.PodResourceClaim, fldPath *field.Path) field.ErrorList {
var allErrs field.ErrorList
podClaimNames := sets.NewString()
for i, claim := range claims {
allErrs = append(allErrs, validatePodResourceClaim(claim, &podClaimNames, fldPath.Index(i))...)
}
return allErrs
}
// gatherPodResourceClaimNames returns a set of all non-empty
// PodResourceClaim.Name values. Validation that those names are valid is
// handled by validatePodResourceClaims.
func gatherPodResourceClaimNames(claims []core.PodResourceClaim) sets.String {
podClaimNames := sets.String{}
for _, claim := range claims {
if claim.Name != "" {
podClaimNames.Insert(claim.Name)
}
}
return podClaimNames
}
func validatePodResourceClaim(claim core.PodResourceClaim, podClaimNames *sets.String, fldPath *field.Path) field.ErrorList {
var allErrs field.ErrorList
if claim.Name == "" {
allErrs = append(allErrs, field.Required(fldPath.Child("name"), ""))
} else if podClaimNames.Has(claim.Name) {
allErrs = append(allErrs, field.Duplicate(fldPath.Child("name"), claim.Name))
} else {
allErrs = append(allErrs, ValidateDNS1123Label(claim.Name, fldPath.Child("name"))...)
podClaimNames.Insert(claim.Name)
}
allErrs = append(allErrs, validatePodResourceClaimSource(claim.Source, fldPath.Child("source"))...)
return allErrs
}
func validatePodResourceClaimSource(claimSource core.ClaimSource, fldPath *field.Path) field.ErrorList {
var allErrs field.ErrorList
if claimSource.ResourceClaimName != nil && claimSource.ResourceClaimTemplateName != nil {
allErrs = append(allErrs, field.Invalid(fldPath, claimSource, "at most one of `resourceClaimName` or `resourceClaimTemplateName` may be specified"))
}
if claimSource.ResourceClaimName == nil && claimSource.ResourceClaimTemplateName == nil {
allErrs = append(allErrs, field.Invalid(fldPath, claimSource, "must specify one of: `resourceClaimName`, `resourceClaimTemplateName`"))
}
return allErrs
}
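For orientation, a minimal sketch of how this one-of rule behaves for callers, assuming it sits alongside the validator in the same validation package; the helper name and values below are illustrative, not part of the commit:
func exampleClaimSourceChecks() {
	name := "some-claim"
	tmpl := "some-template"
	// Exactly one field set: no errors are returned.
	okErrs := validatePodResourceClaimSource(core.ClaimSource{ResourceClaimName: &name}, field.NewPath("source"))
	_ = okErrs // empty
	// Both fields set: one field.Invalid error at "source".
	badErrs := validatePodResourceClaimSource(core.ClaimSource{
		ResourceClaimName:         &name,
		ResourceClaimTemplateName: &tmpl,
	}, field.NewPath("source"))
	_ = badErrs // len(badErrs) == 1
}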
func validateProbe(probe *core.Probe, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
@ -2990,8 +3038,8 @@ func validatePullPolicy(policy core.PullPolicy, fldPath *field.Path) field.Error
// validateEphemeralContainers is called by pod spec and template validation to validate the list of ephemeral containers.
// Note that this is called for pod template even though ephemeral containers aren't allowed in pod templates.
func validateEphemeralContainers(ephemeralContainers []core.EphemeralContainer, containers, initContainers []core.Container, volumes map[string]core.VolumeSource, fldPath *field.Path, opts PodValidationOptions) field.ErrorList {
allErrs := field.ErrorList{}
func validateEphemeralContainers(ephemeralContainers []core.EphemeralContainer, containers, initContainers []core.Container, volumes map[string]core.VolumeSource, podClaimNames sets.String, fldPath *field.Path, opts PodValidationOptions) field.ErrorList {
var allErrs field.ErrorList
if len(ephemeralContainers) == 0 {
return allErrs
@ -3011,7 +3059,7 @@ func validateEphemeralContainers(ephemeralContainers []core.EphemeralContainer,
idxPath := fldPath.Index(i)
c := (*core.Container)(&ec.EphemeralContainerCommon)
allErrs = append(allErrs, validateContainerCommon(c, volumes, idxPath, opts)...)
allErrs = append(allErrs, validateContainerCommon(c, volumes, podClaimNames, idxPath, opts)...)
// Ephemeral containers don't need looser constraints for pod templates, so it's convenient to apply both validations
// here where we've already converted EphemeralContainerCommon to Container.
allErrs = append(allErrs, validateContainerOnlyForPod(c, idxPath)...)
@ -3049,7 +3097,7 @@ func validateEphemeralContainers(ephemeralContainers []core.EphemeralContainer,
return allErrs
}
// validateFieldAcceptList checks that only allowed fields are set.
// validateFieldAllowList checks that only allowed fields are set.
// The value must be a struct (not a pointer to a struct!).
func validateFieldAllowList(value interface{}, allowedFields map[string]bool, errorText string, fldPath *field.Path) field.ErrorList {
var allErrs field.ErrorList
@ -3073,7 +3121,7 @@ func validateFieldAllowList(value interface{}, allowedFields map[string]bool, er
}
// validateInitContainers is called by pod spec and template validation to validate the list of init containers
func validateInitContainers(containers []core.Container, regularContainers []core.Container, volumes map[string]core.VolumeSource, fldPath *field.Path, opts PodValidationOptions) field.ErrorList {
func validateInitContainers(containers []core.Container, regularContainers []core.Container, volumes map[string]core.VolumeSource, podClaimNames sets.String, fldPath *field.Path, opts PodValidationOptions) field.ErrorList {
var allErrs field.ErrorList
allNames := sets.String{}
@ -3084,7 +3132,7 @@ func validateInitContainers(containers []core.Container, regularContainers []cor
idxPath := fldPath.Index(i)
// Apply the validation common to all container types
allErrs = append(allErrs, validateContainerCommon(&ctr, volumes, idxPath, opts)...)
allErrs = append(allErrs, validateContainerCommon(&ctr, volumes, podClaimNames, idxPath, opts)...)
// Names must be unique within regular and init containers. Collisions with ephemeral containers
// will be detected by validateEphemeralContainers().
@ -3117,8 +3165,8 @@ func validateInitContainers(containers []core.Container, regularContainers []cor
// validateContainerCommon applies validation common to all container types. It's called by regular, init, and ephemeral
// container list validation to require a properly formatted name, image, etc.
func validateContainerCommon(ctr *core.Container, volumes map[string]core.VolumeSource, path *field.Path, opts PodValidationOptions) field.ErrorList {
allErrs := field.ErrorList{}
func validateContainerCommon(ctr *core.Container, volumes map[string]core.VolumeSource, podClaimNames sets.String, path *field.Path, opts PodValidationOptions) field.ErrorList {
var allErrs field.ErrorList
namePath := path.Child("name")
if len(ctr.Name) == 0 {
@ -3154,7 +3202,7 @@ func validateContainerCommon(ctr *core.Container, volumes map[string]core.Volume
allErrs = append(allErrs, ValidateVolumeMounts(ctr.VolumeMounts, volDevices, volumes, ctr, path.Child("volumeMounts"))...)
allErrs = append(allErrs, ValidateVolumeDevices(ctr.VolumeDevices, volMounts, volumes, path.Child("volumeDevices"))...)
allErrs = append(allErrs, validatePullPolicy(ctr.ImagePullPolicy, path.Child("imagePullPolicy"))...)
allErrs = append(allErrs, ValidateResourceRequirements(&ctr.Resources, path.Child("resources"), opts)...)
allErrs = append(allErrs, ValidateResourceRequirements(&ctr.Resources, podClaimNames, path.Child("resources"), opts)...)
allErrs = append(allErrs, ValidateSecurityContext(ctr.SecurityContext, path.Child("securityContext"))...)
return allErrs
}
@ -3207,7 +3255,7 @@ func validateHostUsers(spec *core.PodSpec, fldPath *field.Path) field.ErrorList
}
// validateContainers is called by pod spec and template validation to validate the list of regular containers.
func validateContainers(containers []core.Container, volumes map[string]core.VolumeSource, fldPath *field.Path, opts PodValidationOptions) field.ErrorList {
func validateContainers(containers []core.Container, volumes map[string]core.VolumeSource, podClaimNames sets.String, fldPath *field.Path, opts PodValidationOptions) field.ErrorList {
allErrs := field.ErrorList{}
if len(containers) == 0 {
@ -3219,7 +3267,7 @@ func validateContainers(containers []core.Container, volumes map[string]core.Vol
path := fldPath.Index(i)
// Apply validation common to all containers
allErrs = append(allErrs, validateContainerCommon(&ctr, volumes, path, opts)...)
allErrs = append(allErrs, validateContainerCommon(&ctr, volumes, podClaimNames, path, opts)...)
// Container names must be unique within the list of regular containers.
// Collisions with init or ephemeral container names will be detected by the init or ephemeral
@ -3697,9 +3745,11 @@ func ValidatePodSpec(spec *core.PodSpec, podMeta *metav1.ObjectMeta, fldPath *fi
vols, vErrs := ValidateVolumes(spec.Volumes, podMeta, fldPath.Child("volumes"), opts)
allErrs = append(allErrs, vErrs...)
allErrs = append(allErrs, validateContainers(spec.Containers, vols, fldPath.Child("containers"), opts)...)
allErrs = append(allErrs, validateInitContainers(spec.InitContainers, spec.Containers, vols, fldPath.Child("initContainers"), opts)...)
allErrs = append(allErrs, validateEphemeralContainers(spec.EphemeralContainers, spec.Containers, spec.InitContainers, vols, fldPath.Child("ephemeralContainers"), opts)...)
podClaimNames := gatherPodResourceClaimNames(spec.ResourceClaims)
allErrs = append(allErrs, validatePodResourceClaims(spec.ResourceClaims, fldPath.Child("resourceClaims"))...)
allErrs = append(allErrs, validateContainers(spec.Containers, vols, podClaimNames, fldPath.Child("containers"), opts)...)
allErrs = append(allErrs, validateInitContainers(spec.InitContainers, spec.Containers, vols, podClaimNames, fldPath.Child("initContainers"), opts)...)
allErrs = append(allErrs, validateEphemeralContainers(spec.EphemeralContainers, spec.Containers, spec.InitContainers, vols, podClaimNames, fldPath.Child("ephemeralContainers"), opts)...)
allErrs = append(allErrs, validateRestartPolicy(&spec.RestartPolicy, fldPath.Child("restartPolicy"))...)
allErrs = append(allErrs, validateDNSPolicy(&spec.DNSPolicy, fldPath.Child("dnsPolicy"))...)
allErrs = append(allErrs, unversionedvalidation.ValidateLabels(spec.NodeSelector, fldPath.Child("nodeSelector"))...)
@ -5856,7 +5906,7 @@ func validateBasicResource(quantity resource.Quantity, fldPath *field.Path) fiel
}
// Validates resource requirement spec.
func ValidateResourceRequirements(requirements *core.ResourceRequirements, fldPath *field.Path, opts PodValidationOptions) field.ErrorList {
func ValidateResourceRequirements(requirements *core.ResourceRequirements, podClaimNames sets.String, fldPath *field.Path, opts PodValidationOptions) field.ErrorList {
allErrs := field.ErrorList{}
limPath := fldPath.Child("limits")
reqPath := fldPath.Child("requests")
@ -5919,6 +5969,42 @@ func ValidateResourceRequirements(requirements *core.ResourceRequirements, fldPa
allErrs = append(allErrs, field.Forbidden(fldPath, "HugePages require cpu or memory"))
}
allErrs = append(allErrs, validateResourceClaimNames(requirements.Claims, podClaimNames, fldPath.Child("claims"))...)
return allErrs
}
// validateResourceClaimNames checks that the names in
// ResourceRequirements.Claims have a corresponding entry in
// PodSpec.ResourceClaims.
func validateResourceClaimNames(claims []core.ResourceClaim, podClaimNames sets.String, fldPath *field.Path) field.ErrorList {
var allErrs field.ErrorList
names := sets.String{}
for i, claim := range claims {
name := claim.Name
if name == "" {
allErrs = append(allErrs, field.Required(fldPath.Index(i), ""))
} else {
if names.Has(name) {
allErrs = append(allErrs, field.Duplicate(fldPath.Index(i), name))
} else {
names.Insert(name)
}
if !podClaimNames.Has(name) {
// field.NotFound doesn't accept an
// explanation. Adding one here is more
// user-friendly.
error := field.NotFound(fldPath.Index(i), name)
error.Detail = "must be one of the names in pod.spec.resourceClaims"
if len(podClaimNames) == 0 {
error.Detail += " which is empty"
} else {
error.Detail += ": " + strings.Join(podClaimNames.List(), ", ")
}
allErrs = append(allErrs, error)
}
}
}
return allErrs
}
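A minimal sketch of the cross-check in action, again assuming the same validation package; the claim names are made up:
func exampleClaimNameChecks() {
	// The pod declares a single claim named "gpu".
	podClaims := gatherPodResourceClaimNames([]core.PodResourceClaim{{Name: "gpu"}})
	// A container claim naming "gpu" passes.
	okErrs := validateResourceClaimNames([]core.ResourceClaim{{Name: "gpu"}}, podClaims, field.NewPath("claims"))
	_ = okErrs // empty
	// A container claim naming "net" fails with NotFound; the detail lists "gpu".
	badErrs := validateResourceClaimNames([]core.ResourceClaim{{Name: "net"}}, podClaims, field.NewPath("claims"))
	_ = badErrs // len(badErrs) == 1
}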


@ -5385,7 +5385,7 @@ func TestAlphaLocalStorageCapacityIsolation(t *testing.T) {
resource.BinarySI),
},
}
if errs := ValidateResourceRequirements(&containerLimitCase, field.NewPath("resources"), PodValidationOptions{}); len(errs) != 0 {
if errs := ValidateResourceRequirements(&containerLimitCase, nil, field.NewPath("resources"), PodValidationOptions{}); len(errs) != 0 {
t.Errorf("expected success: %v", errs)
}
}
@ -6855,7 +6855,7 @@ func TestValidateEphemeralContainers(t *testing.T) {
},
},
} {
if errs := validateEphemeralContainers(ephemeralContainers, containers, initContainers, vols, field.NewPath("ephemeralContainers"), PodValidationOptions{}); len(errs) != 0 {
if errs := validateEphemeralContainers(ephemeralContainers, containers, initContainers, vols, nil, field.NewPath("ephemeralContainers"), PodValidationOptions{}); len(errs) != 0 {
t.Errorf("expected success for '%s' but got errors: %v", title, errs)
}
}
@ -7137,7 +7137,7 @@ func TestValidateEphemeralContainers(t *testing.T) {
for _, tc := range tcs {
t.Run(tc.title+"__@L"+tc.line, func(t *testing.T) {
errs := validateEphemeralContainers(tc.ephemeralContainers, containers, initContainers, vols, field.NewPath("ephemeralContainers"), PodValidationOptions{})
errs := validateEphemeralContainers(tc.ephemeralContainers, containers, initContainers, vols, nil, field.NewPath("ephemeralContainers"), PodValidationOptions{})
if len(errs) == 0 {
t.Fatal("expected error but received none")
}
@ -7416,7 +7416,7 @@ func TestValidateContainers(t *testing.T) {
TerminationMessagePolicy: "File",
},
}
if errs := validateContainers(successCase, volumeDevices, field.NewPath("field"), PodValidationOptions{}); len(errs) != 0 {
if errs := validateContainers(successCase, volumeDevices, nil, field.NewPath("field"), PodValidationOptions{}); len(errs) != 0 {
t.Errorf("expected success: %v", errs)
}
@ -8040,7 +8040,7 @@ func TestValidateContainers(t *testing.T) {
}
for _, tc := range errorCases {
t.Run(tc.title+"__@L"+tc.line, func(t *testing.T) {
errs := validateContainers(tc.containers, volumeDevices, field.NewPath("containers"), PodValidationOptions{})
errs := validateContainers(tc.containers, volumeDevices, nil, field.NewPath("containers"), PodValidationOptions{})
if len(errs) == 0 {
t.Fatal("expected error but received none")
}
@ -8090,7 +8090,7 @@ func TestValidateInitContainers(t *testing.T) {
TerminationMessagePolicy: "File",
},
}
if errs := validateInitContainers(successCase, containers, volumeDevices, field.NewPath("field"), PodValidationOptions{}); len(errs) != 0 {
if errs := validateInitContainers(successCase, containers, volumeDevices, nil, field.NewPath("field"), PodValidationOptions{}); len(errs) != 0 {
t.Errorf("expected success: %v", errs)
}
@ -8282,7 +8282,7 @@ func TestValidateInitContainers(t *testing.T) {
}
for _, tc := range errorCases {
t.Run(tc.title+"__@L"+tc.line, func(t *testing.T) {
errs := validateInitContainers(tc.initContainers, containers, volumeDevices, field.NewPath("initContainers"), PodValidationOptions{})
errs := validateInitContainers(tc.initContainers, containers, volumeDevices, nil, field.NewPath("initContainers"), PodValidationOptions{})
if len(errs) == 0 {
t.Fatal("expected error but received none")
}
@ -18625,6 +18625,9 @@ func TestValidateOSFields(t *testing.T) {
"Priority",
"PriorityClassName",
"ReadinessGates",
"ResourceClaims[*].Name",
"ResourceClaims[*].Source.ResourceClaimName",
"ResourceClaims[*].Source.ResourceClaimTemplateName",
"RestartPolicy",
"RuntimeClassName",
"SchedulerName",
@ -20928,7 +20931,7 @@ func TestValidateResourceRequirements(t *testing.T) {
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
if errs := ValidateResourceRequirements(&tc.requirements, path, tc.opts); len(errs) != 0 {
if errs := ValidateResourceRequirements(&tc.requirements, nil, path, tc.opts); len(errs) != 0 {
t.Errorf("unexpected errors: %v", errs)
}
})
@ -20955,7 +20958,7 @@ func TestValidateResourceRequirements(t *testing.T) {
for _, tc := range errTests {
t.Run(tc.name, func(t *testing.T) {
if errs := ValidateResourceRequirements(&tc.requirements, path, tc.opts); len(errs) == 0 {
if errs := ValidateResourceRequirements(&tc.requirements, nil, path, tc.opts); len(errs) == 0 {
t.Error("expected errors")
}
})
@ -21695,3 +21698,220 @@ func TestValidatePVSecretReference(t *testing.T) {
})
}
}
func TestValidateDynamicResourceAllocation(t *testing.T) {
externalClaimName := "some-claim"
externalClaimTemplateName := "some-claim-template"
goodClaimSource := core.ClaimSource{
ResourceClaimName: &externalClaimName,
}
successCases := map[string]core.PodSpec{
"resource claim reference": {
Containers: []core.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent", TerminationMessagePolicy: "File", Resources: core.ResourceRequirements{Claims: []core.ResourceClaim{{Name: "my-claim"}}}}},
RestartPolicy: core.RestartPolicyAlways,
DNSPolicy: core.DNSClusterFirst,
ResourceClaims: []core.PodResourceClaim{
{
Name: "my-claim",
Source: core.ClaimSource{
ResourceClaimName: &externalClaimName,
},
},
},
},
"resource claim template": {
Containers: []core.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent", TerminationMessagePolicy: "File", Resources: core.ResourceRequirements{Claims: []core.ResourceClaim{{Name: "my-claim"}}}}},
RestartPolicy: core.RestartPolicyAlways,
DNSPolicy: core.DNSClusterFirst,
ResourceClaims: []core.PodResourceClaim{
{
Name: "my-claim",
Source: core.ClaimSource{
ResourceClaimTemplateName: &externalClaimTemplateName,
},
},
},
},
"multiple claims": {
Containers: []core.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent", TerminationMessagePolicy: "File", Resources: core.ResourceRequirements{Claims: []core.ResourceClaim{{Name: "my-claim"}, {Name: "another-claim"}}}}},
RestartPolicy: core.RestartPolicyAlways,
DNSPolicy: core.DNSClusterFirst,
ResourceClaims: []core.PodResourceClaim{
{
Name: "my-claim",
Source: goodClaimSource,
},
{
Name: "another-claim",
Source: goodClaimSource,
},
},
},
"init container": {
Containers: []core.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent", TerminationMessagePolicy: "File", Resources: core.ResourceRequirements{Claims: []core.ResourceClaim{{Name: "my-claim"}}}}},
InitContainers: []core.Container{{Name: "ctr-init", Image: "image", ImagePullPolicy: "IfNotPresent", TerminationMessagePolicy: "File", Resources: core.ResourceRequirements{Claims: []core.ResourceClaim{{Name: "my-claim"}}}}},
RestartPolicy: core.RestartPolicyAlways,
DNSPolicy: core.DNSClusterFirst,
ResourceClaims: []core.PodResourceClaim{
{
Name: "my-claim",
Source: goodClaimSource,
},
},
},
}
for k, v := range successCases {
t.Run(k, func(t *testing.T) {
if errs := ValidatePodSpec(&v, nil, field.NewPath("field"), PodValidationOptions{}); len(errs) != 0 {
t.Errorf("expected success: %v", errs)
}
})
}
failureCases := map[string]core.PodSpec{
"pod claim name with prefix": {
Containers: []core.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent", TerminationMessagePolicy: "File"}},
RestartPolicy: core.RestartPolicyAlways,
DNSPolicy: core.DNSClusterFirst,
ResourceClaims: []core.PodResourceClaim{
{
Name: "../my-claim",
Source: goodClaimSource,
},
},
},
"pod claim name with path": {
Containers: []core.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent", TerminationMessagePolicy: "File"}},
RestartPolicy: core.RestartPolicyAlways,
DNSPolicy: core.DNSClusterFirst,
ResourceClaims: []core.PodResourceClaim{
{
Name: "my/claim",
Source: goodClaimSource,
},
},
},
"pod claim name empty": {
Containers: []core.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent", TerminationMessagePolicy: "File"}},
RestartPolicy: core.RestartPolicyAlways,
DNSPolicy: core.DNSClusterFirst,
ResourceClaims: []core.PodResourceClaim{
{
Name: "",
Source: goodClaimSource,
},
},
},
"duplicate pod claim entries": {
Containers: []core.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent", TerminationMessagePolicy: "File"}},
RestartPolicy: core.RestartPolicyAlways,
DNSPolicy: core.DNSClusterFirst,
ResourceClaims: []core.PodResourceClaim{
{
Name: "my-claim",
Source: goodClaimSource,
},
{
Name: "my-claim",
Source: goodClaimSource,
},
},
},
"resource claim source empty": {
Containers: []core.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent", TerminationMessagePolicy: "File", Resources: core.ResourceRequirements{Claims: []core.ResourceClaim{{Name: "my-claim"}}}}},
RestartPolicy: core.RestartPolicyAlways,
DNSPolicy: core.DNSClusterFirst,
ResourceClaims: []core.PodResourceClaim{
{
Name: "my-claim",
Source: core.ClaimSource{},
},
},
},
"resource claim reference and template": {
Containers: []core.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent", TerminationMessagePolicy: "File", Resources: core.ResourceRequirements{Claims: []core.ResourceClaim{{Name: "my-claim"}}}}},
RestartPolicy: core.RestartPolicyAlways,
DNSPolicy: core.DNSClusterFirst,
ResourceClaims: []core.PodResourceClaim{
{
Name: "my-claim",
Source: core.ClaimSource{
ResourceClaimName: &externalClaimName,
ResourceClaimTemplateName: &externalClaimTemplateName,
},
},
},
},
"claim not found": {
Containers: []core.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent", TerminationMessagePolicy: "File", Resources: core.ResourceRequirements{Claims: []core.ResourceClaim{{Name: "no-such-claim"}}}}},
RestartPolicy: core.RestartPolicyAlways,
DNSPolicy: core.DNSClusterFirst,
ResourceClaims: []core.PodResourceClaim{
{
Name: "my-claim",
Source: goodClaimSource,
},
},
},
"claim name empty": {
Containers: []core.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent", TerminationMessagePolicy: "File", Resources: core.ResourceRequirements{Claims: []core.ResourceClaim{{Name: ""}}}}},
RestartPolicy: core.RestartPolicyAlways,
DNSPolicy: core.DNSClusterFirst,
ResourceClaims: []core.PodResourceClaim{
{
Name: "my-claim",
Source: goodClaimSource,
},
},
},
"pod claim name duplicates": {
Containers: []core.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent", TerminationMessagePolicy: "File", Resources: core.ResourceRequirements{Claims: []core.ResourceClaim{{Name: "my-claim"}, {Name: "my-claim"}}}}},
RestartPolicy: core.RestartPolicyAlways,
DNSPolicy: core.DNSClusterFirst,
ResourceClaims: []core.PodResourceClaim{
{
Name: "my-claim",
Source: goodClaimSource,
},
},
},
"no claims defined": {
Containers: []core.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent", TerminationMessagePolicy: "File", Resources: core.ResourceRequirements{Claims: []core.ResourceClaim{{Name: "my-claim"}}}}},
RestartPolicy: core.RestartPolicyAlways,
DNSPolicy: core.DNSClusterFirst,
},
"duplicate pod claim name": {
Containers: []core.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent", TerminationMessagePolicy: "File", Resources: core.ResourceRequirements{Claims: []core.ResourceClaim{{Name: "my-claim"}}}}},
RestartPolicy: core.RestartPolicyAlways,
DNSPolicy: core.DNSClusterFirst,
ResourceClaims: []core.PodResourceClaim{
{
Name: "my-claim",
Source: goodClaimSource,
},
{
Name: "my-claim",
Source: goodClaimSource,
},
},
},
"ephemeral container don't support resource requirements": {
Containers: []core.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent", TerminationMessagePolicy: "File", Resources: core.ResourceRequirements{Claims: []core.ResourceClaim{{Name: "my-claim"}}}}},
EphemeralContainers: []core.EphemeralContainer{{EphemeralContainerCommon: core.EphemeralContainerCommon{Name: "ctr-ephemeral", Image: "image", ImagePullPolicy: "IfNotPresent", TerminationMessagePolicy: "File", Resources: core.ResourceRequirements{Claims: []core.ResourceClaim{{Name: "my-claim"}}}}, TargetContainerName: "ctr"}},
RestartPolicy: core.RestartPolicyAlways,
DNSPolicy: core.DNSClusterFirst,
ResourceClaims: []core.PodResourceClaim{
{
Name: "my-claim",
Source: goodClaimSource,
},
},
},
}
for k, v := range failureCases {
if errs := ValidatePodSpec(&v, nil, field.NewPath("field"), PodValidationOptions{}); len(errs) == 0 {
t.Errorf("expected failure for %q", k)
}
}
}


@ -419,6 +419,32 @@ func (in *CinderVolumeSource) DeepCopy() *CinderVolumeSource {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClaimSource) DeepCopyInto(out *ClaimSource) {
*out = *in
if in.ResourceClaimName != nil {
in, out := &in.ResourceClaimName, &out.ResourceClaimName
*out = new(string)
**out = **in
}
if in.ResourceClaimTemplateName != nil {
in, out := &in.ResourceClaimTemplateName, &out.ResourceClaimTemplateName
*out = new(string)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClaimSource.
func (in *ClaimSource) DeepCopy() *ClaimSource {
if in == nil {
return nil
}
out := new(ClaimSource)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClientIPConfig) DeepCopyInto(out *ClientIPConfig) {
*out = *in
@ -3728,6 +3754,23 @@ func (in *PodReadinessGate) DeepCopy() *PodReadinessGate {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodResourceClaim) DeepCopyInto(out *PodResourceClaim) {
*out = *in
in.Source.DeepCopyInto(&out.Source)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodResourceClaim.
func (in *PodResourceClaim) DeepCopy() *PodResourceClaim {
if in == nil {
return nil
}
out := new(PodResourceClaim)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodSchedulingGate) DeepCopyInto(out *PodSchedulingGate) {
*out = *in
@ -3982,6 +4025,13 @@ func (in *PodSpec) DeepCopyInto(out *PodSpec) {
*out = make([]PodSchedulingGate, len(*in))
copy(*out, *in)
}
if in.ResourceClaims != nil {
in, out := &in.ResourceClaims, &out.ResourceClaims
*out = make([]PodResourceClaim, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
@ -4560,6 +4610,22 @@ func (in *ReplicationControllerStatus) DeepCopy() *ReplicationControllerStatus {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourceClaim) DeepCopyInto(out *ResourceClaim) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaim.
func (in *ResourceClaim) DeepCopy() *ResourceClaim {
if in == nil {
return nil
}
out := new(ResourceClaim)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourceFieldSelector) DeepCopyInto(out *ResourceFieldSelector) {
*out = *in
@ -4740,6 +4806,11 @@ func (in *ResourceRequirements) DeepCopyInto(out *ResourceRequirements) {
(*out)[key] = val.DeepCopy()
}
}
if in.Claims != nil {
in, out := &in.Claims, &out.Claims
*out = make([]ResourceClaim, len(*in))
copy(*out, *in)
}
return
}


@ -54,7 +54,7 @@ func ValidateRuntimeClassUpdate(new, old *node.RuntimeClass) field.ErrorList {
func validateOverhead(overhead *node.Overhead, fldPath *field.Path) field.ErrorList {
// reuse the ResourceRequirements validation logic
return corevalidation.ValidateResourceRequirements(&core.ResourceRequirements{Limits: overhead.PodFixed}, fldPath,
return corevalidation.ValidateResourceRequirements(&core.ResourceRequirements{Limits: overhead.PodFixed}, nil, fldPath,
corevalidation.PodValidationOptions{})
}

pkg/apis/resource/OWNERS (new file)

@ -0,0 +1,6 @@
# See the OWNERS docs at https://go.k8s.io/owners
reviewers:
- bart0sh
- klueska
- pohly

pkg/apis/resource/doc.go (new file)

@ -0,0 +1,21 @@
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// +k8s:deepcopy-gen=package
// Package resource contains the latest (or "internal") version of the
// Kubernetes resource API objects.
package resource // import "k8s.io/kubernetes/pkg/apis/resource"


@ -0,0 +1,40 @@
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package fuzzer
import (
fuzz "github.com/google/gofuzz"
runtimeserializer "k8s.io/apimachinery/pkg/runtime/serializer"
"k8s.io/kubernetes/pkg/apis/resource"
)
// Funcs contains the fuzzer functions for the resource group.
var Funcs = func(codecs runtimeserializer.CodecFactory) []interface{} {
return []interface{}{
func(obj *resource.ResourceClaimSpec, c fuzz.Continue) {
c.FuzzNoCustom(obj) // fuzz self without calling this function again
// Custom fuzzing for allocation mode: pick one valid mode randomly.
modes := []resource.AllocationMode{
resource.AllocationModeImmediate,
resource.AllocationModeWaitForFirstConsumer,
}
obj.AllocationMode = modes[c.Rand.Intn(len(modes))]
},
}
}


@ -0,0 +1,38 @@
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package install installs the resource API, making it available as an
// option to all of the API encoding/decoding machinery.
package install
import (
"k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/kubernetes/pkg/api/legacyscheme"
"k8s.io/kubernetes/pkg/apis/resource"
"k8s.io/kubernetes/pkg/apis/resource/v1alpha1"
)
func init() {
Install(legacyscheme.Scheme)
}
// Install registers the API group and adds types to a scheme
func Install(scheme *runtime.Scheme) {
utilruntime.Must(resource.AddToScheme(scheme))
utilruntime.Must(v1alpha1.AddToScheme(scheme))
utilruntime.Must(scheme.SetVersionPriority(v1alpha1.SchemeGroupVersion))
}
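A hedged usage sketch: callers that do not want the global legacyscheme can install the group into a private scheme. The package and function names below are illustrative:
package example
import (
	"k8s.io/apimachinery/pkg/runtime"
	resourceinstall "k8s.io/kubernetes/pkg/apis/resource/install"
)
// newScheme returns a fresh scheme with the resource group registered.
func newScheme() *runtime.Scheme {
	s := runtime.NewScheme()
	resourceinstall.Install(s)
	return s
}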


@ -0,0 +1,75 @@
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package install
import (
"encoding/json"
"reflect"
"testing"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/kubernetes/pkg/api/legacyscheme"
internal "k8s.io/kubernetes/pkg/apis/resource"
)
func TestResourceVersioner(t *testing.T) {
claim := internal.ResourceClaim{ObjectMeta: metav1.ObjectMeta{ResourceVersion: "10"}}
version, err := meta.NewAccessor().ResourceVersion(&claim)
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
if version != "10" {
t.Errorf("unexpected version %v", version)
}
claimList := internal.ResourceClaimList{ListMeta: metav1.ListMeta{ResourceVersion: "10"}}
version, err = meta.NewAccessor().ResourceVersion(&claimList)
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
if version != "10" {
t.Errorf("unexpected version %v", version)
}
}
func TestCodec(t *testing.T) {
claim := internal.ResourceClaim{}
data, err := runtime.Encode(legacyscheme.Codecs.LegacyCodec(schema.GroupVersion{Group: "resource.k8s.io", Version: "v1alpha1"}), &claim)
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
other := internal.ResourceClaim{}
if err := json.Unmarshal(data, &other); err != nil {
t.Fatalf("unexpected error: %v", err)
}
if other.APIVersion != "resource.k8s.io/v1alpha1" || other.Kind != "ResourceClaim" {
t.Errorf("unexpected unmarshalled object %#v", other)
}
}
func TestUnversioned(t *testing.T) {
for _, obj := range []runtime.Object{
&metav1.Status{},
} {
if unversioned, ok := legacyscheme.Scheme.IsUnversioned(obj); !unversioned || !ok {
t.Errorf("%v is expected to be unversioned", reflect.TypeOf(obj))
}
}
}


@ -0,0 +1,66 @@
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package resource
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName is the group name used in this package
const GroupName = "resource.k8s.io"
// SchemeGroupVersion is group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}
// Kind takes an unqualified kind and returns a Group qualified GroupKind
func Kind(kind string) schema.GroupKind {
return SchemeGroupVersion.WithKind(kind).GroupKind()
}
// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
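For reference, a hedged sketch of what the two helpers produce; the caller package is illustrative:
package example
import "k8s.io/kubernetes/pkg/apis/resource"
var (
	claimKind     = resource.Kind("ResourceClaim")      // GroupKind{Group: "resource.k8s.io", Kind: "ResourceClaim"}
	claimResource = resource.Resource("resourceclaims") // GroupResource{Group: "resource.k8s.io", Resource: "resourceclaims"}
)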
var (
// SchemeBuilder object to register various known types
SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
// AddToScheme represents a func that can be used to apply all the registered
// funcs in a scheme
AddToScheme = SchemeBuilder.AddToScheme
)
func addKnownTypes(scheme *runtime.Scheme) error {
if err := scheme.AddIgnoredConversionType(&metav1.TypeMeta{}, &metav1.TypeMeta{}); err != nil {
return err
}
scheme.AddKnownTypes(SchemeGroupVersion,
&ResourceClass{},
&ResourceClassList{},
&ResourceClaim{},
&ResourceClaimList{},
&ResourceClaimTemplate{},
&ResourceClaimTemplateList{},
&PodScheduling{},
&PodSchedulingList{},
)
return nil
}

pkg/apis/resource/types.go (new file)

@ -0,0 +1,404 @@
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package resource
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/apis/core"
)
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ResourceClaim describes which resources are needed by a resource consumer.
// Its status tracks whether the resource has been allocated and what the
// resulting attributes are.
//
// This is an alpha type and requires enabling the DynamicResourceAllocation
// feature gate.
type ResourceClaim struct {
metav1.TypeMeta
// Standard object metadata
// +optional
metav1.ObjectMeta
// Spec describes the desired attributes of a resource that then needs
// to be allocated. It can only be set once when creating the
// ResourceClaim.
Spec ResourceClaimSpec
// Status describes whether the resource is available and with which
// attributes.
// +optional
Status ResourceClaimStatus
}
// ResourceClaimSpec defines how a resource is to be allocated.
type ResourceClaimSpec struct {
// ResourceClassName references the driver and additional parameters
// via the name of a ResourceClass that was created as part of the
// driver deployment.
ResourceClassName string
// ParametersRef references a separate object with arbitrary parameters
// that will be used by the driver when allocating a resource for the
// claim.
//
// The object must be in the same namespace as the ResourceClaim.
// +optional
ParametersRef *ResourceClaimParametersReference
// Allocation can start immediately or when a Pod wants to use the
// resource. "WaitForFirstConsumer" is the default.
// +optional
AllocationMode AllocationMode
}
// AllocationMode describes whether a ResourceClaim gets allocated immediately
// when it gets created (AllocationModeImmediate) or whether allocation is
// delayed until it is needed for a Pod
// (AllocationModeWaitForFirstConsumer). Other modes might get added in the
// future.
type AllocationMode string
const (
// When a ResourceClaim has AllocationModeWaitForFirstConsumer, allocation is
// delayed until a Pod gets scheduled that needs the ResourceClaim. The
// scheduler will consider all resource requirements of that Pod and
// trigger allocation for a node that fits the Pod.
AllocationModeWaitForFirstConsumer AllocationMode = "WaitForFirstConsumer"
// When a ResourceClaim has AllocationModeImmediate, allocation starts
// as soon as the ResourceClaim gets created. This is done without
// considering the needs of Pods that will use the ResourceClaim
// because those Pods are not known yet.
AllocationModeImmediate AllocationMode = "Immediate"
)
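A hedged sketch of a spec built from these types; the class name is made up:
package example
import "k8s.io/kubernetes/pkg/apis/resource"
// A claim that is only allocated once a Pod needs it.
var exampleSpec = resource.ResourceClaimSpec{
	ResourceClassName: "gpu.example.com", // hypothetical ResourceClass
	AllocationMode:    resource.AllocationModeWaitForFirstConsumer,
}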
// ResourceClaimStatus tracks whether the resource has been allocated and what
// the resulting attributes are.
type ResourceClaimStatus struct {
// DriverName is a copy of the driver name from the ResourceClass at
// the time when allocation started.
// +optional
DriverName string
// Allocation is set by the resource driver once a resource has been
// allocated successfully. If this is not specified, the resource is
// not yet allocated.
// +optional
Allocation *AllocationResult
// ReservedFor indicates which entities are currently allowed to use
// the claim. A Pod which references a ResourceClaim which is not
// reserved for that Pod will not be started.
//
// There can be at most 32 such reservations. This may get increased in
// the future, but not reduced.
// +optional
ReservedFor []ResourceClaimConsumerReference
// DeallocationRequested indicates that a ResourceClaim is to be
// deallocated.
//
// The driver then must deallocate this claim and reset the field
// together with clearing the Allocation field.
//
// While DeallocationRequested is set, no new consumers may be added to
// ReservedFor.
// +optional
DeallocationRequested bool
}
// ResourceClaimReservedForMaxSize is the maximum number of entries in
// claim.status.reservedFor.
const ResourceClaimReservedForMaxSize = 32
// AllocationResult contains attributes of an allocated resource.
type AllocationResult struct {
// ResourceHandle contains arbitrary data returned by the driver after a
// successful allocation. This is opaque for
// Kubernetes. Driver documentation may explain to users how to
// interpret this data if needed.
//
// The maximum size of this field is 16KiB. This may get
// increased in the future, but not reduced.
// +optional
ResourceHandle string
// This field will get set by the resource driver after it has
// allocated the resource to inform the scheduler where it can
// schedule Pods using the ResourceClaim.
//
// Setting this field is optional. If null, the resource is available
// everywhere.
// +optional
AvailableOnNodes *core.NodeSelector
// Shareable determines whether the resource supports more
// than one consumer at a time.
// +optional
Shareable bool
}
// ResourceHandleMaxSize is the maximum size of allocation.resourceHandle.
const ResourceHandleMaxSize = 16 * 1024
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ResourceClaimList is a collection of claims.
type ResourceClaimList struct {
metav1.TypeMeta
// Standard list metadata
// +optional
metav1.ListMeta
// Items is the list of resource claims.
Items []ResourceClaim
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// PodScheduling objects hold information that is needed to schedule
// a Pod with ResourceClaims that use "WaitForFirstConsumer" allocation
// mode.
//
// This is an alpha type and requires enabling the DynamicResourceAllocation
// feature gate.
type PodScheduling struct {
metav1.TypeMeta
// Standard object metadata
// +optional
metav1.ObjectMeta
// Spec describes where resources for the Pod are needed.
Spec PodSchedulingSpec
// Status describes where resources for the Pod can be allocated.
Status PodSchedulingStatus
}
// PodSchedulingSpec describes where resources for the Pod are needed.
type PodSchedulingSpec struct {
// SelectedNode is the node for which allocation of ResourceClaims that
// are referenced by the Pod and that use "WaitForFirstConsumer"
// allocation is to be attempted.
SelectedNode string
// PotentialNodes lists nodes where the Pod might be able to run.
//
// The size of this field is limited to 128. This is large enough for
// many clusters. Larger clusters may need more attempts to find a node
// that suits all pending resources. This may get increased in the
// future, but not reduced.
// +optional
PotentialNodes []string
}
// PodSchedulingStatus describes where resources for the Pod can be allocated.
type PodSchedulingStatus struct {
// ResourceClaims describes resource availability for each
// pod.spec.resourceClaim entry where the corresponding ResourceClaim
// uses "WaitForFirstConsumer" allocation mode.
// +optional
ResourceClaims []ResourceClaimSchedulingStatus
// If there ever is a need to support other kinds of resources
// than ResourceClaim, then new fields could get added here
// for those other resources.
}
// ResourceClaimSchedulingStatus contains information about one particular
// ResourceClaim with "WaitForFirstConsumer" allocation mode.
type ResourceClaimSchedulingStatus struct {
// Name matches the pod.spec.resourceClaims[*].Name field.
Name string
// UnsuitableNodes lists nodes that the ResourceClaim cannot be
// allocated for.
//
// The size of this field is limited to 128, the same as for
// PodSchedulingSpec.PotentialNodes. This may get increased in the
// future, but not reduced.
// +optional
UnsuitableNodes []string
}
// PodSchedulingNodeListMaxSize defines the maximum number of entries in the
// node lists that are stored in PodScheduling objects. This limit is part
// of the API.
const PodSchedulingNodeListMaxSize = 128
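To tie these types together, a hedged sketch of a PodScheduling object as the scheduler and a driver might fill it in; all names are made up:
package example
import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/pkg/apis/resource"
)
var examplePodScheduling = resource.PodScheduling{
	ObjectMeta: metav1.ObjectMeta{Name: "my-pod", Namespace: "default"},
	Spec: resource.PodSchedulingSpec{
		SelectedNode:   "node-1",
		PotentialNodes: []string{"node-1", "node-2"}, // capped at 128 entries
	},
	Status: resource.PodSchedulingStatus{
		ResourceClaims: []resource.ResourceClaimSchedulingStatus{
			{Name: "gpu", UnsuitableNodes: []string{"node-2"}},
		},
	},
}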
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// PodSchedulingList is a collection of Pod scheduling objects.
type PodSchedulingList struct {
metav1.TypeMeta
// Standard list metadata
// +optional
metav1.ListMeta
// Items is the list of PodScheduling objects.
Items []PodScheduling
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ResourceClass is used by administrators to influence how resources
// are allocated.
//
// This is an alpha type and requires enabling the DynamicResourceAllocation
// feature gate.
type ResourceClass struct {
metav1.TypeMeta
// Standard object metadata
// +optional
metav1.ObjectMeta
// DriverName defines the name of the dynamic resource driver that is
// used for allocation of a ResourceClaim that uses this class.
//
// Resource drivers have a unique name in forward domain order
// (acme.example.com).
DriverName string
// ParametersRef references an arbitrary separate object that may hold
// parameters that will be used by the driver when allocating a
// resource that uses this class. A dynamic resource driver can
// distinguish between parameters stored here and those stored in
// ResourceClaimSpec.
// +optional
ParametersRef *ResourceClassParametersReference
// Only nodes matching the selector will be considered by the scheduler
// when trying to find a Node that fits a Pod when that Pod uses
// a ResourceClaim that has not been allocated yet.
//
// Setting this field is optional. If null, all nodes are candidates.
// +optional
SuitableNodes *core.NodeSelector
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ResourceClassList is a collection of classes.
type ResourceClassList struct {
metav1.TypeMeta
// Standard list metadata
// +optional
metav1.ListMeta
// Items is the list of resource classes.
Items []ResourceClass
}
// ResourceClassParametersReference contains enough information to let you
// locate the parameters for a ResourceClass.
type ResourceClassParametersReference struct {
// APIGroup is the group for the resource being referenced. It is
// empty for the core API. This matches the group in the APIVersion
// that is used when creating the resources.
// +optional
APIGroup string
// Kind is the type of resource being referenced. This is the same
// value as in the parameter object's metadata.
Kind string
// Name is the name of resource being referenced.
Name string
// Namespace that contains the referenced resource. Must be empty
// for cluster-scoped resources and non-empty for namespaced
// resources.
// +optional
Namespace string
}
// ResourceClaimParametersReference contains enough information to let you
// locate the parameters for a ResourceClaim. The object must be in the same
// namespace as the ResourceClaim.
type ResourceClaimParametersReference struct {
// APIGroup is the group for the resource being referenced. It is
// empty for the core API. This matches the group in the APIVersion
// that is used when creating the resources.
// +optional
APIGroup string
// Kind is the type of resource being referenced. This is the same
// value as in the parameter object's metadata, for example "ConfigMap".
Kind string
// Name is the name of resource being referenced.
Name string
}
// ResourceClaimConsumerReference contains enough information to let you
// locate the consumer of a ResourceClaim. The user must be a resource in the same
// namespace as the ResourceClaim.
type ResourceClaimConsumerReference struct {
// APIGroup is the group for the resource being referenced. It is
// empty for the core API. This matches the group in the APIVersion
// that is used when creating the resources.
// +optional
APIGroup string
// Resource is the type of resource being referenced, for example "pods".
Resource string
// Name is the name of resource being referenced.
Name string
// UID identifies exactly one incarnation of the resource.
UID types.UID
}
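A hedged sketch of an entry as it might appear in claim.status.reservedFor; the UID is made up:
package example
import (
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/kubernetes/pkg/apis/resource"
)
var exampleConsumer = resource.ResourceClaimConsumerReference{
	Resource: "pods",
	Name:     "my-pod",
	UID:      types.UID("11111111-2222-3333-4444-555555555555"), // hypothetical pod UID
}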
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ResourceClaimTemplate is used to produce ResourceClaim objects.
type ResourceClaimTemplate struct {
metav1.TypeMeta
// Standard object metadata
// +optional
metav1.ObjectMeta
// Describes the ResourceClaim that is to be generated.
//
// This field is immutable. A ResourceClaim will get created by the
// control plane for a Pod when needed and then not get updated
// anymore.
Spec ResourceClaimTemplateSpec
}
// ResourceClaimTemplateSpec contains the metadata and fields for a ResourceClaim.
type ResourceClaimTemplateSpec struct {
// ObjectMeta may contain labels and annotations that will be copied into the ResourceClaim
// when creating it. No other fields are allowed and will be rejected during
// validation.
// +optional
metav1.ObjectMeta
// Spec for the ResourceClaim. The entire content is copied unchanged
// into the ResourceClaim that gets created from this template. The
// same fields as in a ResourceClaim are also valid here.
Spec ResourceClaimSpec
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ResourceClaimTemplateList is a collection of claim templates.
type ResourceClaimTemplateList struct {
metav1.TypeMeta
// Standard list metadata
// +optional
metav1.ListMeta
// Items is the list of resource claim templates.
Items []ResourceClaimTemplate
}
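A hedged sketch of a template whose embedded metadata carries only labels, matching the allow-list described in the field comment above; all names are made up:
package example
import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/pkg/apis/resource"
)
var exampleTemplate = resource.ResourceClaimTemplate{
	ObjectMeta: metav1.ObjectMeta{Name: "gpu-template", Namespace: "default"},
	Spec: resource.ResourceClaimTemplateSpec{
		ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"app": "demo"}},
		Spec: resource.ResourceClaimSpec{
			ResourceClassName: "gpu.example.com", // hypothetical class
		},
	},
}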


@ -0,0 +1,25 @@
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
"k8s.io/apimachinery/pkg/runtime"
)
func addConversionFuncs(scheme *runtime.Scheme) error {
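	// Currently there are no manually written conversion functions for this
	// group; the generated conversions register themselves in
	// zz_generated.conversion.go.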
return nil
}


@@ -0,0 +1,32 @@
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
"k8s.io/api/resource/v1alpha1"
"k8s.io/apimachinery/pkg/runtime"
)
func addDefaultingFuncs(scheme *runtime.Scheme) error {
return RegisterDefaults(scheme)
}
func SetDefaults_ResourceClaimSpec(obj *v1alpha1.ResourceClaimSpec) {
if obj.AllocationMode == "" {
obj.AllocationMode = v1alpha1.AllocationModeWaitForFirstConsumer
}
}
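A quick sketch of the defaulting behavior (illustrative):

	claim := &v1alpha1.ResourceClaim{}
	SetDefaults_ResourceClaimSpec(&claim.Spec)
	// claim.Spec.AllocationMode is now AllocationModeWaitForFirstConsumer.
	// A spec that already sets AllocationModeImmediate is left unchanged.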


@@ -0,0 +1,75 @@
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1_test
import (
"reflect"
"testing"
v1alpha1 "k8s.io/api/resource/v1alpha1"
"k8s.io/apimachinery/pkg/runtime"
// ensure types are installed
"k8s.io/kubernetes/pkg/api/legacyscheme"
_ "k8s.io/kubernetes/pkg/apis/resource/install"
)
func TestSetDefaultAllocationMode(t *testing.T) {
claim := &v1alpha1.ResourceClaim{}
// field should be defaulted
defaultMode := v1alpha1.AllocationModeWaitForFirstConsumer
output := roundTrip(t, runtime.Object(claim)).(*v1alpha1.ResourceClaim)
outMode := output.Spec.AllocationMode
if outMode != defaultMode {
t.Errorf("Expected AllocationMode to be defaulted to: %+v, got: %+v", defaultMode, outMode)
}
// field should not change
nonDefaultMode := v1alpha1.AllocationModeImmediate
claim = &v1alpha1.ResourceClaim{
Spec: v1alpha1.ResourceClaimSpec{
AllocationMode: nonDefaultMode,
},
}
output = roundTrip(t, runtime.Object(claim)).(*v1alpha1.ResourceClaim)
outMode = output.Spec.AllocationMode
if outMode != nonDefaultMode {
t.Errorf("Expected AllocationMode to remain %+v, got: %+v", nonDefaultMode, outMode)
}
}
func roundTrip(t *testing.T, obj runtime.Object) runtime.Object {
codec := legacyscheme.Codecs.LegacyCodec(v1alpha1.SchemeGroupVersion)
data, err := runtime.Encode(codec, obj)
if err != nil {
t.Errorf("%v\n %#v", err, obj)
return nil
}
obj2, err := runtime.Decode(codec, data)
if err != nil {
t.Errorf("%v\nData: %s\nSource: %#v", err, string(data), obj)
return nil
}
obj3 := reflect.New(reflect.TypeOf(obj).Elem()).Interface().(runtime.Object)
err = legacyscheme.Scheme.Convert(obj2, obj3, nil)
if err != nil {
t.Errorf("%v\nSource: %#v", err, obj2)
return nil
}
return obj3
}


@@ -0,0 +1,23 @@
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/resource
// +k8s:conversion-gen-external-types=k8s.io/api/resource/v1alpha1
// +k8s:defaulter-gen=TypeMeta
// +k8s:defaulter-gen-input=k8s.io/api/resource/v1alpha1
// Package v1alpha1 is the v1alpha1 version of the resource API.
package v1alpha1 // import "k8s.io/kubernetes/pkg/apis/resource/v1alpha1"


@@ -0,0 +1,46 @@
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
"k8s.io/api/resource/v1alpha1"
"k8s.io/apimachinery/pkg/runtime/schema"
)
var (
localSchemeBuilder = &v1alpha1.SchemeBuilder
AddToScheme = localSchemeBuilder.AddToScheme
)
func init() {
// We only register manually written functions here. The registration of the
// generated functions takes place in the generated files. The separation
// makes the code compile even when the generated files are missing.
localSchemeBuilder.Register(addDefaultingFuncs, addConversionFuncs)
}
// TODO: remove these global variables
// GroupName is the group name used in this package
const GroupName = "resource.k8s.io"
// SchemeGroupVersion is the group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"}
// Resource takes an unqualified resource and returns a group-qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
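For example (illustrative), qualifying the "resourceclaims" resource:

	gr := Resource("resourceclaims")
	// gr == schema.GroupResource{Group: "resource.k8s.io", Resource: "resourceclaims"}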


@@ -0,0 +1,668 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by conversion-gen. DO NOT EDIT.
package v1alpha1
import (
unsafe "unsafe"
v1 "k8s.io/api/core/v1"
v1alpha1 "k8s.io/api/resource/v1alpha1"
conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime"
types "k8s.io/apimachinery/pkg/types"
core "k8s.io/kubernetes/pkg/apis/core"
resource "k8s.io/kubernetes/pkg/apis/resource"
)
func init() {
localSchemeBuilder.Register(RegisterConversions)
}
// RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterConversions(s *runtime.Scheme) error {
if err := s.AddGeneratedConversionFunc((*v1alpha1.AllocationResult)(nil), (*resource.AllocationResult)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_AllocationResult_To_resource_AllocationResult(a.(*v1alpha1.AllocationResult), b.(*resource.AllocationResult), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.AllocationResult)(nil), (*v1alpha1.AllocationResult)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_AllocationResult_To_v1alpha1_AllocationResult(a.(*resource.AllocationResult), b.(*v1alpha1.AllocationResult), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1alpha1.PodScheduling)(nil), (*resource.PodScheduling)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_PodScheduling_To_resource_PodScheduling(a.(*v1alpha1.PodScheduling), b.(*resource.PodScheduling), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.PodScheduling)(nil), (*v1alpha1.PodScheduling)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_PodScheduling_To_v1alpha1_PodScheduling(a.(*resource.PodScheduling), b.(*v1alpha1.PodScheduling), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1alpha1.PodSchedulingList)(nil), (*resource.PodSchedulingList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_PodSchedulingList_To_resource_PodSchedulingList(a.(*v1alpha1.PodSchedulingList), b.(*resource.PodSchedulingList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.PodSchedulingList)(nil), (*v1alpha1.PodSchedulingList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_PodSchedulingList_To_v1alpha1_PodSchedulingList(a.(*resource.PodSchedulingList), b.(*v1alpha1.PodSchedulingList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1alpha1.PodSchedulingSpec)(nil), (*resource.PodSchedulingSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_PodSchedulingSpec_To_resource_PodSchedulingSpec(a.(*v1alpha1.PodSchedulingSpec), b.(*resource.PodSchedulingSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.PodSchedulingSpec)(nil), (*v1alpha1.PodSchedulingSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_PodSchedulingSpec_To_v1alpha1_PodSchedulingSpec(a.(*resource.PodSchedulingSpec), b.(*v1alpha1.PodSchedulingSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1alpha1.PodSchedulingStatus)(nil), (*resource.PodSchedulingStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_PodSchedulingStatus_To_resource_PodSchedulingStatus(a.(*v1alpha1.PodSchedulingStatus), b.(*resource.PodSchedulingStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.PodSchedulingStatus)(nil), (*v1alpha1.PodSchedulingStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_PodSchedulingStatus_To_v1alpha1_PodSchedulingStatus(a.(*resource.PodSchedulingStatus), b.(*v1alpha1.PodSchedulingStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1alpha1.ResourceClaim)(nil), (*resource.ResourceClaim)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_ResourceClaim_To_resource_ResourceClaim(a.(*v1alpha1.ResourceClaim), b.(*resource.ResourceClaim), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.ResourceClaim)(nil), (*v1alpha1.ResourceClaim)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_ResourceClaim_To_v1alpha1_ResourceClaim(a.(*resource.ResourceClaim), b.(*v1alpha1.ResourceClaim), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1alpha1.ResourceClaimConsumerReference)(nil), (*resource.ResourceClaimConsumerReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_ResourceClaimConsumerReference_To_resource_ResourceClaimConsumerReference(a.(*v1alpha1.ResourceClaimConsumerReference), b.(*resource.ResourceClaimConsumerReference), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.ResourceClaimConsumerReference)(nil), (*v1alpha1.ResourceClaimConsumerReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_ResourceClaimConsumerReference_To_v1alpha1_ResourceClaimConsumerReference(a.(*resource.ResourceClaimConsumerReference), b.(*v1alpha1.ResourceClaimConsumerReference), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1alpha1.ResourceClaimList)(nil), (*resource.ResourceClaimList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_ResourceClaimList_To_resource_ResourceClaimList(a.(*v1alpha1.ResourceClaimList), b.(*resource.ResourceClaimList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.ResourceClaimList)(nil), (*v1alpha1.ResourceClaimList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_ResourceClaimList_To_v1alpha1_ResourceClaimList(a.(*resource.ResourceClaimList), b.(*v1alpha1.ResourceClaimList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1alpha1.ResourceClaimParametersReference)(nil), (*resource.ResourceClaimParametersReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_ResourceClaimParametersReference_To_resource_ResourceClaimParametersReference(a.(*v1alpha1.ResourceClaimParametersReference), b.(*resource.ResourceClaimParametersReference), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.ResourceClaimParametersReference)(nil), (*v1alpha1.ResourceClaimParametersReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_ResourceClaimParametersReference_To_v1alpha1_ResourceClaimParametersReference(a.(*resource.ResourceClaimParametersReference), b.(*v1alpha1.ResourceClaimParametersReference), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1alpha1.ResourceClaimSchedulingStatus)(nil), (*resource.ResourceClaimSchedulingStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_ResourceClaimSchedulingStatus_To_resource_ResourceClaimSchedulingStatus(a.(*v1alpha1.ResourceClaimSchedulingStatus), b.(*resource.ResourceClaimSchedulingStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.ResourceClaimSchedulingStatus)(nil), (*v1alpha1.ResourceClaimSchedulingStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_ResourceClaimSchedulingStatus_To_v1alpha1_ResourceClaimSchedulingStatus(a.(*resource.ResourceClaimSchedulingStatus), b.(*v1alpha1.ResourceClaimSchedulingStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1alpha1.ResourceClaimSpec)(nil), (*resource.ResourceClaimSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_ResourceClaimSpec_To_resource_ResourceClaimSpec(a.(*v1alpha1.ResourceClaimSpec), b.(*resource.ResourceClaimSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.ResourceClaimSpec)(nil), (*v1alpha1.ResourceClaimSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_ResourceClaimSpec_To_v1alpha1_ResourceClaimSpec(a.(*resource.ResourceClaimSpec), b.(*v1alpha1.ResourceClaimSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1alpha1.ResourceClaimStatus)(nil), (*resource.ResourceClaimStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_ResourceClaimStatus_To_resource_ResourceClaimStatus(a.(*v1alpha1.ResourceClaimStatus), b.(*resource.ResourceClaimStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.ResourceClaimStatus)(nil), (*v1alpha1.ResourceClaimStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_ResourceClaimStatus_To_v1alpha1_ResourceClaimStatus(a.(*resource.ResourceClaimStatus), b.(*v1alpha1.ResourceClaimStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1alpha1.ResourceClaimTemplate)(nil), (*resource.ResourceClaimTemplate)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_ResourceClaimTemplate_To_resource_ResourceClaimTemplate(a.(*v1alpha1.ResourceClaimTemplate), b.(*resource.ResourceClaimTemplate), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.ResourceClaimTemplate)(nil), (*v1alpha1.ResourceClaimTemplate)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_ResourceClaimTemplate_To_v1alpha1_ResourceClaimTemplate(a.(*resource.ResourceClaimTemplate), b.(*v1alpha1.ResourceClaimTemplate), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1alpha1.ResourceClaimTemplateList)(nil), (*resource.ResourceClaimTemplateList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_ResourceClaimTemplateList_To_resource_ResourceClaimTemplateList(a.(*v1alpha1.ResourceClaimTemplateList), b.(*resource.ResourceClaimTemplateList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.ResourceClaimTemplateList)(nil), (*v1alpha1.ResourceClaimTemplateList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_ResourceClaimTemplateList_To_v1alpha1_ResourceClaimTemplateList(a.(*resource.ResourceClaimTemplateList), b.(*v1alpha1.ResourceClaimTemplateList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1alpha1.ResourceClaimTemplateSpec)(nil), (*resource.ResourceClaimTemplateSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_ResourceClaimTemplateSpec_To_resource_ResourceClaimTemplateSpec(a.(*v1alpha1.ResourceClaimTemplateSpec), b.(*resource.ResourceClaimTemplateSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.ResourceClaimTemplateSpec)(nil), (*v1alpha1.ResourceClaimTemplateSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_ResourceClaimTemplateSpec_To_v1alpha1_ResourceClaimTemplateSpec(a.(*resource.ResourceClaimTemplateSpec), b.(*v1alpha1.ResourceClaimTemplateSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1alpha1.ResourceClass)(nil), (*resource.ResourceClass)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_ResourceClass_To_resource_ResourceClass(a.(*v1alpha1.ResourceClass), b.(*resource.ResourceClass), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.ResourceClass)(nil), (*v1alpha1.ResourceClass)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_ResourceClass_To_v1alpha1_ResourceClass(a.(*resource.ResourceClass), b.(*v1alpha1.ResourceClass), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1alpha1.ResourceClassList)(nil), (*resource.ResourceClassList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_ResourceClassList_To_resource_ResourceClassList(a.(*v1alpha1.ResourceClassList), b.(*resource.ResourceClassList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.ResourceClassList)(nil), (*v1alpha1.ResourceClassList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_ResourceClassList_To_v1alpha1_ResourceClassList(a.(*resource.ResourceClassList), b.(*v1alpha1.ResourceClassList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1alpha1.ResourceClassParametersReference)(nil), (*resource.ResourceClassParametersReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_ResourceClassParametersReference_To_resource_ResourceClassParametersReference(a.(*v1alpha1.ResourceClassParametersReference), b.(*resource.ResourceClassParametersReference), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.ResourceClassParametersReference)(nil), (*v1alpha1.ResourceClassParametersReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_ResourceClassParametersReference_To_v1alpha1_ResourceClassParametersReference(a.(*resource.ResourceClassParametersReference), b.(*v1alpha1.ResourceClassParametersReference), scope)
}); err != nil {
return err
}
return nil
}
func autoConvert_v1alpha1_AllocationResult_To_resource_AllocationResult(in *v1alpha1.AllocationResult, out *resource.AllocationResult, s conversion.Scope) error {
out.ResourceHandle = in.ResourceHandle
out.AvailableOnNodes = (*core.NodeSelector)(unsafe.Pointer(in.AvailableOnNodes))
out.Shareable = in.Shareable
return nil
}
// Convert_v1alpha1_AllocationResult_To_resource_AllocationResult is an autogenerated conversion function.
func Convert_v1alpha1_AllocationResult_To_resource_AllocationResult(in *v1alpha1.AllocationResult, out *resource.AllocationResult, s conversion.Scope) error {
return autoConvert_v1alpha1_AllocationResult_To_resource_AllocationResult(in, out, s)
}
func autoConvert_resource_AllocationResult_To_v1alpha1_AllocationResult(in *resource.AllocationResult, out *v1alpha1.AllocationResult, s conversion.Scope) error {
out.ResourceHandle = in.ResourceHandle
out.AvailableOnNodes = (*v1.NodeSelector)(unsafe.Pointer(in.AvailableOnNodes))
out.Shareable = in.Shareable
return nil
}
// Convert_resource_AllocationResult_To_v1alpha1_AllocationResult is an autogenerated conversion function.
func Convert_resource_AllocationResult_To_v1alpha1_AllocationResult(in *resource.AllocationResult, out *v1alpha1.AllocationResult, s conversion.Scope) error {
return autoConvert_resource_AllocationResult_To_v1alpha1_AllocationResult(in, out, s)
}
func autoConvert_v1alpha1_PodScheduling_To_resource_PodScheduling(in *v1alpha1.PodScheduling, out *resource.PodScheduling, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1alpha1_PodSchedulingSpec_To_resource_PodSchedulingSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_v1alpha1_PodSchedulingStatus_To_resource_PodSchedulingStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_v1alpha1_PodScheduling_To_resource_PodScheduling is an autogenerated conversion function.
func Convert_v1alpha1_PodScheduling_To_resource_PodScheduling(in *v1alpha1.PodScheduling, out *resource.PodScheduling, s conversion.Scope) error {
return autoConvert_v1alpha1_PodScheduling_To_resource_PodScheduling(in, out, s)
}
func autoConvert_resource_PodScheduling_To_v1alpha1_PodScheduling(in *resource.PodScheduling, out *v1alpha1.PodScheduling, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_resource_PodSchedulingSpec_To_v1alpha1_PodSchedulingSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_resource_PodSchedulingStatus_To_v1alpha1_PodSchedulingStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_resource_PodScheduling_To_v1alpha1_PodScheduling is an autogenerated conversion function.
func Convert_resource_PodScheduling_To_v1alpha1_PodScheduling(in *resource.PodScheduling, out *v1alpha1.PodScheduling, s conversion.Scope) error {
return autoConvert_resource_PodScheduling_To_v1alpha1_PodScheduling(in, out, s)
}
func autoConvert_v1alpha1_PodSchedulingList_To_resource_PodSchedulingList(in *v1alpha1.PodSchedulingList, out *resource.PodSchedulingList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]resource.PodScheduling)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_v1alpha1_PodSchedulingList_To_resource_PodSchedulingList is an autogenerated conversion function.
func Convert_v1alpha1_PodSchedulingList_To_resource_PodSchedulingList(in *v1alpha1.PodSchedulingList, out *resource.PodSchedulingList, s conversion.Scope) error {
return autoConvert_v1alpha1_PodSchedulingList_To_resource_PodSchedulingList(in, out, s)
}
func autoConvert_resource_PodSchedulingList_To_v1alpha1_PodSchedulingList(in *resource.PodSchedulingList, out *v1alpha1.PodSchedulingList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]v1alpha1.PodScheduling)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_resource_PodSchedulingList_To_v1alpha1_PodSchedulingList is an autogenerated conversion function.
func Convert_resource_PodSchedulingList_To_v1alpha1_PodSchedulingList(in *resource.PodSchedulingList, out *v1alpha1.PodSchedulingList, s conversion.Scope) error {
return autoConvert_resource_PodSchedulingList_To_v1alpha1_PodSchedulingList(in, out, s)
}
func autoConvert_v1alpha1_PodSchedulingSpec_To_resource_PodSchedulingSpec(in *v1alpha1.PodSchedulingSpec, out *resource.PodSchedulingSpec, s conversion.Scope) error {
out.SelectedNode = in.SelectedNode
out.PotentialNodes = *(*[]string)(unsafe.Pointer(&in.PotentialNodes))
return nil
}
// Convert_v1alpha1_PodSchedulingSpec_To_resource_PodSchedulingSpec is an autogenerated conversion function.
func Convert_v1alpha1_PodSchedulingSpec_To_resource_PodSchedulingSpec(in *v1alpha1.PodSchedulingSpec, out *resource.PodSchedulingSpec, s conversion.Scope) error {
return autoConvert_v1alpha1_PodSchedulingSpec_To_resource_PodSchedulingSpec(in, out, s)
}
func autoConvert_resource_PodSchedulingSpec_To_v1alpha1_PodSchedulingSpec(in *resource.PodSchedulingSpec, out *v1alpha1.PodSchedulingSpec, s conversion.Scope) error {
out.SelectedNode = in.SelectedNode
out.PotentialNodes = *(*[]string)(unsafe.Pointer(&in.PotentialNodes))
return nil
}
// Convert_resource_PodSchedulingSpec_To_v1alpha1_PodSchedulingSpec is an autogenerated conversion function.
func Convert_resource_PodSchedulingSpec_To_v1alpha1_PodSchedulingSpec(in *resource.PodSchedulingSpec, out *v1alpha1.PodSchedulingSpec, s conversion.Scope) error {
return autoConvert_resource_PodSchedulingSpec_To_v1alpha1_PodSchedulingSpec(in, out, s)
}
func autoConvert_v1alpha1_PodSchedulingStatus_To_resource_PodSchedulingStatus(in *v1alpha1.PodSchedulingStatus, out *resource.PodSchedulingStatus, s conversion.Scope) error {
out.ResourceClaims = *(*[]resource.ResourceClaimSchedulingStatus)(unsafe.Pointer(&in.ResourceClaims))
return nil
}
// Convert_v1alpha1_PodSchedulingStatus_To_resource_PodSchedulingStatus is an autogenerated conversion function.
func Convert_v1alpha1_PodSchedulingStatus_To_resource_PodSchedulingStatus(in *v1alpha1.PodSchedulingStatus, out *resource.PodSchedulingStatus, s conversion.Scope) error {
return autoConvert_v1alpha1_PodSchedulingStatus_To_resource_PodSchedulingStatus(in, out, s)
}
func autoConvert_resource_PodSchedulingStatus_To_v1alpha1_PodSchedulingStatus(in *resource.PodSchedulingStatus, out *v1alpha1.PodSchedulingStatus, s conversion.Scope) error {
out.ResourceClaims = *(*[]v1alpha1.ResourceClaimSchedulingStatus)(unsafe.Pointer(&in.ResourceClaims))
return nil
}
// Convert_resource_PodSchedulingStatus_To_v1alpha1_PodSchedulingStatus is an autogenerated conversion function.
func Convert_resource_PodSchedulingStatus_To_v1alpha1_PodSchedulingStatus(in *resource.PodSchedulingStatus, out *v1alpha1.PodSchedulingStatus, s conversion.Scope) error {
return autoConvert_resource_PodSchedulingStatus_To_v1alpha1_PodSchedulingStatus(in, out, s)
}
func autoConvert_v1alpha1_ResourceClaim_To_resource_ResourceClaim(in *v1alpha1.ResourceClaim, out *resource.ResourceClaim, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1alpha1_ResourceClaimSpec_To_resource_ResourceClaimSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_v1alpha1_ResourceClaimStatus_To_resource_ResourceClaimStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_v1alpha1_ResourceClaim_To_resource_ResourceClaim is an autogenerated conversion function.
func Convert_v1alpha1_ResourceClaim_To_resource_ResourceClaim(in *v1alpha1.ResourceClaim, out *resource.ResourceClaim, s conversion.Scope) error {
return autoConvert_v1alpha1_ResourceClaim_To_resource_ResourceClaim(in, out, s)
}
func autoConvert_resource_ResourceClaim_To_v1alpha1_ResourceClaim(in *resource.ResourceClaim, out *v1alpha1.ResourceClaim, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_resource_ResourceClaimSpec_To_v1alpha1_ResourceClaimSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_resource_ResourceClaimStatus_To_v1alpha1_ResourceClaimStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_resource_ResourceClaim_To_v1alpha1_ResourceClaim is an autogenerated conversion function.
func Convert_resource_ResourceClaim_To_v1alpha1_ResourceClaim(in *resource.ResourceClaim, out *v1alpha1.ResourceClaim, s conversion.Scope) error {
return autoConvert_resource_ResourceClaim_To_v1alpha1_ResourceClaim(in, out, s)
}
func autoConvert_v1alpha1_ResourceClaimConsumerReference_To_resource_ResourceClaimConsumerReference(in *v1alpha1.ResourceClaimConsumerReference, out *resource.ResourceClaimConsumerReference, s conversion.Scope) error {
out.APIGroup = in.APIGroup
out.Resource = in.Resource
out.Name = in.Name
out.UID = types.UID(in.UID)
return nil
}
// Convert_v1alpha1_ResourceClaimConsumerReference_To_resource_ResourceClaimConsumerReference is an autogenerated conversion function.
func Convert_v1alpha1_ResourceClaimConsumerReference_To_resource_ResourceClaimConsumerReference(in *v1alpha1.ResourceClaimConsumerReference, out *resource.ResourceClaimConsumerReference, s conversion.Scope) error {
return autoConvert_v1alpha1_ResourceClaimConsumerReference_To_resource_ResourceClaimConsumerReference(in, out, s)
}
func autoConvert_resource_ResourceClaimConsumerReference_To_v1alpha1_ResourceClaimConsumerReference(in *resource.ResourceClaimConsumerReference, out *v1alpha1.ResourceClaimConsumerReference, s conversion.Scope) error {
out.APIGroup = in.APIGroup
out.Resource = in.Resource
out.Name = in.Name
out.UID = types.UID(in.UID)
return nil
}
// Convert_resource_ResourceClaimConsumerReference_To_v1alpha1_ResourceClaimConsumerReference is an autogenerated conversion function.
func Convert_resource_ResourceClaimConsumerReference_To_v1alpha1_ResourceClaimConsumerReference(in *resource.ResourceClaimConsumerReference, out *v1alpha1.ResourceClaimConsumerReference, s conversion.Scope) error {
return autoConvert_resource_ResourceClaimConsumerReference_To_v1alpha1_ResourceClaimConsumerReference(in, out, s)
}
func autoConvert_v1alpha1_ResourceClaimList_To_resource_ResourceClaimList(in *v1alpha1.ResourceClaimList, out *resource.ResourceClaimList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]resource.ResourceClaim)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_v1alpha1_ResourceClaimList_To_resource_ResourceClaimList is an autogenerated conversion function.
func Convert_v1alpha1_ResourceClaimList_To_resource_ResourceClaimList(in *v1alpha1.ResourceClaimList, out *resource.ResourceClaimList, s conversion.Scope) error {
return autoConvert_v1alpha1_ResourceClaimList_To_resource_ResourceClaimList(in, out, s)
}
func autoConvert_resource_ResourceClaimList_To_v1alpha1_ResourceClaimList(in *resource.ResourceClaimList, out *v1alpha1.ResourceClaimList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]v1alpha1.ResourceClaim)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_resource_ResourceClaimList_To_v1alpha1_ResourceClaimList is an autogenerated conversion function.
func Convert_resource_ResourceClaimList_To_v1alpha1_ResourceClaimList(in *resource.ResourceClaimList, out *v1alpha1.ResourceClaimList, s conversion.Scope) error {
return autoConvert_resource_ResourceClaimList_To_v1alpha1_ResourceClaimList(in, out, s)
}
func autoConvert_v1alpha1_ResourceClaimParametersReference_To_resource_ResourceClaimParametersReference(in *v1alpha1.ResourceClaimParametersReference, out *resource.ResourceClaimParametersReference, s conversion.Scope) error {
out.APIGroup = in.APIGroup
out.Kind = in.Kind
out.Name = in.Name
return nil
}
// Convert_v1alpha1_ResourceClaimParametersReference_To_resource_ResourceClaimParametersReference is an autogenerated conversion function.
func Convert_v1alpha1_ResourceClaimParametersReference_To_resource_ResourceClaimParametersReference(in *v1alpha1.ResourceClaimParametersReference, out *resource.ResourceClaimParametersReference, s conversion.Scope) error {
return autoConvert_v1alpha1_ResourceClaimParametersReference_To_resource_ResourceClaimParametersReference(in, out, s)
}
func autoConvert_resource_ResourceClaimParametersReference_To_v1alpha1_ResourceClaimParametersReference(in *resource.ResourceClaimParametersReference, out *v1alpha1.ResourceClaimParametersReference, s conversion.Scope) error {
out.APIGroup = in.APIGroup
out.Kind = in.Kind
out.Name = in.Name
return nil
}
// Convert_resource_ResourceClaimParametersReference_To_v1alpha1_ResourceClaimParametersReference is an autogenerated conversion function.
func Convert_resource_ResourceClaimParametersReference_To_v1alpha1_ResourceClaimParametersReference(in *resource.ResourceClaimParametersReference, out *v1alpha1.ResourceClaimParametersReference, s conversion.Scope) error {
return autoConvert_resource_ResourceClaimParametersReference_To_v1alpha1_ResourceClaimParametersReference(in, out, s)
}
func autoConvert_v1alpha1_ResourceClaimSchedulingStatus_To_resource_ResourceClaimSchedulingStatus(in *v1alpha1.ResourceClaimSchedulingStatus, out *resource.ResourceClaimSchedulingStatus, s conversion.Scope) error {
out.Name = in.Name
out.UnsuitableNodes = *(*[]string)(unsafe.Pointer(&in.UnsuitableNodes))
return nil
}
// Convert_v1alpha1_ResourceClaimSchedulingStatus_To_resource_ResourceClaimSchedulingStatus is an autogenerated conversion function.
func Convert_v1alpha1_ResourceClaimSchedulingStatus_To_resource_ResourceClaimSchedulingStatus(in *v1alpha1.ResourceClaimSchedulingStatus, out *resource.ResourceClaimSchedulingStatus, s conversion.Scope) error {
return autoConvert_v1alpha1_ResourceClaimSchedulingStatus_To_resource_ResourceClaimSchedulingStatus(in, out, s)
}
func autoConvert_resource_ResourceClaimSchedulingStatus_To_v1alpha1_ResourceClaimSchedulingStatus(in *resource.ResourceClaimSchedulingStatus, out *v1alpha1.ResourceClaimSchedulingStatus, s conversion.Scope) error {
out.Name = in.Name
out.UnsuitableNodes = *(*[]string)(unsafe.Pointer(&in.UnsuitableNodes))
return nil
}
// Convert_resource_ResourceClaimSchedulingStatus_To_v1alpha1_ResourceClaimSchedulingStatus is an autogenerated conversion function.
func Convert_resource_ResourceClaimSchedulingStatus_To_v1alpha1_ResourceClaimSchedulingStatus(in *resource.ResourceClaimSchedulingStatus, out *v1alpha1.ResourceClaimSchedulingStatus, s conversion.Scope) error {
return autoConvert_resource_ResourceClaimSchedulingStatus_To_v1alpha1_ResourceClaimSchedulingStatus(in, out, s)
}
func autoConvert_v1alpha1_ResourceClaimSpec_To_resource_ResourceClaimSpec(in *v1alpha1.ResourceClaimSpec, out *resource.ResourceClaimSpec, s conversion.Scope) error {
out.ResourceClassName = in.ResourceClassName
out.ParametersRef = (*resource.ResourceClaimParametersReference)(unsafe.Pointer(in.ParametersRef))
out.AllocationMode = resource.AllocationMode(in.AllocationMode)
return nil
}
// Convert_v1alpha1_ResourceClaimSpec_To_resource_ResourceClaimSpec is an autogenerated conversion function.
func Convert_v1alpha1_ResourceClaimSpec_To_resource_ResourceClaimSpec(in *v1alpha1.ResourceClaimSpec, out *resource.ResourceClaimSpec, s conversion.Scope) error {
return autoConvert_v1alpha1_ResourceClaimSpec_To_resource_ResourceClaimSpec(in, out, s)
}
func autoConvert_resource_ResourceClaimSpec_To_v1alpha1_ResourceClaimSpec(in *resource.ResourceClaimSpec, out *v1alpha1.ResourceClaimSpec, s conversion.Scope) error {
out.ResourceClassName = in.ResourceClassName
out.ParametersRef = (*v1alpha1.ResourceClaimParametersReference)(unsafe.Pointer(in.ParametersRef))
out.AllocationMode = v1alpha1.AllocationMode(in.AllocationMode)
return nil
}
// Convert_resource_ResourceClaimSpec_To_v1alpha1_ResourceClaimSpec is an autogenerated conversion function.
func Convert_resource_ResourceClaimSpec_To_v1alpha1_ResourceClaimSpec(in *resource.ResourceClaimSpec, out *v1alpha1.ResourceClaimSpec, s conversion.Scope) error {
return autoConvert_resource_ResourceClaimSpec_To_v1alpha1_ResourceClaimSpec(in, out, s)
}
func autoConvert_v1alpha1_ResourceClaimStatus_To_resource_ResourceClaimStatus(in *v1alpha1.ResourceClaimStatus, out *resource.ResourceClaimStatus, s conversion.Scope) error {
out.DriverName = in.DriverName
out.Allocation = (*resource.AllocationResult)(unsafe.Pointer(in.Allocation))
out.ReservedFor = *(*[]resource.ResourceClaimConsumerReference)(unsafe.Pointer(&in.ReservedFor))
out.DeallocationRequested = in.DeallocationRequested
return nil
}
// Convert_v1alpha1_ResourceClaimStatus_To_resource_ResourceClaimStatus is an autogenerated conversion function.
func Convert_v1alpha1_ResourceClaimStatus_To_resource_ResourceClaimStatus(in *v1alpha1.ResourceClaimStatus, out *resource.ResourceClaimStatus, s conversion.Scope) error {
return autoConvert_v1alpha1_ResourceClaimStatus_To_resource_ResourceClaimStatus(in, out, s)
}
func autoConvert_resource_ResourceClaimStatus_To_v1alpha1_ResourceClaimStatus(in *resource.ResourceClaimStatus, out *v1alpha1.ResourceClaimStatus, s conversion.Scope) error {
out.DriverName = in.DriverName
out.Allocation = (*v1alpha1.AllocationResult)(unsafe.Pointer(in.Allocation))
out.ReservedFor = *(*[]v1alpha1.ResourceClaimConsumerReference)(unsafe.Pointer(&in.ReservedFor))
out.DeallocationRequested = in.DeallocationRequested
return nil
}
// Convert_resource_ResourceClaimStatus_To_v1alpha1_ResourceClaimStatus is an autogenerated conversion function.
func Convert_resource_ResourceClaimStatus_To_v1alpha1_ResourceClaimStatus(in *resource.ResourceClaimStatus, out *v1alpha1.ResourceClaimStatus, s conversion.Scope) error {
return autoConvert_resource_ResourceClaimStatus_To_v1alpha1_ResourceClaimStatus(in, out, s)
}
func autoConvert_v1alpha1_ResourceClaimTemplate_To_resource_ResourceClaimTemplate(in *v1alpha1.ResourceClaimTemplate, out *resource.ResourceClaimTemplate, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1alpha1_ResourceClaimTemplateSpec_To_resource_ResourceClaimTemplateSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
return nil
}
// Convert_v1alpha1_ResourceClaimTemplate_To_resource_ResourceClaimTemplate is an autogenerated conversion function.
func Convert_v1alpha1_ResourceClaimTemplate_To_resource_ResourceClaimTemplate(in *v1alpha1.ResourceClaimTemplate, out *resource.ResourceClaimTemplate, s conversion.Scope) error {
return autoConvert_v1alpha1_ResourceClaimTemplate_To_resource_ResourceClaimTemplate(in, out, s)
}
func autoConvert_resource_ResourceClaimTemplate_To_v1alpha1_ResourceClaimTemplate(in *resource.ResourceClaimTemplate, out *v1alpha1.ResourceClaimTemplate, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_resource_ResourceClaimTemplateSpec_To_v1alpha1_ResourceClaimTemplateSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
return nil
}
// Convert_resource_ResourceClaimTemplate_To_v1alpha1_ResourceClaimTemplate is an autogenerated conversion function.
func Convert_resource_ResourceClaimTemplate_To_v1alpha1_ResourceClaimTemplate(in *resource.ResourceClaimTemplate, out *v1alpha1.ResourceClaimTemplate, s conversion.Scope) error {
return autoConvert_resource_ResourceClaimTemplate_To_v1alpha1_ResourceClaimTemplate(in, out, s)
}
func autoConvert_v1alpha1_ResourceClaimTemplateList_To_resource_ResourceClaimTemplateList(in *v1alpha1.ResourceClaimTemplateList, out *resource.ResourceClaimTemplateList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]resource.ResourceClaimTemplate)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_v1alpha1_ResourceClaimTemplateList_To_resource_ResourceClaimTemplateList is an autogenerated conversion function.
func Convert_v1alpha1_ResourceClaimTemplateList_To_resource_ResourceClaimTemplateList(in *v1alpha1.ResourceClaimTemplateList, out *resource.ResourceClaimTemplateList, s conversion.Scope) error {
return autoConvert_v1alpha1_ResourceClaimTemplateList_To_resource_ResourceClaimTemplateList(in, out, s)
}
func autoConvert_resource_ResourceClaimTemplateList_To_v1alpha1_ResourceClaimTemplateList(in *resource.ResourceClaimTemplateList, out *v1alpha1.ResourceClaimTemplateList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]v1alpha1.ResourceClaimTemplate)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_resource_ResourceClaimTemplateList_To_v1alpha1_ResourceClaimTemplateList is an autogenerated conversion function.
func Convert_resource_ResourceClaimTemplateList_To_v1alpha1_ResourceClaimTemplateList(in *resource.ResourceClaimTemplateList, out *v1alpha1.ResourceClaimTemplateList, s conversion.Scope) error {
return autoConvert_resource_ResourceClaimTemplateList_To_v1alpha1_ResourceClaimTemplateList(in, out, s)
}
func autoConvert_v1alpha1_ResourceClaimTemplateSpec_To_resource_ResourceClaimTemplateSpec(in *v1alpha1.ResourceClaimTemplateSpec, out *resource.ResourceClaimTemplateSpec, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1alpha1_ResourceClaimSpec_To_resource_ResourceClaimSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
return nil
}
// Convert_v1alpha1_ResourceClaimTemplateSpec_To_resource_ResourceClaimTemplateSpec is an autogenerated conversion function.
func Convert_v1alpha1_ResourceClaimTemplateSpec_To_resource_ResourceClaimTemplateSpec(in *v1alpha1.ResourceClaimTemplateSpec, out *resource.ResourceClaimTemplateSpec, s conversion.Scope) error {
return autoConvert_v1alpha1_ResourceClaimTemplateSpec_To_resource_ResourceClaimTemplateSpec(in, out, s)
}
func autoConvert_resource_ResourceClaimTemplateSpec_To_v1alpha1_ResourceClaimTemplateSpec(in *resource.ResourceClaimTemplateSpec, out *v1alpha1.ResourceClaimTemplateSpec, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_resource_ResourceClaimSpec_To_v1alpha1_ResourceClaimSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
return nil
}
// Convert_resource_ResourceClaimTemplateSpec_To_v1alpha1_ResourceClaimTemplateSpec is an autogenerated conversion function.
func Convert_resource_ResourceClaimTemplateSpec_To_v1alpha1_ResourceClaimTemplateSpec(in *resource.ResourceClaimTemplateSpec, out *v1alpha1.ResourceClaimTemplateSpec, s conversion.Scope) error {
return autoConvert_resource_ResourceClaimTemplateSpec_To_v1alpha1_ResourceClaimTemplateSpec(in, out, s)
}
func autoConvert_v1alpha1_ResourceClass_To_resource_ResourceClass(in *v1alpha1.ResourceClass, out *resource.ResourceClass, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
out.DriverName = in.DriverName
out.ParametersRef = (*resource.ResourceClassParametersReference)(unsafe.Pointer(in.ParametersRef))
out.SuitableNodes = (*core.NodeSelector)(unsafe.Pointer(in.SuitableNodes))
return nil
}
// Convert_v1alpha1_ResourceClass_To_resource_ResourceClass is an autogenerated conversion function.
func Convert_v1alpha1_ResourceClass_To_resource_ResourceClass(in *v1alpha1.ResourceClass, out *resource.ResourceClass, s conversion.Scope) error {
return autoConvert_v1alpha1_ResourceClass_To_resource_ResourceClass(in, out, s)
}
func autoConvert_resource_ResourceClass_To_v1alpha1_ResourceClass(in *resource.ResourceClass, out *v1alpha1.ResourceClass, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
out.DriverName = in.DriverName
out.ParametersRef = (*v1alpha1.ResourceClassParametersReference)(unsafe.Pointer(in.ParametersRef))
out.SuitableNodes = (*v1.NodeSelector)(unsafe.Pointer(in.SuitableNodes))
return nil
}
// Convert_resource_ResourceClass_To_v1alpha1_ResourceClass is an autogenerated conversion function.
func Convert_resource_ResourceClass_To_v1alpha1_ResourceClass(in *resource.ResourceClass, out *v1alpha1.ResourceClass, s conversion.Scope) error {
return autoConvert_resource_ResourceClass_To_v1alpha1_ResourceClass(in, out, s)
}
func autoConvert_v1alpha1_ResourceClassList_To_resource_ResourceClassList(in *v1alpha1.ResourceClassList, out *resource.ResourceClassList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]resource.ResourceClass)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_v1alpha1_ResourceClassList_To_resource_ResourceClassList is an autogenerated conversion function.
func Convert_v1alpha1_ResourceClassList_To_resource_ResourceClassList(in *v1alpha1.ResourceClassList, out *resource.ResourceClassList, s conversion.Scope) error {
return autoConvert_v1alpha1_ResourceClassList_To_resource_ResourceClassList(in, out, s)
}
func autoConvert_resource_ResourceClassList_To_v1alpha1_ResourceClassList(in *resource.ResourceClassList, out *v1alpha1.ResourceClassList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]v1alpha1.ResourceClass)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_resource_ResourceClassList_To_v1alpha1_ResourceClassList is an autogenerated conversion function.
func Convert_resource_ResourceClassList_To_v1alpha1_ResourceClassList(in *resource.ResourceClassList, out *v1alpha1.ResourceClassList, s conversion.Scope) error {
return autoConvert_resource_ResourceClassList_To_v1alpha1_ResourceClassList(in, out, s)
}
func autoConvert_v1alpha1_ResourceClassParametersReference_To_resource_ResourceClassParametersReference(in *v1alpha1.ResourceClassParametersReference, out *resource.ResourceClassParametersReference, s conversion.Scope) error {
out.APIGroup = in.APIGroup
out.Kind = in.Kind
out.Name = in.Name
out.Namespace = in.Namespace
return nil
}
// Convert_v1alpha1_ResourceClassParametersReference_To_resource_ResourceClassParametersReference is an autogenerated conversion function.
func Convert_v1alpha1_ResourceClassParametersReference_To_resource_ResourceClassParametersReference(in *v1alpha1.ResourceClassParametersReference, out *resource.ResourceClassParametersReference, s conversion.Scope) error {
return autoConvert_v1alpha1_ResourceClassParametersReference_To_resource_ResourceClassParametersReference(in, out, s)
}
func autoConvert_resource_ResourceClassParametersReference_To_v1alpha1_ResourceClassParametersReference(in *resource.ResourceClassParametersReference, out *v1alpha1.ResourceClassParametersReference, s conversion.Scope) error {
out.APIGroup = in.APIGroup
out.Kind = in.Kind
out.Name = in.Name
out.Namespace = in.Namespace
return nil
}
// Convert_resource_ResourceClassParametersReference_To_v1alpha1_ResourceClassParametersReference is an autogenerated conversion function.
func Convert_resource_ResourceClassParametersReference_To_v1alpha1_ResourceClassParametersReference(in *resource.ResourceClassParametersReference, out *v1alpha1.ResourceClassParametersReference, s conversion.Scope) error {
return autoConvert_resource_ResourceClassParametersReference_To_v1alpha1_ResourceClassParametersReference(in, out, s)
}
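A minimal sketch of invoking one of these conversions directly (illustrative; the scope argument is unused by this particular generated function, so nil suffices here):

	in := &v1alpha1.ResourceClaimSpec{ResourceClassName: "example-gpu-class"}
	out := &resource.ResourceClaimSpec{}
	if err := Convert_v1alpha1_ResourceClaimSpec_To_resource_ResourceClaimSpec(in, out, nil); err != nil {
		// handle the conversion error
	}
	// out.ResourceClassName == "example-gpu-class"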


@@ -0,0 +1,62 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by defaulter-gen. DO NOT EDIT.
package v1alpha1
import (
v1alpha1 "k8s.io/api/resource/v1alpha1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
// RegisterDefaults adds defaulting functions to the given scheme.
// Public to allow building arbitrary schemes.
// All generated defaulters are covering - they call all nested defaulters.
func RegisterDefaults(scheme *runtime.Scheme) error {
scheme.AddTypeDefaultingFunc(&v1alpha1.ResourceClaim{}, func(obj interface{}) { SetObjectDefaults_ResourceClaim(obj.(*v1alpha1.ResourceClaim)) })
scheme.AddTypeDefaultingFunc(&v1alpha1.ResourceClaimList{}, func(obj interface{}) { SetObjectDefaults_ResourceClaimList(obj.(*v1alpha1.ResourceClaimList)) })
scheme.AddTypeDefaultingFunc(&v1alpha1.ResourceClaimTemplate{}, func(obj interface{}) { SetObjectDefaults_ResourceClaimTemplate(obj.(*v1alpha1.ResourceClaimTemplate)) })
scheme.AddTypeDefaultingFunc(&v1alpha1.ResourceClaimTemplateList{}, func(obj interface{}) {
SetObjectDefaults_ResourceClaimTemplateList(obj.(*v1alpha1.ResourceClaimTemplateList))
})
return nil
}
func SetObjectDefaults_ResourceClaim(in *v1alpha1.ResourceClaim) {
SetDefaults_ResourceClaimSpec(&in.Spec)
}
func SetObjectDefaults_ResourceClaimList(in *v1alpha1.ResourceClaimList) {
for i := range in.Items {
a := &in.Items[i]
SetObjectDefaults_ResourceClaim(a)
}
}
func SetObjectDefaults_ResourceClaimTemplate(in *v1alpha1.ResourceClaimTemplate) {
SetDefaults_ResourceClaimSpec(&in.Spec.Spec)
}
func SetObjectDefaults_ResourceClaimTemplateList(in *v1alpha1.ResourceClaimTemplateList) {
for i := range in.Items {
a := &in.Items[i]
SetObjectDefaults_ResourceClaimTemplate(a)
}
}
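These defaulters are normally invoked through a scheme. A minimal sketch, assuming a freshly built scheme rather than the API server's:

	scheme := runtime.NewScheme()
	_ = RegisterDefaults(scheme)
	claim := &v1alpha1.ResourceClaim{}
	scheme.Default(claim) // calls SetObjectDefaults_ResourceClaim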


@@ -0,0 +1,310 @@
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package validation
import (
apimachineryvalidation "k8s.io/apimachinery/pkg/api/validation"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/validation/field"
corevalidation "k8s.io/kubernetes/pkg/apis/core/validation"
"k8s.io/kubernetes/pkg/apis/resource"
)
// validateResourceClaimName can be used to check whether the given
// name for a ResourceClaim is valid.
var validateResourceClaimName = apimachineryvalidation.NameIsDNSSubdomain
// validateResourceClaimTemplateName can be used to check whether the given
// name for a ResourceClaimTemplate is valid.
var validateResourceClaimTemplateName = apimachineryvalidation.NameIsDNSSubdomain
// validateResourceDriverName reuses the validation of a CSI driver name
// because the allowed values are exactly the same.
var validateResourceDriverName = corevalidation.ValidateCSIDriverName
// ValidateClaim validates a ResourceClaim.
func ValidateClaim(resourceClaim *resource.ResourceClaim) field.ErrorList {
allErrs := corevalidation.ValidateObjectMeta(&resourceClaim.ObjectMeta, true, validateResourceClaimName, field.NewPath("metadata"))
allErrs = append(allErrs, validateResourceClaimSpec(&resourceClaim.Spec, field.NewPath("spec"))...)
return allErrs
}
func validateResourceClaimSpec(spec *resource.ResourceClaimSpec, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
for _, msg := range corevalidation.ValidateClassName(spec.ResourceClassName, false) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("resourceClassName"), spec.ResourceClassName, msg))
}
allErrs = append(allErrs, validateResourceClaimParameters(spec.ParametersRef, fldPath.Child("parametersRef"))...)
if !supportedAllocationModes.Has(string(spec.AllocationMode)) {
allErrs = append(allErrs, field.NotSupported(fldPath.Child("allocationMode"), spec.AllocationMode, supportedAllocationModes.List()))
}
return allErrs
}
var supportedAllocationModes = sets.NewString(string(resource.AllocationModeImmediate), string(resource.AllocationModeWaitForFirstConsumer))
// It would have been nice to use Go generics to reuse the same validation
// function for Kind and Name in both types, but generics cannot be used to
// access common fields in structs.
func validateResourceClaimParameters(ref *resource.ResourceClaimParametersReference, fldPath *field.Path) field.ErrorList {
var allErrs field.ErrorList
if ref != nil {
if ref.Kind == "" {
allErrs = append(allErrs, field.Required(fldPath.Child("kind"), ""))
}
if ref.Name == "" {
allErrs = append(allErrs, field.Required(fldPath.Child("name"), ""))
}
}
return allErrs
}
func validateClassParameters(ref *resource.ResourceClassParametersReference, fldPath *field.Path) field.ErrorList {
var allErrs field.ErrorList
if ref != nil {
if ref.Kind == "" {
allErrs = append(allErrs, field.Required(fldPath.Child("kind"), ""))
}
if ref.Name == "" {
allErrs = append(allErrs, field.Required(fldPath.Child("name"), ""))
}
if ref.Namespace != "" {
for _, msg := range apimachineryvalidation.ValidateNamespaceName(ref.Namespace, false) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("namespace"), ref.Namespace, msg))
}
}
}
return allErrs
}
// ValidateClass validates a ResourceClass.
func ValidateClass(resourceClass *resource.ResourceClass) field.ErrorList {
allErrs := corevalidation.ValidateObjectMeta(&resourceClass.ObjectMeta, false, corevalidation.ValidateClassName, field.NewPath("metadata"))
allErrs = append(allErrs, validateResourceDriverName(resourceClass.DriverName, field.NewPath("driverName"))...)
allErrs = append(allErrs, validateClassParameters(resourceClass.ParametersRef, field.NewPath("parametersRef"))...)
if resourceClass.SuitableNodes != nil {
allErrs = append(allErrs, corevalidation.ValidateNodeSelector(resourceClass.SuitableNodes, field.NewPath("suitableNodes"))...)
}
return allErrs
}
// ValidateClassUpdate tests if an update to ResourceClass is valid.
func ValidateClassUpdate(resourceClass, oldClass *resource.ResourceClass) field.ErrorList {
allErrs := corevalidation.ValidateObjectMetaUpdate(&resourceClass.ObjectMeta, &oldClass.ObjectMeta, field.NewPath("metadata"))
allErrs = append(allErrs, ValidateClass(resourceClass)...)
return allErrs
}
// ValidateClaimUpdate tests if an update to ResourceClaim is valid.
func ValidateClaimUpdate(resourceClaim, oldClaim *resource.ResourceClaim) field.ErrorList {
allErrs := corevalidation.ValidateObjectMetaUpdate(&resourceClaim.ObjectMeta, &oldClaim.ObjectMeta, field.NewPath("metadata"))
allErrs = append(allErrs, apimachineryvalidation.ValidateImmutableField(resourceClaim.Spec, oldClaim.Spec, field.NewPath("spec"))...)
allErrs = append(allErrs, ValidateClaim(resourceClaim)...)
return allErrs
}
// ValidateClaimStatusUpdate tests if an update to the status of a ResourceClaim is valid.
func ValidateClaimStatusUpdate(resourceClaim, oldClaim *resource.ResourceClaim) field.ErrorList {
allErrs := corevalidation.ValidateObjectMetaUpdate(&resourceClaim.ObjectMeta, &oldClaim.ObjectMeta, field.NewPath("metadata"))
fldPath := field.NewPath("status")
// The driver name might not be set yet.
if resourceClaim.Status.DriverName != "" {
allErrs = append(allErrs, validateResourceDriverName(resourceClaim.Status.DriverName, fldPath.Child("driverName"))...)
} else if resourceClaim.Status.Allocation != nil {
allErrs = append(allErrs, field.Required(fldPath.Child("driverName"), "must be specified when `allocation` is set"))
}
allErrs = append(allErrs, validateAllocationResult(resourceClaim.Status.Allocation, fldPath.Child("allocation"))...)
allErrs = append(allErrs, validateSliceIsASet(resourceClaim.Status.ReservedFor, resource.ResourceClaimReservedForMaxSize,
validateResourceClaimUserReference, fldPath.Child("reservedFor"))...)
// Now check for invariants that must be valid for a ResourceClaim.
if len(resourceClaim.Status.ReservedFor) > 0 {
if resourceClaim.Status.Allocation == nil {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("reservedFor"), "may not be specified when `allocated` is not set"))
} else {
if !resourceClaim.Status.Allocation.Shareable && len(resourceClaim.Status.ReservedFor) > 1 {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("reservedFor"), "may not be reserved more than once"))
}
// Items may be removed from ReservedFor while the claim is meant to be
// deallocated, but none may be added.
if resourceClaim.DeletionTimestamp != nil || resourceClaim.Status.DeallocationRequested {
oldSet := sets.New(oldClaim.Status.ReservedFor...)
newSet := sets.New(resourceClaim.Status.ReservedFor...)
newItems := newSet.Difference(oldSet)
if len(newItems) > 0 {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("reservedFor"), "new entries may not be added while `deallocationRequested` or `deletionTimestamp` are set"))
}
}
}
}
if !oldClaim.Status.DeallocationRequested &&
resourceClaim.Status.DeallocationRequested &&
len(resourceClaim.Status.ReservedFor) > 0 {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("deallocationRequested"), "deallocation cannot be requested while `reservedFor` is set"))
}
if resourceClaim.Status.Allocation == nil &&
resourceClaim.Status.DeallocationRequested {
// One of the two fields was modified incorrectly.
// For the sake of simplicity this only reports the invalid
// end result.
allErrs = append(allErrs, field.Forbidden(fldPath, "`allocation` must be set when `deallocationRequested` is set"))
}
// Once deallocation has been requested, that request cannot be removed
// anymore because the deallocation may already have started. The field
// can only get reset by the driver together with removing the
// allocation.
if oldClaim.Status.DeallocationRequested &&
!resourceClaim.Status.DeallocationRequested &&
resourceClaim.Status.Allocation != nil {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("deallocationRequested"), "may not be cleared when `allocation` is set"))
}
return allErrs
}
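// validateAllocationResult checks an optional allocation result: the opaque
// resource handle must not exceed ResourceHandleMaxSize, and the node
// selector, if set, must be a valid core.NodeSelector.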
func validateAllocationResult(allocation *resource.AllocationResult, fldPath *field.Path) field.ErrorList {
var allErrs field.ErrorList
if allocation != nil {
if len(allocation.ResourceHandle) > resource.ResourceHandleMaxSize {
allErrs = append(allErrs, field.TooLongMaxLength(fldPath.Child("resourceHandle"), len(allocation.ResourceHandle), resource.ResourceHandleMaxSize))
}
if allocation.AvailableOnNodes != nil {
allErrs = append(allErrs, corevalidation.ValidateNodeSelector(allocation.AvailableOnNodes, fldPath.Child("availableOnNodes"))...)
}
}
return allErrs
}
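// validateResourceClaimUserReference ensures that a reference to a consumer
// of the claim carries the resource type, the object name and the UID.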
func validateResourceClaimUserReference(ref resource.ResourceClaimConsumerReference, fldPath *field.Path) field.ErrorList {
var allErrs field.ErrorList
if ref.Resource == "" {
allErrs = append(allErrs, field.Required(fldPath.Child("resource"), ""))
}
if ref.Name == "" {
allErrs = append(allErrs, field.Required(fldPath.Child("name"), ""))
}
if ref.UID == "" {
allErrs = append(allErrs, field.Required(fldPath.Child("uid"), ""))
}
return allErrs
}
// validateSliceIsASet ensures that a slice contains no duplicates and does not exceed a certain maximum size.
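// Each item is validated with validateItem; duplicates are reported once
// instead of being validated again. For example, mirroring the reservedFor
// check above:
//
//	allErrs := validateSliceIsASet(resourceClaim.Status.ReservedFor,
//		resource.ResourceClaimReservedForMaxSize,
//		validateResourceClaimUserReference, fldPath.Child("reservedFor"))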
func validateSliceIsASet[T comparable](slice []T, maxSize int, validateItem func(item T, fldPath *field.Path) field.ErrorList, fldPath *field.Path) field.ErrorList {
var allErrs field.ErrorList
allItems := sets.New[T]()
for i, item := range slice {
idxPath := fldPath.Index(i)
if allItems.Has(item) {
allErrs = append(allErrs, field.Duplicate(idxPath, item))
} else {
allErrs = append(allErrs, validateItem(item, idxPath)...)
allItems.Insert(item)
}
}
if len(slice) > maxSize {
// Dumping the entire field into the error message is likely to be too long,
// in particular when it is already beyond the maximum size. Instead this
// just shows the number of entries.
allErrs = append(allErrs, field.TooLongMaxLength(fldPath, len(slice), maxSize))
}
return allErrs
}
// ValidatePodScheduling validates a PodScheduling.
func ValidatePodScheduling(podScheduling *resource.PodScheduling) field.ErrorList {
allErrs := corevalidation.ValidateObjectMeta(&podScheduling.ObjectMeta, true, corevalidation.ValidatePodName, field.NewPath("metadata"))
allErrs = append(allErrs, validatePodSchedulingSpec(&podScheduling.Spec, field.NewPath("spec"))...)
return allErrs
}
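// validatePodSchedulingSpec checks that the list of potential nodes contains
// no duplicates and stays within PodSchedulingNodeListMaxSize.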
func validatePodSchedulingSpec(spec *resource.PodSchedulingSpec, fldPath *field.Path) field.ErrorList {
allErrs := validateSliceIsASet(spec.PotentialNodes, resource.PodSchedulingNodeListMaxSize, validateNodeName, fldPath.Child("potentialNodes"))
return allErrs
}
// ValidatePodSchedulingUpdate tests if an update to PodScheduling is valid.
func ValidatePodSchedulingUpdate(podScheduling, oldPodScheduling *resource.PodScheduling) field.ErrorList {
allErrs := corevalidation.ValidateObjectMetaUpdate(&podScheduling.ObjectMeta, &oldPodScheduling.ObjectMeta, field.NewPath("metadata"))
allErrs = append(allErrs, ValidatePodScheduling(podScheduling)...)
return allErrs
}
// ValidatePodSchedulingStatusUpdate tests if an update to the status of a PodScheduling is valid.
func ValidatePodSchedulingStatusUpdate(podScheduling, oldPodScheduling *resource.PodScheduling) field.ErrorList {
allErrs := corevalidation.ValidateObjectMetaUpdate(&podScheduling.ObjectMeta, &oldPodScheduling.ObjectMeta, field.NewPath("metadata"))
allErrs = append(allErrs, validatePodSchedulingStatus(&podScheduling.Status, field.NewPath("status"))...)
return allErrs
}
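// validatePodSchedulingStatus currently only validates the per-claim
// scheduling statuses.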
func validatePodSchedulingStatus(status *resource.PodSchedulingStatus, fldPath *field.Path) field.ErrorList {
return validatePodSchedulingClaims(status.ResourceClaims, fldPath.Child("claims"))
}
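// validatePodSchedulingClaims validates each claim scheduling status and
// rejects duplicated claim names.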
func validatePodSchedulingClaims(claimStatuses []resource.ResourceClaimSchedulingStatus, fldPath *field.Path) field.ErrorList {
var allErrs field.ErrorList
names := sets.NewString()
for i, claimStatus := range claimStatuses {
allErrs = append(allErrs, validatePodSchedulingClaim(claimStatus, fldPath.Index(i))...)
if names.Has(claimStatus.Name) {
allErrs = append(allErrs, field.Duplicate(fldPath.Index(i), claimStatus.Name))
} else {
names.Insert(claimStatus.Name)
}
}
return allErrs
}
func validatePodSchedulingClaim(status resource.ResourceClaimSchedulingStatus, fldPath *field.Path) field.ErrorList {
allErrs := validateSliceIsASet(status.UnsuitableNodes, resource.PodSchedulingNodeListMaxSize, validateNodeName, fldPath.Child("unsuitableNodes"))
return allErrs
}
// ValidateClaimTemplate validates a ResourceClaimTemplate.
func ValidateClaimTemplate(template *resource.ResourceClaimTemplate) field.ErrorList {
allErrs := corevalidation.ValidateObjectMeta(&template.ObjectMeta, true, validateResourceClaimTemplateName, field.NewPath("metadata"))
allErrs = append(allErrs, validateResourceClaimTemplateSpec(&template.Spec, field.NewPath("spec"))...)
return allErrs
}
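// validateResourceClaimTemplateSpec validates the embedded object metadata
// and the spec of the ResourceClaim that will be created from the template.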
func validateResourceClaimTemplateSpec(spec *resource.ResourceClaimTemplateSpec, fldPath *field.Path) field.ErrorList {
allErrs := corevalidation.ValidateTemplateObjectMeta(&spec.ObjectMeta, fldPath.Child("metadata"))
allErrs = append(allErrs, validateResourceClaimSpec(&spec.Spec, fldPath.Child("spec"))...)
return allErrs
}
// ValidateClaimTemplateUpdate tests if an update to a ResourceClaimTemplate is valid.
func ValidateClaimTemplateUpdate(template, oldTemplate *resource.ResourceClaimTemplate) field.ErrorList {
allErrs := corevalidation.ValidateObjectMetaUpdate(&template.ObjectMeta, &oldTemplate.ObjectMeta, field.NewPath("metadata"))
allErrs = append(allErrs, apimachineryvalidation.ValidateImmutableField(template.Spec, oldTemplate.Spec, field.NewPath("spec"))...)
allErrs = append(allErrs, ValidateClaimTemplate(template)...)
return allErrs
}
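// validateNodeName wraps corevalidation.ValidateNodeName and converts its
// message strings into field errors at the given path.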
func validateNodeName(name string, fldPath *field.Path) field.ErrorList {
var allErrs field.ErrorList
for _, msg := range corevalidation.ValidateNodeName(name, false) {
allErrs = append(allErrs, field.Invalid(fldPath, name, msg))
}
return allErrs
}


@ -0,0 +1,338 @@
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package validation
import (
"fmt"
"testing"
"github.com/stretchr/testify/assert"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/validation/field"
"k8s.io/kubernetes/pkg/apis/resource"
"k8s.io/utils/pointer"
)
func testPodScheduling(name, namespace string, spec resource.PodSchedulingSpec) *resource.PodScheduling {
return &resource.PodScheduling{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: namespace,
},
Spec: spec,
}
}
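// The scenarios below are table-driven: each case supplies one PodScheduling
// object plus the exact validation failures, if any, that
// ValidatePodScheduling is expected to return for it.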
func TestValidatePodScheduling(t *testing.T) {
goodName := "foo"
goodNS := "ns"
goodPodSchedulingSpec := resource.PodSchedulingSpec{}
now := metav1.Now()
badName := "!@#$%^"
badValue := "spaces not allowed"
scenarios := map[string]struct {
scheduling *resource.PodScheduling
wantFailures field.ErrorList
}{
"good-scheduling": {
scheduling: testPodScheduling(goodName, goodNS, goodPodSchedulingSpec),
},
"missing-name": {
wantFailures: field.ErrorList{field.Required(field.NewPath("metadata", "name"), "name or generateName is required")},
scheduling: testPodScheduling("", goodNS, goodPodSchedulingSpec),
},
"bad-name": {
wantFailures: field.ErrorList{field.Invalid(field.NewPath("metadata", "name"), badName, "a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character (e.g. 'example.com', regex used for validation is '[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*')")},
scheduling: testPodScheduling(badName, goodNS, goodPodSchedulingSpec),
},
"missing-namespace": {
wantFailures: field.ErrorList{field.Required(field.NewPath("metadata", "namespace"), "")},
scheduling: testPodScheduling(goodName, "", goodPodSchedulingSpec),
},
"generate-name": {
scheduling: func() *resource.PodScheduling {
scheduling := testPodScheduling(goodName, goodNS, goodPodSchedulingSpec)
scheduling.GenerateName = "pvc-"
return scheduling
}(),
},
"uid": {
scheduling: func() *resource.PodScheduling {
scheduling := testPodScheduling(goodName, goodNS, goodPodSchedulingSpec)
scheduling.UID = "ac051fac-2ead-46d9-b8b4-4e0fbeb7455d"
return scheduling
}(),
},
"resource-version": {
scheduling: func() *resource.PodScheduling {
scheduling := testPodScheduling(goodName, goodNS, goodPodSchedulingSpec)
scheduling.ResourceVersion = "1"
return scheduling
}(),
},
"generation": {
scheduling: func() *resource.PodScheduling {
scheduling := testPodScheduling(goodName, goodNS, goodPodSchedulingSpec)
scheduling.Generation = 100
return scheduling
}(),
},
"creation-timestamp": {
scheduling: func() *resource.PodScheduling {
scheduling := testPodScheduling(goodName, goodNS, goodPodSchedulingSpec)
scheduling.CreationTimestamp = now
return scheduling
}(),
},
"deletion-grace-period-seconds": {
scheduling: func() *resource.PodScheduling {
scheduling := testPodScheduling(goodName, goodNS, goodPodSchedulingSpec)
scheduling.DeletionGracePeriodSeconds = pointer.Int64(10)
return scheduling
}(),
},
"owner-references": {
scheduling: func() *resource.PodScheduling {
scheduling := testPodScheduling(goodName, goodNS, goodPodSchedulingSpec)
scheduling.OwnerReferences = []metav1.OwnerReference{
{
APIVersion: "v1",
Kind: "pod",
Name: "foo",
UID: "ac051fac-2ead-46d9-b8b4-4e0fbeb7455d",
},
}
return scheduling
}(),
},
"finalizers": {
scheduling: func() *resource.PodScheduling {
scheduling := testPodScheduling(goodName, goodNS, goodPodSchedulingSpec)
scheduling.Finalizers = []string{
"example.com/foo",
}
return scheduling
}(),
},
"managed-fields": {
scheduling: func() *resource.PodScheduling {
scheduling := testPodScheduling(goodName, goodNS, goodPodSchedulingSpec)
scheduling.ManagedFields = []metav1.ManagedFieldsEntry{
{
FieldsType: "FieldsV1",
Operation: "Apply",
APIVersion: "apps/v1",
Manager: "foo",
},
}
return scheduling
}(),
},
"good-labels": {
scheduling: func() *resource.PodScheduling {
scheduling := testPodScheduling(goodName, goodNS, goodPodSchedulingSpec)
scheduling.Labels = map[string]string{
"apps.kubernetes.io/name": "test",
}
return scheduling
}(),
},
"bad-labels": {
wantFailures: field.ErrorList{field.Invalid(field.NewPath("metadata", "labels"), badValue, "a valid label must be an empty string or consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character (e.g. 'MyValue', or 'my_value', or '12345', regex used for validation is '(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?')")},
scheduling: func() *resource.PodScheduling {
scheduling := testPodScheduling(goodName, goodNS, goodPodSchedulingSpec)
scheduling.Labels = map[string]string{
"hello-world": badValue,
}
return scheduling
}(),
},
"good-annotations": {
scheduling: func() *resource.PodScheduling {
scheduling := testPodScheduling(goodName, goodNS, goodPodSchedulingSpec)
scheduling.Annotations = map[string]string{
"foo": "bar",
}
return scheduling
}(),
},
"bad-annotations": {
wantFailures: field.ErrorList{field.Invalid(field.NewPath("metadata", "annotations"), badName, "name part must consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character (e.g. 'MyName', or 'my.name', or '123-abc', regex used for validation is '([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]')")},
scheduling: func() *resource.PodScheduling {
scheduling := testPodScheduling(goodName, goodNS, goodPodSchedulingSpec)
scheduling.Annotations = map[string]string{
badName: "hello world",
}
return scheduling
}(),
},
}
for name, scenario := range scenarios {
t.Run(name, func(t *testing.T) {
errs := ValidatePodScheduling(scenario.scheduling)
assert.Equal(t, scenario.wantFailures, errs)
})
}
}
func TestValidatePodSchedulingUpdate(t *testing.T) {
validScheduling := testPodScheduling("foo", "ns", resource.PodSchedulingSpec{})
badName := "!@#$%^"
scenarios := map[string]struct {
oldScheduling *resource.PodScheduling
update func(scheduling *resource.PodScheduling) *resource.PodScheduling
wantFailures field.ErrorList
}{
"valid-no-op-update": {
oldScheduling: validScheduling,
update: func(scheduling *resource.PodScheduling) *resource.PodScheduling { return scheduling },
},
"add-selected-node": {
oldScheduling: validScheduling,
update: func(scheduling *resource.PodScheduling) *resource.PodScheduling {
scheduling.Spec.SelectedNode = "worker1"
return scheduling
},
},
"add-potential-nodes": {
oldScheduling: validScheduling,
update: func(scheduling *resource.PodScheduling) *resource.PodScheduling {
for i := 0; i < resource.PodSchedulingNodeListMaxSize; i++ {
scheduling.Spec.PotentialNodes = append(scheduling.Spec.PotentialNodes, fmt.Sprintf("worker%d", i))
}
return scheduling
},
},
"invalid-potential-nodes-too-long": {
wantFailures: field.ErrorList{field.TooLongMaxLength(field.NewPath("spec", "potentialNodes"), 129, resource.PodSchedulingNodeListMaxSize)},
oldScheduling: validScheduling,
update: func(scheduling *resource.PodScheduling) *resource.PodScheduling {
for i := 0; i < resource.PodSchedulingNodeListMaxSize+1; i++ {
scheduling.Spec.PotentialNodes = append(scheduling.Spec.PotentialNodes, fmt.Sprintf("worker%d", i))
}
return scheduling
},
},
"invalid-potential-nodes-name": {
wantFailures: field.ErrorList{field.Invalid(field.NewPath("spec", "potentialNodes").Index(0), badName, "a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character (e.g. 'example.com', regex used for validation is '[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*')")},
oldScheduling: validScheduling,
update: func(scheduling *resource.PodScheduling) *resource.PodScheduling {
scheduling.Spec.PotentialNodes = append(scheduling.Spec.PotentialNodes, badName)
return scheduling
},
},
}
for name, scenario := range scenarios {
t.Run(name, func(t *testing.T) {
scenario.oldScheduling.ResourceVersion = "1"
errs := ValidatePodSchedulingUpdate(scenario.update(scenario.oldScheduling.DeepCopy()), scenario.oldScheduling)
assert.Equal(t, scenario.wantFailures, errs)
})
}
}
func TestValidatePodSchedulingStatusUpdate(t *testing.T) {
validScheduling := testPodScheduling("foo", "ns", resource.PodSchedulingSpec{})
badName := "!@#$%^"
scenarios := map[string]struct {
oldScheduling *resource.PodScheduling
update func(scheduling *resource.PodScheduling) *resource.PodScheduling
wantFailures field.ErrorList
}{
"valid-no-op-update": {
oldScheduling: validScheduling,
update: func(scheduling *resource.PodScheduling) *resource.PodScheduling { return scheduling },
},
"add-claim-status": {
oldScheduling: validScheduling,
update: func(scheduling *resource.PodScheduling) *resource.PodScheduling {
scheduling.Status.ResourceClaims = append(scheduling.Status.ResourceClaims,
resource.ResourceClaimSchedulingStatus{
Name: "my-claim",
},
)
for i := 0; i < resource.PodSchedulingNodeListMaxSize; i++ {
scheduling.Status.ResourceClaims[0].UnsuitableNodes = append(
scheduling.Status.ResourceClaims[0].UnsuitableNodes,
fmt.Sprintf("worker%d", i),
)
}
return scheduling
},
},
"invalid-duplicated-claim-status": {
wantFailures: field.ErrorList{field.Duplicate(field.NewPath("status", "claims").Index(1), "my-claim")},
oldScheduling: validScheduling,
update: func(scheduling *resource.PodScheduling) *resource.PodScheduling {
for i := 0; i < 2; i++ {
scheduling.Status.ResourceClaims = append(scheduling.Status.ResourceClaims,
resource.ResourceClaimSchedulingStatus{Name: "my-claim"},
)
}
return scheduling
},
},
"invalid-too-long-claim-status": {
wantFailures: field.ErrorList{field.TooLongMaxLength(field.NewPath("status", "claims").Index(0).Child("unsuitableNodes"), 129, resource.PodSchedulingNodeListMaxSize)},
oldScheduling: validScheduling,
update: func(scheduling *resource.PodScheduling) *resource.PodScheduling {
scheduling.Status.ResourceClaims = append(scheduling.Status.ResourceClaims,
resource.ResourceClaimSchedulingStatus{
Name: "my-claim",
},
)
for i := 0; i < resource.PodSchedulingNodeListMaxSize+1; i++ {
scheduling.Status.ResourceClaims[0].UnsuitableNodes = append(
scheduling.Status.ResourceClaims[0].UnsuitableNodes,
fmt.Sprintf("worker%d", i),
)
}
return scheduling
},
},
"invalid-node-name": {
wantFailures: field.ErrorList{field.Invalid(field.NewPath("status", "claims").Index(0).Child("unsuitableNodes").Index(0), badName, "a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character (e.g. 'example.com', regex used for validation is '[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*')")},
oldScheduling: validScheduling,
update: func(scheduling *resource.PodScheduling) *resource.PodScheduling {
scheduling.Status.ResourceClaims = append(scheduling.Status.ResourceClaims,
resource.ResourceClaimSchedulingStatus{
Name: "my-claim",
},
)
scheduling.Status.ResourceClaims[0].UnsuitableNodes = append(
scheduling.Status.ResourceClaims[0].UnsuitableNodes,
badName,
)
return scheduling
},
},
}
for name, scenario := range scenarios {
t.Run(name, func(t *testing.T) {
scenario.oldScheduling.ResourceVersion = "1"
errs := ValidatePodSchedulingStatusUpdate(scenario.update(scenario.oldScheduling.DeepCopy()), scenario.oldScheduling)
assert.Equal(t, scenario.wantFailures, errs)
})
}
}


@ -0,0 +1,629 @@
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package validation
import (
"fmt"
"strings"
"testing"
"github.com/stretchr/testify/assert"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/validation/field"
"k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/apis/resource"
"k8s.io/utils/pointer"
)
func testClaim(name, namespace string, spec resource.ResourceClaimSpec) *resource.ResourceClaim {
return &resource.ResourceClaim{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: namespace,
},
Spec: spec,
}
}
func TestValidateClaim(t *testing.T) {
validMode := resource.AllocationModeImmediate
invalidMode := resource.AllocationMode("invalid")
goodName := "foo"
badName := "!@#$%^"
goodNS := "ns"
goodClaimSpec := resource.ResourceClaimSpec{
ResourceClassName: goodName,
AllocationMode: validMode,
}
now := metav1.Now()
badValue := "spaces not allowed"
scenarios := map[string]struct {
claim *resource.ResourceClaim
wantFailures field.ErrorList
}{
"good-claim": {
claim: testClaim(goodName, goodNS, goodClaimSpec),
},
"missing-name": {
wantFailures: field.ErrorList{field.Required(field.NewPath("metadata", "name"), "name or generateName is required")},
claim: testClaim("", goodNS, goodClaimSpec),
},
"bad-name": {
wantFailures: field.ErrorList{field.Invalid(field.NewPath("metadata", "name"), badName, "a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character (e.g. 'example.com', regex used for validation is '[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*')")},
claim: testClaim(badName, goodNS, goodClaimSpec),
},
"missing-namespace": {
wantFailures: field.ErrorList{field.Required(field.NewPath("metadata", "namespace"), "")},
claim: testClaim(goodName, "", goodClaimSpec),
},
"generate-name": {
claim: func() *resource.ResourceClaim {
claim := testClaim(goodName, goodNS, goodClaimSpec)
claim.GenerateName = "pvc-"
return claim
}(),
},
"uid": {
claim: func() *resource.ResourceClaim {
claim := testClaim(goodName, goodNS, goodClaimSpec)
claim.UID = "ac051fac-2ead-46d9-b8b4-4e0fbeb7455d"
return claim
}(),
},
"resource-version": {
claim: func() *resource.ResourceClaim {
claim := testClaim(goodName, goodNS, goodClaimSpec)
claim.ResourceVersion = "1"
return claim
}(),
},
"generation": {
claim: func() *resource.ResourceClaim {
claim := testClaim(goodName, goodNS, goodClaimSpec)
claim.Generation = 100
return claim
}(),
},
"creation-timestamp": {
claim: func() *resource.ResourceClaim {
claim := testClaim(goodName, goodNS, goodClaimSpec)
claim.CreationTimestamp = now
return claim
}(),
},
"deletion-grace-period-seconds": {
claim: func() *resource.ResourceClaim {
claim := testClaim(goodName, goodNS, goodClaimSpec)
claim.DeletionGracePeriodSeconds = pointer.Int64(10)
return claim
}(),
},
"owner-references": {
claim: func() *resource.ResourceClaim {
claim := testClaim(goodName, goodNS, goodClaimSpec)
claim.OwnerReferences = []metav1.OwnerReference{
{
APIVersion: "v1",
Kind: "pod",
Name: "foo",
UID: "ac051fac-2ead-46d9-b8b4-4e0fbeb7455d",
},
}
return claim
}(),
},
"finalizers": {
claim: func() *resource.ResourceClaim {
claim := testClaim(goodName, goodNS, goodClaimSpec)
claim.Finalizers = []string{
"example.com/foo",
}
return claim
}(),
},
"managed-fields": {
claim: func() *resource.ResourceClaim {
claim := testClaim(goodName, goodNS, goodClaimSpec)
claim.ManagedFields = []metav1.ManagedFieldsEntry{
{
FieldsType: "FieldsV1",
Operation: "Apply",
APIVersion: "apps/v1",
Manager: "foo",
},
}
return claim
}(),
},
"good-labels": {
claim: func() *resource.ResourceClaim {
claim := testClaim(goodName, goodNS, goodClaimSpec)
claim.Labels = map[string]string{
"apps.kubernetes.io/name": "test",
}
return claim
}(),
},
"bad-labels": {
wantFailures: field.ErrorList{field.Invalid(field.NewPath("metadata", "labels"), badValue, "a valid label must be an empty string or consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character (e.g. 'MyValue', or 'my_value', or '12345', regex used for validation is '(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?')")},
claim: func() *resource.ResourceClaim {
claim := testClaim(goodName, goodNS, goodClaimSpec)
claim.Labels = map[string]string{
"hello-world": badValue,
}
return claim
}(),
},
"good-annotations": {
claim: func() *resource.ResourceClaim {
claim := testClaim(goodName, goodNS, goodClaimSpec)
claim.Annotations = map[string]string{
"foo": "bar",
}
return claim
}(),
},
"bad-annotations": {
wantFailures: field.ErrorList{field.Invalid(field.NewPath("metadata", "annotations"), badName, "name part must consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character (e.g. 'MyName', or 'my.name', or '123-abc', regex used for validation is '([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]')")},
claim: func() *resource.ResourceClaim {
claim := testClaim(goodName, goodNS, goodClaimSpec)
claim.Annotations = map[string]string{
badName: "hello world",
}
return claim
}(),
},
"bad-classname": {
wantFailures: field.ErrorList{field.Invalid(field.NewPath("spec", "resourceClassName"), badName, "a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character (e.g. 'example.com', regex used for validation is '[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*')")},
claim: func() *resource.ResourceClaim {
claim := testClaim(goodName, goodNS, goodClaimSpec)
claim.Spec.ResourceClassName = badName
return claim
}(),
},
"bad-mode": {
wantFailures: field.ErrorList{field.NotSupported(field.NewPath("spec", "allocationMode"), invalidMode, supportedAllocationModes.List())},
claim: func() *resource.ResourceClaim {
claim := testClaim(goodName, goodNS, goodClaimSpec)
claim.Spec.AllocationMode = invalidMode
return claim
}(),
},
"good-parameters": {
claim: func() *resource.ResourceClaim {
claim := testClaim(goodName, goodNS, goodClaimSpec)
claim.Spec.ParametersRef = &resource.ResourceClaimParametersReference{
Kind: "foo",
Name: "bar",
}
return claim
}(),
},
"missing-parameters-kind": {
wantFailures: field.ErrorList{field.Required(field.NewPath("spec", "parametersRef", "kind"), "")},
claim: func() *resource.ResourceClaim {
claim := testClaim(goodName, goodNS, goodClaimSpec)
claim.Spec.ParametersRef = &resource.ResourceClaimParametersReference{
Name: "bar",
}
return claim
}(),
},
"missing-parameters-name": {
wantFailures: field.ErrorList{field.Required(field.NewPath("spec", "parametersRef", "name"), "")},
claim: func() *resource.ResourceClaim {
claim := testClaim(goodName, goodNS, goodClaimSpec)
claim.Spec.ParametersRef = &resource.ResourceClaimParametersReference{
Kind: "foo",
}
return claim
}(),
},
}
for name, scenario := range scenarios {
t.Run(name, func(t *testing.T) {
errs := ValidateClaim(scenario.claim)
assert.Equal(t, scenario.wantFailures, errs)
})
}
}
func TestValidateClaimUpdate(t *testing.T) {
name := "valid"
parameters := &resource.ResourceClaimParametersReference{
Kind: "foo",
Name: "bar",
}
validClaim := testClaim("foo", "ns", resource.ResourceClaimSpec{
ResourceClassName: name,
AllocationMode: resource.AllocationModeImmediate,
ParametersRef: parameters,
})
scenarios := map[string]struct {
oldClaim *resource.ResourceClaim
update func(claim *resource.ResourceClaim) *resource.ResourceClaim
wantFailures field.ErrorList
}{
"valid-no-op-update": {
oldClaim: validClaim,
update: func(claim *resource.ResourceClaim) *resource.ResourceClaim { return claim },
},
"invalid-update-class": {
wantFailures: field.ErrorList{field.Invalid(field.NewPath("spec"), func() resource.ResourceClaimSpec {
spec := validClaim.Spec.DeepCopy()
spec.ResourceClassName += "2"
return *spec
}(), "field is immutable")},
oldClaim: validClaim,
update: func(claim *resource.ResourceClaim) *resource.ResourceClaim {
claim.Spec.ResourceClassName += "2"
return claim
},
},
"invalid-update-remove-parameters": {
wantFailures: field.ErrorList{field.Invalid(field.NewPath("spec"), func() resource.ResourceClaimSpec {
spec := validClaim.Spec.DeepCopy()
spec.ParametersRef = nil
return *spec
}(), "field is immutable")},
oldClaim: validClaim,
update: func(claim *resource.ResourceClaim) *resource.ResourceClaim {
claim.Spec.ParametersRef = nil
return claim
},
},
"invalid-update-mode": {
wantFailures: field.ErrorList{field.Invalid(field.NewPath("spec"), func() resource.ResourceClaimSpec {
spec := validClaim.Spec.DeepCopy()
spec.AllocationMode = resource.AllocationModeWaitForFirstConsumer
return *spec
}(), "field is immutable")},
oldClaim: validClaim,
update: func(claim *resource.ResourceClaim) *resource.ResourceClaim {
claim.Spec.AllocationMode = resource.AllocationModeWaitForFirstConsumer
return claim
},
},
}
for name, scenario := range scenarios {
t.Run(name, func(t *testing.T) {
scenario.oldClaim.ResourceVersion = "1"
errs := ValidateClaimUpdate(scenario.update(scenario.oldClaim.DeepCopy()), scenario.oldClaim)
assert.Equal(t, scenario.wantFailures, errs)
})
}
}
func TestValidateClaimStatusUpdate(t *testing.T) {
validClaim := testClaim("foo", "ns", resource.ResourceClaimSpec{
ResourceClassName: "valid",
AllocationMode: resource.AllocationModeImmediate,
})
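// validAllocatedClaim extends validClaim with a successful allocation; it is
// the starting point for most of the status update scenarios below.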
validAllocatedClaim := validClaim.DeepCopy()
validAllocatedClaim.Status = resource.ResourceClaimStatus{
DriverName: "valid",
Allocation: &resource.AllocationResult{
ResourceHandle: strings.Repeat(" ", resource.ResourceHandleMaxSize),
Shareable: true,
},
}
scenarios := map[string]struct {
oldClaim *resource.ResourceClaim
update func(claim *resource.ResourceClaim) *resource.ResourceClaim
wantFailures field.ErrorList
}{
"valid-no-op-update": {
oldClaim: validClaim,
update: func(claim *resource.ResourceClaim) *resource.ResourceClaim { return claim },
},
"add-driver": {
oldClaim: validClaim,
update: func(claim *resource.ResourceClaim) *resource.ResourceClaim {
claim.Status.DriverName = "valid"
return claim
},
},
"invalid-add-allocation": {
wantFailures: field.ErrorList{field.Required(field.NewPath("status", "driverName"), "must be specified when `allocation` is set")},
oldClaim: validClaim,
update: func(claim *resource.ResourceClaim) *resource.ResourceClaim {
// DriverName must also get set here!
claim.Status.Allocation = &resource.AllocationResult{}
return claim
},
},
"valid-add-allocation": {
oldClaim: validClaim,
update: func(claim *resource.ResourceClaim) *resource.ResourceClaim {
claim.Status.DriverName = "valid"
claim.Status.Allocation = &resource.AllocationResult{
ResourceHandle: strings.Repeat(" ", resource.ResourceHandleMaxSize),
}
return claim
},
},
"invalid-allocation-handle": {
wantFailures: field.ErrorList{field.TooLongMaxLength(field.NewPath("status", "allocation", "resourceHandle"), resource.ResourceHandleMaxSize+1, resource.ResourceHandleMaxSize)},
oldClaim: validClaim,
update: func(claim *resource.ResourceClaim) *resource.ResourceClaim {
claim.Status.DriverName = "valid"
claim.Status.Allocation = &resource.AllocationResult{
ResourceHandle: strings.Repeat(" ", resource.ResourceHandleMaxSize+1),
}
return claim
},
},
"invalid-node-selector": {
wantFailures: field.ErrorList{field.Required(field.NewPath("status", "allocation", "availableOnNodes", "nodeSelectorTerms"), "must have at least one node selector term")},
oldClaim: validClaim,
update: func(claim *resource.ResourceClaim) *resource.ResourceClaim {
claim.Status.DriverName = "valid"
claim.Status.Allocation = &resource.AllocationResult{
AvailableOnNodes: &core.NodeSelector{
// Must not be empty.
},
}
return claim
},
},
"add-reservation": {
oldClaim: validAllocatedClaim,
update: func(claim *resource.ResourceClaim) *resource.ResourceClaim {
for i := 0; i < resource.ResourceClaimReservedForMaxSize; i++ {
claim.Status.ReservedFor = append(claim.Status.ReservedFor,
resource.ResourceClaimConsumerReference{
Resource: "pods",
Name: fmt.Sprintf("foo-%d", i),
UID: "1",
})
}
return claim
},
},
"add-reservation-and-allocation": {
oldClaim: validClaim,
update: func(claim *resource.ResourceClaim) *resource.ResourceClaim {
claim.Status = *validAllocatedClaim.Status.DeepCopy()
for i := 0; i < resource.ResourceClaimReservedForMaxSize; i++ {
claim.Status.ReservedFor = append(claim.Status.ReservedFor,
resource.ResourceClaimConsumerReference{
Resource: "pods",
Name: fmt.Sprintf("foo-%d", i),
UID: "1",
})
}
return claim
},
},
"invalid-reserved-for-too-large": {
wantFailures: field.ErrorList{field.TooLongMaxLength(field.NewPath("status", "reservedFor"), resource.ResourceClaimReservedForMaxSize+1, resource.ResourceClaimReservedForMaxSize)},
oldClaim: validAllocatedClaim,
update: func(claim *resource.ResourceClaim) *resource.ResourceClaim {
for i := 0; i < resource.ResourceClaimReservedForMaxSize+1; i++ {
claim.Status.ReservedFor = append(claim.Status.ReservedFor,
resource.ResourceClaimConsumerReference{
Resource: "pods",
Name: fmt.Sprintf("foo-%d", i),
UID: "1",
})
}
return claim
},
},
"invalid-reserved-for-duplicate": {
wantFailures: field.ErrorList{field.Duplicate(field.NewPath("status", "reservedFor").Index(1), resource.ResourceClaimConsumerReference{
Resource: "pods",
Name: "foo",
UID: "1",
})},
oldClaim: validAllocatedClaim,
update: func(claim *resource.ResourceClaim) *resource.ResourceClaim {
for i := 0; i < 2; i++ {
claim.Status.ReservedFor = append(claim.Status.ReservedFor,
resource.ResourceClaimConsumerReference{
Resource: "pods",
Name: "foo",
UID: "1",
})
}
return claim
},
},
"invalid-reserved-for-not-shared": {
wantFailures: field.ErrorList{field.Forbidden(field.NewPath("status", "reservedFor"), "may not be reserved more than once")},
oldClaim: func() *resource.ResourceClaim {
claim := validAllocatedClaim.DeepCopy()
claim.Status.Allocation.Shareable = false
return claim
}(),
update: func(claim *resource.ResourceClaim) *resource.ResourceClaim {
for i := 0; i < 2; i++ {
claim.Status.ReservedFor = append(claim.Status.ReservedFor,
resource.ResourceClaimConsumerReference{
Resource: "pods",
Name: fmt.Sprintf("foo-%d", i),
UID: "1",
})
}
return claim
},
},
"invalid-reserved-for-no-allocation": {
wantFailures: field.ErrorList{field.Forbidden(field.NewPath("status", "reservedFor"), "may not be specified when `allocated` is not set")},
oldClaim: validClaim,
update: func(claim *resource.ResourceClaim) *resource.ResourceClaim {
claim.Status.DriverName = "valid"
claim.Status.ReservedFor = []resource.ResourceClaimConsumerReference{
{
Resource: "pods",
Name: "foo",
UID: "1",
},
}
return claim
},
},
"invalid-reserved-for-no-resource": {
wantFailures: field.ErrorList{field.Required(field.NewPath("status", "reservedFor").Index(0).Child("resource"), "")},
oldClaim: validAllocatedClaim,
update: func(claim *resource.ResourceClaim) *resource.ResourceClaim {
claim.Status.ReservedFor = []resource.ResourceClaimConsumerReference{
{
Name: "foo",
UID: "1",
},
}
return claim
},
},
"invalid-reserved-for-no-name": {
wantFailures: field.ErrorList{field.Required(field.NewPath("status", "reservedFor").Index(0).Child("name"), "")},
oldClaim: validAllocatedClaim,
update: func(claim *resource.ResourceClaim) *resource.ResourceClaim {
claim.Status.ReservedFor = []resource.ResourceClaimConsumerReference{
{
Resource: "pods",
UID: "1",
},
}
return claim
},
},
"invalid-reserved-for-no-uid": {
wantFailures: field.ErrorList{field.Required(field.NewPath("status", "reservedFor").Index(0).Child("uid"), "")},
oldClaim: validAllocatedClaim,
update: func(claim *resource.ResourceClaim) *resource.ResourceClaim {
claim.Status.ReservedFor = []resource.ResourceClaimConsumerReference{
{
Resource: "pods",
Name: "foo",
},
}
return claim
},
},
"invalid-reserved-deleted": {
wantFailures: field.ErrorList{field.Forbidden(field.NewPath("status", "reservedFor"), "new entries may not be added while `deallocationRequested` or `deletionTimestamp` are set")},
oldClaim: func() *resource.ResourceClaim {
claim := validAllocatedClaim.DeepCopy()
var deletionTimestamp metav1.Time
claim.DeletionTimestamp = &deletionTimestamp
return claim
}(),
update: func(claim *resource.ResourceClaim) *resource.ResourceClaim {
claim.Status.ReservedFor = []resource.ResourceClaimConsumerReference{
{
Resource: "pods",
Name: "foo",
UID: "1",
},
}
return claim
},
},
"invalid-reserved-deallocation-requested": {
wantFailures: field.ErrorList{field.Forbidden(field.NewPath("status", "reservedFor"), "new entries may not be added while `deallocationRequested` or `deletionTimestamp` are set")},
oldClaim: func() *resource.ResourceClaim {
claim := validAllocatedClaim.DeepCopy()
claim.Status.DeallocationRequested = true
return claim
}(),
update: func(claim *resource.ResourceClaim) *resource.ResourceClaim {
claim.Status.ReservedFor = []resource.ResourceClaimConsumerReference{
{
Resource: "pods",
Name: "foo",
UID: "1",
},
}
return claim
},
},
"add-deallocation-requested": {
oldClaim: validAllocatedClaim,
update: func(claim *resource.ResourceClaim) *resource.ResourceClaim {
claim.Status.DeallocationRequested = true
return claim
},
},
"invalid-deallocation-requested-removal": {
wantFailures: field.ErrorList{field.Forbidden(field.NewPath("status", "deallocationRequested"), "may not be cleared when `allocation` is set")},
oldClaim: func() *resource.ResourceClaim {
claim := validAllocatedClaim.DeepCopy()
claim.Status.DeallocationRequested = true
return claim
}(),
update: func(claim *resource.ResourceClaim) *resource.ResourceClaim {
claim.Status.DeallocationRequested = false
return claim
},
},
"invalid-deallocation-requested-in-use": {
wantFailures: field.ErrorList{field.Forbidden(field.NewPath("status", "deallocationRequested"), "deallocation cannot be requested while `reservedFor` is set")},
oldClaim: func() *resource.ResourceClaim {
claim := validAllocatedClaim.DeepCopy()
claim.Status.ReservedFor = []resource.ResourceClaimConsumerReference{
{
Resource: "pods",
Name: "foo",
UID: "1",
},
}
return claim
}(),
update: func(claim *resource.ResourceClaim) *resource.ResourceClaim {
claim.Status.DeallocationRequested = true
return claim
},
},
"invalid-deallocation-not-allocated": {
wantFailures: field.ErrorList{field.Forbidden(field.NewPath("status"), "`allocation` must be set when `deallocationRequested` is set")},
oldClaim: validClaim,
update: func(claim *resource.ResourceClaim) *resource.ResourceClaim {
claim.Status.DeallocationRequested = true
return claim
},
},
"invalid-allocation-removal-not-reset": {
wantFailures: field.ErrorList{field.Forbidden(field.NewPath("status"), "`allocation` must be set when `deallocationRequested` is set")},
oldClaim: func() *resource.ResourceClaim {
claim := validAllocatedClaim.DeepCopy()
claim.Status.DeallocationRequested = true
return claim
}(),
update: func(claim *resource.ResourceClaim) *resource.ResourceClaim {
claim.Status.Allocation = nil
return claim
},
},
}
for name, scenario := range scenarios {
t.Run(name, func(t *testing.T) {
scenario.oldClaim.ResourceVersion = "1"
errs := ValidateClaimStatusUpdate(scenario.update(scenario.oldClaim.DeepCopy()), scenario.oldClaim)
assert.Equal(t, scenario.wantFailures, errs)
})
}
}


@ -0,0 +1,313 @@
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package validation
import (
"testing"
"github.com/stretchr/testify/assert"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/validation/field"
"k8s.io/kubernetes/pkg/apis/resource"
"k8s.io/utils/pointer"
)
func testClaimTemplate(name, namespace string, spec resource.ResourceClaimSpec) *resource.ResourceClaimTemplate {
return &resource.ResourceClaimTemplate{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: namespace,
},
Spec: resource.ResourceClaimTemplateSpec{
Spec: spec,
},
}
}
func TestValidateClaimTemplate(t *testing.T) {
validMode := resource.AllocationModeImmediate
invalidMode := resource.AllocationMode("invalid")
goodName := "foo"
badName := "!@#$%^"
goodNS := "ns"
goodClaimSpec := resource.ResourceClaimSpec{
ResourceClassName: goodName,
AllocationMode: validMode,
}
now := metav1.Now()
badValue := "spaces not allowed"
scenarios := map[string]struct {
template *resource.ResourceClaimTemplate
wantFailures field.ErrorList
}{
"good-claim": {
template: testClaimTemplate(goodName, goodNS, goodClaimSpec),
},
"missing-name": {
wantFailures: field.ErrorList{field.Required(field.NewPath("metadata", "name"), "name or generateName is required")},
template: testClaimTemplate("", goodNS, goodClaimSpec),
},
"bad-name": {
wantFailures: field.ErrorList{field.Invalid(field.NewPath("metadata", "name"), badName, "a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character (e.g. 'example.com', regex used for validation is '[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*')")},
template: testClaimTemplate(badName, goodNS, goodClaimSpec),
},
"missing-namespace": {
wantFailures: field.ErrorList{field.Required(field.NewPath("metadata", "namespace"), "")},
template: testClaimTemplate(goodName, "", goodClaimSpec),
},
"generate-name": {
template: func() *resource.ResourceClaimTemplate {
template := testClaimTemplate(goodName, goodNS, goodClaimSpec)
template.GenerateName = "pvc-"
return template
}(),
},
"uid": {
template: func() *resource.ResourceClaimTemplate {
template := testClaimTemplate(goodName, goodNS, goodClaimSpec)
template.UID = "ac051fac-2ead-46d9-b8b4-4e0fbeb7455d"
return template
}(),
},
"resource-version": {
template: func() *resource.ResourceClaimTemplate {
template := testClaimTemplate(goodName, goodNS, goodClaimSpec)
template.ResourceVersion = "1"
return template
}(),
},
"generation": {
template: func() *resource.ResourceClaimTemplate {
template := testClaimTemplate(goodName, goodNS, goodClaimSpec)
template.Generation = 100
return template
}(),
},
"creation-timestamp": {
template: func() *resource.ResourceClaimTemplate {
template := testClaimTemplate(goodName, goodNS, goodClaimSpec)
template.CreationTimestamp = now
return template
}(),
},
"deletion-grace-period-seconds": {
template: func() *resource.ResourceClaimTemplate {
template := testClaimTemplate(goodName, goodNS, goodClaimSpec)
template.DeletionGracePeriodSeconds = pointer.Int64(10)
return template
}(),
},
"owner-references": {
template: func() *resource.ResourceClaimTemplate {
template := testClaimTemplate(goodName, goodNS, goodClaimSpec)
template.OwnerReferences = []metav1.OwnerReference{
{
APIVersion: "v1",
Kind: "pod",
Name: "foo",
UID: "ac051fac-2ead-46d9-b8b4-4e0fbeb7455d",
},
}
return template
}(),
},
"finalizers": {
template: func() *resource.ResourceClaimTemplate {
template := testClaimTemplate(goodName, goodNS, goodClaimSpec)
template.Finalizers = []string{
"example.com/foo",
}
return template
}(),
},
"managed-fields": {
template: func() *resource.ResourceClaimTemplate {
template := testClaimTemplate(goodName, goodNS, goodClaimSpec)
template.ManagedFields = []metav1.ManagedFieldsEntry{
{
FieldsType: "FieldsV1",
Operation: "Apply",
APIVersion: "apps/v1",
Manager: "foo",
},
}
return template
}(),
},
"good-labels": {
template: func() *resource.ResourceClaimTemplate {
template := testClaimTemplate(goodName, goodNS, goodClaimSpec)
template.Labels = map[string]string{
"apps.kubernetes.io/name": "test",
}
return template
}(),
},
"bad-labels": {
wantFailures: field.ErrorList{field.Invalid(field.NewPath("metadata", "labels"), badValue, "a valid label must be an empty string or consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character (e.g. 'MyValue', or 'my_value', or '12345', regex used for validation is '(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?')")},
template: func() *resource.ResourceClaimTemplate {
template := testClaimTemplate(goodName, goodNS, goodClaimSpec)
template.Labels = map[string]string{
"hello-world": badValue,
}
return template
}(),
},
"good-annotations": {
template: func() *resource.ResourceClaimTemplate {
template := testClaimTemplate(goodName, goodNS, goodClaimSpec)
template.Annotations = map[string]string{
"foo": "bar",
}
return template
}(),
},
"bad-annotations": {
wantFailures: field.ErrorList{field.Invalid(field.NewPath("metadata", "annotations"), badName, "name part must consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character (e.g. 'MyName', or 'my.name', or '123-abc', regex used for validation is '([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]')")},
template: func() *resource.ResourceClaimTemplate {
template := testClaimTemplate(goodName, goodNS, goodClaimSpec)
template.Annotations = map[string]string{
badName: "hello world",
}
return template
}(),
},
"bad-classname": {
wantFailures: field.ErrorList{field.Invalid(field.NewPath("spec", "spec", "resourceClassName"), badName, "a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character (e.g. 'example.com', regex used for validation is '[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*')")},
template: func() *resource.ResourceClaimTemplate {
template := testClaimTemplate(goodName, goodNS, goodClaimSpec)
template.Spec.Spec.ResourceClassName = badName
return template
}(),
},
"bad-mode": {
wantFailures: field.ErrorList{field.NotSupported(field.NewPath("spec", "spec", "allocationMode"), invalidMode, supportedAllocationModes.List())},
template: func() *resource.ResourceClaimTemplate {
template := testClaimTemplate(goodName, goodNS, goodClaimSpec)
template.Spec.Spec.AllocationMode = invalidMode
return template
}(),
},
"good-parameters": {
template: func() *resource.ResourceClaimTemplate {
template := testClaimTemplate(goodName, goodNS, goodClaimSpec)
template.Spec.Spec.ParametersRef = &resource.ResourceClaimParametersReference{
Kind: "foo",
Name: "bar",
}
return template
}(),
},
"missing-parameters-kind": {
wantFailures: field.ErrorList{field.Required(field.NewPath("spec", "spec", "parametersRef", "kind"), "")},
template: func() *resource.ResourceClaimTemplate {
template := testClaimTemplate(goodName, goodNS, goodClaimSpec)
template.Spec.Spec.ParametersRef = &resource.ResourceClaimParametersReference{
Name: "bar",
}
return template
}(),
},
"missing-parameters-name": {
wantFailures: field.ErrorList{field.Required(field.NewPath("spec", "spec", "parametersRef", "name"), "")},
template: func() *resource.ResourceClaimTemplate {
template := testClaimTemplate(goodName, goodNS, goodClaimSpec)
template.Spec.Spec.ParametersRef = &resource.ResourceClaimParametersReference{
Kind: "foo",
}
return template
}(),
},
}
for name, scenario := range scenarios {
t.Run(name, func(t *testing.T) {
errs := ValidateClaimTemplate(scenario.template)
assert.Equal(t, scenario.wantFailures, errs)
})
}
}
func TestValidateClaimTemplateUpdate(t *testing.T) {
name := "valid"
parameters := &resource.ResourceClaimParametersReference{
Kind: "foo",
Name: "bar",
}
validClaimTemplate := testClaimTemplate("foo", "ns", resource.ResourceClaimSpec{
ResourceClassName: name,
AllocationMode: resource.AllocationModeImmediate,
ParametersRef: parameters,
})
scenarios := map[string]struct {
oldClaimTemplate *resource.ResourceClaimTemplate
update func(claim *resource.ResourceClaimTemplate) *resource.ResourceClaimTemplate
wantFailures field.ErrorList
}{
"valid-no-op-update": {
oldClaimTemplate: validClaimTemplate,
update: func(claim *resource.ResourceClaimTemplate) *resource.ResourceClaimTemplate { return claim },
},
"invalid-update-class": {
wantFailures: field.ErrorList{field.Invalid(field.NewPath("spec"), func() resource.ResourceClaimTemplateSpec {
spec := validClaimTemplate.Spec.DeepCopy()
spec.Spec.ResourceClassName += "2"
return *spec
}(), "field is immutable")},
oldClaimTemplate: validClaimTemplate,
update: func(template *resource.ResourceClaimTemplate) *resource.ResourceClaimTemplate {
template.Spec.Spec.ResourceClassName += "2"
return template
},
},
"invalid-update-remove-parameters": {
wantFailures: field.ErrorList{field.Invalid(field.NewPath("spec"), func() resource.ResourceClaimTemplateSpec {
spec := validClaimTemplate.Spec.DeepCopy()
spec.Spec.ParametersRef = nil
return *spec
}(), "field is immutable")},
oldClaimTemplate: validClaimTemplate,
update: func(template *resource.ResourceClaimTemplate) *resource.ResourceClaimTemplate {
template.Spec.Spec.ParametersRef = nil
return template
},
},
"invalid-update-mode": {
wantFailures: field.ErrorList{field.Invalid(field.NewPath("spec"), func() resource.ResourceClaimTemplateSpec {
spec := validClaimTemplate.Spec.DeepCopy()
spec.Spec.AllocationMode = resource.AllocationModeWaitForFirstConsumer
return *spec
}(), "field is immutable")},
oldClaimTemplate: validClaimTemplate,
update: func(template *resource.ResourceClaimTemplate) *resource.ResourceClaimTemplate {
template.Spec.Spec.AllocationMode = resource.AllocationModeWaitForFirstConsumer
return template
},
},
}
for name, scenario := range scenarios {
t.Run(name, func(t *testing.T) {
scenario.oldClaimTemplate.ResourceVersion = "1"
errs := ValidateClaimTemplateUpdate(scenario.update(scenario.oldClaimTemplate.DeepCopy()), scenario.oldClaimTemplate)
assert.Equal(t, scenario.wantFailures, errs)
})
}
}


@ -0,0 +1,282 @@
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package validation
import (
"testing"
"github.com/stretchr/testify/assert"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/validation/field"
"k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/apis/resource"
"k8s.io/utils/pointer"
)
func testClass(name, driverName string) *resource.ResourceClass {
return &resource.ResourceClass{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
DriverName: driverName,
}
}
func TestValidateClass(t *testing.T) {
goodName := "foo"
now := metav1.Now()
goodParameters := resource.ResourceClassParametersReference{
Name: "valid",
Namespace: "valid",
Kind: "foo",
}
badName := "!@#$%^"
badValue := "spaces not allowed"
scenarios := map[string]struct {
class *resource.ResourceClass
wantFailures field.ErrorList
}{
"good-class": {
class: testClass(goodName, goodName),
},
"good-long-driver-name": {
class: testClass(goodName, "acme.example.com"),
},
"missing-name": {
wantFailures: field.ErrorList{field.Required(field.NewPath("metadata", "name"), "name or generateName is required")},
class: testClass("", goodName),
},
"bad-name": {
wantFailures: field.ErrorList{field.Invalid(field.NewPath("metadata", "name"), badName, "a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character (e.g. 'example.com', regex used for validation is '[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*')")},
class: testClass(badName, goodName),
},
"generate-name": {
class: func() *resource.ResourceClass {
class := testClass(goodName, goodName)
class.GenerateName = "pvc-"
return class
}(),
},
"uid": {
class: func() *resource.ResourceClass {
class := testClass(goodName, goodName)
class.UID = "ac051fac-2ead-46d9-b8b4-4e0fbeb7455d"
return class
}(),
},
"resource-version": {
class: func() *resource.ResourceClass {
class := testClass(goodName, goodName)
class.ResourceVersion = "1"
return class
}(),
},
"generation": {
class: func() *resource.ResourceClass {
class := testClass(goodName, goodName)
class.Generation = 100
return class
}(),
},
"creation-timestamp": {
class: func() *resource.ResourceClass {
class := testClass(goodName, goodName)
class.CreationTimestamp = now
return class
}(),
},
"deletion-grace-period-seconds": {
class: func() *resource.ResourceClass {
class := testClass(goodName, goodName)
class.DeletionGracePeriodSeconds = pointer.Int64(10)
return class
}(),
},
"owner-references": {
class: func() *resource.ResourceClass {
class := testClass(goodName, goodName)
class.OwnerReferences = []metav1.OwnerReference{
{
APIVersion: "v1",
Kind: "pod",
Name: "foo",
UID: "ac051fac-2ead-46d9-b8b4-4e0fbeb7455d",
},
}
return class
}(),
},
"finalizers": {
class: func() *resource.ResourceClass {
class := testClass(goodName, goodName)
class.Finalizers = []string{
"example.com/foo",
}
return class
}(),
},
"managed-fields": {
class: func() *resource.ResourceClass {
class := testClass(goodName, goodName)
class.ManagedFields = []metav1.ManagedFieldsEntry{
{
FieldsType: "FieldsV1",
Operation: "Apply",
APIVersion: "apps/v1",
Manager: "foo",
},
}
return class
}(),
},
"good-labels": {
class: func() *resource.ResourceClass {
class := testClass(goodName, goodName)
class.Labels = map[string]string{
"apps.kubernetes.io/name": "test",
}
return class
}(),
},
"bad-labels": {
wantFailures: field.ErrorList{field.Invalid(field.NewPath("metadata", "labels"), badValue, "a valid label must be an empty string or consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character (e.g. 'MyValue', or 'my_value', or '12345', regex used for validation is '(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?')")},
class: func() *resource.ResourceClass {
class := testClass(goodName, goodName)
class.Labels = map[string]string{
"hello-world": badValue,
}
return class
}(),
},
"good-annotations": {
class: func() *resource.ResourceClass {
class := testClass(goodName, goodName)
class.Annotations = map[string]string{
"foo": "bar",
}
return class
}(),
},
"bad-annotations": {
wantFailures: field.ErrorList{field.Invalid(field.NewPath("metadata", "annotations"), badName, "name part must consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character (e.g. 'MyName', or 'my.name', or '123-abc', regex used for validation is '([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]')")},
class: func() *resource.ResourceClass {
class := testClass(goodName, goodName)
class.Annotations = map[string]string{
badName: "hello world",
}
return class
}(),
},
"missing-driver-name": {
wantFailures: field.ErrorList{field.Required(field.NewPath("driverName"), ""),
field.Invalid(field.NewPath("driverName"), "", "a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character (e.g. 'example.com', regex used for validation is '[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*')"),
},
class: testClass(goodName, ""),
},
"invalid-driver-name": {
wantFailures: field.ErrorList{field.Invalid(field.NewPath("driverName"), badName, "a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character (e.g. 'example.com', regex used for validation is '[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*')")},
class: testClass(goodName, badName),
},
"invalid-qualified-driver-name": {
wantFailures: field.ErrorList{field.Invalid(field.NewPath("driverName"), goodName+"/path", "a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character (e.g. 'example.com', regex used for validation is '[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*')")},
class: testClass(goodName, goodName+"/path"),
},
"good-parameters": {
class: func() *resource.ResourceClass {
class := testClass(goodName, goodName)
class.ParametersRef = goodParameters.DeepCopy()
return class
}(),
},
"missing-parameters-name": {
wantFailures: field.ErrorList{field.Required(field.NewPath("parametersRef", "name"), "")},
class: func() *resource.ResourceClass {
class := testClass(goodName, goodName)
class.ParametersRef = goodParameters.DeepCopy()
class.ParametersRef.Name = ""
return class
}(),
},
"bad-parameters-namespace": {
wantFailures: field.ErrorList{field.Invalid(field.NewPath("parametersRef", "namespace"), badName, "a lowercase RFC 1123 label must consist of lower case alphanumeric characters or '-', and must start and end with an alphanumeric character (e.g. 'my-name', or '123-abc', regex used for validation is '[a-z0-9]([-a-z0-9]*[a-z0-9])?')")},
class: func() *resource.ResourceClass {
class := testClass(goodName, goodName)
class.ParametersRef = goodParameters.DeepCopy()
class.ParametersRef.Namespace = badName
return class
}(),
},
"missing-parameters-kind": {
wantFailures: field.ErrorList{field.Required(field.NewPath("parametersRef", "kind"), "")},
class: func() *resource.ResourceClass {
class := testClass(goodName, goodName)
class.ParametersRef = goodParameters.DeepCopy()
class.ParametersRef.Kind = ""
return class
}(),
},
"invalid-node-selector": {
wantFailures: field.ErrorList{field.Required(field.NewPath("suitableNodes", "nodeSelectorTerms"), "must have at least one node selector term")},
class: func() *resource.ResourceClass {
class := testClass(goodName, goodName)
class.SuitableNodes = &core.NodeSelector{
// Must not be empty.
}
return class
}(),
},
}
for name, scenario := range scenarios {
t.Run(name, func(t *testing.T) {
errs := ValidateClass(scenario.class)
assert.Equal(t, scenario.wantFailures, errs)
})
}
}
func TestValidateClassUpdate(t *testing.T) {
validClass := testClass("foo", "valid")
scenarios := map[string]struct {
oldClass *resource.ResourceClass
update func(class *resource.ResourceClass) *resource.ResourceClass
wantFailures field.ErrorList
}{
"valid-no-op-update": {
oldClass: validClass,
update: func(class *resource.ResourceClass) *resource.ResourceClass { return class },
},
"update-driver": {
oldClass: validClass,
update: func(class *resource.ResourceClass) *resource.ResourceClass {
class.DriverName += "2"
return class
},
},
}
for name, scenario := range scenarios {
t.Run(name, func(t *testing.T) {
scenario.oldClass.ResourceVersion = "1"
errs := ValidateClassUpdate(scenario.update(scenario.oldClass.DeepCopy()), scenario.oldClass)
assert.Equal(t, scenario.wantFailures, errs)
})
}
}

pkg/apis/resource/zz_generated.deepcopy.go generated Normal file

@ -0,0 +1,477 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package resource
import (
runtime "k8s.io/apimachinery/pkg/runtime"
core "k8s.io/kubernetes/pkg/apis/core"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AllocationResult) DeepCopyInto(out *AllocationResult) {
*out = *in
if in.AvailableOnNodes != nil {
in, out := &in.AvailableOnNodes, &out.AvailableOnNodes
*out = new(core.NodeSelector)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllocationResult.
func (in *AllocationResult) DeepCopy() *AllocationResult {
if in == nil {
return nil
}
out := new(AllocationResult)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodScheduling) DeepCopyInto(out *PodScheduling) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodScheduling.
func (in *PodScheduling) DeepCopy() *PodScheduling {
if in == nil {
return nil
}
out := new(PodScheduling)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *PodScheduling) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodSchedulingList) DeepCopyInto(out *PodSchedulingList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]PodScheduling, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSchedulingList.
func (in *PodSchedulingList) DeepCopy() *PodSchedulingList {
if in == nil {
return nil
}
out := new(PodSchedulingList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *PodSchedulingList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodSchedulingSpec) DeepCopyInto(out *PodSchedulingSpec) {
*out = *in
if in.PotentialNodes != nil {
in, out := &in.PotentialNodes, &out.PotentialNodes
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSchedulingSpec.
func (in *PodSchedulingSpec) DeepCopy() *PodSchedulingSpec {
if in == nil {
return nil
}
out := new(PodSchedulingSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodSchedulingStatus) DeepCopyInto(out *PodSchedulingStatus) {
*out = *in
if in.ResourceClaims != nil {
in, out := &in.ResourceClaims, &out.ResourceClaims
*out = make([]ResourceClaimSchedulingStatus, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSchedulingStatus.
func (in *PodSchedulingStatus) DeepCopy() *PodSchedulingStatus {
if in == nil {
return nil
}
out := new(PodSchedulingStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourceClaim) DeepCopyInto(out *ResourceClaim) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaim.
func (in *ResourceClaim) DeepCopy() *ResourceClaim {
if in == nil {
return nil
}
out := new(ResourceClaim)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ResourceClaim) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourceClaimConsumerReference) DeepCopyInto(out *ResourceClaimConsumerReference) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaimConsumerReference.
func (in *ResourceClaimConsumerReference) DeepCopy() *ResourceClaimConsumerReference {
if in == nil {
return nil
}
out := new(ResourceClaimConsumerReference)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourceClaimList) DeepCopyInto(out *ResourceClaimList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]ResourceClaim, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaimList.
func (in *ResourceClaimList) DeepCopy() *ResourceClaimList {
if in == nil {
return nil
}
out := new(ResourceClaimList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ResourceClaimList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourceClaimParametersReference) DeepCopyInto(out *ResourceClaimParametersReference) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaimParametersReference.
func (in *ResourceClaimParametersReference) DeepCopy() *ResourceClaimParametersReference {
if in == nil {
return nil
}
out := new(ResourceClaimParametersReference)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourceClaimSchedulingStatus) DeepCopyInto(out *ResourceClaimSchedulingStatus) {
*out = *in
if in.UnsuitableNodes != nil {
in, out := &in.UnsuitableNodes, &out.UnsuitableNodes
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaimSchedulingStatus.
func (in *ResourceClaimSchedulingStatus) DeepCopy() *ResourceClaimSchedulingStatus {
if in == nil {
return nil
}
out := new(ResourceClaimSchedulingStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourceClaimSpec) DeepCopyInto(out *ResourceClaimSpec) {
*out = *in
if in.ParametersRef != nil {
in, out := &in.ParametersRef, &out.ParametersRef
*out = new(ResourceClaimParametersReference)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaimSpec.
func (in *ResourceClaimSpec) DeepCopy() *ResourceClaimSpec {
if in == nil {
return nil
}
out := new(ResourceClaimSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourceClaimStatus) DeepCopyInto(out *ResourceClaimStatus) {
*out = *in
if in.Allocation != nil {
in, out := &in.Allocation, &out.Allocation
*out = new(AllocationResult)
(*in).DeepCopyInto(*out)
}
if in.ReservedFor != nil {
in, out := &in.ReservedFor, &out.ReservedFor
*out = make([]ResourceClaimConsumerReference, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaimStatus.
func (in *ResourceClaimStatus) DeepCopy() *ResourceClaimStatus {
if in == nil {
return nil
}
out := new(ResourceClaimStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourceClaimTemplate) DeepCopyInto(out *ResourceClaimTemplate) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaimTemplate.
func (in *ResourceClaimTemplate) DeepCopy() *ResourceClaimTemplate {
if in == nil {
return nil
}
out := new(ResourceClaimTemplate)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ResourceClaimTemplate) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourceClaimTemplateList) DeepCopyInto(out *ResourceClaimTemplateList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]ResourceClaimTemplate, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaimTemplateList.
func (in *ResourceClaimTemplateList) DeepCopy() *ResourceClaimTemplateList {
if in == nil {
return nil
}
out := new(ResourceClaimTemplateList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ResourceClaimTemplateList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourceClaimTemplateSpec) DeepCopyInto(out *ResourceClaimTemplateSpec) {
*out = *in
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaimTemplateSpec.
func (in *ResourceClaimTemplateSpec) DeepCopy() *ResourceClaimTemplateSpec {
if in == nil {
return nil
}
out := new(ResourceClaimTemplateSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourceClass) DeepCopyInto(out *ResourceClass) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
if in.ParametersRef != nil {
in, out := &in.ParametersRef, &out.ParametersRef
*out = new(ResourceClassParametersReference)
**out = **in
}
if in.SuitableNodes != nil {
in, out := &in.SuitableNodes, &out.SuitableNodes
*out = new(core.NodeSelector)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClass.
func (in *ResourceClass) DeepCopy() *ResourceClass {
if in == nil {
return nil
}
out := new(ResourceClass)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ResourceClass) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourceClassList) DeepCopyInto(out *ResourceClassList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]ResourceClass, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClassList.
func (in *ResourceClassList) DeepCopy() *ResourceClassList {
if in == nil {
return nil
}
out := new(ResourceClassList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ResourceClassList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourceClassParametersReference) DeepCopyInto(out *ResourceClassParametersReference) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClassParametersReference.
func (in *ResourceClassParametersReference) DeepCopy() *ResourceClassParametersReference {
if in == nil {
return nil
}
out := new(ResourceClassParametersReference)
in.DeepCopyInto(out)
return out
}
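A minimal standalone sketch (not part of the generated file; the driver name below is hypothetical) of the guarantee these generated functions provide: mutating a deep copy is never visible through the original, because pointer fields such as SuitableNodes are duplicated rather than aliased.

package main

import (
	"fmt"

	core "k8s.io/kubernetes/pkg/apis/core"
	resource "k8s.io/kubernetes/pkg/apis/resource"
)

func main() {
	in := &resource.ResourceClass{
		DriverName:    "example.com/driver", // hypothetical driver name
		SuitableNodes: &core.NodeSelector{},
	}
	out := in.DeepCopy()
	out.SuitableNodes = nil              // mutate only the copy...
	fmt.Println(in.SuitableNodes != nil) // ...the original keeps its selector: true
}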


@ -0,0 +1,12 @@
# See the OWNERS docs at https://go.k8s.io/owners
approvers:
- sig-node-approvers
- klueska
- pohly
reviewers:
- klueska
- pohly
- bart0sh
labels:
- sig/node


@ -0,0 +1,479 @@
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package resourceclaim
import (
"context"
"fmt"
"strings"
"time"
v1 "k8s.io/api/core/v1"
resourcev1alpha1 "k8s.io/api/resource/v1alpha1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
v1informers "k8s.io/client-go/informers/core/v1"
resourcev1alpha1informers "k8s.io/client-go/informers/resource/v1alpha1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/scheme"
v1core "k8s.io/client-go/kubernetes/typed/core/v1"
v1listers "k8s.io/client-go/listers/core/v1"
resourcev1alpha1listers "k8s.io/client-go/listers/resource/v1alpha1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/workqueue"
"k8s.io/dynamic-resource-allocation/resourceclaim"
"k8s.io/klog/v2"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/controller/resourceclaim/metrics"
)
const (
// podResourceClaimIndex is the lookup name for the index function which indexes by pod ResourceClaim templates.
podResourceClaimIndex = "pod-resource-claim-index"
maxUIDCacheEntries = 500
)
// Controller creates ResourceClaims for ResourceClaimTemplates in a pod spec.
type Controller struct {
// kubeClient is the kube API client used to communicate with the API
// server.
kubeClient clientset.Interface
// claimLister is the shared ResourceClaim lister used to fetch and store ResourceClaim
// objects from the API server. It is shared with other controllers and
// therefore the ResourceClaim objects in its store should be treated as immutable.
claimLister resourcev1alpha1listers.ResourceClaimLister
claimsSynced cache.InformerSynced
// podLister is the shared Pod lister used to fetch Pod
// objects from the API server. It is shared with other controllers and
// therefore the Pod objects in its store should be treated as immutable.
podLister v1listers.PodLister
podSynced cache.InformerSynced
// templateLister is the shared ResourceClaimTemplate lister used to
// fetch template objects from the API server. It is shared with other
// controllers and therefore the objects in its store should be treated
// as immutable.
templateLister resourcev1alpha1listers.ResourceClaimTemplateLister
templatesSynced cache.InformerSynced
// podIndexer has the common PodResourceClaim indexer installed to
// limit iteration over pods to those of interest.
podIndexer cache.Indexer
// recorder is used to record events in the API server
recorder record.EventRecorder
queue workqueue.RateLimitingInterface
// The deletedObjects cache keeps track of Pods that we know existed
// and have since been removed. For those we can be sure that a
// ReservedFor entry needs to be removed.
deletedObjects *uidCache
}
const (
claimKeyPrefix = "claim:"
podKeyPrefix = "pod:"
)
// NewController creates a ResourceClaim controller.
func NewController(
kubeClient clientset.Interface,
podInformer v1informers.PodInformer,
claimInformer resourcev1alpha1informers.ResourceClaimInformer,
templateInformer resourcev1alpha1informers.ResourceClaimTemplateInformer) (*Controller, error) {
ec := &Controller{
kubeClient: kubeClient,
podLister: podInformer.Lister(),
podIndexer: podInformer.Informer().GetIndexer(),
podSynced: podInformer.Informer().HasSynced,
claimLister: claimInformer.Lister(),
claimsSynced: claimInformer.Informer().HasSynced,
templateLister: templateInformer.Lister(),
templatesSynced: templateInformer.Informer().HasSynced,
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "resource_claim"),
deletedObjects: newUIDCache(maxUIDCacheEntries),
}
metrics.RegisterMetrics()
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(klog.Infof)
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.CoreV1().Events("")})
ec.recorder = eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "resource_claim"})
if _, err := podInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
ec.enqueuePod(obj, false)
},
UpdateFunc: func(old, updated interface{}) {
ec.enqueuePod(updated, false)
},
DeleteFunc: func(obj interface{}) {
ec.enqueuePod(obj, true)
},
}); err != nil {
return nil, err
}
if _, err := claimInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: ec.onResourceClaimAddOrUpdate,
UpdateFunc: func(old, updated interface{}) {
ec.onResourceClaimAddOrUpdate(updated)
},
DeleteFunc: ec.onResourceClaimDelete,
}); err != nil {
return nil, err
}
if err := ec.podIndexer.AddIndexers(cache.Indexers{podResourceClaimIndex: podResourceClaimIndexFunc}); err != nil {
return nil, fmt.Errorf("could not initialize ResourceClaim controller: %w", err)
}
return ec, nil
}
func (ec *Controller) enqueuePod(obj interface{}, deleted bool) {
if d, ok := obj.(cache.DeletedFinalStateUnknown); ok {
obj = d.Obj
}
pod, ok := obj.(*v1.Pod)
if !ok {
// Not a pod?!
return
}
if deleted {
ec.deletedObjects.Add(pod.UID)
}
if len(pod.Spec.ResourceClaims) == 0 {
// Nothing to do for it at all.
return
}
// Release reservations of a deleted or completed pod?
if deleted ||
podutil.IsPodTerminal(pod) ||
// Deleted and not scheduled:
pod.DeletionTimestamp != nil && pod.Spec.NodeName == "" {
for _, podClaim := range pod.Spec.ResourceClaims {
claimName := resourceclaim.Name(pod, &podClaim)
ec.queue.Add(claimKeyPrefix + pod.Namespace + "/" + claimName)
}
}
// Create ResourceClaim for inline templates?
if pod.DeletionTimestamp == nil {
for _, podClaim := range pod.Spec.ResourceClaims {
if podClaim.Source.ResourceClaimTemplateName != nil {
// It has at least one inline template, work on it.
ec.queue.Add(podKeyPrefix + pod.Namespace + "/" + pod.Name)
break
}
}
}
}
func (ec *Controller) onResourceClaimAddOrUpdate(obj interface{}) {
claim, ok := obj.(*resourcev1alpha1.ResourceClaim)
if !ok {
return
}
// When starting up, we have to check all claims to find those with
// stale pods in ReservedFor. During an update, an entry for a pod
// that no longer exists might get added.
ec.queue.Add(claimKeyPrefix + claim.Namespace + "/" + claim.Name)
}
func (ec *Controller) onResourceClaimDelete(obj interface{}) {
claim, ok := obj.(*resourcev1alpha1.ResourceClaim)
if !ok {
return
}
// Someone deleted a ResourceClaim, either intentionally or
// accidentally. If there is a pod referencing it because of
// an inline resource, then we should re-create the ResourceClaim.
// The common indexer does some prefiltering for us by
// limiting the list to those pods which reference
// the ResourceClaim.
objs, err := ec.podIndexer.ByIndex(podResourceClaimIndex, fmt.Sprintf("%s/%s", claim.Namespace, claim.Name))
if err != nil {
runtime.HandleError(fmt.Errorf("listing pods from cache: %v", err))
return
}
for _, obj := range objs {
ec.enqueuePod(obj, false)
}
}
func (ec *Controller) Run(ctx context.Context, workers int) {
defer runtime.HandleCrash()
defer ec.queue.ShutDown()
klog.Infof("Starting ephemeral volume controller")
defer klog.Infof("Shutting down ephemeral volume controller")
if !cache.WaitForNamedCacheSync("ephemeral", ctx.Done(), ec.podSynced, ec.claimsSynced) {
return
}
for i := 0; i < workers; i++ {
go wait.UntilWithContext(ctx, ec.runWorker, time.Second)
}
<-ctx.Done()
}
func (ec *Controller) runWorker(ctx context.Context) {
for ec.processNextWorkItem(ctx) {
}
}
func (ec *Controller) processNextWorkItem(ctx context.Context) bool {
key, shutdown := ec.queue.Get()
if shutdown {
return false
}
defer ec.queue.Done(key)
err := ec.syncHandler(ctx, key.(string))
if err == nil {
ec.queue.Forget(key)
return true
}
runtime.HandleError(fmt.Errorf("%v failed with: %v", key, err))
ec.queue.AddRateLimited(key)
return true
}
// syncHandler is invoked for each work item which might need to be processed.
// If an error is returned from this function, the item will be requeued.
func (ec *Controller) syncHandler(ctx context.Context, key string) error {
sep := strings.Index(key, ":")
if sep < 0 {
return fmt.Errorf("unexpected key: %s", key)
}
prefix, object := key[0:sep+1], key[sep+1:]
namespace, name, err := cache.SplitMetaNamespaceKey(object)
if err != nil {
return err
}
switch prefix {
case podKeyPrefix:
return ec.syncPod(ctx, namespace, name)
case claimKeyPrefix:
return ec.syncClaim(ctx, namespace, name)
default:
return fmt.Errorf("unexpected key prefix: %s", prefix)
}
}
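// exampleKeyRoundTrip is an illustrative sketch (not in the original file)
// showing how a work-queue key round-trips through the prefix encoding that
// enqueuePod and syncHandler share. It uses only packages already imported
// above; the namespace and pod name are hypothetical.
func exampleKeyRoundTrip() {
	key := podKeyPrefix + "default" + "/" + "my-pod" // "pod:default/my-pod"
	sep := strings.Index(key, ":")
	prefix, object := key[0:sep+1], key[sep+1:] // "pod:", "default/my-pod"
	namespace, name, err := cache.SplitMetaNamespaceKey(object)
	if err != nil {
		// Cannot happen for a well-formed "namespace/name" key.
		panic(err)
	}
	fmt.Println(prefix, namespace, name) // pod: default my-pod
}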
func (ec *Controller) syncPod(ctx context.Context, namespace, name string) error {
logger := klog.LoggerWithValues(klog.FromContext(ctx), "pod", klog.KRef(namespace, name))
ctx = klog.NewContext(ctx, logger)
pod, err := ec.podLister.Pods(namespace).Get(name)
if err != nil {
if errors.IsNotFound(err) {
logger.V(5).Info("nothing to do for pod, it is gone")
return nil
}
return err
}
// Ignore pods which are already getting deleted.
if pod.DeletionTimestamp != nil {
logger.V(5).Info("nothing to do for pod, it is marked for deletion")
return nil
}
for _, podClaim := range pod.Spec.ResourceClaims {
if err := ec.handleClaim(ctx, pod, podClaim); err != nil {
ec.recorder.Event(pod, v1.EventTypeWarning, "FailedResourceClaimCreation", fmt.Sprintf("PodResourceClaim %s: %v", podClaim.Name, err))
return fmt.Errorf("pod %s/%s, PodResourceClaim %s: %v", namespace, name, podClaim.Name, err)
}
}
return nil
}
// handleClaim is invoked for each resource claim of a pod.
func (ec *Controller) handleClaim(ctx context.Context, pod *v1.Pod, podClaim v1.PodResourceClaim) error {
logger := klog.LoggerWithValues(klog.FromContext(ctx), "podClaim", podClaim.Name)
ctx = klog.NewContext(ctx, logger)
logger.V(5).Info("checking", "podClaim", podClaim.Name)
templateName := podClaim.Source.ResourceClaimTemplateName
if templateName == nil {
return nil
}
claimName := resourceclaim.Name(pod, &podClaim)
claim, err := ec.claimLister.ResourceClaims(pod.Namespace).Get(claimName)
if err != nil && !errors.IsNotFound(err) {
return err
}
if claim != nil {
if err := resourceclaim.IsForPod(pod, claim); err != nil {
return err
}
// Already created, nothing more to do.
logger.V(5).Info("claim already created", "podClaim", podClaim.Name, "resourceClaim", claimName)
return nil
}
template, err := ec.templateLister.ResourceClaimTemplates(pod.Namespace).Get(*templateName)
if err != nil {
return fmt.Errorf("resource claim template %q: %v", *templateName, err)
}
// Create the ResourceClaim with pod as owner.
isTrue := true
claim = &resourcev1alpha1.ResourceClaim{
ObjectMeta: metav1.ObjectMeta{
Name: claimName,
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: "v1",
Kind: "Pod",
Name: pod.Name,
UID: pod.UID,
Controller: &isTrue,
BlockOwnerDeletion: &isTrue,
},
},
Annotations: template.Spec.ObjectMeta.Annotations,
Labels: template.Spec.ObjectMeta.Labels,
},
Spec: template.Spec.Spec,
}
metrics.ResourceClaimCreateAttempts.Inc()
_, err = ec.kubeClient.ResourceV1alpha1().ResourceClaims(pod.Namespace).Create(ctx, claim, metav1.CreateOptions{})
if err != nil {
metrics.ResourceClaimCreateFailures.Inc()
return fmt.Errorf("create ResourceClaim %s: %v", claimName, err)
}
return nil
}
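// exampleGeneratedClaimName is an illustrative sketch (not in the original
// file): per the API documentation, the claim created by handleClaim is named
// "<pod name>-<pod claim name>". The pod and claim names are hypothetical.
func exampleGeneratedClaimName() {
	pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "my-pod", Namespace: "default"}}
	podClaim := v1.PodResourceClaim{Name: "gpu"}
	fmt.Println(resourceclaim.Name(pod, &podClaim)) // expected: my-pod-gpu
}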
func (ec *Controller) syncClaim(ctx context.Context, namespace, name string) error {
logger := klog.LoggerWithValues(klog.FromContext(ctx), "claim", klog.KRef(namespace, name))
ctx = klog.NewContext(ctx, logger)
claim, err := ec.claimLister.ResourceClaims(namespace).Get(name)
if err != nil {
if errors.IsNotFound(err) {
logger.V(5).Info("nothing to do for claim, it is gone")
return nil
}
return err
}
// Check if the ReservedFor entries are all still valid.
valid := make([]resourcev1alpha1.ResourceClaimConsumerReference, 0, len(claim.Status.ReservedFor))
for _, reservedFor := range claim.Status.ReservedFor {
if reservedFor.APIGroup == "" &&
reservedFor.Resource == "pods" {
// A pod falls into one of three categories:
// - we have it in our cache -> don't remove it until we are told that it got removed
// - we don't have it in our cache anymore, but we have seen it before -> it was deleted, remove it
// - not in our cache, not seen -> double-check with API server before removal
keepEntry := true
// Tracking deleted pods in the LRU cache is an
// optimization. Without this cache, the code would
// have to do the API call below for every deleted pod
// to ensure that the pod really doesn't exist. With
// the cache, most of the time the pod will be recorded
// as deleted and the API call can be avoided.
if ec.deletedObjects.Has(reservedFor.UID) {
// We know that the pod was deleted. This is
// easy to check and thus is done first.
keepEntry = false
} else {
pod, err := ec.podLister.Pods(claim.Namespace).Get(reservedFor.Name)
if err != nil && !errors.IsNotFound(err) {
return err
}
if pod == nil {
// We might not have it in our informer cache
// yet. Removing the pod while the scheduler is
// scheduling it would be bad. We have to be
// absolutely sure and thus have to check with
// the API server.
pod, err := ec.kubeClient.CoreV1().Pods(claim.Namespace).Get(ctx, reservedFor.Name, metav1.GetOptions{})
if err != nil && !errors.IsNotFound(err) {
return err
}
if pod == nil || pod.UID != reservedFor.UID {
keepEntry = false
}
} else if pod.UID != reservedFor.UID {
// Pod exists, but is a different incarnation under the same name.
keepEntry = false
}
}
if keepEntry {
valid = append(valid, reservedFor)
}
continue
}
// TODO: support generic object lookup
return fmt.Errorf("unsupported ReservedFor entry: %v", reservedFor)
}
if len(valid) < len(claim.Status.ReservedFor) {
// TODO (#113700): patch
claim := claim.DeepCopy()
claim.Status.ReservedFor = valid
_, err := ec.kubeClient.ResourceV1alpha1().ResourceClaims(claim.Namespace).UpdateStatus(ctx, claim, metav1.UpdateOptions{})
if err != nil {
return err
}
}
return nil
}
// podResourceClaimIndexFunc is an index function that returns ResourceClaim keys (=
// namespace/name) for ResourceClaimTemplates in a given pod.
func podResourceClaimIndexFunc(obj interface{}) ([]string, error) {
pod, ok := obj.(*v1.Pod)
if !ok {
return []string{}, nil
}
keys := []string{}
for _, podClaim := range pod.Spec.ResourceClaims {
if podClaim.Source.ResourceClaimTemplateName != nil {
claimName := resourceclaim.Name(pod, &podClaim)
keys = append(keys, fmt.Sprintf("%s/%s", pod.Namespace, claimName))
}
}
return keys, nil
}


@ -0,0 +1,387 @@
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package resourceclaim
import (
"context"
"errors"
"sort"
"testing"
"github.com/stretchr/testify/assert"
v1 "k8s.io/api/core/v1"
resourcev1alpha1 "k8s.io/api/resource/v1alpha1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes/fake"
k8stesting "k8s.io/client-go/testing"
"k8s.io/client-go/tools/cache"
"k8s.io/component-base/metrics/testutil"
"k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/controller"
ephemeralvolumemetrics "k8s.io/kubernetes/pkg/controller/resourceclaim/metrics"
)
var (
testPodName = "test-pod"
testNamespace = "my-namespace"
testPodUID = types.UID("uidpod1")
otherNamespace = "not-my-namespace"
podResourceClaimName = "acme-resource"
templateName = "my-template"
className = "my-resource-class"
testPod = makePod(testPodName, testNamespace, testPodUID)
testPodWithResource = makePod(testPodName, testNamespace, testPodUID, *makePodResourceClaim(podResourceClaimName, templateName))
otherTestPod = makePod(testPodName+"-II", testNamespace, testPodUID+"-II")
testClaim = makeClaim(testPodName+"-"+podResourceClaimName, testNamespace, className, makeOwnerReference(testPodWithResource, true))
testClaimReserved = func() *resourcev1alpha1.ResourceClaim {
claim := testClaim.DeepCopy()
claim.Status.ReservedFor = append(claim.Status.ReservedFor,
resourcev1alpha1.ResourceClaimConsumerReference{
Resource: "pods",
Name: testPodWithResource.Name,
UID: testPodWithResource.UID,
},
)
return claim
}()
testClaimReservedTwice = func() *resourcev1alpha1.ResourceClaim {
claim := testClaimReserved.DeepCopy()
claim.Status.ReservedFor = append(claim.Status.ReservedFor,
resourcev1alpha1.ResourceClaimConsumerReference{
Resource: "pods",
Name: otherTestPod.Name,
UID: otherTestPod.UID,
},
)
return claim
}()
conflictingClaim = makeClaim(testPodName+"-"+podResourceClaimName, testNamespace, className, nil)
otherNamespaceClaim = makeClaim(testPodName+"-"+podResourceClaimName, otherNamespace, className, nil)
template = makeTemplate(templateName, testNamespace, className)
)
func init() {
klog.InitFlags(nil)
}
func TestSyncHandler(t *testing.T) {
tests := []struct {
name string
key string
claims []*resourcev1alpha1.ResourceClaim
pods []*v1.Pod
podsLater []*v1.Pod
templates []*resourcev1alpha1.ResourceClaimTemplate
expectedClaims []resourcev1alpha1.ResourceClaim
expectedError bool
expectedMetrics expectedMetrics
}{
{
name: "create",
pods: []*v1.Pod{testPodWithResource},
templates: []*resourcev1alpha1.ResourceClaimTemplate{template},
key: podKey(testPodWithResource),
expectedClaims: []resourcev1alpha1.ResourceClaim{*testClaim},
expectedMetrics: expectedMetrics{1, 0},
},
{
name: "missing-template",
pods: []*v1.Pod{testPodWithResource},
templates: nil,
key: podKey(testPodWithResource),
expectedError: true,
},
{
name: "nop",
pods: []*v1.Pod{testPodWithResource},
key: podKey(testPodWithResource),
claims: []*resourcev1alpha1.ResourceClaim{testClaim},
expectedClaims: []resourcev1alpha1.ResourceClaim{*testClaim},
expectedMetrics: expectedMetrics{0, 0},
},
{
name: "no-such-pod",
key: podKey(testPodWithResource),
},
{
name: "pod-deleted",
pods: func() []*v1.Pod {
deleted := metav1.Now()
pods := []*v1.Pod{testPodWithResource.DeepCopy()}
pods[0].DeletionTimestamp = &deleted
return pods
}(),
key: podKey(testPodWithResource),
},
{
name: "no-claims",
pods: []*v1.Pod{testPod},
key: podKey(testPod),
},
{
name: "create-with-other-claim",
pods: []*v1.Pod{testPodWithResource},
templates: []*resourcev1alpha1.ResourceClaimTemplate{template},
key: podKey(testPodWithResource),
claims: []*resourcev1alpha1.ResourceClaim{otherNamespaceClaim},
expectedClaims: []resourcev1alpha1.ResourceClaim{*otherNamespaceClaim, *testClaim},
expectedMetrics: expectedMetrics{1, 0},
},
{
name: "wrong-claim-owner",
pods: []*v1.Pod{testPodWithResource},
key: podKey(testPodWithResource),
claims: []*resourcev1alpha1.ResourceClaim{conflictingClaim},
expectedClaims: []resourcev1alpha1.ResourceClaim{*conflictingClaim},
expectedError: true,
},
{
name: "create-conflict",
pods: []*v1.Pod{testPodWithResource},
templates: []*resourcev1alpha1.ResourceClaimTemplate{template},
key: podKey(testPodWithResource),
expectedMetrics: expectedMetrics{1, 1},
expectedError: true,
},
{
name: "stay-reserved-seen",
pods: []*v1.Pod{testPodWithResource},
key: claimKey(testClaimReserved),
claims: []*resourcev1alpha1.ResourceClaim{testClaimReserved},
expectedClaims: []resourcev1alpha1.ResourceClaim{*testClaimReserved},
expectedMetrics: expectedMetrics{0, 0},
},
{
name: "stay-reserved-not-seen",
podsLater: []*v1.Pod{testPodWithResource},
key: claimKey(testClaimReserved),
claims: []*resourcev1alpha1.ResourceClaim{testClaimReserved},
expectedClaims: []resourcev1alpha1.ResourceClaim{*testClaimReserved},
expectedMetrics: expectedMetrics{0, 0},
},
{
name: "clear-reserved",
pods: []*v1.Pod{},
key: claimKey(testClaimReserved),
claims: []*resourcev1alpha1.ResourceClaim{testClaimReserved},
expectedClaims: []resourcev1alpha1.ResourceClaim{*testClaim},
expectedMetrics: expectedMetrics{0, 0},
},
{
name: "remove-reserved",
pods: []*v1.Pod{testPod},
key: claimKey(testClaimReservedTwice),
claims: []*resourcev1alpha1.ResourceClaim{testClaimReservedTwice},
expectedClaims: []resourcev1alpha1.ResourceClaim{*testClaimReserved},
expectedMetrics: expectedMetrics{0, 0},
},
}
for _, tc := range tests {
// Run sequentially because of global logging and global metrics.
t.Run(tc.name, func(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
var objects []runtime.Object
for _, pod := range tc.pods {
objects = append(objects, pod)
}
for _, claim := range tc.claims {
objects = append(objects, claim)
}
for _, template := range tc.templates {
objects = append(objects, template)
}
fakeKubeClient := createTestClient(objects...)
if tc.expectedMetrics.numFailures > 0 {
fakeKubeClient.PrependReactor("create", "resourceclaims", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) {
return true, nil, apierrors.NewConflict(action.GetResource().GroupResource(), "fake name", errors.New("fake conflict"))
})
}
setupMetrics()
informerFactory := informers.NewSharedInformerFactory(fakeKubeClient, controller.NoResyncPeriodFunc())
podInformer := informerFactory.Core().V1().Pods()
claimInformer := informerFactory.Resource().V1alpha1().ResourceClaims()
templateInformer := informerFactory.Resource().V1alpha1().ResourceClaimTemplates()
ec, err := NewController(fakeKubeClient, podInformer, claimInformer, templateInformer)
if err != nil {
t.Fatalf("error creating ephemeral controller : %v", err)
}
// Ensure informers are up-to-date.
go informerFactory.Start(ctx.Done())
stopInformers := func() {
cancel()
informerFactory.Shutdown()
}
defer stopInformers()
informerFactory.WaitForCacheSync(ctx.Done())
cache.WaitForCacheSync(ctx.Done(), podInformer.Informer().HasSynced, claimInformer.Informer().HasSynced, templateInformer.Informer().HasSynced)
// Simulate race: stop informers, add more pods that the controller doesn't know about.
stopInformers()
for _, pod := range tc.podsLater {
_, err := fakeKubeClient.CoreV1().Pods(pod.Namespace).Create(ctx, pod, metav1.CreateOptions{})
if err != nil {
t.Fatalf("unexpected error while creating pod: %v", err)
}
}
err = ec.syncHandler(context.TODO(), tc.key)
if err != nil && !tc.expectedError {
t.Fatalf("unexpected error while running handler: %v", err)
}
if err == nil && tc.expectedError {
t.Fatalf("unexpected success")
}
claims, err := fakeKubeClient.ResourceV1alpha1().ResourceClaims("").List(ctx, metav1.ListOptions{})
if err != nil {
t.Fatalf("unexpected error while listing claims: %v", err)
}
assert.Equal(t, normalizeClaims(tc.expectedClaims), normalizeClaims(claims.Items))
expectMetrics(t, tc.expectedMetrics)
})
}
}
func makeClaim(name, namespace, classname string, owner *metav1.OwnerReference) *resourcev1alpha1.ResourceClaim {
claim := &resourcev1alpha1.ResourceClaim{
ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace},
Spec: resourcev1alpha1.ResourceClaimSpec{
ResourceClassName: classname,
},
}
if owner != nil {
claim.OwnerReferences = []metav1.OwnerReference{*owner}
}
return claim
}
func makePodResourceClaim(name, templateName string) *v1.PodResourceClaim {
return &v1.PodResourceClaim{
Name: name,
Source: v1.ClaimSource{
ResourceClaimTemplateName: &templateName,
},
}
}
func makePod(name, namespace string, uid types.UID, podClaims ...v1.PodResourceClaim) *v1.Pod {
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace, UID: uid},
Spec: v1.PodSpec{
ResourceClaims: podClaims,
},
}
return pod
}
func makeTemplate(name, namespace, classname string) *resourcev1alpha1.ResourceClaimTemplate {
template := &resourcev1alpha1.ResourceClaimTemplate{
ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace},
Spec: resourcev1alpha1.ResourceClaimTemplateSpec{
Spec: resourcev1alpha1.ResourceClaimSpec{
ResourceClassName: classname,
},
},
}
return template
}
func podKey(pod *v1.Pod) string {
return podKeyPrefix + pod.Namespace + "/" + pod.Name
}
func claimKey(claim *resourcev1alpha1.ResourceClaim) string {
return claimKeyPrefix + claim.Namespace + "/" + claim.Name
}
func makeOwnerReference(pod *v1.Pod, isController bool) *metav1.OwnerReference {
isTrue := true
return &metav1.OwnerReference{
APIVersion: "v1",
Kind: "Pod",
Name: pod.Name,
UID: pod.UID,
Controller: &isController,
BlockOwnerDeletion: &isTrue,
}
}
func normalizeClaims(claims []resourcev1alpha1.ResourceClaim) []resourcev1alpha1.ResourceClaim {
sort.Slice(claims, func(i, j int) bool {
if claims[i].Namespace != claims[j].Namespace {
return claims[i].Namespace < claims[j].Namespace
}
return claims[i].Name < claims[j].Name
})
for i := range claims {
if len(claims[i].Status.ReservedFor) == 0 {
claims[i].Status.ReservedFor = nil
}
}
return claims
}
func createTestClient(objects ...runtime.Object) *fake.Clientset {
fakeClient := fake.NewSimpleClientset(objects...)
return fakeClient
}
// Metrics helpers
type expectedMetrics struct {
numCreated int
numFailures int
}
func expectMetrics(t *testing.T, em expectedMetrics) {
t.Helper()
actualCreated, err := testutil.GetCounterMetricValue(ephemeralvolumemetrics.ResourceClaimCreateAttempts)
handleErr(t, err, "ResourceClaimCreate")
if actualCreated != float64(em.numCreated) {
t.Errorf("Expected claims to be created %d, got %v", em.numCreated, actualCreated)
}
actualConflicts, err := testutil.GetCounterMetricValue(ephemeralvolumemetrics.ResourceClaimCreateFailures)
handleErr(t, err, "ResourceClaimCreate/Conflict")
if actualConflicts != float64(em.numFailures) {
t.Errorf("Expected claims to have conflicts %d, got %v", em.numFailures, actualConflicts)
}
}
func handleErr(t *testing.T, err error, metricName string) {
if err != nil {
t.Errorf("Failed to get %s value, err: %v", metricName, err)
}
}
func setupMetrics() {
ephemeralvolumemetrics.RegisterMetrics()
ephemeralvolumemetrics.ResourceClaimCreateAttempts.Reset()
ephemeralvolumemetrics.ResourceClaimCreateFailures.Reset()
}


@ -0,0 +1,21 @@
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package resourceclaim implements the controller part of
// https://github.com/kubernetes/enhancements/tree/master/keps/sig-node/3063-dynamic-resource-allocation
//
// It was derived from the generic ephemeral volume controller.
package resourceclaim


@ -0,0 +1,6 @@
# See the OWNERS docs at https://go.k8s.io/owners
reviewers:
- sig-instrumentation-reviewers
labels:
- sig/instrumentation


@ -0,0 +1,58 @@
/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package metrics
import (
"sync"
"k8s.io/component-base/metrics"
"k8s.io/component-base/metrics/legacyregistry"
)
// ResourceClaimSubsystem - subsystem name used for ResourceClaim creation
const ResourceClaimSubsystem = "resourceclaim_controller"
var (
// ResourceClaimCreateAttempts tracks the number of
// ResourceClaims().Create calls (both successful and unsuccessful)
ResourceClaimCreateAttempts = metrics.NewCounter(
&metrics.CounterOpts{
Subsystem: ResourceClaimSubsystem,
Name: "create_attempts_total",
Help: "Number of ResourceClaims creation requests",
StabilityLevel: metrics.ALPHA,
})
// ResourceClaimCreateFailures tracks the number of unsuccessful
// ResourceClaims().Create calls
ResourceClaimCreateFailures = metrics.NewCounter(
&metrics.CounterOpts{
Subsystem: ResourceClaimSubsystem,
Name: "create_failures_total",
Help: "Number of ResourceClaims creation request failures",
StabilityLevel: metrics.ALPHA,
})
)
var registerMetrics sync.Once
// RegisterMetrics registers ResourceClaim metrics.
func RegisterMetrics() {
registerMetrics.Do(func() {
legacyregistry.MustRegister(ResourceClaimCreateAttempts)
legacyregistry.MustRegister(ResourceClaimCreateFailures)
})
}
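A test-style sketch (standalone, not part of the file above) of how these counters are exercised and read back. RegisterMetrics is idempotent thanks to the sync.Once, so calling it here is safe.

package main

import (
	"fmt"

	"k8s.io/component-base/metrics/testutil"
	"k8s.io/kubernetes/pkg/controller/resourceclaim/metrics"
)

func main() {
	metrics.RegisterMetrics()
	metrics.ResourceClaimCreateAttempts.Inc()
	v, err := testutil.GetCounterMetricValue(metrics.ResourceClaimCreateAttempts)
	if err != nil {
		panic(err)
	}
	fmt.Println(v) // at least 1, plus any earlier increments in this process
}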


@ -0,0 +1,53 @@
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package resourceclaim
import (
"sync"
"github.com/golang/groupcache/lru"
"k8s.io/apimachinery/pkg/types"
)
// uidCache is an LRU cache for UIDs.
type uidCache struct {
mutex sync.Mutex
cache *lru.Cache
}
// newUIDCache returns a uidCache.
func newUIDCache(maxCacheEntries int) *uidCache {
return &uidCache{
cache: lru.New(maxCacheEntries),
}
}
// Add adds a uid to the cache.
func (c *uidCache) Add(uid types.UID) {
c.mutex.Lock()
defer c.mutex.Unlock()
c.cache.Add(uid, nil)
}
// Has returns whether a UID is in the cache.
func (c *uidCache) Has(uid types.UID) bool {
c.mutex.Lock()
defer c.mutex.Unlock()
_, found := c.cache.Get(uid)
return found
}
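// exampleUIDCache is an illustrative, in-package sketch (not in the original
// file; assumes "fmt" is added to the imports above). With capacity 1, the
// second Add evicts the first entry.
func exampleUIDCache() {
	c := newUIDCache(1)
	c.Add(types.UID("uid-a"))
	c.Add(types.UID("uid-b"))
	fmt.Println(c.Has(types.UID("uid-a"))) // false: evicted
	fmt.Println(c.Has(types.UID("uid-b"))) // true
}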


@ -38,6 +38,7 @@ import (
_ "k8s.io/kubernetes/pkg/apis/node/install"
_ "k8s.io/kubernetes/pkg/apis/policy/install"
_ "k8s.io/kubernetes/pkg/apis/rbac/install"
_ "k8s.io/kubernetes/pkg/apis/resource/install"
_ "k8s.io/kubernetes/pkg/apis/scheduling/install"
_ "k8s.io/kubernetes/pkg/apis/storage/install"
)


@ -54,6 +54,7 @@ import (
policyapiv1 "k8s.io/api/policy/v1"
policyapiv1beta1 "k8s.io/api/policy/v1beta1"
rbacv1 "k8s.io/api/rbac/v1"
resourcev1alpha1 "k8s.io/api/resource/v1alpha1"
schedulingapiv1 "k8s.io/api/scheduling/v1"
storageapiv1 "k8s.io/api/storage/v1"
storageapiv1alpha1 "k8s.io/api/storage/v1alpha1"
@ -108,6 +109,7 @@ import (
noderest "k8s.io/kubernetes/pkg/registry/node/rest"
policyrest "k8s.io/kubernetes/pkg/registry/policy/rest"
rbacrest "k8s.io/kubernetes/pkg/registry/rbac/rest"
resourcerest "k8s.io/kubernetes/pkg/registry/resource/rest"
schedulingrest "k8s.io/kubernetes/pkg/registry/scheduling/rest"
storagerest "k8s.io/kubernetes/pkg/registry/storage/rest"
)
@ -435,6 +437,7 @@ func (c completedConfig) New(delegationTarget genericapiserver.DelegationTarget)
appsrest.StorageProvider{},
admissionregistrationrest.RESTStorageProvider{Authorizer: c.GenericConfig.Authorization.Authorizer, DiscoveryClient: discoveryClientForAdmissionRegistration},
eventsrest.RESTStorageProvider{TTL: c.ExtraConfig.EventTTL},
resourcerest.RESTStorageProvider{},
}
if err := m.InstallAPIs(c.ExtraConfig.APIResourceConfigSource, c.GenericConfig.RESTOptionsGetter, restStorageProviders...); err != nil {
return nil, err
@ -704,6 +707,7 @@ var (
admissionregistrationv1alpha1.SchemeGroupVersion,
apiserverinternalv1alpha1.SchemeGroupVersion,
authenticationv1alpha1.SchemeGroupVersion,
resourcev1alpha1.SchemeGroupVersion,
networkingapiv1alpha1.SchemeGroupVersion,
storageapiv1alpha1.SchemeGroupVersion,
flowcontrolv1alpha1.SchemeGroupVersion,


@ -274,6 +274,14 @@ const (
// Enables usage of hugepages-<size> in downward API.
DownwardAPIHugePages featuregate.Feature = "DownwardAPIHugePages"
// owner: @pohly
// kep: http://kep.k8s.io/3063
// alpha: v1.26
//
// Enables support for resources with custom parameters and a lifecycle
// that is independent of a Pod.
DynamicResourceAllocation featuregate.Feature = "DynamicResourceAllocation"
// owner: @andrewsykim
// kep: https://kep.k8s.io/1672
// alpha: v1.20
@ -979,6 +987,8 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS
EndpointSliceTerminatingCondition: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in v1.28
DynamicResourceAllocation: {Default: false, PreRelease: featuregate.Alpha},
EphemeralContainers: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.27
EventedPLEG: {Default: false, PreRelease: featuregate.Alpha},

File diff suppressed because it is too large


@ -78,6 +78,9 @@ rules:
- k8s.io/kubernetes/pkg/apis/rbac/v1
- k8s.io/kubernetes/pkg/apis/rbac/v1alpha1
- k8s.io/kubernetes/pkg/apis/rbac/v1beta1
- k8s.io/kubernetes/pkg/apis/resource
- k8s.io/kubernetes/pkg/apis/resource/install
- k8s.io/kubernetes/pkg/apis/resource/v1alpha1
- k8s.io/kubernetes/pkg/apis/scheduling
- k8s.io/kubernetes/pkg/apis/scheduling/install
- k8s.io/kubernetes/pkg/apis/scheduling/v1alpha1


@ -38,6 +38,7 @@ import (
_ "k8s.io/kubernetes/pkg/apis/node/install"
_ "k8s.io/kubernetes/pkg/apis/policy/install"
_ "k8s.io/kubernetes/pkg/apis/rbac/install"
_ "k8s.io/kubernetes/pkg/apis/resource/install"
_ "k8s.io/kubernetes/pkg/apis/scheduling/install"
_ "k8s.io/kubernetes/pkg/apis/storage/install"


@ -22,7 +22,9 @@ import (
"strings"
"time"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
// TODO: Migrate kubelet to either use its own internal objects or client library.
v1 "k8s.io/api/core/v1"
internalapi "k8s.io/cri-api/pkg/apis"
@ -31,6 +33,7 @@ import (
"k8s.io/kubernetes/pkg/kubelet/apis/podresources"
"k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
"k8s.io/kubernetes/pkg/kubelet/cm/devicemanager"
"k8s.io/kubernetes/pkg/kubelet/cm/dra"
"k8s.io/kubernetes/pkg/kubelet/config"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
evictionapi "k8s.io/kubernetes/pkg/kubelet/eviction/api"
@ -115,6 +118,16 @@ type ContainerManager interface {
// GetNodeAllocatableAbsolute returns the absolute value of Node Allocatable which is primarily useful for enforcement.
GetNodeAllocatableAbsolute() v1.ResourceList
// PrepareResources prepares pod resources
PrepareResources(pod *v1.Pod, container *v1.Container) (*dra.ContainerInfo, error)
// UnprepareResources unprepares pod resources
UnprepareResources(*v1.Pod) error
// PodMightNeedToUnprepareResources returns true if the pod with the given UID
// might need to unprepare resources.
PodMightNeedToUnprepareResources(UID types.UID) bool
// Implements the podresources Provider API for CPUs, Memory and Devices
podresources.CPUsProvider
podresources.DevicesProvider


@ -39,10 +39,12 @@ import (
libcontaineruserns "github.com/opencontainers/runc/libcontainer/userns"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/types"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
utilfeature "k8s.io/apiserver/pkg/util/feature"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/record"
utilsysctl "k8s.io/component-helpers/node/util/sysctl"
internalapi "k8s.io/cri-api/pkg/apis"
@ -53,6 +55,7 @@ import (
"k8s.io/kubernetes/pkg/kubelet/cm/containermap"
"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager"
"k8s.io/kubernetes/pkg/kubelet/cm/devicemanager"
"k8s.io/kubernetes/pkg/kubelet/cm/dra"
"k8s.io/kubernetes/pkg/kubelet/cm/memorymanager"
memorymanagerstate "k8s.io/kubernetes/pkg/kubelet/cm/memorymanager/state"
"k8s.io/kubernetes/pkg/kubelet/cm/topologymanager"
@ -128,6 +131,8 @@ type containerManagerImpl struct {
memoryManager memorymanager.Manager
// Interface for Topology resource co-ordination
topologyManager topologymanager.Manager
// Interface for Dynamic Resource Allocation management.
draManager dra.Manager
}
type features struct {
@ -195,7 +200,7 @@ func validateSystemRequirements(mountUtil mount.Interface) (features, error) {
// TODO(vmarmol): Add limits to the system containers.
// Takes the absolute name of the specified containers.
// Empty container name disables use of the specified container.
func NewContainerManager(mountUtil mount.Interface, cadvisorInterface cadvisor.Interface, nodeConfig NodeConfig, failSwapOn bool, recorder record.EventRecorder) (ContainerManager, error) {
func NewContainerManager(mountUtil mount.Interface, cadvisorInterface cadvisor.Interface, nodeConfig NodeConfig, failSwapOn bool, recorder record.EventRecorder, kubeClient clientset.Interface) (ContainerManager, error) {
subsystems, err := GetCgroupSubsystems()
if err != nil {
return nil, fmt.Errorf("failed to get mounted cgroup subsystems: %v", err)
@ -307,6 +312,15 @@ func NewContainerManager(mountUtil mount.Interface, cadvisorInterface cadvisor.I
}
cm.topologyManager.AddHintProvider(cm.deviceManager)
// initialize DRA manager
if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.DynamicResourceAllocation) {
klog.InfoS("Creating Dynamic Resource Allocation (DRA) manager")
cm.draManager, err = dra.NewManagerImpl(kubeClient)
if err != nil {
return nil, err
}
}
// Initialize CPU manager
cm.cpuManager, err = cpumanager.NewManager(
nodeConfig.CPUManagerPolicy,
@ -642,6 +656,13 @@ func (cm *containerManagerImpl) GetPluginRegistrationHandler() cache.PluginHandl
// TODO: move the GetResources logic to PodContainerManager.
func (cm *containerManagerImpl) GetResources(pod *v1.Pod, container *v1.Container) (*kubecontainer.RunContainerOptions, error) {
opts := &kubecontainer.RunContainerOptions{}
if cm.draManager != nil {
resOpts, err := cm.PrepareResources(pod, container)
if err != nil {
return nil, err
}
opts.Annotations = append(opts.Annotations, resOpts.Annotations...)
}
// Allocate should already be called during predicateAdmitHandler.Admit(),
// just try to fetch device runtime information from cached state here
devOpts, err := cm.deviceManager.GetDeviceRunContainerOptions(pod, container)
@ -671,13 +692,14 @@ func (cm *containerManagerImpl) GetAllocateResourcesPodAdmitHandler() lifecycle.
// work as we add more and more hint providers that the TopologyManager
// needs to call Allocate() on (that may not be directly instantiated
// inside this component).
return &resourceAllocator{cm.cpuManager, cm.memoryManager, cm.deviceManager}
return &resourceAllocator{cm.cpuManager, cm.memoryManager, cm.deviceManager, cm.draManager}
}
type resourceAllocator struct {
cpuManager cpumanager.Manager
memoryManager memorymanager.Manager
deviceManager devicemanager.Manager
draManager dra.Manager
}
func (m *resourceAllocator) Admit(attrs *lifecycle.PodAdmitAttributes) lifecycle.PodAdmitResult {
@ -1009,3 +1031,19 @@ func containerMemoryFromBlock(blocks []memorymanagerstate.Block) []*podresources
return containerMemories
}
func (cm *containerManagerImpl) PrepareResources(pod *v1.Pod, container *v1.Container) (*dra.ContainerInfo, error) {
return cm.draManager.PrepareResources(pod, container)
}
func (cm *containerManagerImpl) UnprepareResources(pod *v1.Pod) error {
return cm.draManager.UnprepareResources(pod)
}
func (cm *containerManagerImpl) PodMightNeedToUnprepareResources(UID types.UID) bool {
if cm.draManager != nil {
return cm.draManager.PodMightNeedToUnprepareResources(UID)
}
return false
}


@ -21,9 +21,11 @@ import (
"k8s.io/klog/v2"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/types"
internalapi "k8s.io/cri-api/pkg/apis"
podresourcesapi "k8s.io/kubelet/pkg/apis/podresources/v1"
"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager"
"k8s.io/kubernetes/pkg/kubelet/cm/dra"
"k8s.io/kubernetes/pkg/kubelet/cm/memorymanager"
"k8s.io/kubernetes/pkg/kubelet/cm/topologymanager"
"k8s.io/kubernetes/pkg/kubelet/config"
@ -154,6 +156,18 @@ func (cm *containerManagerStub) GetNodeAllocatableAbsolute() v1.ResourceList {
return nil
}
func (cm *containerManagerStub) PrepareResources(pod *v1.Pod, container *v1.Container) (*dra.ContainerInfo, error) {
return nil, nil
}
func (cm *containerManagerStub) UnprepareResources(*v1.Pod) error {
return nil
}
func (cm *containerManagerStub) PodMightNeedToUnprepareResources(UID types.UID) bool {
return false
}
func NewStubContainerManager() ContainerManager {
return &containerManagerStub{shouldResetExtendedResourceCapacity: false}
}


@ -25,6 +25,7 @@ import (
"k8s.io/mount-utils"
v1 "k8s.io/api/core/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/record"
internalapi "k8s.io/cri-api/pkg/apis"
"k8s.io/kubernetes/pkg/kubelet/cadvisor"
@ -42,6 +43,6 @@ func (unsupportedContainerManager) Start(_ *v1.Node, _ ActivePodsFunc, _ config.
return fmt.Errorf("Container Manager is unsupported in this build")
}
func NewContainerManager(_ mount.Interface, _ cadvisor.Interface, _ NodeConfig, failSwapOn bool, recorder record.EventRecorder) (ContainerManager, error) {
func NewContainerManager(_ mount.Interface, _ cadvisor.Interface, _ NodeConfig, failSwapOn bool, recorder record.EventRecorder, kubeClient clientset.Interface) (ContainerManager, error) {
return &unsupportedContainerManager{}, nil
}


@ -30,6 +30,8 @@ import (
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/types"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/record"
internalapi "k8s.io/cri-api/pkg/apis"
podresourcesapi "k8s.io/kubelet/pkg/apis/podresources/v1"
@ -37,6 +39,7 @@ import (
"k8s.io/kubernetes/pkg/kubelet/cm/admission"
"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager"
"k8s.io/kubernetes/pkg/kubelet/cm/devicemanager"
"k8s.io/kubernetes/pkg/kubelet/cm/dra"
"k8s.io/kubernetes/pkg/kubelet/cm/memorymanager"
"k8s.io/kubernetes/pkg/kubelet/cm/topologymanager"
"k8s.io/kubernetes/pkg/kubelet/config"
@ -93,7 +96,7 @@ func (cm *containerManagerImpl) Start(node *v1.Node,
}
// NewContainerManager creates windows container manager.
func NewContainerManager(mountUtil mount.Interface, cadvisorInterface cadvisor.Interface, nodeConfig NodeConfig, failSwapOn bool, recorder record.EventRecorder) (ContainerManager, error) {
func NewContainerManager(mountUtil mount.Interface, cadvisorInterface cadvisor.Interface, nodeConfig NodeConfig, failSwapOn bool, recorder record.EventRecorder, kubeClient clientset.Interface) (ContainerManager, error) {
// It is safe to invoke `MachineInfo` on cAdvisor before logically initializing cAdvisor here because
// machine info is computed and cached once as part of cAdvisor object creation.
// But `RootFsInfo` and `ImagesFsInfo` are not available at this moment so they will be called later during manager starts
@ -250,3 +253,15 @@ func (cm *containerManagerImpl) GetAllocatableMemory() []*podresourcesapi.Contai
func (cm *containerManagerImpl) GetNodeAllocatableAbsolute() v1.ResourceList {
return nil
}
func (cm *containerManagerImpl) PrepareResources(pod *v1.Pod, container *v1.Container) (*dra.ContainerInfo, error) {
return nil, nil
}
func (cm *containerManagerImpl) UnprepareResources(*v1.Pod) error {
return nil
}
func (cm *containerManagerImpl) PodMightNeedToUnprepareResources(UID types.UID) bool {
return false
}

pkg/kubelet/cm/dra/cdi.go (new file, 283 lines)

@ -0,0 +1,283 @@
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// The code below was copied from
// https://github.com/container-orchestrated-devices/container-device-interface/blob/v0.5.3/pkg/cdi/annotations.go
// https://github.com/container-orchestrated-devices/container-device-interface/blob/v0.5.3/pkg/cdi/qualified-device.go
// to avoid a dependency on that package and the indirect dependencies that
// this would have implied.
//
// Long term it would be good to avoid this duplication:
// https://github.com/container-orchestrated-devices/container-device-interface/issues/97
package dra
import (
"errors"
"fmt"
"strings"
)
const (
// annotationPrefix is the prefix for CDI container annotation keys.
annotationPrefix = "cdi.k8s.io/"
)
// updateAnnotations updates annotations with a plugin-specific CDI device
// injection request for the given devices. Upon any error a non-nil error
// is returned and the annotations are left intact. By convention, plugin should
// be in the format of "vendor.device-type".
func updateAnnotations(annotations map[string]string, plugin string, deviceID string, devices []string) (map[string]string, error) {
key, err := annotationKey(plugin, deviceID)
if err != nil {
return annotations, fmt.Errorf("CDI annotation failed: %v", err)
}
if _, ok := annotations[key]; ok {
return annotations, fmt.Errorf("CDI annotation failed, key %q used", key)
}
value, err := annotationValue(devices)
if err != nil {
return annotations, fmt.Errorf("CDI annotation failed: %v", err)
}
if annotations == nil {
annotations = make(map[string]string)
}
annotations[key] = value
return annotations, nil
}
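A minimal usage sketch, assuming it lives in this package (plugin name, claim UID and device are hypothetical). Passing a nil map is safe because updateAnnotations allocates one lazily:

func exampleUpdateAnnotations() (map[string]string, error) {
	anns, err := updateAnnotations(nil, "vendor.example.com", "claim-uid-1234",
		[]string{"vendor.example.com/gpu=gpu0"})
	if err != nil {
		return nil, err
	}
	// anns now maps "cdi.k8s.io/vendor.example.com_claim-uid-1234"
	// to "vendor.example.com/gpu=gpu0".
	return anns, nil
}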
// annotationKey returns a unique annotation key for a device allocation
// by a K8s device plugin. pluginName should be in the format of
// "vendor.device-type". deviceID is the ID of the device the plugin is
// allocating. It is used to make sure that the generated key is unique
// even if multiple allocations by a single plugin need to be annotated.
func annotationKey(pluginName, deviceID string) (string, error) {
const maxNameLen = 63
if pluginName == "" {
return "", errors.New("invalid plugin name, empty")
}
if deviceID == "" {
return "", errors.New("invalid deviceID, empty")
}
name := pluginName + "_" + strings.ReplaceAll(deviceID, "/", "_")
if len(name) > maxNameLen {
return "", fmt.Errorf("invalid plugin+deviceID %q, too long", name)
}
if c := rune(name[0]); !isAlphaNumeric(c) {
return "", fmt.Errorf("invalid name %q, first '%c' should be alphanumeric", name, c)
}
if len(name) > 2 {
for _, c := range name[1 : len(name)-1] {
switch {
case isAlphaNumeric(c):
case c == '_' || c == '-' || c == '.':
default:
return "", fmt.Errorf("invalid name %q, invalid charcter '%c'", name, c)
}
}
}
if c := rune(name[len(name)-1]); !isAlphaNumeric(c) {
return "", fmt.Errorf("invalid name %q, last '%c' should be alphanumeric", name, c)
}
return annotationPrefix + name, nil
}
// annotationValue returns an annotation value for the given devices.
func annotationValue(devices []string) (string, error) {
value, sep := "", ""
for _, d := range devices {
if _, _, _, err := parseQualifiedName(d); err != nil {
return "", err
}
value += sep + d
sep = ","
}
return value, nil
}
// parseQualifiedName splits a qualified name into device vendor, class,
// and name. If the device fails to parse as a qualified name, or if any
// of the split components fail to pass syntax validation, vendor and
// class are returned as empty, together with the verbatim input as the
// name and an error describing the reason for failure.
func parseQualifiedName(device string) (string, string, string, error) {
vendor, class, name := parseDevice(device)
if vendor == "" {
return "", "", device, fmt.Errorf("unqualified device %q, missing vendor", device)
}
if class == "" {
return "", "", device, fmt.Errorf("unqualified device %q, missing class", device)
}
if name == "" {
return "", "", device, fmt.Errorf("unqualified device %q, missing device name", device)
}
if err := validateVendorName(vendor); err != nil {
return "", "", device, fmt.Errorf("invalid device %q: %v", device, err)
}
if err := validateClassName(class); err != nil {
return "", "", device, fmt.Errorf("invalid device %q: %v", device, err)
}
if err := validateDeviceName(name); err != nil {
return "", "", device, fmt.Errorf("invalid device %q: %v", device, err)
}
return vendor, class, name, nil
}
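For illustration, a well-formed qualified name splits like this (hypothetical values, sketched in this package):

func exampleParseQualifiedName() {
	vendor, class, name, err := parseQualifiedName("vendor.example.com/gpu=gpu0")
	// vendor == "vendor.example.com", class == "gpu", name == "gpu0", err == nil
	_, _, _, _ = vendor, class, name, err
}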
// parseDevice tries to split a device name into vendor, class, and name.
// If this fails, for instance in the case of unqualified device names,
// parseDevice returns an empty vendor and class together with name set
// to the verbatim input.
func parseDevice(device string) (string, string, string) {
if device == "" || device[0] == '/' {
return "", "", device
}
parts := strings.SplitN(device, "=", 2)
if len(parts) != 2 || parts[0] == "" || parts[1] == "" {
return "", "", device
}
name := parts[1]
vendor, class := parseQualifier(parts[0])
if vendor == "" {
return "", "", device
}
return vendor, class, name
}
// parseQualifier splits a device qualifier into vendor and class.
// The syntax for a device qualifier is
//
// "<vendor>/<class>"
//
// If parsing fails, an empty vendor and the class set to the
// verbatim input is returned.
func parseQualifier(kind string) (string, string) {
parts := strings.SplitN(kind, "/", 2)
if len(parts) != 2 || parts[0] == "" || parts[1] == "" {
return "", kind
}
return parts[0], parts[1]
}
// validateVendorName checks the validity of a vendor name.
// A vendor name may contain the following ASCII characters:
// - upper- and lowercase letters ('A'-'Z', 'a'-'z')
// - digits ('0'-'9')
// - underscore, dash, and dot ('_', '-', and '.')
func validateVendorName(vendor string) error {
if vendor == "" {
return fmt.Errorf("invalid (empty) vendor name")
}
if !isLetter(rune(vendor[0])) {
return fmt.Errorf("invalid vendor %q, should start with letter", vendor)
}
// A single-character vendor has already been fully validated above;
// slicing vendor[1:len(vendor)-1] below would panic on it.
if len(vendor) == 1 {
return nil
}
for _, c := range string(vendor[1 : len(vendor)-1]) {
switch {
case isAlphaNumeric(c):
case c == '_' || c == '-' || c == '.':
default:
return fmt.Errorf("invalid character '%c' in vendor name %q",
c, vendor)
}
}
if !isAlphaNumeric(rune(vendor[len(vendor)-1])) {
return fmt.Errorf("invalid vendor %q, should end with a letter or digit", vendor)
}
return nil
}
// validateClassName checks the validity of class name.
// A class name may contain the following ASCII characters:
// - upper- and lowercase letters ('A'-'Z', 'a'-'z')
// - digits ('0'-'9')
// - underscore and dash ('_', '-')
func validateClassName(class string) error {
if class == "" {
return fmt.Errorf("invalid (empty) device class")
}
if !isLetter(rune(class[0])) {
return fmt.Errorf("invalid class %q, should start with letter", class)
}
// A single-character class has already been fully validated above;
// slicing class[1:len(class)-1] below would panic on it.
if len(class) == 1 {
return nil
}
for _, c := range string(class[1 : len(class)-1]) {
switch {
case isAlphaNumeric(c):
case c == '_' || c == '-':
default:
return fmt.Errorf("invalid character '%c' in device class %q",
c, class)
}
}
if !isAlphaNumeric(rune(class[len(class)-1])) {
return fmt.Errorf("invalid class %q, should end with a letter or digit", class)
}
return nil
}
// validateDeviceName checks the validity of a device name.
// A device name may contain the following ASCII characters:
// - upper- and lowercase letters ('A'-'Z', 'a'-'z')
// - digits ('0'-'9')
// - underscore, dash, dot, colon ('_', '-', '.', ':')
func validateDeviceName(name string) error {
if name == "" {
return fmt.Errorf("invalid (empty) device name")
}
if !isAlphaNumeric(rune(name[0])) {
return fmt.Errorf("invalid class %q, should start with a letter or digit", name)
}
if len(name) == 1 {
return nil
}
for _, c := range string(name[1 : len(name)-1]) {
switch {
case isAlphaNumeric(c):
case c == '_' || c == '-' || c == '.' || c == ':':
default:
return fmt.Errorf("invalid character '%c' in device name %q",
c, name)
}
}
if !isAlphaNumeric(rune(name[len(name)-1])) {
return fmt.Errorf("invalid name %q, should end with a letter or digit", name)
}
return nil
}
func isLetter(c rune) bool {
return ('A' <= c && c <= 'Z') || ('a' <= c && c <= 'z')
}
func isDigit(c rune) bool {
return '0' <= c && c <= '9'
}
func isAlphaNumeric(c rune) bool {
return isLetter(c) || isDigit(c)
}


@ -0,0 +1,127 @@
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package dra
import (
"fmt"
"sync"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
)
// claimInfo holds information required
// to prepare and unprepare a resource claim.
type claimInfo struct {
sync.RWMutex
// name of the DRA driver
driverName string
// claimUID is the UID of the resource claim
claimUID types.UID
// claimName is the name of the resource claim
claimName string
// namespace is the namespace of the resource claim
namespace string
// podUIDs is the set of pod UIDs that reference the prepared resource
podUIDs sets.Set[string]
// cdiDevices is a list of CDI devices returned by the
// GRPC API call NodePrepareResource
cdiDevices []string
// annotations is a list of container annotations associated with
// a prepared resource
annotations []kubecontainer.Annotation
}
func (res *claimInfo) addPodReference(podUID types.UID) {
res.Lock()
defer res.Unlock()
res.podUIDs.Insert(string(podUID))
}
func (res *claimInfo) deletePodReference(podUID types.UID) {
res.Lock()
defer res.Unlock()
res.podUIDs.Delete(string(podUID))
}
// claimInfoCache is a cache of processed resource claims keyed by namespace + claim name.
type claimInfoCache struct {
sync.RWMutex
claimInfo map[string]*claimInfo
}
// newClaimInfoCache is a function that returns an instance of the claimInfoCache.
func newClaimInfoCache() *claimInfoCache {
return &claimInfoCache{
claimInfo: make(map[string]*claimInfo),
}
}
func (cache *claimInfoCache) add(claim, namespace string, res *claimInfo) error {
cache.Lock()
defer cache.Unlock()
key := claim + namespace
if _, ok := cache.claimInfo[key]; ok {
return fmt.Errorf("claim %s, namespace %s already cached", claim, namespace)
}
cache.claimInfo[key] = res
return nil
}
func (cache *claimInfoCache) get(claimName, namespace string) *claimInfo {
cache.RLock()
defer cache.RUnlock()
return cache.claimInfo[claimName+namespace]
}
func (cache *claimInfoCache) delete(claimName, namespace string) {
cache.Lock()
defer cache.Unlock()
delete(cache.claimInfo, claimName+namespace)
}
// hasPodReference checks if there is at least one claim
// that is referenced by the pod with the given UID.
// This function is used indirectly by the status manager
// to check if the pod can enter termination status.
func (cache *claimInfoCache) hasPodReference(UID types.UID) bool {
cache.RLock()
defer cache.RUnlock()
for _, claimInfo := range cache.claimInfo {
if claimInfo.podUIDs.Has(string(UID)) {
return true
}
}
return false
}
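A short sketch of the cache lifecycle (hypothetical names, inside this package). Note that entries are keyed by the plain concatenation of claim name and namespace:

func exampleClaimInfoCache() error {
	cache := newClaimInfoCache()
	info := &claimInfo{
		driverName: "vendor.example.com",
		claimName:  "gpu-claim",
		namespace:  "default",
		podUIDs:    sets.New("pod-uid-1"),
	}
	if err := cache.add("gpu-claim", "default", info); err != nil {
		return err
	}
	// A second pod starts sharing the already prepared claim.
	cache.get("gpu-claim", "default").addPodReference(types.UID("pod-uid-2"))
	return nil
}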


@ -0,0 +1,255 @@
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package dra
import (
"context"
"fmt"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/dynamic-resource-allocation/resourceclaim"
"k8s.io/klog/v2"
dra "k8s.io/kubernetes/pkg/kubelet/cm/dra/plugin"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
)
// ManagerImpl is the structure in charge of managing DRA resource Plugins.
type ManagerImpl struct {
// cache contains cached claim info
cache *claimInfoCache
// KubeClient reference
kubeClient clientset.Interface
}
// NewManagerImpl creates a new manager.
func NewManagerImpl(kubeClient clientset.Interface) (*ManagerImpl, error) {
klog.V(2).InfoS("Creating DRA manager")
manager := &ManagerImpl{
cache: newClaimInfoCache(),
kubeClient: kubeClient,
}
return manager, nil
}
// generateCDIAnnotations generates container annotations using the CDI UpdateAnnotations API.
func generateCDIAnnotations(
claimUID types.UID,
driverName string,
cdiDevices []string,
) ([]kubecontainer.Annotation, error) {
annotations, err := updateAnnotations(map[string]string{}, driverName, string(claimUID), cdiDevices)
if err != nil {
return nil, fmt.Errorf("can't generate CDI annotations: %+v", err)
}
kubeAnnotations := []kubecontainer.Annotation{}
for key, value := range annotations {
kubeAnnotations = append(kubeAnnotations, kubecontainer.Annotation{Name: key, Value: value})
}
return kubeAnnotations, nil
}
// prepareContainerResources attempts to prepare all of the resources
// required by the input container, issues a NodePrepareResource rpc request
// for each new resource requirement, processes the responses and updates the
// cached containerResources on success.
func (m *ManagerImpl) prepareContainerResources(pod *v1.Pod, container *v1.Container) error {
// Process resources for each resource claim referenced by container
for range container.Resources.Claims {
for i, podResourceClaim := range pod.Spec.ResourceClaims {
claimName := resourceclaim.Name(pod, &pod.Spec.ResourceClaims[i])
klog.V(3).InfoS("Processing resource", "claim", claimName, "pod", pod.Name)
if claimInfo := m.cache.get(claimName, pod.Namespace); claimInfo != nil {
// resource is already prepared, add pod UID to it
claimInfo.addPodReference(pod.UID)
continue
}
// Query claim object from the API server
resourceClaim, err := m.kubeClient.ResourceV1alpha1().ResourceClaims(pod.Namespace).Get(
context.TODO(),
claimName,
metav1.GetOptions{})
if err != nil {
return fmt.Errorf("failed to fetch ResourceClaim %s referenced by pod %s: %+v", claimName, pod.Name, err)
}
// Check if pod is in the ReservedFor for the claim
if !resourceclaim.IsReservedForPod(pod, resourceClaim) {
return fmt.Errorf("pod %s(%s) is not allowed to use resource claim %s(%s)",
pod.Name, pod.UID, podResourceClaim.Name, resourceClaim.UID)
}
// Call NodePrepareResource RPC
driverName := resourceClaim.Status.DriverName
client, err := dra.NewDRAPluginClient(driverName)
if err != nil {
return fmt.Errorf("failed to get DRA Plugin client for plugin name %s, err=%+v", driverName, err)
}
response, err := client.NodePrepareResource(
context.Background(),
resourceClaim.Namespace,
resourceClaim.UID,
resourceClaim.Name,
resourceClaim.Status.Allocation.ResourceHandle)
if err != nil {
return fmt.Errorf("NodePrepareResource failed, claim UID: %s, claim name: %s, resource handle: %s, err: %+v",
resourceClaim.UID, resourceClaim.Name, resourceClaim.Status.Allocation.ResourceHandle, err)
}
klog.V(3).InfoS("NodePrepareResource succeeded", "response", response)
annotations, err := generateCDIAnnotations(resourceClaim.UID, driverName, response.CdiDevices)
if err != nil {
return fmt.Errorf("failed to generate container annotations, err: %+v", err)
}
// Cache prepared resource
err = m.cache.add(
resourceClaim.Name,
resourceClaim.Namespace,
&claimInfo{
driverName: driverName,
claimUID: resourceClaim.UID,
claimName: resourceClaim.Name,
namespace: resourceClaim.Namespace,
podUIDs: sets.New(string(pod.UID)),
cdiDevices: response.CdiDevices,
annotations: annotations,
})
if err != nil {
return fmt.Errorf(
"failed to cache prepared resource, claim: %s(%s), err: %+v",
resourceClaim.Name,
resourceClaim.UID,
err,
)
}
}
}
return nil
}
// getContainerInfo gets a container info from the claimInfo cache.
// This information is used by the caller to update a container config.
func (m *ManagerImpl) getContainerInfo(pod *v1.Pod, container *v1.Container) (*ContainerInfo, error) {
annotations := []kubecontainer.Annotation{}
for i, podResourceClaim := range pod.Spec.ResourceClaims {
claimName := resourceclaim.Name(pod, &pod.Spec.ResourceClaims[i])
for _, claim := range container.Resources.Claims {
if podResourceClaim.Name != claim.Name {
continue
}
claimInfo := m.cache.get(claimName, pod.Namespace)
if claimInfo == nil {
return nil, fmt.Errorf("unable to get resource for namespace: %s, claim: %s", pod.Namespace, claimName)
}
klog.V(3).InfoS("add resource annotations", "claim", claimName, "annotations", claimInfo.annotations)
annotations = append(annotations, claimInfo.annotations...)
}
}
return &ContainerInfo{Annotations: annotations}, nil
}
// PrepareResources calls plugin NodePrepareResource from the registered DRA resource plugins.
func (m *ManagerImpl) PrepareResources(pod *v1.Pod, container *v1.Container) (*ContainerInfo, error) {
if err := m.prepareContainerResources(pod, container); err != nil {
return nil, err
}
return m.getContainerInfo(pod, container)
}
// UnprepareResources calls a plugin's NodeUnprepareResource API for each resource claim owned by a pod.
// This function is idempotent and may be called multiple times against the same pod.
// As such, calls to the underlying NodeUnprepareResource API are skipped for claims that have
// already been successfully unprepared.
func (m *ManagerImpl) UnprepareResources(pod *v1.Pod) error {
// Call NodeUnprepareResource RPC for every resource claim referenced by the pod
for i := range pod.Spec.ResourceClaims {
claimName := resourceclaim.Name(pod, &pod.Spec.ResourceClaims[i])
claimInfo := m.cache.get(claimName, pod.Namespace)
// Skip calling NodeUnprepareResource if claim info is not cached
if claimInfo == nil {
continue
}
// Skip calling NodeUnprepareResource if other pods are still referencing it
if len(claimInfo.podUIDs) > 1 {
claimInfo.deletePodReference(pod.UID)
continue
}
// Call NodeUnprepareResource only for the last pod that references the claim
client, err := dra.NewDRAPluginClient(claimInfo.driverName)
if err != nil {
return fmt.Errorf("failed to get DRA Plugin client for plugin name %s, err=%+v", claimInfo.driverName, err)
}
response, err := client.NodeUnprepareResource(
context.Background(),
claimInfo.namespace,
claimInfo.claimUID,
claimInfo.claimName,
claimInfo.cdiDevices)
if err != nil {
return fmt.Errorf(
"NodeUnprepareResource failed, pod: %s, claim UID: %s, claim name: %s, CDI devices: %s, err: %+v",
pod.Name,
claimInfo.claimUID,
claimInfo.claimName,
claimInfo.cdiDevices, err)
}
// Delete the last pod UID only if the NodeUnprepareResource call succeeds.
// This ensures that the status manager doesn't let the pod enter termination
// status prematurely. This logic is implemented in
// m.PodMightNeedToUnprepareResources and in claimInfoCache.hasPodReference.
claimInfo.deletePodReference(pod.UID)
klog.V(3).InfoS("NodeUnprepareResource succeeded", "response", response)
// delete resource from the cache
m.cache.delete(claimInfo.claimName, pod.Namespace)
}
return nil
}
// PodMightNeedToUnprepareResources returns true if the pod might need to
// unprepare resources.
func (m *ManagerImpl) PodMightNeedToUnprepareResources(UID types.UID) bool {
return m.cache.hasPodReference(UID)
}
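End to end, the kubelet is expected to drive the manager roughly as follows (a sketch; pod and container are assumed to reference each other correctly):

func exampleManagerLifecycle(m *ManagerImpl, pod *v1.Pod, container *v1.Container) error {
	// Before container creation: prepare claims and collect CDI annotations.
	info, err := m.PrepareResources(pod, container)
	if err != nil {
		return err
	}
	_ = info.Annotations // injected into the container config by the runtime
	// After all containers have stopped: release the claims again.
	return m.UnprepareResources(pod)
}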


@ -0,0 +1,182 @@
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package plugin
import (
"context"
"errors"
"fmt"
"io"
"net"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials/insecure"
"k8s.io/apimachinery/pkg/types"
"k8s.io/klog/v2"
drapbv1 "k8s.io/kubelet/pkg/apis/dra/v1alpha1"
)
type Client interface {
NodePrepareResource(
ctx context.Context,
namespace string,
claimUID types.UID,
claimName string,
resourceHandle string,
) (*drapbv1.NodePrepareResourceResponse, error)
NodeUnprepareResource(
ctx context.Context,
namespace string,
claimUID types.UID,
claimName string,
cdiDevice []string,
) (*drapbv1.NodeUnprepareResourceResponse, error)
}
// Strongly typed address.
type draAddr string
// draPluginClient encapsulates all dra plugin methods.
type draPluginClient struct {
pluginName string
addr draAddr
nodeV1ClientCreator nodeV1ClientCreator
}
var _ Client = &draPluginClient{}
type nodeV1ClientCreator func(addr draAddr) (
nodeClient drapbv1.NodeClient,
closer io.Closer,
err error,
)
// newV1NodeClient creates a new NodeClient with the internally used gRPC
// connection set up. It also returns a closer which must be called to close
// the gRPC connection when the NodeClient is not used anymore.
// This is the default implementation for the nodeV1ClientCreator, used in
// newDRAPluginClient.
func newV1NodeClient(addr draAddr) (nodeClient drapbv1.NodeClient, closer io.Closer, err error) {
var conn *grpc.ClientConn
conn, err = newGrpcConn(addr)
if err != nil {
return nil, nil, err
}
return drapbv1.NewNodeClient(conn), conn, nil
}
func NewDRAPluginClient(pluginName string) (Client, error) {
if pluginName == "" {
return nil, fmt.Errorf("plugin name is empty")
}
existingPlugin := draPlugins.Get(pluginName)
if existingPlugin == nil {
return nil, fmt.Errorf("plugin name %s not found in the list of registered DRA plugins", pluginName)
}
return &draPluginClient{
pluginName: pluginName,
addr: draAddr(existingPlugin.endpoint),
nodeV1ClientCreator: newV1NodeClient,
}, nil
}
func (r *draPluginClient) NodePrepareResource(
ctx context.Context,
namespace string,
claimUID types.UID,
claimName string,
resourceHandle string,
) (*drapbv1.NodePrepareResourceResponse, error) {
klog.V(4).InfoS(
log("calling NodePrepareResource rpc"),
"namespace", namespace,
"claim UID", claimUID,
"claim name", claimName,
"resource handle", resourceHandle)
if r.nodeV1ClientCreator == nil {
return nil, errors.New("failed to call NodePrepareResource. nodeV1ClientCreator is nil")
}
nodeClient, closer, err := r.nodeV1ClientCreator(r.addr)
if err != nil {
return nil, err
}
defer closer.Close()
req := &drapbv1.NodePrepareResourceRequest{
Namespace: namespace,
ClaimUid: string(claimUID),
ClaimName: claimName,
ResourceHandle: resourceHandle,
}
return nodeClient.NodePrepareResource(ctx, req)
}
func (r *draPluginClient) NodeUnprepareResource(
ctx context.Context,
namespace string,
claimUID types.UID,
claimName string,
cdiDevices []string,
) (*drapbv1.NodeUnprepareResourceResponse, error) {
klog.V(4).InfoS(
log("calling NodeUnprepareResource rpc"),
"namespace", namespace,
"claim UID", claimUID,
"claim name", claimName,
"cdi devices", cdiDevices)
if r.nodeV1ClientCreator == nil {
return nil, errors.New("nodeV1ClientCreate is nil")
}
nodeClient, closer, err := r.nodeV1ClientCreator(r.addr)
if err != nil {
return nil, err
}
defer closer.Close()
req := &drapbv1.NodeUnprepareResourceRequest{
Namespace: namespace,
ClaimUid: string(claimUID),
ClaimName: claimName,
CdiDevices: cdiDevices,
}
return nodeClient.NodeUnprepareResource(ctx, req)
}
func newGrpcConn(addr draAddr) (*grpc.ClientConn, error) {
network := "unix"
klog.V(4).InfoS(log("creating new gRPC connection"), "protocol", network, "endpoint", addr)
return grpc.Dial(
string(addr),
grpc.WithTransportCredentials(insecure.NewCredentials()),
grpc.WithContextDialer(func(ctx context.Context, target string) (net.Conn, error) {
return (&net.Dialer{}).DialContext(ctx, network, target)
}),
)
}
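From a caller's perspective the client is used roughly like this (a sketch; the plugin must already be registered, and all names are hypothetical):

func exampleNodePrepareResource(ctx context.Context, claimUID types.UID) ([]string, error) {
	client, err := NewDRAPluginClient("vendor.example.com")
	if err != nil {
		return nil, err
	}
	resp, err := client.NodePrepareResource(ctx, "default", claimUID,
		"gpu-claim", "handle-from-allocation")
	if err != nil {
		return nil, err
	}
	// The returned CDI device names feed into the CDI annotation generation.
	return resp.CdiDevices, nil
}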


@ -0,0 +1,178 @@
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package plugin
import (
"errors"
"fmt"
"strings"
utilversion "k8s.io/apimachinery/pkg/util/version"
"k8s.io/klog/v2"
)
const (
// DRAPluginName is the name of the in-tree DRA Plugin.
DRAPluginName = "kubernetes.io/dra"
)
// draPlugins map keeps track of all registered DRA plugins on the node
// and their corresponding sockets.
var draPlugins = &PluginsStore{}
// RegistrationHandler is the handler which is fed to the pluginwatcher API.
type RegistrationHandler struct{}
// NewRegistrationHandler returns a new registration handler.
func NewRegistrationHandler() *RegistrationHandler {
return &RegistrationHandler{}
}
// RegisterPlugin is called when a plugin can be registered.
func (h *RegistrationHandler) RegisterPlugin(pluginName string, endpoint string, versions []string) error {
klog.InfoS("Register new DRA plugin", "name", pluginName, "endpoint", endpoint)
highestSupportedVersion, err := h.validateVersions("RegisterPlugin", pluginName, versions)
if err != nil {
return err
}
// Store the endpoint of the newly registered DRA plugin in the map, keyed by
// plugin name, so that all other DRA components can look up the actual socket
// of a DRA plugin by its name.
draPlugins.Set(pluginName, &Plugin{
endpoint: endpoint,
highestSupportedVersion: highestSupportedVersion,
})
return nil
}
// highestSupportedVersion returns the highest supported version reported by the plugin.
func highestSupportedVersion(versions []string) (*utilversion.Version, error) {
if len(versions) == 0 {
return nil, errors.New(log("DRA plugin reporting empty array for supported versions"))
}
var highestSupportedVersion *utilversion.Version
var theErr error
for i := len(versions) - 1; i >= 0; i-- {
currentHighestVer, err := utilversion.ParseGeneric(versions[i])
if err != nil {
theErr = err
continue
}
if currentHighestVer.Major() > 1 {
// DRA currently only has version 1.x
continue
}
if highestSupportedVersion == nil || highestSupportedVersion.LessThan(currentHighestVer) {
highestSupportedVersion = currentHighestVer
}
}
if highestSupportedVersion == nil {
return nil, fmt.Errorf(
"could not find a highest supported version from versions (%v) reported by this plugin: %+v",
versions, theErr)
}
if highestSupportedVersion.Major() != 1 {
return nil, fmt.Errorf("highest supported version reported by plugin is %v, must be v1.x", highestSupportedVersion)
}
return highestSupportedVersion, nil
}
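For example (hypothetical input), a plugin advertising 1.0.0, 1.1.0 and 2.0.0 resolves to 1.1.0: the 2.x entry is skipped because DRA currently only has version 1.x, and 1.1.0 is the highest remaining parseable version:

func exampleHighestSupportedVersion() {
	v, err := highestSupportedVersion([]string{"1.0.0", "1.1.0", "2.0.0"})
	// v.String() == "1.1.0", err == nil
	_, _ = v, err
}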
func (h *RegistrationHandler) validateVersions(
callerName string,
pluginName string,
versions []string,
) (*utilversion.Version, error) {
if len(versions) == 0 {
return nil, errors.New(
log(
"%s for DRA plugin %q failed. Plugin returned an empty list for supported versions",
callerName,
pluginName,
),
)
}
// Validate version
newPluginHighestVersion, err := highestSupportedVersion(versions)
if err != nil {
return nil, errors.New(
log(
"%s for DRA plugin %q failed. None of the versions specified %q are supported. err=%v",
callerName,
pluginName,
versions,
err,
),
)
}
existingPlugin := draPlugins.Get(pluginName)
if existingPlugin != nil {
if !existingPlugin.highestSupportedVersion.LessThan(newPluginHighestVersion) {
return nil, errors.New(
log(
"%s for DRA plugin %q failed. Another plugin with the same name is already registered with a higher supported version: %q",
callerName,
pluginName,
existingPlugin.highestSupportedVersion,
),
)
}
}
return newPluginHighestVersion, nil
}
func unregisterPlugin(pluginName string) {
draPlugins.Delete(pluginName)
}
// DeRegisterPlugin is called when a plugin has removed its socket,
// signaling it is no longer available.
func (h *RegistrationHandler) DeRegisterPlugin(pluginName string) {
klog.InfoS("DeRegister DRA plugin", "name", pluginName)
unregisterPlugin(pluginName)
}
// ValidatePlugin is called by kubelet's plugin watcher upon detection
// of a new registration socket opened by DRA plugin.
func (h *RegistrationHandler) ValidatePlugin(pluginName string, endpoint string, versions []string) error {
klog.InfoS("Validate DRA plugin", "name", pluginName, "endpoint", endpoint, "versions", strings.Join(versions, ","))
_, err := h.validateVersions("ValidatePlugin", pluginName, versions)
if err != nil {
return fmt.Errorf("validation failed for DRA plugin %s at endpoint %s: %+v", pluginName, endpoint, err)
}
return err
}
// log prepends log string with `kubernetes.io/dra`.
func log(msg string, parts ...interface{}) string {
return fmt.Sprintf(fmt.Sprintf("%s: %s", DRAPluginName, msg), parts...)
}
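Putting the handler together, the pluginwatcher is expected to invoke it roughly like this (a sketch; socket path and versions are hypothetical):

func exampleRegistration(h *RegistrationHandler) error {
	endpoint := "/var/lib/kubelet/plugins/vendor.example.com/dra.sock"
	versions := []string{"1.0.0"}
	if err := h.ValidatePlugin("vendor.example.com", endpoint, versions); err != nil {
		return err
	}
	// After RegisterPlugin succeeds, the endpoint is resolvable
	// via NewDRAPluginClient("vendor.example.com").
	return h.RegisterPlugin("vendor.example.com", endpoint, versions)
}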


@ -0,0 +1,76 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package plugin
import (
"sync"
utilversion "k8s.io/apimachinery/pkg/util/version"
)
// Plugin is a description of a DRA Plugin, defined by an endpoint
// and the highest DRA version supported.
type Plugin struct {
endpoint string
highestSupportedVersion *utilversion.Version
}
// PluginsStore holds a list of DRA Plugins.
type PluginsStore struct {
sync.RWMutex
store map[string]*Plugin
}
// Get lets you retrieve a DRA Plugin by name.
// This method is protected by a mutex.
func (s *PluginsStore) Get(pluginName string) *Plugin {
s.RLock()
defer s.RUnlock()
return s.store[pluginName]
}
// Set lets you save a DRA Plugin to the list and give it a specific name.
// This method is protected by a mutex.
func (s *PluginsStore) Set(pluginName string, plugin *Plugin) {
s.Lock()
defer s.Unlock()
if s.store == nil {
s.store = make(map[string]*Plugin)
}
s.store[pluginName] = plugin
}
// Delete lets you delete a DRA Plugin by name.
// This method is protected by a mutex.
func (s *PluginsStore) Delete(pluginName string) {
s.Lock()
defer s.Unlock()
delete(s.store, pluginName)
}
// Clear deletes all entries in the store.
// This method is protected by a mutex.
func (s *PluginsStore) Clear() {
s.Lock()
defer s.Unlock()
s.store = make(map[string]*Plugin)
}
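Because Set allocates the map lazily, the zero value of PluginsStore is immediately usable, which is what the package-level draPlugins variable relies on. A short sketch (hypothetical values):

func examplePluginsStore() {
	var store PluginsStore // zero value is ready to use
	store.Set("vendor.example.com", &Plugin{
		endpoint: "/var/lib/kubelet/plugins/vendor.example.com/dra.sock",
	})
	if p := store.Get("vendor.example.com"); p != nil {
		_ = p.endpoint
	}
	store.Delete("vendor.example.com")
}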


@ -0,0 +1,44 @@
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package dra
import (
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
)
// Manager manages all the DRA resource plugins running on a node.
type Manager interface {
// PrepareResources prepares resources for a container in a pod.
// It communicates with the DRA resource plugin to prepare resources and
// returns resource info to trigger CDI injection by the runtime.
PrepareResources(pod *v1.Pod, container *v1.Container) (*ContainerInfo, error)
// UnprepareResources calls the NodeUnprepareResource gRPC API of the DRA plugin to unprepare pod resources.
UnprepareResources(pod *v1.Pod) error
// PodMightNeedToUnprepareResources returns true if the pod with the given UID
// might need to unprepare resources.
PodMightNeedToUnprepareResources(UID types.UID) bool
}
// ContainerInfo contains information required by the runtime to consume prepared resources.
type ContainerInfo struct {
// The Annotations for the container
Annotations []kubecontainer.Annotation
}


@ -22,9 +22,11 @@ import (
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/types"
internalapi "k8s.io/cri-api/pkg/apis"
podresourcesapi "k8s.io/kubelet/pkg/apis/podresources/v1"
"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager"
"k8s.io/kubernetes/pkg/kubelet/cm/dra"
"k8s.io/kubernetes/pkg/kubelet/cm/memorymanager"
"k8s.io/kubernetes/pkg/kubelet/cm/topologymanager"
"k8s.io/kubernetes/pkg/kubelet/config"
@ -236,3 +238,15 @@ func (cm *FakeContainerManager) GetNodeAllocatableAbsolute() v1.ResourceList {
defer cm.Unlock()
return nil
}
func (cm *FakeContainerManager) PrepareResources(pod *v1.Pod, container *v1.Container) (*dra.ContainerInfo, error) {
return nil, nil
}
func (cm *FakeContainerManager) UnprepareResources(*v1.Pod) error {
return nil
}
func (cm *FakeContainerManager) PodMightNeedToUnprepareResources(UID types.UID) bool {
return false
}


@ -72,6 +72,7 @@ import (
kubeletcertificate "k8s.io/kubernetes/pkg/kubelet/certificate"
"k8s.io/kubernetes/pkg/kubelet/cloudresource"
"k8s.io/kubernetes/pkg/kubelet/cm"
draplugin "k8s.io/kubernetes/pkg/kubelet/cm/dra/plugin"
"k8s.io/kubernetes/pkg/kubelet/config"
"k8s.io/kubernetes/pkg/kubelet/configmap"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
@ -1473,8 +1474,13 @@ func (kl *Kubelet) initializeRuntimeDependentModules() {
kl.containerLogManager.Start()
// Adding Registration Callback function for CSI Driver
kl.pluginManager.AddHandler(pluginwatcherapi.CSIPlugin, plugincache.PluginHandler(csi.PluginHandler))
// Adding Registration Callback function for DRA Plugin
if utilfeature.DefaultFeatureGate.Enabled(features.DynamicResourceAllocation) {
kl.pluginManager.AddHandler(pluginwatcherapi.DRAPlugin, plugincache.PluginHandler(draplugin.NewRegistrationHandler()))
}
// Adding Registration Callback function for Device Manager
kl.pluginManager.AddHandler(pluginwatcherapi.DevicePlugin, kl.containerManager.GetPluginRegistrationHandler())
// Start the plugin manager
klog.V(4).InfoS("Starting plugin manager")
go kl.pluginManager.Run(kl.sourcesReady, wait.NeverStop)
@ -1936,6 +1942,15 @@ func (kl *Kubelet) syncTerminatingPod(_ context.Context, pod *v1.Pod, podStatus
return fmt.Errorf("detected running containers after a successful KillPod, CRI violation: %v", runningContainers)
}
// NOTE: resources must be unprepared AFTER all containers have stopped
// and BEFORE the pod status is changed on the API server
// to avoid race conditions with the resource deallocation code in kubernetes core.
if utilfeature.DefaultFeatureGate.Enabled(features.DynamicResourceAllocation) {
if err := kl.containerManager.UnprepareResources(pod); err != nil {
return err
}
}
// we have successfully stopped all containers, the pod is terminating, our status is "done"
klog.V(4).InfoS("Pod termination stopped all running containers", "pod", klog.KObj(pod), "podUID", pod.UID)
@ -1953,6 +1968,7 @@ func (kl *Kubelet) syncTerminatedPod(ctx context.Context, pod *v1.Pod, podStatus
// generate the final status of the pod
// TODO: should we simply fold this into TerminatePod? that would give a single pod update
apiPodStatus := kl.generateAPIPodStatus(pod, podStatus)
kl.statusManager.SetPodStatus(pod, apiPodStatus)
// volumes are unmounted after the pod worker reports ShouldPodRuntimeBeRemoved (which is satisfied


@ -927,7 +927,21 @@ func countRunningContainerStatus(status v1.PodStatus) int {
// PodCouldHaveRunningContainers returns true if the pod with the given UID could still have running
// containers. This returns false if the pod has not yet been started or the pod is unknown.
func (kl *Kubelet) PodCouldHaveRunningContainers(pod *v1.Pod) bool {
return kl.podWorkers.CouldHaveRunningContainers(pod.UID)
if kl.podWorkers.CouldHaveRunningContainers(pod.UID) {
return true
}
// Check if pod might need to unprepare resources before termination
// NOTE: This is a temporary solution. This call is here to avoid changing
// status manager and its tests.
// TODO: extend PodDeletionSafetyProvider interface and implement it
// in a separate Kubelet method.
if utilfeature.DefaultFeatureGate.Enabled(features.DynamicResourceAllocation) {
if kl.containerManager.PodMightNeedToUnprepareResources(pod.UID) {
return true
}
}
return false
}
// PodResourcesAreReclaimed returns true if all required node-level resources that a pod was consuming have


@ -32,6 +32,7 @@ import (
_ "k8s.io/kubernetes/pkg/apis/extensions/install"
_ "k8s.io/kubernetes/pkg/apis/policy/install"
_ "k8s.io/kubernetes/pkg/apis/rbac/install"
_ "k8s.io/kubernetes/pkg/apis/resource/install"
_ "k8s.io/kubernetes/pkg/apis/scheduling/install"
_ "k8s.io/kubernetes/pkg/apis/storage/install"
)


@ -40,6 +40,7 @@ import (
networkingv1alpha1 "k8s.io/api/networking/v1alpha1"
policyv1beta1 "k8s.io/api/policy/v1beta1"
rbacv1beta1 "k8s.io/api/rbac/v1beta1"
resourcev1alpha1 "k8s.io/api/resource/v1alpha1"
schedulingv1 "k8s.io/api/scheduling/v1"
storagev1 "k8s.io/api/storage/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -65,6 +66,7 @@ import (
nodeapi "k8s.io/kubernetes/pkg/apis/node"
"k8s.io/kubernetes/pkg/apis/policy"
"k8s.io/kubernetes/pkg/apis/rbac"
"k8s.io/kubernetes/pkg/apis/resource"
"k8s.io/kubernetes/pkg/apis/scheduling"
"k8s.io/kubernetes/pkg/apis/storage"
storageutil "k8s.io/kubernetes/pkg/apis/storage/util"
@ -622,6 +624,41 @@ func AddHandlers(h printers.PrintHandler) {
h.TableHandler(clusterCIDRColumnDefinitions, printClusterCIDR)
h.TableHandler(clusterCIDRColumnDefinitions, printClusterCIDRList)
resourceClassColumnDefinitions := []metav1.TableColumnDefinition{
{Name: "Name", Type: "string", Format: "name", Description: metav1.ObjectMeta{}.SwaggerDoc()["name"]},
{Name: "DriverName", Type: "string", Description: resourcev1alpha1.ResourceClass{}.SwaggerDoc()["driverName"]},
{Name: "Age", Type: "string", Description: metav1.ObjectMeta{}.SwaggerDoc()["creationTimestamp"]},
}
_ = h.TableHandler(resourceClassColumnDefinitions, printResourceClass)
_ = h.TableHandler(resourceClassColumnDefinitions, printResourceClassList)
resourceClaimColumnDefinitions := []metav1.TableColumnDefinition{
{Name: "Name", Type: "string", Format: "name", Description: metav1.ObjectMeta{}.SwaggerDoc()["name"]},
{Name: "ResourceClassName", Type: "string", Description: resourcev1alpha1.ResourceClaimSpec{}.SwaggerDoc()["resourceClassName"]},
{Name: "AllocationMode", Type: "string", Description: resourcev1alpha1.ResourceClaimSpec{}.SwaggerDoc()["allocationMode"]},
{Name: "State", Type: "string", Description: "A summary of the current state (allocated, pending, reserved, etc.)."},
{Name: "Age", Type: "string", Description: metav1.ObjectMeta{}.SwaggerDoc()["creationTimestamp"]},
}
_ = h.TableHandler(resourceClaimColumnDefinitions, printResourceClaim)
_ = h.TableHandler(resourceClaimColumnDefinitions, printResourceClaimList)
resourceClaimTemplateColumnDefinitions := []metav1.TableColumnDefinition{
{Name: "Name", Type: "string", Format: "name", Description: metav1.ObjectMeta{}.SwaggerDoc()["name"]},
{Name: "ResourceClassName", Type: "string", Description: resourcev1alpha1.ResourceClaimSpec{}.SwaggerDoc()["resourceClassName"]},
{Name: "AllocationMode", Type: "string", Description: resourcev1alpha1.ResourceClaimSpec{}.SwaggerDoc()["allocationMode"]},
{Name: "Age", Type: "string", Description: metav1.ObjectMeta{}.SwaggerDoc()["creationTimestamp"]},
}
_ = h.TableHandler(resourceClaimTemplateColumnDefinitions, printResourceClaimTemplate)
_ = h.TableHandler(resourceClaimTemplateColumnDefinitions, printResourceClaimTemplateList)
podSchedulingColumnDefinitions := []metav1.TableColumnDefinition{
{Name: "Name", Type: "string", Format: "name", Description: metav1.ObjectMeta{}.SwaggerDoc()["name"]},
{Name: "SelectedNode", Type: "string", Description: resourcev1alpha1.PodSchedulingSpec{}.SwaggerDoc()["selectedNode"]},
{Name: "Age", Type: "string", Description: metav1.ObjectMeta{}.SwaggerDoc()["creationTimestamp"]},
}
_ = h.TableHandler(podSchedulingColumnDefinitions, printPodScheduling)
_ = h.TableHandler(podSchedulingColumnDefinitions, printPodSchedulingList)
}
// Pass ports=nil for all ports.
@ -2797,6 +2834,110 @@ func printScale(obj *autoscaling.Scale, options printers.GenerateOptions) ([]met
return []metav1.TableRow{row}, nil
}
func printResourceClass(obj *resource.ResourceClass, options printers.GenerateOptions) ([]metav1.TableRow, error) {
row := metav1.TableRow{
Object: runtime.RawExtension{Object: obj},
}
row.Cells = append(row.Cells, obj.Name, obj.DriverName, translateTimestampSince(obj.CreationTimestamp))
return []metav1.TableRow{row}, nil
}
func printResourceClassList(list *resource.ResourceClassList, options printers.GenerateOptions) ([]metav1.TableRow, error) {
rows := make([]metav1.TableRow, 0, len(list.Items))
for i := range list.Items {
r, err := printResourceClass(&list.Items[i], options)
if err != nil {
return nil, err
}
rows = append(rows, r...)
}
return rows, nil
}
func printResourceClaim(obj *resource.ResourceClaim, options printers.GenerateOptions) ([]metav1.TableRow, error) {
row := metav1.TableRow{
Object: runtime.RawExtension{Object: obj},
}
row.Cells = append(row.Cells, obj.Name, obj.Spec.ResourceClassName, string(obj.Spec.AllocationMode), resourceClaimState(obj), translateTimestampSince(obj.CreationTimestamp))
return []metav1.TableRow{row}, nil
}
func resourceClaimState(obj *resource.ResourceClaim) string {
var states []string
if obj.DeletionTimestamp != nil {
states = append(states, "deleted")
}
if obj.Status.Allocation == nil {
if obj.DeletionTimestamp == nil {
states = append(states, "pending")
}
} else {
states = append(states, "allocated")
if len(obj.Status.ReservedFor) > 0 {
states = append(states, "reserved")
} else if obj.DeletionTimestamp != nil || obj.Status.DeallocationRequested {
states = append(states, "deallocating")
}
}
return strings.Join(states, ",")
}
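Representative outcomes, sketched from the branches above:

// unallocated, not deleted                        -> "pending"
// allocated, ReservedFor non-empty                -> "allocated,reserved"
// allocated, DeallocationRequested, no consumers  -> "allocated,deallocating"
// deleted while allocated, no consumers           -> "deleted,allocated,deallocating"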
func printResourceClaimList(list *resource.ResourceClaimList, options printers.GenerateOptions) ([]metav1.TableRow, error) {
rows := make([]metav1.TableRow, 0, len(list.Items))
for i := range list.Items {
r, err := printResourceClaim(&list.Items[i], options)
if err != nil {
return nil, err
}
rows = append(rows, r...)
}
return rows, nil
}
func printResourceClaimTemplate(obj *resource.ResourceClaimTemplate, options printers.GenerateOptions) ([]metav1.TableRow, error) {
row := metav1.TableRow{
Object: runtime.RawExtension{Object: obj},
}
row.Cells = append(row.Cells, obj.Name, obj.Spec.Spec.ResourceClassName, string(obj.Spec.Spec.AllocationMode), translateTimestampSince(obj.CreationTimestamp))
return []metav1.TableRow{row}, nil
}
func printResourceClaimTemplateList(list *resource.ResourceClaimTemplateList, options printers.GenerateOptions) ([]metav1.TableRow, error) {
rows := make([]metav1.TableRow, 0, len(list.Items))
for i := range list.Items {
r, err := printResourceClaimTemplate(&list.Items[i], options)
if err != nil {
return nil, err
}
rows = append(rows, r...)
}
return rows, nil
}
func printPodScheduling(obj *resource.PodScheduling, options printers.GenerateOptions) ([]metav1.TableRow, error) {
row := metav1.TableRow{
Object: runtime.RawExtension{Object: obj},
}
row.Cells = append(row.Cells, obj.Name, obj.Spec.SelectedNode, translateTimestampSince(obj.CreationTimestamp))
return []metav1.TableRow{row}, nil
}
func printPodSchedulingList(list *resource.PodSchedulingList, options printers.GenerateOptions) ([]metav1.TableRow, error) {
rows := make([]metav1.TableRow, 0, len(list.Items))
for i := range list.Items {
r, err := printPodScheduling(&list.Items[i], options)
if err != nil {
return nil, err
}
rows = append(rows, r...)
}
return rows, nil
}
func printBoolPtr(value *bool) string {
if value != nil {
return printBool(*value)

Some files were not shown because too many files have changed in this diff.