Merge pull request #61860 from mindprince/kubernetes.io-resources

Automatic merge from submit-queue (batch tested with PRs 60073, 58519, 61860). If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

Resources prefixed with `*kubernetes.io/` should remain unscheduled if they are not exposed on any node.

Currently, pods requesting resources prefixed with `*kubernetes.io/` are scheduled to any
node, whether or not it exposes that resource.

On the other hand, pods requesting resources prefixed with `someother.domain/` are not
scheduled onto a node until that node exposes the resource (unless the
resource is ignored because a scheduler extender manages it).

This commit brings the behavior of `*kubernetes.io/`-prefixed resources in
line with other extended resources: pods requesting them will remain
unscheduled until some node exposes those resources.
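
To make the change concrete, here is a minimal, self-contained sketch of the kind of enforcement involved. It is a sketch under simplified assumptions: `isNativeResource` and `fitsScalarResources` are illustrative names, not the scheduler's actual helpers.

```go
package main

import (
	"fmt"
	"strings"
)

// isNativeResource is an illustrative stand-in: names without a domain
// prefix (e.g. "cpu", "memory") are native; everything else, including
// "kubernetes.io/foo" after this change, is treated as an extended
// resource whose allocatable must be enforced.
func isNativeResource(name string) bool {
	return !strings.Contains(name, "/")
}

// fitsScalarResources mimics the intended behavior for extended
// resources: a request fits only if the node's allocatable covers it.
func fitsScalarResources(requests, allocatable map[string]int64) []string {
	var failures []string
	for name, want := range requests {
		if isNativeResource(name) {
			continue // native resources are checked separately
		}
		if have := allocatable[name]; want > have {
			failures = append(failures, fmt.Sprintf(
				"Insufficient %s: requested %d, allocatable %d", name, want, have))
		}
	}
	return failures
}

func main() {
	requests := map[string]int64{"kubernetes.io/something": 10}
	// The node exposes nothing, so the request must not fit.
	fmt.Println(fitsScalarResources(requests, map[string]int64{}))
}
```

Before this change, `*kubernetes.io/`-prefixed names were effectively skipped the way native resources are, so pods requesting them could land on any node; after it, they take the enforced branch like any other extended resource.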

Fixes #50658

```release-note
Pods requesting resources prefixed with `*kubernetes.io/` will remain unscheduled if there are no nodes exposing that resource.
```
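
For illustration, a pod making such a request could be constructed as below. This is a sketch: the resource name `kubernetes.io/something` mirrors the test fixture in the diff further down, not any real device plugin.

```go
package main

import (
	"fmt"

	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// A pod requesting an extended resource in the kubernetes.io namespace.
	// With this change, it stays Pending until some node exposes
	// "kubernetes.io/something" in its allocatable.
	pod := &v1.Pod{
		Spec: v1.PodSpec{
			Containers: []v1.Container{{
				Name:  "app",
				Image: "busybox",
				Resources: v1.ResourceRequirements{
					Requests: v1.ResourceList{
						"kubernetes.io/something": resource.MustParse("1"),
					},
					Limits: v1.ResourceList{
						"kubernetes.io/something": resource.MustParse("1"),
					},
				},
			}},
		},
	}
	fmt.Println(pod.Spec.Containers[0].Resources.Requests)
}
```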

/sig scheduling
/assign jiayingz vishh bsalamat ConnorDoyle k82cn
Authored by Kubernetes Submit Queue, committed by GitHub on 2018-04-02 17:07:05 -07:00.
7 changed files with 45 additions and 19 deletions.

@@ -37,9 +37,11 @@ import (
 )
 
 var (
-	extendedResourceA = v1.ResourceName("example.com/aaa")
-	extendedResourceB = v1.ResourceName("example.com/bbb")
-	hugePageResourceA = v1helper.HugePageResourceName(resource.MustParse("2Mi"))
+	extendedResourceA     = v1.ResourceName("example.com/aaa")
+	extendedResourceB     = v1.ResourceName("example.com/bbb")
+	kubernetesIOResourceA = v1.ResourceName("kubernetes.io/something")
+	kubernetesIOResourceB = v1.ResourceName("subdomain.kubernetes.io/something")
+	hugePageResourceA     = v1helper.HugePageResourceName(resource.MustParse("2Mi"))
 )
 
 func makeResources(milliCPU, memory, nvidiaGPUs, pods, extendedA, storage, hugePageA int64) v1.NodeResources {
@@ -297,6 +299,24 @@ func TestPodFitsResources(t *testing.T) {
 			test:    "extended resource allocatable enforced for unknown resource for init container",
 			reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(extendedResourceB, 1, 0, 0)},
 		},
+		{
+			pod: newResourcePod(
+				schedulercache.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{kubernetesIOResourceA: 10}}),
+			nodeInfo: schedulercache.NewNodeInfo(
+				newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0})),
+			fits:    false,
+			test:    "kubernetes.io resource capacity enforced",
+			reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(kubernetesIOResourceA, 10, 0, 0)},
+		},
+		{
+			pod: newResourceInitPod(newResourcePod(schedulercache.Resource{}),
+				schedulercache.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{kubernetesIOResourceB: 10}}),
+			nodeInfo: schedulercache.NewNodeInfo(
+				newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0})),
+			fits:    false,
+			test:    "kubernetes.io resource capacity enforced for init container",
+			reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(kubernetesIOResourceB, 10, 0, 0)},
+		},
 		{
 			pod: newResourcePod(
 				schedulercache.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 10}}),
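
For contrast, a hypothetical table entry in the same style as the ones added above (not part of this diff) would be expected to fit, assuming the surrounding test wires up a node whose allocatable actually exposes `kubernetesIOResourceA`:

```go
{
	// Hypothetical case: with the node exposing one unit of
	// kubernetesIOResourceA, a matching request should schedule.
	pod: newResourcePod(
		schedulercache.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{kubernetesIOResourceA: 1}}),
	nodeInfo: schedulercache.NewNodeInfo(
		newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0})),
	fits: true,
	test: "kubernetes.io resource fits when the node exposes it",
},
```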