implement Node affinity and NodeSelector
parent b2600a65f5
commit c8c82c1d8f

docs/user-guide/node-selection/pod-with-node-affinity.yaml (new file, 28 lines)
@@ -0,0 +1,28 @@
+apiVersion: v1
+kind: Pod
+metadata:
+  name: with-labels
+  annotations:
+    scheduler.alpha.kubernetes.io/affinity: >
+      {
+        "nodeAffinity": {
+          "requiredDuringSchedulingIgnoredDuringExecution": {
+            "nodeSelectorTerms": [
+              {
+                "matchExpressions": [
+                  {
+                    "key": "kubernetes.io/e2e-az-name",
+                    "operator": "In",
+                    "values": ["e2e-az1", "e2e-az2"]
+                  }
+                ]
+              }
+            ]
+          }
+        }
+      }
+    another-annotation-key: another-annotation-value
+spec:
+  containers:
+  - name: with-labels
+    image: gcr.io/google_containers/pause:2.0
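
Note (illustrative, not part of this diff): the annotation value above is just a JSON-serialized Affinity from the new API types, so it can be decoded with plain encoding/json. The k8s.io/kubernetes/pkg/api import path is assumed from this tree's layout.

package main

import (
    "encoding/json"
    "fmt"

    "k8s.io/kubernetes/pkg/api"
)

func main() {
    // Same payload as the scheduler.alpha.kubernetes.io/affinity annotation above.
    const annotation = `{
        "nodeAffinity": {
            "requiredDuringSchedulingIgnoredDuringExecution": {
                "nodeSelectorTerms": [
                    {"matchExpressions": [
                        {"key": "kubernetes.io/e2e-az-name", "operator": "In", "values": ["e2e-az1", "e2e-az2"]}
                    ]}
                ]
            }
        }
    }`

    var affinity api.Affinity
    if err := json.Unmarshal([]byte(annotation), &affinity); err != nil {
        panic(err)
    }
    term := affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0]
    fmt.Println(term.MatchExpressions[0].Key) // kubernetes.io/e2e-az-name
}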
@@ -320,6 +320,7 @@ func TestExampleObjectSchemas(t *testing.T) {
 		},
 		"../docs/user-guide/node-selection": {
 			"pod": &api.Pod{},
+			"pod-with-node-affinity": &api.Pod{},
 		},
 		"../examples/openshift-origin": {
 			"openshift-origin-namespace": &api.Namespace{},
@@ -32,6 +32,7 @@ import (
 func init() {
 	if err := Scheme.AddGeneratedDeepCopyFuncs(
 		DeepCopy_api_AWSElasticBlockStoreVolumeSource,
+		DeepCopy_api_Affinity,
 		DeepCopy_api_Binding,
 		DeepCopy_api_Capabilities,
 		DeepCopy_api_CephFSVolumeSource,
@@ -95,10 +96,14 @@ func init() {
 		DeepCopy_api_NamespaceStatus,
 		DeepCopy_api_Node,
 		DeepCopy_api_NodeAddress,
+		DeepCopy_api_NodeAffinity,
 		DeepCopy_api_NodeCondition,
 		DeepCopy_api_NodeDaemonEndpoints,
 		DeepCopy_api_NodeList,
 		DeepCopy_api_NodeResources,
+		DeepCopy_api_NodeSelector,
+		DeepCopy_api_NodeSelectorRequirement,
+		DeepCopy_api_NodeSelectorTerm,
 		DeepCopy_api_NodeSpec,
 		DeepCopy_api_NodeStatus,
 		DeepCopy_api_NodeSystemInfo,
@@ -129,6 +134,7 @@ func init() {
 		DeepCopy_api_PodTemplate,
 		DeepCopy_api_PodTemplateList,
 		DeepCopy_api_PodTemplateSpec,
+		DeepCopy_api_PreferredSchedulingTerm,
 		DeepCopy_api_Probe,
 		DeepCopy_api_RBDVolumeSource,
 		DeepCopy_api_RangeAllocation,
@@ -183,6 +189,19 @@ func DeepCopy_api_AWSElasticBlockStoreVolumeSource(in AWSElasticBlockStoreVolume
 	return nil
 }
 
+func DeepCopy_api_Affinity(in Affinity, out *Affinity, c *conversion.Cloner) error {
+	if in.NodeAffinity != nil {
+		in, out := in.NodeAffinity, &out.NodeAffinity
+		*out = new(NodeAffinity)
+		if err := DeepCopy_api_NodeAffinity(*in, *out, c); err != nil {
+			return err
+		}
+	} else {
+		out.NodeAffinity = nil
+	}
+	return nil
+}
+
 func DeepCopy_api_Binding(in Binding, out *Binding, c *conversion.Cloner) error {
 	if err := DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
 		return err
@@ -1260,6 +1279,39 @@ func DeepCopy_api_NodeAddress(in NodeAddress, out *NodeAddress, c *conversion.Cl
 	return nil
 }
 
+func DeepCopy_api_NodeAffinity(in NodeAffinity, out *NodeAffinity, c *conversion.Cloner) error {
+	if in.RequiredDuringSchedulingRequiredDuringExecution != nil {
+		in, out := in.RequiredDuringSchedulingRequiredDuringExecution, &out.RequiredDuringSchedulingRequiredDuringExecution
+		*out = new(NodeSelector)
+		if err := DeepCopy_api_NodeSelector(*in, *out, c); err != nil {
+			return err
+		}
+	} else {
+		out.RequiredDuringSchedulingRequiredDuringExecution = nil
+	}
+	if in.RequiredDuringSchedulingIgnoredDuringExecution != nil {
+		in, out := in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution
+		*out = new(NodeSelector)
+		if err := DeepCopy_api_NodeSelector(*in, *out, c); err != nil {
+			return err
+		}
+	} else {
+		out.RequiredDuringSchedulingIgnoredDuringExecution = nil
+	}
+	if in.PreferredDuringSchedulingIgnoredDuringExecution != nil {
+		in, out := in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution
+		*out = make([]PreferredSchedulingTerm, len(in))
+		for i := range in {
+			if err := DeepCopy_api_PreferredSchedulingTerm(in[i], &(*out)[i], c); err != nil {
+				return err
+			}
+		}
+	} else {
+		out.PreferredDuringSchedulingIgnoredDuringExecution = nil
+	}
+	return nil
+}
+
 func DeepCopy_api_NodeCondition(in NodeCondition, out *NodeCondition, c *conversion.Cloner) error {
 	out.Type = in.Type
 	out.Status = in.Status
@@ -1323,6 +1375,49 @@ func DeepCopy_api_NodeResources(in NodeResources, out *NodeResources, c *convers
 	return nil
 }
 
+func DeepCopy_api_NodeSelector(in NodeSelector, out *NodeSelector, c *conversion.Cloner) error {
+	if in.NodeSelectorTerms != nil {
+		in, out := in.NodeSelectorTerms, &out.NodeSelectorTerms
+		*out = make([]NodeSelectorTerm, len(in))
+		for i := range in {
+			if err := DeepCopy_api_NodeSelectorTerm(in[i], &(*out)[i], c); err != nil {
+				return err
+			}
+		}
+	} else {
+		out.NodeSelectorTerms = nil
+	}
+	return nil
+}
+
+func DeepCopy_api_NodeSelectorRequirement(in NodeSelectorRequirement, out *NodeSelectorRequirement, c *conversion.Cloner) error {
+	out.Key = in.Key
+	out.Operator = in.Operator
+	if in.Values != nil {
+		in, out := in.Values, &out.Values
+		*out = make([]string, len(in))
+		copy(*out, in)
+	} else {
+		out.Values = nil
+	}
+	return nil
+}
+
+func DeepCopy_api_NodeSelectorTerm(in NodeSelectorTerm, out *NodeSelectorTerm, c *conversion.Cloner) error {
+	if in.MatchExpressions != nil {
+		in, out := in.MatchExpressions, &out.MatchExpressions
+		*out = make([]NodeSelectorRequirement, len(in))
+		for i := range in {
+			if err := DeepCopy_api_NodeSelectorRequirement(in[i], &(*out)[i], c); err != nil {
+				return err
+			}
+		}
+	} else {
+		out.MatchExpressions = nil
+	}
+	return nil
+}
+
 func DeepCopy_api_NodeSpec(in NodeSpec, out *NodeSpec, c *conversion.Cloner) error {
 	out.PodCIDR = in.PodCIDR
 	out.ExternalID = in.ExternalID
@@ -2117,6 +2212,14 @@ func DeepCopy_api_PodTemplateSpec(in PodTemplateSpec, out *PodTemplateSpec, c *c
 	return nil
 }
 
+func DeepCopy_api_PreferredSchedulingTerm(in PreferredSchedulingTerm, out *PreferredSchedulingTerm, c *conversion.Cloner) error {
+	out.Weight = in.Weight
+	if err := DeepCopy_api_NodeSelectorTerm(in.Preference, &out.Preference, c); err != nil {
+		return err
+	}
+	return nil
+}
+
 func DeepCopy_api_Probe(in Probe, out *Probe, c *conversion.Cloner) error {
 	if err := DeepCopy_api_Handler(in.Handler, &out.Handler, c); err != nil {
 		return err
@@ -18,6 +18,7 @@ package api
 
 import (
 	"crypto/md5"
+	"encoding/json"
 	"fmt"
 	"reflect"
 	"strings"
@@ -263,3 +264,54 @@ func ParseRFC3339(s string, nowFn func() unversioned.Time) (unversioned.Time, er
 	}
 	return unversioned.Time{t}, nil
 }
+
+// NodeSelectorRequirementsAsSelector converts the []NodeSelectorRequirement api type into a struct that implements
+// labels.Selector.
+func NodeSelectorRequirementsAsSelector(nsm []NodeSelectorRequirement) (labels.Selector, error) {
+	if len(nsm) == 0 {
+		return labels.Nothing(), nil
+	}
+	selector := labels.NewSelector()
+	for _, expr := range nsm {
+		var op labels.Operator
+		switch expr.Operator {
+		case NodeSelectorOpIn:
+			op = labels.InOperator
+		case NodeSelectorOpNotIn:
+			op = labels.NotInOperator
+		case NodeSelectorOpExists:
+			op = labels.ExistsOperator
+		case NodeSelectorOpDoesNotExist:
+			op = labels.DoesNotExistOperator
+		case NodeSelectorOpGt:
+			op = labels.GreaterThanOperator
+		case NodeSelectorOpLt:
+			op = labels.LessThanOperator
+		default:
+			return nil, fmt.Errorf("%q is not a valid node selector operator", expr.Operator)
+		}
+		r, err := labels.NewRequirement(expr.Key, op, sets.NewString(expr.Values...))
+		if err != nil {
+			return nil, err
+		}
+		selector = selector.Add(*r)
+	}
+	return selector, nil
+}
+
+// AffinityAnnotationKey represents the key of affinity data (json serialized)
+// in the Annotations of a Pod.
+const AffinityAnnotationKey string = "scheduler.alpha.kubernetes.io/affinity"
+
+// GetAffinityFromPodAnnotations gets the json serialized affinity data from Pod.Annotations
+// and converts it to the Affinity type in api.
+func GetAffinityFromPodAnnotations(annotations map[string]string) (Affinity, error) {
+	var affinity Affinity
+	if len(annotations) > 0 && annotations[AffinityAnnotationKey] != "" {
+		err := json.Unmarshal([]byte(annotations[AffinityAnnotationKey]), &affinity)
+		if err != nil {
+			return affinity, err
+		}
+	}
+	return affinity, nil
+}
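
Note (illustrative, not part of this diff): a minimal sketch of the helpers above in use. NodeSelectorRequirementsAsSelector turns a term's matchExpressions into a labels.Selector that can be matched against a node's labels; the cpu-generation label and the import paths are assumptions for the example.

package main

import (
    "fmt"

    "k8s.io/kubernetes/pkg/api"
    "k8s.io/kubernetes/pkg/labels"
)

func main() {
    requirements := []api.NodeSelectorRequirement{
        {Key: "kubernetes.io/e2e-az-name", Operator: api.NodeSelectorOpIn, Values: []string{"e2e-az1", "e2e-az2"}},
        // Gt/Lt take a single numeric value, matched against the node label's numeric value.
        {Key: "cpu-generation", Operator: api.NodeSelectorOpGt, Values: []string{"3"}},
    }
    selector, err := api.NodeSelectorRequirementsAsSelector(requirements)
    if err != nil {
        panic(err)
    }
    fmt.Println(selector.Matches(labels.Set{"kubernetes.io/e2e-az-name": "e2e-az1", "cpu-generation": "4"})) // true
    fmt.Println(selector.Matches(labels.Set{"kubernetes.io/e2e-az-name": "e2e-az3", "cpu-generation": "4"})) // false
}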
@@ -17,10 +17,12 @@ limitations under the License.
 package api
 
 import (
+	"reflect"
 	"strings"
 	"testing"
 
 	"k8s.io/kubernetes/pkg/api/resource"
+	"k8s.io/kubernetes/pkg/labels"
 
 	"speter.net/go/exp/math/dec/inf"
 )
@@ -175,3 +177,123 @@ func TestRemoveDuplicateAccessModes(t *testing.T) {
 		t.Errorf("Expected 2 distinct modes in set but found %v", len(modes))
 	}
 }
+
+func TestNodeSelectorRequirementsAsSelector(t *testing.T) {
+	matchExpressions := []NodeSelectorRequirement{{
+		Key:      "foo",
+		Operator: NodeSelectorOpIn,
+		Values:   []string{"bar", "baz"},
+	}}
+	mustParse := func(s string) labels.Selector {
+		out, e := labels.Parse(s)
+		if e != nil {
+			panic(e)
+		}
+		return out
+	}
+	tc := []struct {
+		in        []NodeSelectorRequirement
+		out       labels.Selector
+		expectErr bool
+	}{
+		{in: nil, out: labels.Nothing()},
+		{in: []NodeSelectorRequirement{}, out: labels.Nothing()},
+		{
+			in:  matchExpressions,
+			out: mustParse("foo in (baz,bar)"),
+		},
+		{
+			in: []NodeSelectorRequirement{{
+				Key:      "foo",
+				Operator: NodeSelectorOpExists,
+				Values:   []string{"bar", "baz"},
+			}},
+			expectErr: true,
+		},
+		{
+			in: []NodeSelectorRequirement{{
+				Key:      "foo",
+				Operator: NodeSelectorOpGt,
+				Values:   []string{"1.1"},
+			}},
+			out: mustParse("foo>1.1"),
+		},
+		{
+			in: []NodeSelectorRequirement{{
+				Key:      "bar",
+				Operator: NodeSelectorOpLt,
+				Values:   []string{"7.1"},
+			}},
+			out: mustParse("bar<7.1"),
+		},
+	}
+
+	for i, tc := range tc {
+		out, err := NodeSelectorRequirementsAsSelector(tc.in)
+		if err == nil && tc.expectErr {
+			t.Errorf("[%v]expected error but got none.", i)
+		}
+		if err != nil && !tc.expectErr {
+			t.Errorf("[%v]did not expect error but got: %v", i, err)
+		}
+		if !reflect.DeepEqual(out, tc.out) {
+			t.Errorf("[%v]expected:\n\t%+v\nbut got:\n\t%+v", i, tc.out, out)
+		}
+	}
+}
+
+func TestGetAffinityFromPod(t *testing.T) {
+	testCases := []struct {
+		pod       *Pod
+		expectErr bool
+	}{
+		{
+			pod:       &Pod{},
+			expectErr: false,
+		},
+		{
+			pod: &Pod{
+				ObjectMeta: ObjectMeta{
+					Annotations: map[string]string{
+						AffinityAnnotationKey: `
+						{"nodeAffinity": { "requiredDuringSchedulingIgnoredDuringExecution": {
+							"nodeSelectorTerms": [{
+								"matchExpressions": [{
+									"key": "foo",
+									"operator": "In",
+									"values": ["value1", "value2"]
+								}]
+							}]
+						}}}`,
+					},
+				},
+			},
+			expectErr: false,
+		},
+		{
+			pod: &Pod{
+				ObjectMeta: ObjectMeta{
+					Annotations: map[string]string{
+						AffinityAnnotationKey: `
+						{"nodeAffinity": { "requiredDuringSchedulingIgnoredDuringExecution": {
+							"nodeSelectorTerms": [{
+								"matchExpressions": [{
+									"key": "foo",
+						`,
+					},
+				},
+			},
+			expectErr: true,
+		},
+	}
+
+	for i, tc := range testCases {
+		_, err := GetAffinityFromPodAnnotations(tc.pod.Annotations)
+		if err == nil && tc.expectErr {
+			t.Errorf("[%v]expected error but got none.", i)
+		}
+		if err != nil && !tc.expectErr {
+			t.Errorf("[%v]did not expect error but got: %v", i, err)
+		}
+	}
+}
File diff suppressed because it is too large
@@ -1043,6 +1043,93 @@ const (
 	DNSDefault DNSPolicy = "Default"
 )
 
+// A node selector represents the union of the results of one or more label queries
+// over a set of nodes; that is, it represents the OR of the selectors represented
+// by the node selector terms.
+type NodeSelector struct {
+	// Required. A list of node selector terms. The terms are ORed.
+	NodeSelectorTerms []NodeSelectorTerm `json:"nodeSelectorTerms"`
+}
+
+// A null or empty node selector term matches no objects.
+type NodeSelectorTerm struct {
+	// Required. A list of node selector requirements. The requirements are ANDed.
+	MatchExpressions []NodeSelectorRequirement `json:"matchExpressions"`
+}
+
+// A node selector requirement is a selector that contains values, a key, and an operator
+// that relates the key and values.
+type NodeSelectorRequirement struct {
+	// The label key that the selector applies to.
+	Key string `json:"key" patchStrategy:"merge" patchMergeKey:"key"`
+	// Represents a key's relationship to a set of values.
+	// Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+	Operator NodeSelectorOperator `json:"operator"`
+	// An array of string values. If the operator is In or NotIn,
+	// the values array must be non-empty. If the operator is Exists or DoesNotExist,
+	// the values array must be empty. If the operator is Gt or Lt, the values
+	// array must have a single element, which will be interpreted as an integer.
+	// This array is replaced during a strategic merge patch.
+	Values []string `json:"values,omitempty"`
+}
+
+// A node selector operator is the set of operators that can be used in
+// a node selector requirement.
+type NodeSelectorOperator string
+
+const (
+	NodeSelectorOpIn           NodeSelectorOperator = "In"
+	NodeSelectorOpNotIn        NodeSelectorOperator = "NotIn"
+	NodeSelectorOpExists       NodeSelectorOperator = "Exists"
+	NodeSelectorOpDoesNotExist NodeSelectorOperator = "DoesNotExist"
+	NodeSelectorOpGt           NodeSelectorOperator = "Gt"
+	NodeSelectorOpLt           NodeSelectorOperator = "Lt"
+)
+
+// Affinity is a group of affinity scheduling requirements.
+type Affinity struct {
+	// Describes node affinity scheduling requirements for the pod.
+	NodeAffinity *NodeAffinity `json:"nodeAffinity,omitempty"`
+}
+
+// Node affinity is a group of node affinity scheduling requirements.
+// If RequiredDuringSchedulingRequiredDuringExecution and
+// RequiredDuringSchedulingIgnoredDuringExecution are both set,
+// then both node selectors must be satisfied.
+type NodeAffinity struct {
+	// If the affinity requirements specified by this field are not met at
+	// scheduling time, the pod will not be scheduled onto the node.
+	// If the affinity requirements specified by this field cease to be met
+	// at some point during pod execution (e.g. due to an update), the system
+	// will try to eventually evict the pod from its node.
+	RequiredDuringSchedulingRequiredDuringExecution *NodeSelector `json:"requiredDuringSchedulingRequiredDuringExecution,omitempty"`
+	// If the affinity requirements specified by this field are not met at
+	// scheduling time, the pod will not be scheduled onto the node.
+	// If the affinity requirements specified by this field cease to be met
+	// at some point during pod execution (e.g. due to an update), the system
+	// may or may not try to eventually evict the pod from its node.
+	RequiredDuringSchedulingIgnoredDuringExecution *NodeSelector `json:"requiredDuringSchedulingIgnoredDuringExecution,omitempty"`
+	// The scheduler will prefer to schedule pods to nodes that satisfy
+	// the affinity expressions specified by this field, but it may choose
+	// a node that violates one or more of the expressions. The node that is
+	// most preferred is the one with the greatest sum of weights, i.e.
+	// for each node that meets all of the scheduling requirements (resource
+	// request, requiredDuringScheduling affinity expressions, etc.),
+	// compute a sum by iterating through the elements of this field and adding
+	// "weight" to the sum if the node matches the corresponding matchExpressions; the
+	// node(s) with the highest sum are the most preferred.
+	PreferredDuringSchedulingIgnoredDuringExecution []PreferredSchedulingTerm `json:"preferredDuringSchedulingIgnoredDuringExecution,omitempty"`
+}
+
+// An empty preferred scheduling term matches all objects with implicit weight 0
+// (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).
+type PreferredSchedulingTerm struct {
+	// Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100.
+	Weight int `json:"weight"`
+	// A node selector term, associated with the corresponding weight.
+	Preference NodeSelectorTerm `json:"preference"`
+}
+
 // PodSpec is a description of a pod
 type PodSpec struct {
 	Volumes []Volume `json:"volumes"`
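
Note (illustrative, not part of this diff): a simplified sketch of the weight-summing rule described in the PreferredDuringSchedulingIgnoredDuringExecution comment, written against the types above and the helper added in pkg/api. The disktype and zone labels are hypothetical, and this is not the scheduler's actual priority code.

package main

import (
    "fmt"

    "k8s.io/kubernetes/pkg/api"
    "k8s.io/kubernetes/pkg/labels"
)

func main() {
    preferred := []api.PreferredSchedulingTerm{
        {Weight: 10, Preference: api.NodeSelectorTerm{MatchExpressions: []api.NodeSelectorRequirement{
            {Key: "disktype", Operator: api.NodeSelectorOpIn, Values: []string{"ssd"}},
        }}},
        {Weight: 5, Preference: api.NodeSelectorTerm{MatchExpressions: []api.NodeSelectorRequirement{
            {Key: "zone", Operator: api.NodeSelectorOpIn, Values: []string{"us-east-1a"}},
        }}},
    }
    nodeLabels := labels.Set{"disktype": "ssd", "zone": "us-east-1a"}

    // For each preferred term whose matchExpressions match the node, add its weight.
    score := 0
    for _, term := range preferred {
        selector, err := api.NodeSelectorRequirementsAsSelector(term.Preference.MatchExpressions)
        if err != nil {
            panic(err)
        }
        if selector.Matches(nodeLabels) {
            score += term.Weight
        }
    }
    fmt.Println(score) // 15: the node matches both terms, so the weights add up
}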
File diff suppressed because it is too large
@@ -1273,6 +1273,94 @@ const (
 	DefaultTerminationGracePeriodSeconds = 30
 )
 
+// A node selector represents the union of the results of one or more label queries
+// over a set of nodes; that is, it represents the OR of the selectors represented
+// by the node selector terms.
+type NodeSelector struct {
+	// Required. A list of node selector terms. The terms are ORed.
+	NodeSelectorTerms []NodeSelectorTerm `json:"nodeSelectorTerms"`
+}
+
+// A null or empty node selector term matches no objects.
+type NodeSelectorTerm struct {
+	// Required. A list of node selector requirements. The requirements are ANDed.
+	MatchExpressions []NodeSelectorRequirement `json:"matchExpressions"`
+}
+
+// A node selector requirement is a selector that contains values, a key, and an operator
+// that relates the key and values.
+type NodeSelectorRequirement struct {
+	// The label key that the selector applies to.
+	Key string `json:"key" patchStrategy:"merge" patchMergeKey:"key"`
+	// Represents a key's relationship to a set of values.
+	// Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+	Operator NodeSelectorOperator `json:"operator"`
+	// An array of string values. If the operator is In or NotIn,
+	// the values array must be non-empty. If the operator is Exists or DoesNotExist,
+	// the values array must be empty. If the operator is Gt or Lt, the values
+	// array must have a single element, which will be interpreted as an integer.
+	// This array is replaced during a strategic merge patch.
+	Values []string `json:"values,omitempty"`
+}
+
+// A node selector operator is the set of operators that can be used in
+// a node selector requirement.
+type NodeSelectorOperator string
+
+const (
+	NodeSelectorOpIn           NodeSelectorOperator = "In"
+	NodeSelectorOpNotIn        NodeSelectorOperator = "NotIn"
+	NodeSelectorOpExists       NodeSelectorOperator = "Exists"
+	NodeSelectorOpDoesNotExist NodeSelectorOperator = "DoesNotExist"
+	NodeSelectorOpGt           NodeSelectorOperator = "Gt"
+	NodeSelectorOpLt           NodeSelectorOperator = "Lt"
+)
+
+// Affinity is a group of affinity scheduling requirements,
+// including node affinity and inter pod affinity.
+type Affinity struct {
+	// Describes node affinity scheduling requirements for the pod.
+	NodeAffinity *NodeAffinity `json:"nodeAffinity,omitempty"`
+}
+
+// Node affinity is a group of node affinity scheduling requirements.
+// If RequiredDuringSchedulingRequiredDuringExecution and
+// RequiredDuringSchedulingIgnoredDuringExecution are both set,
+// then both node selectors must be satisfied.
+type NodeAffinity struct {
+	// If the affinity requirements specified by this field are not met at
+	// scheduling time, the pod will not be scheduled onto the node.
+	// If the affinity requirements specified by this field cease to be met
+	// at some point during pod execution (e.g. due to an update), the system
+	// will try to eventually evict the pod from its node.
+	RequiredDuringSchedulingRequiredDuringExecution *NodeSelector `json:"requiredDuringSchedulingRequiredDuringExecution,omitempty"`
+	// If the affinity requirements specified by this field are not met at
+	// scheduling time, the pod will not be scheduled onto the node.
+	// If the affinity requirements specified by this field cease to be met
+	// at some point during pod execution (e.g. due to an update), the system
+	// may or may not try to eventually evict the pod from its node.
+	RequiredDuringSchedulingIgnoredDuringExecution *NodeSelector `json:"requiredDuringSchedulingIgnoredDuringExecution,omitempty"`
+	// The scheduler will prefer to schedule pods to nodes that satisfy
+	// the affinity expressions specified by this field, but it may choose
+	// a node that violates one or more of the expressions. The node that is
+	// most preferred is the one with the greatest sum of weights, i.e.
+	// for each node that meets all of the scheduling requirements (resource
+	// request, requiredDuringScheduling affinity expressions, etc.),
+	// compute a sum by iterating through the elements of this field and adding
+	// "weight" to the sum if the node matches the corresponding matchExpressions; the
+	// node(s) with the highest sum are the most preferred.
+	PreferredDuringSchedulingIgnoredDuringExecution []PreferredSchedulingTerm `json:"preferredDuringSchedulingIgnoredDuringExecution,omitempty"`
+}
+
+// An empty preferred scheduling term matches all objects with implicit weight 0
+// (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).
+type PreferredSchedulingTerm struct {
+	// Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100.
+	Weight int `json:"weight"`
+	// A node selector term, associated with the corresponding weight.
+	Preference NodeSelectorTerm `json:"preference"`
+}
+
 // PodSpec is a description of a pod.
 type PodSpec struct {
 	// List of volumes that can be mounted by containers belonging to the pod.
@@ -39,6 +39,15 @@ func (AWSElasticBlockStoreVolumeSource) SwaggerDoc() map[string]string {
 	return map_AWSElasticBlockStoreVolumeSource
 }
 
+var map_Affinity = map[string]string{
+	"":             "Affinity is a group of affinity scheduling requirements, including node affinity and inter pod affinity.",
+	"nodeAffinity": "Describes node affinity scheduling requirements for the pod.",
+}
+
+func (Affinity) SwaggerDoc() map[string]string {
+	return map_Affinity
+}
+
 var map_Binding = map[string]string{
 	"":         "Binding ties one object to another. For example, a pod is bound to a node by a scheduler.",
 	"metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata",
@@ -720,6 +729,17 @@ func (NodeAddress) SwaggerDoc() map[string]string {
 	return map_NodeAddress
 }
 
+var map_NodeAffinity = map[string]string{
+	"": "Node affinity is a group of node affinity scheduling requirements. If RequiredDuringSchedulingRequiredDuringExecution and RequiredDuringSchedulingIgnoredDuringExecution are both set, then both node selectors must be satisfied.",
+	"requiredDuringSchedulingRequiredDuringExecution": "If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system will try to eventually evict the pod from its node.",
+	"requiredDuringSchedulingIgnoredDuringExecution":  "If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node.",
+	"preferredDuringSchedulingIgnoredDuringExecution": "The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \"weight\" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred.",
+}
+
+func (NodeAffinity) SwaggerDoc() map[string]string {
+	return map_NodeAffinity
+}
+
 var map_NodeCondition = map[string]string{
 	"":     "NodeCondition contains condition information for a node.",
 	"type": "Type of node condition.",
@@ -753,6 +773,35 @@ func (NodeList) SwaggerDoc() map[string]string {
 	return map_NodeList
 }
 
+var map_NodeSelector = map[string]string{
+	"":                  "A node selector represents the union of the results of one or more label queries over a set of nodes; that is, it represents the OR of the selectors represented by the node selector terms.",
+	"nodeSelectorTerms": "Required. A list of node selector terms. The terms are ORed.",
+}
+
+func (NodeSelector) SwaggerDoc() map[string]string {
+	return map_NodeSelector
+}
+
+var map_NodeSelectorRequirement = map[string]string{
+	"":         "A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.",
+	"key":      "The label key that the selector applies to.",
+	"operator": "Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.",
+	"values":   "An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.",
+}
+
+func (NodeSelectorRequirement) SwaggerDoc() map[string]string {
+	return map_NodeSelectorRequirement
+}
+
+var map_NodeSelectorTerm = map[string]string{
+	"":                 "A null or empty node selector term matches no objects.",
+	"matchExpressions": "Required. A list of node selector requirements. The requirements are ANDed.",
+}
+
+func (NodeSelectorTerm) SwaggerDoc() map[string]string {
+	return map_NodeSelectorTerm
+}
+
 var map_NodeSpec = map[string]string{
 	"":        "NodeSpec describes the attributes that a node is created with.",
 	"podCIDR": "PodCIDR represents the pod IP range assigned to the node.",
@@ -1138,6 +1187,16 @@ func (PodTemplateSpec) SwaggerDoc() map[string]string {
 	return map_PodTemplateSpec
 }
 
+var map_PreferredSchedulingTerm = map[string]string{
+	"":           "An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).",
+	"weight":     "Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100.",
+	"preference": "A node selector term, associated with the corresponding weight.",
+}
+
+func (PreferredSchedulingTerm) SwaggerDoc() map[string]string {
+	return map_PreferredSchedulingTerm
+}
+
 var map_Probe = map[string]string{
 	"":                    "Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic.",
 	"initialDelaySeconds": "Number of seconds after the container has started before liveness probes are initiated. More info: http://releases.k8s.io/HEAD/docs/user-guide/pod-states.md#container-probes",
@@ -97,6 +97,10 @@ func ValidateAnnotations(annotations map[string]string, fldPath *field.Path) fie
 	if totalSize > (int64)(totalAnnotationSizeLimitB) {
 		allErrs = append(allErrs, field.TooLong(fldPath, "", totalAnnotationSizeLimitB))
 	}
+
+	if annotations[api.AffinityAnnotationKey] != "" {
+		allErrs = append(allErrs, ValidateAffinityInPodAnnotations(annotations, fldPath)...)
+	}
 	return allErrs
 }
 
@@ -1346,6 +1350,102 @@ func ValidatePodSpec(spec *api.PodSpec, fldPath *field.Path) field.ErrorList {
 	return allErrs
 }
 
+// ValidateNodeSelectorRequirement tests that the specified NodeSelectorRequirement fields have valid data
+func ValidateNodeSelectorRequirement(rq api.NodeSelectorRequirement, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+	switch rq.Operator {
+	case api.NodeSelectorOpIn, api.NodeSelectorOpNotIn:
+		if len(rq.Values) == 0 {
+			allErrs = append(allErrs, field.Required(fldPath.Child("values"), "must be specified when `operator` is 'In' or 'NotIn'"))
+		}
+	case api.NodeSelectorOpExists, api.NodeSelectorOpDoesNotExist:
+		if len(rq.Values) > 0 {
+			allErrs = append(allErrs, field.Forbidden(fldPath.Child("values"), "may not be specified when `operator` is 'Exists' or 'DoesNotExist'"))
+		}
+	case api.NodeSelectorOpGt, api.NodeSelectorOpLt:
+		if len(rq.Values) != 1 {
+			allErrs = append(allErrs, field.Required(fldPath.Child("values"), "must be specified single value when `operator` is 'Lt' or 'Gt'"))
+		}
+	default:
+		allErrs = append(allErrs, field.Invalid(fldPath.Child("operator"), rq.Operator, "not a valid selector operator"))
+	}
+	allErrs = append(allErrs, ValidateLabelName(rq.Key, fldPath.Child("key"))...)
+	return allErrs
+}
+
+// ValidateNodeSelectorTerm tests that the specified node selector term has valid data
+func ValidateNodeSelectorTerm(term api.NodeSelectorTerm, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+
+	if len(term.MatchExpressions) == 0 {
+		return append(allErrs, field.Required(fldPath.Child("matchExpressions"), "must have at least one node selector requirement"))
+	}
+	for j, req := range term.MatchExpressions {
+		allErrs = append(allErrs, ValidateNodeSelectorRequirement(req, fldPath.Child("matchExpressions").Index(j))...)
+	}
+	return allErrs
+}
+
+// ValidateNodeSelector tests that the specified nodeSelector fields have valid data
+func ValidateNodeSelector(nodeSelector *api.NodeSelector, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+
+	termFldPath := fldPath.Child("nodeSelectorTerms")
+	if len(nodeSelector.NodeSelectorTerms) == 0 {
+		return append(allErrs, field.Required(termFldPath, "must have at least one node selector term"))
+	}
+
+	for i, term := range nodeSelector.NodeSelectorTerms {
+		allErrs = append(allErrs, ValidateNodeSelectorTerm(term, termFldPath.Index(i))...)
+	}
+
+	return allErrs
+}
+
+// ValidatePreferredSchedulingTerms tests that the specified SoftNodeAffinity fields have valid data
+func ValidatePreferredSchedulingTerms(terms []api.PreferredSchedulingTerm, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+
+	for i, term := range terms {
+		if term.Weight <= 0 || term.Weight > 100 {
+			allErrs = append(allErrs, field.Invalid(fldPath.Index(i).Child("weight"), term.Weight, "must be in the range 1-100"))
+		}
+
+		allErrs = append(allErrs, ValidateNodeSelectorTerm(term.Preference, fldPath.Index(i).Child("preference"))...)
+	}
+	return allErrs
+}
+
+// ValidateAffinityInPodAnnotations tests that the serialized Affinity in Pod.Annotations has valid data
+func ValidateAffinityInPodAnnotations(annotations map[string]string, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+
+	affinity, err := api.GetAffinityFromPodAnnotations(annotations)
+	if err != nil {
+		allErrs = append(allErrs, field.Invalid(fldPath, api.AffinityAnnotationKey, err.Error()))
+		return allErrs
+	}
+
+	if affinity.NodeAffinity != nil {
+		na := affinity.NodeAffinity
+		if na.RequiredDuringSchedulingRequiredDuringExecution != nil {
+			allErrs = append(allErrs, ValidateNodeSelector(na.RequiredDuringSchedulingRequiredDuringExecution, fldPath.Child("requiredDuringSchedulingRequiredDuringExecution"))...)
+		}
+
+		if na.RequiredDuringSchedulingIgnoredDuringExecution != nil {
+			allErrs = append(allErrs, ValidateNodeSelector(na.RequiredDuringSchedulingIgnoredDuringExecution, fldPath.Child("requiredDuringSchedulingIgnoredDuringExecution"))...)
+		}
+
+		if len(na.PreferredDuringSchedulingIgnoredDuringExecution) > 0 {
+			allErrs = append(allErrs, ValidatePreferredSchedulingTerms(na.PreferredDuringSchedulingIgnoredDuringExecution, fldPath.Child("preferredDuringSchedulingIgnoredDuringExecution"))...)
+		}
+	}
+
+	return allErrs
+}
+
 // ValidatePodSecurityContext test that the specified PodSecurityContext has valid data.
 func ValidatePodSecurityContext(securityContext *api.PodSecurityContext, spec *api.PodSpec, specPath, fldPath *field.Path) field.ErrorList {
 	allErrs := field.ErrorList{}
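
Note (illustrative, not part of this diff): a minimal sketch of the new annotation validation; a preferred term with weight 0 falls outside the allowed 1-100 range and should be rejected. The import paths are assumed from this tree's layout.

package main

import (
    "fmt"

    "k8s.io/kubernetes/pkg/api"
    "k8s.io/kubernetes/pkg/api/validation"
    "k8s.io/kubernetes/pkg/util/validation/field"
)

func main() {
    annotations := map[string]string{
        // Weight 0 is invalid: ValidatePreferredSchedulingTerms requires 1-100.
        api.AffinityAnnotationKey: `{"nodeAffinity": {"preferredDuringSchedulingIgnoredDuringExecution": [
            {"weight": 0, "preference": {"matchExpressions": [{"key": "foo", "operator": "Exists"}]}}
        ]}}`,
    }
    errs := validation.ValidateAffinityInPodAnnotations(annotations, field.NewPath("annotations"))
    fmt.Println(errs) // expect an Invalid error on ...preferredDuringSchedulingIgnoredDuringExecution[0].weight
}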
@@ -1550,6 +1550,12 @@ func TestValidatePodSpec(t *testing.T) {
 			RestartPolicy: api.RestartPolicyAlways,
 			DNSPolicy:     api.DNSClusterFirst,
 		},
+		{ // Populate Affinity.
+			Volumes:       []api.Volume{{Name: "vol", VolumeSource: api.VolumeSource{EmptyDir: &api.EmptyDirVolumeSource{}}}},
+			Containers:    []api.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent"}},
+			RestartPolicy: api.RestartPolicyAlways,
+			DNSPolicy:     api.DNSClusterFirst,
+		},
 	}
 	for i := range successCases {
 		if errs := ValidatePodSpec(&successCases[i], field.NewPath("field")); len(errs) != 0 {
@@ -1712,6 +1718,50 @@ func TestValidatePod(t *testing.T) {
 				NodeName: "foobar",
 			},
 		},
+		{ // Serialized affinity requirements in annotations.
+			ObjectMeta: api.ObjectMeta{
+				Name:      "123",
+				Namespace: "ns",
+				Annotations: map[string]string{
+					api.AffinityAnnotationKey: `
+					{"nodeAffinity": {
+						"requiredDuringSchedulingRequiredDuringExecution": {
+							"nodeSelectorTerms": [{
+								"matchExpressions": [{
+									"key": "key1",
+									"operator": "Exists"
+								}]
+							}]
+						},
+						"requiredDuringSchedulingIgnoredDuringExecution": {
+							"nodeSelectorTerms": [{
+								"matchExpressions": [{
+									"key": "key2",
+									"operator": "In",
+									"values": ["value1", "value2"]
+								}]
+							}]
+						},
+						"preferredDuringSchedulingIgnoredDuringExecution": [
+							{
+								"weight": 10,
+								"preference": {"matchExpressions": [
+									{
+										"key": "foo",
+										"operator": "In", "values": ["bar"]
+									}
+								]}
+							}
+						]
+					}}`,
+				},
+			},
+			Spec: api.PodSpec{
+				Containers:    []api.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent"}},
+				RestartPolicy: api.RestartPolicyAlways,
+				DNSPolicy:     api.DNSClusterFirst,
+			},
+		},
 	}
 	for _, pod := range successCases {
 		if errs := ValidatePod(&pod); len(errs) != 0 {
@@ -1756,6 +1806,111 @@ func TestValidatePod(t *testing.T) {
 				Containers: []api.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent"}},
 			},
 		},
+		"invalid json of affinity in pod annotations": {
+			ObjectMeta: api.ObjectMeta{
+				Name:      "123",
+				Namespace: "ns",
+				Annotations: map[string]string{
+					api.AffinityAnnotationKey: `
+					{"nodeAffinity": {
+						"requiredDuringSchedulingRequiredDuringExecution": {
+							"nodeSelectorTerms": [{
+					`,
+				},
+			},
+			Spec: api.PodSpec{
+				Containers:    []api.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent"}},
+				RestartPolicy: api.RestartPolicyAlways,
+				DNSPolicy:     api.DNSClusterFirst,
+			},
+		},
+		"invalid node selector requirement in affinity in pod annotations, operator can't be null": {
+			ObjectMeta: api.ObjectMeta{
+				Name:      "123",
+				Namespace: "ns",
+				Annotations: map[string]string{
+					api.AffinityAnnotationKey: `
+					{"nodeAffinity": {"requiredDuringSchedulingRequiredDuringExecution": {
+						"nodeSelectorTerms": [{
+							"matchExpressions": [{
+								"key": "key1",
+							}]
+						}]
+					}}}`,
+				},
+			},
+			Spec: api.PodSpec{
+				Containers:    []api.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent"}},
+				RestartPolicy: api.RestartPolicyAlways,
+				DNSPolicy:     api.DNSClusterFirst,
+			},
+		},
+		"invalid preferredSchedulingTerm in affinity in pod annotations, weight should be in range 1-100": {
+			ObjectMeta: api.ObjectMeta{
+				Name:      "123",
+				Namespace: "ns",
+				Annotations: map[string]string{
+					api.AffinityAnnotationKey: `
+					{"nodeAffinity": {"preferredDuringSchedulingIgnoredDuringExecution": [
+						{
+							"weight": 199,
+							"preference": {"matchExpressions": [
+								{
+									"key": "foo",
+									"operator": "In",
+									"values": ["bar"]
+								}
+							]}
+						}
+					]}}`,
+				},
+			},
+			Spec: api.PodSpec{
+				Containers:    []api.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent"}},
+				RestartPolicy: api.RestartPolicyAlways,
+				DNSPolicy:     api.DNSClusterFirst,
+			},
+		},
+		"invalid requiredDuringSchedulingRequiredDuringExecution node selector, nodeSelectorTerms must have at least one term": {
+			ObjectMeta: api.ObjectMeta{
+				Name:      "123",
+				Namespace: "ns",
+				Annotations: map[string]string{
+					api.AffinityAnnotationKey: `
+					{"nodeAffinity": {
+						"requiredDuringSchedulingRequiredDuringExecution": {
+							"nodeSelectorTerms": []
+						},
+					}}`,
+				},
+			},
+			Spec: api.PodSpec{
+				Containers:    []api.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent"}},
+				RestartPolicy: api.RestartPolicyAlways,
+				DNSPolicy:     api.DNSClusterFirst,
+			},
+		},
+		"invalid requiredDuringSchedulingRequiredDuringExecution node selector term, matchExpressions must have at least one node selector requirement": {
+			ObjectMeta: api.ObjectMeta{
+				Name:      "123",
+				Namespace: "ns",
+				Annotations: map[string]string{
+					api.AffinityAnnotationKey: `
+					{"nodeAffinity": {
+						"requiredDuringSchedulingRequiredDuringExecution": {
+							"nodeSelectorTerms": [{
+								"matchExpressions": []
+							}]
+						},
+					}}`,
+				},
+			},
+			Spec: api.PodSpec{
+				Containers:    []api.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent"}},
+				RestartPolicy: api.RestartPolicyAlways,
+				DNSPolicy:     api.DNSClusterFirst,
+			},
+		},
 	}
 	for k, v := range errorCases {
 		if errs := ValidatePod(&v); len(errs) == 0 {
@@ -20,8 +20,10 @@ import (
 	"bytes"
 	"fmt"
 	"sort"
+	"strconv"
 	"strings"
 
+	"github.com/golang/glog"
 	"k8s.io/kubernetes/pkg/util/sets"
 	"k8s.io/kubernetes/pkg/util/validation"
 )
@@ -70,6 +72,8 @@ const (
 	NotEqualsOperator    Operator = "!="
 	NotInOperator        Operator = "notin"
 	ExistsOperator       Operator = "exists"
+	GreaterThanOperator  Operator = "Gt"
+	LessThanOperator     Operator = "Lt"
 )
 
 func NewSelector() Selector {
@@ -104,7 +108,8 @@ type Requirement struct {
 // (2) If the operator is In or NotIn, the values set must be non-empty.
 // (3) If the operator is Equals, DoubleEquals, or NotEquals, the values set must contain one value.
 // (4) If the operator is Exists or DoesNotExist, the value set must be empty.
-// (5) The key is invalid due to its length, or sequence
+// (5) If the operator is Gt or Lt, the values set must contain only one value.
+// (6) The key is invalid due to its length, or sequence
 //     of characters. See validateLabelKey for more details.
 //
 // The empty string is a valid value in the input values set.
|
|||||||
if len(vals) != 0 {
|
if len(vals) != 0 {
|
||||||
return nil, fmt.Errorf("values set must be empty for exists and does not exist")
|
return nil, fmt.Errorf("values set must be empty for exists and does not exist")
|
||||||
}
|
}
|
||||||
|
case GreaterThanOperator, LessThanOperator:
|
||||||
|
if len(vals) != 1 {
|
||||||
|
return nil, fmt.Errorf("for 'Gt', 'Lt' operators, exactly one value is required")
|
||||||
|
}
|
||||||
|
for val := range vals {
|
||||||
|
if _, err := strconv.ParseFloat(val, 64); err != nil {
|
||||||
|
return nil, fmt.Errorf("for 'Gt', 'Lt' operators, the value must be a number")
|
||||||
|
}
|
||||||
|
}
|
||||||
default:
|
default:
|
||||||
return nil, fmt.Errorf("operator '%v' is not recognized", op)
|
return nil, fmt.Errorf("operator '%v' is not recognized", op)
|
||||||
}
|
}
|
||||||
@@ -162,6 +176,31 @@ func (r *Requirement) Matches(ls Labels) bool {
 		return ls.Has(r.key)
 	case DoesNotExistOperator:
 		return !ls.Has(r.key)
+	case GreaterThanOperator, LessThanOperator:
+		if !ls.Has(r.key) {
+			return false
+		}
+		lsValue, err := strconv.ParseFloat(ls.Get(r.key), 64)
+		if err != nil {
+			glog.V(10).Infof("Parse float failed for value %+v in label %+v, %+v", ls.Get(r.key), ls, err)
+			return false
+		}
+
+		// There should be only one strValue in r.strValues, and can be converted to a float number.
+		if len(r.strValues) != 1 {
+			glog.V(10).Infof("Invalid values count %+v of requirement %+v, for 'Gt', 'Lt' operators, exactly one value is required", len(r.strValues), r)
+			return false
+		}
+
+		var rValue float64
+		for strValue := range r.strValues {
+			rValue, err = strconv.ParseFloat(strValue, 64)
+			if err != nil {
+				glog.V(10).Infof("Parse float failed for value %+v in requirement %+v, for 'Gt', 'Lt' operators, the value must be a number", strValue, r)
+				return false
+			}
+		}
+		return (r.operator == GreaterThanOperator && lsValue > rValue) || (r.operator == LessThanOperator && lsValue < rValue)
 	default:
 		return false
 	}
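
Note (illustrative, not part of this diff): the new Gt/Lt label selector operators end to end, using selector strings taken from the tests below.

package main

import (
    "fmt"

    "k8s.io/kubernetes/pkg/labels"
)

func main() {
    // ">" and "<" are now lexed as GreaterThanToken/LessThanToken and parsed into
    // GreaterThanOperator/LessThanOperator requirements with a single numeric value.
    selector, err := labels.Parse("x>1.1,z<5.3")
    if err != nil {
        panic(err)
    }
    fmt.Println(selector.Matches(labels.Set{"x": "1.2", "z": "2.1"})) // true
    fmt.Println(selector.Matches(labels.Set{"x": "0.8", "z": "2.1"})) // false: 0.8 is not greater than 1.1
}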
@@ -210,6 +249,10 @@ func (r *Requirement) String() string {
 		buffer.WriteString(" in ")
 	case NotInOperator:
 		buffer.WriteString(" notin ")
+	case GreaterThanOperator:
+		buffer.WriteString(">")
+	case LessThanOperator:
+		buffer.WriteString("<")
 	case ExistsOperator, DoesNotExistOperator:
 		return buffer.String()
 	}
@@ -277,8 +320,10 @@ const (
 	DoesNotExistToken
 	DoubleEqualsToken
 	EqualsToken
+	GreaterThanToken
 	IdentifierToken // to represent keys and values
 	InToken
+	LessThanToken
 	NotEqualsToken
 	NotInToken
 	OpenParToken
@@ -292,7 +337,9 @@ var string2token = map[string]Token{
 	"!":     DoesNotExistToken,
 	"==":    DoubleEqualsToken,
 	"=":     EqualsToken,
+	">":     GreaterThanToken,
 	"in":    InToken,
+	"<":     LessThanToken,
 	"!=":    NotEqualsToken,
 	"notin": NotInToken,
 	"(":     OpenParToken,
@@ -312,7 +359,7 @@ func isWhitespace(ch byte) bool {
 // isSpecialSymbol detects if the character ch can be an operator
 func isSpecialSymbol(ch byte) bool {
 	switch ch {
-	case '=', '!', '(', ')', ',':
+	case '=', '!', '(', ')', ',', '>', '<':
 		return true
 	}
 	return false
@@ -526,7 +573,7 @@ func (p *Parser) parseRequirement() (*Requirement, error) {
 	switch operator {
 	case InOperator, NotInOperator:
 		values, err = p.parseValues()
-	case EqualsOperator, DoubleEqualsOperator, NotEqualsOperator:
+	case EqualsOperator, DoubleEqualsOperator, NotEqualsOperator, GreaterThanOperator, LessThanOperator:
 		values, err = p.parseExactValue()
 	}
 	if err != nil {
@@ -573,6 +620,10 @@ func (p *Parser) parseOperator() (op Operator, err error) {
 		op = EqualsOperator
 	case DoubleEqualsToken:
 		op = DoubleEqualsOperator
+	case GreaterThanToken:
+		op = GreaterThanOperator
+	case LessThanToken:
+		op = LessThanOperator
 	case NotInToken:
 		op = NotInOperator
 	case NotEqualsToken:
@ -34,11 +34,14 @@ func TestSelectorParse(t *testing.T) {
|
|||||||
"x=,z= ",
|
"x=,z= ",
|
||||||
"x= ,z= ",
|
"x= ,z= ",
|
||||||
"!x",
|
"!x",
|
||||||
|
"x>1.1",
|
||||||
|
"x>1.1,z<5.3",
|
||||||
}
|
}
|
||||||
testBadStrings := []string{
|
testBadStrings := []string{
|
||||||
"x=a||y=b",
|
"x=a||y=b",
|
||||||
"x==a==b",
|
"x==a==b",
|
||||||
"!x=a",
|
"!x=a",
|
||||||
|
"x<a",
|
||||||
}
|
}
|
||||||
for _, test := range testGoodStrings {
|
for _, test := range testGoodStrings {
|
||||||
lq, err := Parse(test)
|
lq, err := Parse(test)
|
||||||
@ -107,12 +110,16 @@ func TestSelectorMatches(t *testing.T) {
|
|||||||
expectMatch(t, "notin=in", Set{"notin": "in"}) // in and notin in exactMatch
|
expectMatch(t, "notin=in", Set{"notin": "in"}) // in and notin in exactMatch
|
||||||
expectMatch(t, "x", Set{"x": "z"})
|
expectMatch(t, "x", Set{"x": "z"})
|
||||||
expectMatch(t, "!x", Set{"y": "z"})
|
expectMatch(t, "!x", Set{"y": "z"})
|
||||||
|
expectMatch(t, "x>1.1", Set{"x": "1.2"})
|
||||||
|
expectMatch(t, "x<1.1", Set{"x": "0.8"})
|
||||||
expectNoMatch(t, "x=z", Set{})
|
expectNoMatch(t, "x=z", Set{})
|
||||||
expectNoMatch(t, "x=y", Set{"x": "z"})
|
expectNoMatch(t, "x=y", Set{"x": "z"})
|
||||||
expectNoMatch(t, "x=y,z=w", Set{"x": "w", "z": "w"})
|
expectNoMatch(t, "x=y,z=w", Set{"x": "w", "z": "w"})
|
||||||
expectNoMatch(t, "x!=y,z!=w", Set{"x": "z", "z": "w"})
|
expectNoMatch(t, "x!=y,z!=w", Set{"x": "z", "z": "w"})
|
||||||
expectNoMatch(t, "x", Set{"y": "z"})
|
expectNoMatch(t, "x", Set{"y": "z"})
|
||||||
expectNoMatch(t, "!x", Set{"x": "z"})
|
expectNoMatch(t, "!x", Set{"x": "z"})
|
||||||
|
expectNoMatch(t, "x>1.1", Set{"x": "0.8"})
|
||||||
|
expectNoMatch(t, "x<1.1", Set{"x": "1.1"})
|
||||||
|
|
||||||
labelset := Set{
|
labelset := Set{
|
||||||
"foo": "bar",
|
"foo": "bar",
|
||||||
@ -184,6 +191,8 @@ func TestLexer(t *testing.T) {
|
|||||||
{"in", InToken},
|
{"in", InToken},
|
||||||
{"=", EqualsToken},
|
{"=", EqualsToken},
|
||||||
{"==", DoubleEqualsToken},
|
{"==", DoubleEqualsToken},
|
||||||
|
{">", GreaterThanToken},
|
||||||
|
{"<", LessThanToken},
|
||||||
//Note that Lex returns the longest valid token found
|
//Note that Lex returns the longest valid token found
|
||||||
{"!", DoesNotExistToken},
|
{"!", DoesNotExistToken},
|
||||||
{"!=", NotEqualsToken},
|
{"!=", NotEqualsToken},
|
||||||
@ -226,6 +235,8 @@ func TestLexerSequence(t *testing.T) {
|
|||||||
{"()", []Token{OpenParToken, ClosedParToken}},
|
{"()", []Token{OpenParToken, ClosedParToken}},
|
||||||
{"x in (),y", []Token{IdentifierToken, InToken, OpenParToken, ClosedParToken, CommaToken, IdentifierToken}},
|
{"x in (),y", []Token{IdentifierToken, InToken, OpenParToken, ClosedParToken, CommaToken, IdentifierToken}},
|
||||||
{"== != (), = notin", []Token{DoubleEqualsToken, NotEqualsToken, OpenParToken, ClosedParToken, CommaToken, EqualsToken, NotInToken}},
|
{"== != (), = notin", []Token{DoubleEqualsToken, NotEqualsToken, OpenParToken, ClosedParToken, CommaToken, EqualsToken, NotInToken}},
|
||||||
|
{"key>1.1", []Token{IdentifierToken, GreaterThanToken, IdentifierToken}},
|
||||||
|
{"key<0.8", []Token{IdentifierToken, LessThanToken, IdentifierToken}},
|
||||||
}
|
}
|
||||||
for _, v := range testcases {
|
for _, v := range testcases {
|
||||||
var literals []string
|
var literals []string
|
||||||
@ -263,6 +274,8 @@ func TestParserLookahead(t *testing.T) {
|
|||||||
{"", []Token{EndOfStringToken}},
|
{"", []Token{EndOfStringToken}},
|
||||||
{"x in (),y", []Token{IdentifierToken, InToken, OpenParToken, ClosedParToken, CommaToken, IdentifierToken, EndOfStringToken}},
|
{"x in (),y", []Token{IdentifierToken, InToken, OpenParToken, ClosedParToken, CommaToken, IdentifierToken, EndOfStringToken}},
|
||||||
{"== != (), = notin", []Token{DoubleEqualsToken, NotEqualsToken, OpenParToken, ClosedParToken, CommaToken, EqualsToken, NotInToken, EndOfStringToken}},
|
{"== != (), = notin", []Token{DoubleEqualsToken, NotEqualsToken, OpenParToken, ClosedParToken, CommaToken, EqualsToken, NotInToken, EndOfStringToken}},
|
||||||
|
{"key>1.1", []Token{IdentifierToken, GreaterThanToken, IdentifierToken, EndOfStringToken}},
|
||||||
|
{"key<0.8", []Token{IdentifierToken, LessThanToken, IdentifierToken, EndOfStringToken}},
|
||||||
}
|
}
|
||||||
for _, v := range testcases {
|
for _, v := range testcases {
|
||||||
p := &Parser{l: &Lexer{s: v.s, pos: 0}, position: 0}
|
p := &Parser{l: &Lexer{s: v.s, pos: 0}, position: 0}
|
||||||
@ -299,6 +312,10 @@ func TestRequirementConstructor(t *testing.T) {
|
|||||||
{"x", DoesNotExistOperator, nil, true},
|
{"x", DoesNotExistOperator, nil, true},
|
||||||
{"1foo", InOperator, sets.NewString("bar"), true},
|
{"1foo", InOperator, sets.NewString("bar"), true},
|
||||||
{"1234", InOperator, sets.NewString("bar"), true},
|
{"1234", InOperator, sets.NewString("bar"), true},
|
||||||
|
{"y", GreaterThanOperator, sets.NewString("1.1"), true},
|
||||||
|
{"z", LessThanOperator, sets.NewString("5.3"), true},
|
||||||
|
{"foo", GreaterThanOperator, sets.NewString("bar"), false},
|
||||||
|
{"barz", LessThanOperator, sets.NewString("blah"), false},
|
||||||
{strings.Repeat("a", 254), ExistsOperator, nil, false}, //breaks DNS rule that len(key) <= 253
|
{strings.Repeat("a", 254), ExistsOperator, nil, false}, //breaks DNS rule that len(key) <= 253
|
||||||
}
|
}
|
||||||
for _, rc := range requirementConstructorTests {
|
for _, rc := range requirementConstructorTests {
|
||||||
@ -343,6 +360,11 @@ func TestToString(t *testing.T) {
|
|||||||
getRequirement("z", NotEqualsOperator, sets.NewString("a"), t),
|
getRequirement("z", NotEqualsOperator, sets.NewString("a"), t),
|
||||||
getRequirement("z", ExistsOperator, nil, t)},
|
getRequirement("z", ExistsOperator, nil, t)},
|
||||||
"x=abc,y==jkl,z!=a,z", true},
|
"x=abc,y==jkl,z!=a,z", true},
|
||||||
|
{&internalSelector{
|
||||||
|
getRequirement("x", GreaterThanOperator, sets.NewString("2.4"), t),
|
||||||
|
getRequirement("y", LessThanOperator, sets.NewString("7.1"), t),
|
||||||
|
getRequirement("z", ExistsOperator, nil, t)},
|
||||||
|
"x>2.4,y<7.1,z", true},
|
||||||
}
|
}
|
||||||
for _, ts := range toStringTests {
|
for _, ts := range toStringTests {
|
||||||
if out := ts.In.String(); out == "" && ts.Valid {
|
if out := ts.In.String(); out == "" && ts.Valid {
|
||||||
@ -386,6 +408,12 @@ func TestRequirementSelectorMatching(t *testing.T) {
|
|||||||
{Set{"y": "baz"}, &internalSelector{
|
{Set{"y": "baz"}, &internalSelector{
|
||||||
getRequirement("x", InOperator, sets.NewString(""), t),
|
getRequirement("x", InOperator, sets.NewString(""), t),
|
||||||
}, false},
|
}, false},
|
||||||
|
{Set{"z": "1.2"}, &internalSelector{
|
||||||
|
getRequirement("z", GreaterThanOperator, sets.NewString("1.0"), t),
|
||||||
|
}, true},
|
||||||
|
{Set{"z": "v1.2"}, &internalSelector{
|
||||||
|
getRequirement("z", GreaterThanOperator, sets.NewString("1.0"), t),
|
||||||
|
}, false},
|
||||||
}
|
}
|
||||||
for _, lsm := range labelSelectorMatchingTests {
|
for _, lsm := range labelSelectorMatchingTests {
|
||||||
if match := lsm.Sel.Matches(lsm.Set); match != lsm.Match {
|
if match := lsm.Sel.Matches(lsm.Set); match != lsm.Match {
|
||||||
@ -445,6 +473,12 @@ func TestSetSelectorParser(t *testing.T) {
|
|||||||
{"x=a", internalSelector{
|
{"x=a", internalSelector{
|
||||||
getRequirement("x", EqualsOperator, sets.NewString("a"), t),
|
getRequirement("x", EqualsOperator, sets.NewString("a"), t),
|
||||||
}, true, true},
|
}, true, true},
|
||||||
|
{"x>1.1", internalSelector{
|
||||||
|
getRequirement("x", GreaterThanOperator, sets.NewString("1.1"), t),
|
||||||
|
}, true, true},
|
||||||
|
{"x<7.1", internalSelector{
|
||||||
|
getRequirement("x", LessThanOperator, sets.NewString("7.1"), t),
|
||||||
|
}, true, true},
|
||||||
{"x=a,y!=b", internalSelector{
|
{"x=a,y!=b", internalSelector{
|
||||||
getRequirement("x", EqualsOperator, sets.NewString("a"), t),
|
getRequirement("x", EqualsOperator, sets.NewString("a"), t),
|
||||||
getRequirement("y", NotEqualsOperator, sets.NewString("b"), t),
|
getRequirement("y", NotEqualsOperator, sets.NewString("b"), t),
|
||||||
|
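The selector changes above extend the label-selector grammar with Gt/Lt requirements written as ">" and "<", which compare the label value and the requirement value as floating-point numbers. A minimal sketch of how the extended parser behaves, not part of the commit; it only mirrors the expectations in the tests above and assumes the pkg/labels import path of this tree:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/labels"
)

func main() {
	// ">" and "<" requirements take exactly one value, and both the label value
	// and the requirement value must parse as numbers for the requirement to match.
	sel, err := labels.Parse("x>1.1,z<5.3")
	if err != nil {
		panic(err)
	}

	fmt.Println(sel.Matches(labels.Set{"x": "1.2", "z": "0.8"})) // true: 1.2 > 1.1 and 0.8 < 5.3
	fmt.Println(sel.Matches(labels.Set{"x": "0.8", "z": "0.8"})) // false: 0.8 is not greater than 1.1
	fmt.Println(sel.Matches(labels.Set{"x": "abc", "z": "0.8"})) // false: "abc" does not parse as a number
}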
@@ -343,12 +343,70 @@ func NewSelectorMatchPredicate(info NodeInfo) algorithm.FitPredicate {
	return selector.PodSelectorMatches
}

-func PodMatchesNodeLabels(pod *api.Pod, node *api.Node) bool {
-	if len(pod.Spec.NodeSelector) == 0 {
-		return true
+// NodeMatchesNodeSelectorTerms checks if a node's labels satisfy a list of node selector terms;
+// terms are ORed, and an empty list of terms will match nothing.
+func NodeMatchesNodeSelectorTerms(node *api.Node, nodeSelectorTerms []api.NodeSelectorTerm) bool {
+	for _, req := range nodeSelectorTerms {
+		nodeSelector, err := api.NodeSelectorRequirementsAsSelector(req.MatchExpressions)
+		if err != nil {
+			glog.V(10).Infof("Failed to parse MatchExpressions: %+v, regarding as not match.", req.MatchExpressions)
+			return false
+		}
+		if nodeSelector.Matches(labels.Set(node.Labels)) {
+			return true
+		}
	}
-	selector := labels.SelectorFromSet(pod.Spec.NodeSelector)
-	return selector.Matches(labels.Set(node.Labels))
+	return false
+}
+
+// The pod can only schedule onto nodes that satisfy requirements in both NodeAffinity and nodeSelector.
+func PodMatchesNodeLabels(pod *api.Pod, node *api.Node) bool {
+	// Check if node.Labels match pod.Spec.NodeSelector.
+	if len(pod.Spec.NodeSelector) > 0 {
+		selector := labels.SelectorFromSet(pod.Spec.NodeSelector)
+		if !selector.Matches(labels.Set(node.Labels)) {
+			return false
+		}
+	}
+
+	// Parse the required node affinity scheduling requirements
+	// and check if the current node matches the requirements.
+	affinity, err := api.GetAffinityFromPodAnnotations(pod.Annotations)
+	if err != nil {
+		glog.V(10).Infof("Failed to get Affinity from Pod %+v, err: %+v", podName(pod), err)
+		return false
+	}
+
+	// 1. nil NodeSelector matches all nodes (i.e. does not filter out any nodes)
+	// 2. nil []NodeSelectorTerm (equivalent to non-nil empty NodeSelector) matches no nodes
+	// 3. zero-length non-nil []NodeSelectorTerm matches no nodes also, just for simplicity
+	// 4. nil []NodeSelectorRequirement (equivalent to non-nil empty NodeSelectorTerm) matches no nodes
+	// 5. zero-length non-nil []NodeSelectorRequirement matches no nodes also, just for simplicity
+	// 6. non-nil empty NodeSelectorRequirement is not allowed
+	nodeAffinityMatches := true
+	if affinity.NodeAffinity != nil {
+		nodeAffinity := affinity.NodeAffinity
+		// If there are no required NodeAffinity requirements, this is a no-op, i.e. all nodes are selected.
+		if nodeAffinity.RequiredDuringSchedulingRequiredDuringExecution == nil && nodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution == nil {
+			return true
+		}
+
+		// Match node selector for requiredDuringSchedulingRequiredDuringExecution.
+		if nodeAffinity.RequiredDuringSchedulingRequiredDuringExecution != nil {
+			nodeSelectorTerms := nodeAffinity.RequiredDuringSchedulingRequiredDuringExecution.NodeSelectorTerms
+			glog.V(10).Infof("Match for RequiredDuringSchedulingRequiredDuringExecution node selector terms %+v", nodeSelectorTerms)
+			nodeAffinityMatches = NodeMatchesNodeSelectorTerms(node, nodeSelectorTerms)
+		}
+
+		// Match node selector for requiredDuringSchedulingIgnoredDuringExecution.
+		if nodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil {
+			nodeSelectorTerms := nodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms
+			glog.V(10).Infof("Match for RequiredDuringSchedulingIgnoredDuringExecution node selector terms %+v", nodeSelectorTerms)
+			nodeAffinityMatches = nodeAffinityMatches && NodeMatchesNodeSelectorTerms(node, nodeSelectorTerms)
+		}
+
+	}
+	return nodeAffinityMatches
}

type NodeSelector struct {
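A small sketch of how the predicate above could be exercised directly, not part of the commit; the disktype and region label keys are made up for illustration, and it assumes PodMatchesNodeLabels and api.AffinityAnnotationKey exactly as introduced in this diff:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates"
)

func main() {
	pod := &api.Pod{
		ObjectMeta: api.ObjectMeta{
			Annotations: map[string]string{
				// Required node affinity: the node must carry disktype=ssd.
				api.AffinityAnnotationKey: `{"nodeAffinity": {"requiredDuringSchedulingIgnoredDuringExecution": {
					"nodeSelectorTerms": [
						{"matchExpressions": [{"key": "disktype", "operator": "In", "values": ["ssd"]}]}
					]}}}`,
			},
		},
		// The legacy nodeSelector must be satisfied as well; the two filters are ANDed.
		Spec: api.PodSpec{NodeSelector: map[string]string{"region": "us-west"}},
	}
	node := &api.Node{ObjectMeta: api.ObjectMeta{
		Labels: map[string]string{"disktype": "ssd", "region": "us-west"},
	}}

	fmt.Println(predicates.PodMatchesNodeLabels(pod, node)) // true: both nodeSelector and affinity match
}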
@@ -574,7 +574,395 @@ func TestPodFitsSelector(t *testing.T) {
			fits: false,
			test: "node labels are subset",
		},
+		{
+			pod: &api.Pod{
+				ObjectMeta: api.ObjectMeta{
+					Annotations: map[string]string{
+						api.AffinityAnnotationKey: `
+						{"nodeAffinity": { "requiredDuringSchedulingIgnoredDuringExecution": {
+							"nodeSelectorTerms": [{
+								"matchExpressions": [{
+									"key": "foo",
+									"operator": "In",
+									"values": ["bar", "value2"]
+								}]
+							}]
+						}}}`,
+					},
+				},
+			},
+			labels: map[string]string{
+				"foo": "bar",
+			},
+			fits: true,
+			test: "Pod with matchExpressions using In operator that matches the existing node",
+		},
+		{
+			pod: &api.Pod{
+				ObjectMeta: api.ObjectMeta{
+					Annotations: map[string]string{
+						api.AffinityAnnotationKey: `
+						{"nodeAffinity": { "requiredDuringSchedulingIgnoredDuringExecution": {
+							"nodeSelectorTerms": [{
+								"matchExpressions": [{
+									"key": "kernel-version",
+									"operator": "Gt",
+									"values": ["2.4"]
+								}]
+							}]
+						}}}`,
+					},
+				},
+			},
+			labels: map[string]string{
+				"kernel-version": "2.6",
+			},
+			fits: true,
+			test: "Pod with matchExpressions using Gt operator that matches the existing node",
+		},
+		{
+			pod: &api.Pod{
+				ObjectMeta: api.ObjectMeta{
+					Annotations: map[string]string{
+						api.AffinityAnnotationKey: `
+						{"nodeAffinity": { "requiredDuringSchedulingIgnoredDuringExecution": {
+							"nodeSelectorTerms": [{
+								"matchExpressions": [{
+									"key": "mem-type",
+									"operator": "NotIn",
+									"values": ["DDR", "DDR2"]
+								}]
+							}]
+						}}}`,
+					},
+				},
+			},
+			labels: map[string]string{
+				"mem-type": "DDR3",
+			},
+			fits: true,
+			test: "Pod with matchExpressions using NotIn operator that matches the existing node",
+		},
+		{
+			pod: &api.Pod{
+				ObjectMeta: api.ObjectMeta{
+					Annotations: map[string]string{
+						api.AffinityAnnotationKey: `
+						{"nodeAffinity": { "requiredDuringSchedulingIgnoredDuringExecution": {
+							"nodeSelectorTerms": [{
+								"matchExpressions": [{
+									"key": "GPU",
+									"operator": "Exists"
+								}]
+							}]
+						}}}`,
+					},
+				},
+			},
+			labels: map[string]string{
+				"GPU": "NVIDIA-GRID-K1",
+			},
+			fits: true,
+			test: "Pod with matchExpressions using Exists operator that matches the existing node",
+		},
+		{
+			pod: &api.Pod{
+				ObjectMeta: api.ObjectMeta{
+					Annotations: map[string]string{
+						api.AffinityAnnotationKey: `
+						{"nodeAffinity": { "requiredDuringSchedulingIgnoredDuringExecution": {
+							"nodeSelectorTerms": [{
+								"matchExpressions": [{
+									"key": "foo",
+									"operator": "In",
+									"values": ["value1", "value2"]
+								}]
+							}]
+						}}}`,
+					},
+				},
+			},
+			labels: map[string]string{
+				"foo": "bar",
+			},
+			fits: false,
+			test: "Pod with affinity that doesn't match the node's labels won't schedule onto the node",
+		},
+		{
+			pod: &api.Pod{
+				ObjectMeta: api.ObjectMeta{
+					Annotations: map[string]string{
+						api.AffinityAnnotationKey: `
+						{"nodeAffinity": { "requiredDuringSchedulingIgnoredDuringExecution": {
+							"nodeSelectorTerms": null
+						}}}`,
+					},
+				},
+			},
+			labels: map[string]string{
+				"foo": "bar",
+			},
+			fits: false,
+			test: "Pod with a nil []NodeSelectorTerm in affinity, can't match the node's labels and won't schedule onto the node",
+		},
+		{
+			pod: &api.Pod{
+				ObjectMeta: api.ObjectMeta{
+					Annotations: map[string]string{
+						api.AffinityAnnotationKey: `
+						{"nodeAffinity": { "requiredDuringSchedulingIgnoredDuringExecution": {
+							"nodeSelectorTerms": []
+						}}}`,
+					},
+				},
+			},
+			labels: map[string]string{
+				"foo": "bar",
+			},
+			fits: false,
+			test: "Pod with an empty []NodeSelectorTerm in affinity, can't match the node's labels and won't schedule onto the node",
+		},
+		{
+			pod: &api.Pod{
+				ObjectMeta: api.ObjectMeta{
+					Annotations: map[string]string{
+						api.AffinityAnnotationKey: `
+						{"nodeAffinity": { "requiredDuringSchedulingIgnoredDuringExecution": {
+							"nodeSelectorTerms": [{}, {}]
+						}}}`,
+					},
+				},
+			},
+			labels: map[string]string{
+				"foo": "bar",
+			},
+			fits: false,
+			test: "Pod with invalid NodeSelectorTerms in affinity will match no objects and won't schedule onto the node",
+		},
+		{
+			pod: &api.Pod{
+				ObjectMeta: api.ObjectMeta{
+					Annotations: map[string]string{
+						api.AffinityAnnotationKey: `
+						{"nodeAffinity": { "requiredDuringSchedulingIgnoredDuringExecution": {
+							"nodeSelectorTerms": [{"matchExpressions": [{}]}]
+						}}}`,
+					},
+				},
+			},
+			labels: map[string]string{
+				"foo": "bar",
+			},
+			fits: false,
+			test: "Pod with empty MatchExpressions (not a valid value) will match no objects and won't schedule onto the node",
+		},
+		{
+			pod: &api.Pod{
+				ObjectMeta: api.ObjectMeta{
+					Annotations: map[string]string{
+						"some-key": "some-value",
+					},
+				},
+			},
+			labels: map[string]string{
+				"foo": "bar",
+			},
+			fits: true,
+			test: "Pod with no Affinity will schedule onto a node",
+		},
+		{
+			pod: &api.Pod{
+				ObjectMeta: api.ObjectMeta{
+					Annotations: map[string]string{
+						api.AffinityAnnotationKey: `
+						{"nodeAffinity": { "requiredDuringSchedulingIgnoredDuringExecution": null
+						}}`,
+					},
+				},
+			},
+			labels: map[string]string{
+				"foo": "bar",
+			},
+			fits: true,
+			test: "Pod with Affinity but nil NodeSelector will schedule onto a node",
+		},
+		{
+			pod: &api.Pod{
+				ObjectMeta: api.ObjectMeta{
+					Annotations: map[string]string{
+						api.AffinityAnnotationKey: `
+						{"nodeAffinity": { "requiredDuringSchedulingIgnoredDuringExecution": {
+							"nodeSelectorTerms": [{
+								"matchExpressions": [{
+									"key": "GPU",
+									"operator": "Exists"
+								}, {
+									"key": "GPU",
+									"operator": "NotIn",
+									"values": ["AMD", "INTER"]
+								}]
+							}]
+						}}}`,
+					},
+				},
+			},
+			labels: map[string]string{
+				"GPU": "NVIDIA-GRID-K1",
+			},
+			fits: true,
+			test: "Pod with multiple matchExpressions ANDed that matches the existing node",
+		},
+		{
+			pod: &api.Pod{
+				ObjectMeta: api.ObjectMeta{
+					Annotations: map[string]string{
+						api.AffinityAnnotationKey: `
+						{"nodeAffinity": { "requiredDuringSchedulingIgnoredDuringExecution": {
+							"nodeSelectorTerms": [{
+								"matchExpressions": [{
+									"key": "GPU",
+									"operator": "Exists"
+								}, {
+									"key": "GPU",
+									"operator": "In",
+									"values": ["AMD", "INTER"]
+								}]
+							}]
+						}}}`,
+					},
+				},
+			},
+			labels: map[string]string{
+				"GPU": "NVIDIA-GRID-K1",
+			},
+			fits: false,
+			test: "Pod with multiple matchExpressions ANDed that doesn't match the existing node",
+		},
+		{
+			pod: &api.Pod{
+				ObjectMeta: api.ObjectMeta{
+					Annotations: map[string]string{
+						api.AffinityAnnotationKey: `
+						{"nodeAffinity": { "requiredDuringSchedulingIgnoredDuringExecution": {
+							"nodeSelectorTerms": [
+								{
+									"matchExpressions": [{
+										"key": "foo",
+										"operator": "In",
+										"values": ["bar", "value2"]
+									}]
+								},
+								{
+									"matchExpressions": [{
+										"key": "diffkey",
+										"operator": "In",
+										"values": ["wrong", "value2"]
+									}]
+								}
+							]
+						}}}`,
+					},
+				},
+			},
+			labels: map[string]string{
+				"foo": "bar",
+			},
+			fits: true,
+			test: "Pod with multiple NodeSelectorTerms ORed in affinity, matches the node's labels and will schedule onto the node",
+		},
+		{
+			pod: &api.Pod{
+				ObjectMeta: api.ObjectMeta{
+					Annotations: map[string]string{
+						api.AffinityAnnotationKey: `
+						{"nodeAffinity": {
+							"requiredDuringSchedulingRequiredDuringExecution": {
+								"nodeSelectorTerms": [{
+									"matchExpressions": [{
+										"key": "foo",
+										"operator": "In",
+										"values": ["bar", "value2"]
+									}]
+								}]
+							},
+							"requiredDuringSchedulingIgnoredDuringExecution": {
+								"nodeSelectorTerms": [{
+									"matchExpressions": [{
+										"key": "foo",
+										"operator": "NotIn",
+										"values": ["bar", "value2"]
+									}]
+								}]
+							}
+						}}`,
+					},
+				},
+			},
+			labels: map[string]string{
+				"foo": "bar",
+			},
+			fits: false,
+			test: "Pod with an Affinity with both requiredDuringSchedulingRequiredDuringExecution and " +
+				"requiredDuringSchedulingIgnoredDuringExecution indicated that don't match the node's labels and won't schedule onto the node",
+		},
+		{
+			pod: &api.Pod{
+				ObjectMeta: api.ObjectMeta{
+					Annotations: map[string]string{
+						api.AffinityAnnotationKey: `
+						{"nodeAffinity": { "requiredDuringSchedulingIgnoredDuringExecution": {
+							"nodeSelectorTerms": [{
+								"matchExpressions": [{
+									"key": "foo",
+									"operator": "Exists"
+								}]
+							}]
+						}}}`,
+					},
+				},
+				Spec: api.PodSpec{
+					NodeSelector: map[string]string{
+						"foo": "bar",
+					},
+				},
+			},
+			labels: map[string]string{
+				"foo": "bar",
+			},
+			fits: true,
+			test: "Pod with an Affinity and a PodSpec.NodeSelector (the old thing that we are deprecating) " +
+				"both satisfied, will schedule onto the node",
+		},
+		{
+			pod: &api.Pod{
+				ObjectMeta: api.ObjectMeta{
+					Annotations: map[string]string{
+						api.AffinityAnnotationKey: `
+						{"nodeAffinity": { "requiredDuringSchedulingIgnoredDuringExecution": {
+							"nodeSelectorTerms": [{
+								"matchExpressions": [{
+									"key": "foo",
+									"operator": "Exists"
+								}]
+							}]
+						}}}`,
+					},
+				},
+				Spec: api.PodSpec{
+					NodeSelector: map[string]string{
+						"foo": "bar",
+					},
+				},
+			},
+			labels: map[string]string{
+				"foo": "barrrrrr",
+			},
+			fits: false,
+			test: "Pod with an Affinity matching the node's labels but with a PodSpec.NodeSelector (the old thing that we are deprecating) " +
+				"that is not satisfied, won't schedule onto the node",
+		},
	}

	for _, test := range tests {
		node := api.Node{ObjectMeta: api.ObjectMeta{Labels: test.labels}}
plugin/pkg/scheduler/algorithm/priorities/node_affinity.go
Normal file
95
plugin/pkg/scheduler/algorithm/priorities/node_affinity.go
Normal file
@ -0,0 +1,95 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2015 The Kubernetes Authors All rights reserved.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package priorities
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/golang/glog"
|
||||||
|
"k8s.io/kubernetes/pkg/api"
|
||||||
|
"k8s.io/kubernetes/pkg/labels"
|
||||||
|
"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm"
|
||||||
|
schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api"
|
||||||
|
)
|
||||||
|
|
||||||
|
type NodeAffinity struct {
|
||||||
|
nodeLister algorithm.NodeLister
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewNodeAffinityPriority(nodeLister algorithm.NodeLister) algorithm.PriorityFunction {
|
||||||
|
nodeAffinity := &NodeAffinity{
|
||||||
|
nodeLister: nodeLister,
|
||||||
|
}
|
||||||
|
return nodeAffinity.CalculateNodeAffinityPriority
|
||||||
|
}
|
||||||
|
|
||||||
|
// CalculateNodeAffinityPriority prioritizes nodes according to node affinity scheduling preferences
|
||||||
|
// indicated in PreferredDuringSchedulingIgnoredDuringExecution. Each time a node match a preferredSchedulingTerm,
|
||||||
|
// it will a get an add of preferredSchedulingTerm.Weight. Thus, the more preferredSchedulingTerms
|
||||||
|
// the node satisfies and the more the preferredSchedulingTerm that is satisfied weights, the higher
|
||||||
|
// score the node gets.
|
||||||
|
func (s *NodeAffinity) CalculateNodeAffinityPriority(pod *api.Pod, machinesToPods map[string][]*api.Pod, podLister algorithm.PodLister, nodeLister algorithm.NodeLister) (schedulerapi.HostPriorityList, error) {
|
||||||
|
|
||||||
|
var maxCount int
|
||||||
|
counts := map[string]int{}
|
||||||
|
|
||||||
|
nodes, err := nodeLister.List()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
affinity, err := api.GetAffinityFromPodAnnotations(pod.Annotations)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// A nil element of PreferredDuringSchedulingIgnoredDuringExecution matches no objects.
|
||||||
|
// An element of PreferredDuringSchedulingIgnoredDuringExecution that refers to an
|
||||||
|
// empty PreferredSchedulingTerm matches all objects.
|
||||||
|
if affinity.NodeAffinity != nil && affinity.NodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution != nil {
|
||||||
|
// Match PreferredDuringSchedulingIgnoredDuringExecution term by term.
|
||||||
|
for _, preferredSchedulingTerm := range affinity.NodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution {
|
||||||
|
if preferredSchedulingTerm.Weight == 0 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
nodeSelector, err := api.NodeSelectorRequirementsAsSelector(preferredSchedulingTerm.Preference.MatchExpressions)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, node := range nodes.Items {
|
||||||
|
if nodeSelector.Matches(labels.Set(node.Labels)) {
|
||||||
|
counts[node.Name] += preferredSchedulingTerm.Weight
|
||||||
|
}
|
||||||
|
|
||||||
|
if counts[node.Name] > maxCount {
|
||||||
|
maxCount = counts[node.Name]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
result := []schedulerapi.HostPriority{}
|
||||||
|
for _, node := range nodes.Items {
|
||||||
|
fScore := float64(0)
|
||||||
|
if maxCount > 0 {
|
||||||
|
fScore = 10 * (float64(counts[node.Name]) / float64(maxCount))
|
||||||
|
}
|
||||||
|
result = append(result, schedulerapi.HostPriority{Host: node.Name, Score: int(fScore)})
|
||||||
|
glog.V(10).Infof("%v -> %v: NodeAffinityPriority, Score: (%d)", pod.Name, node.Name, int(fScore))
|
||||||
|
}
|
||||||
|
return result, nil
|
||||||
|
}
|
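In the priority function above, each node accumulates the weights of the preferred scheduling terms it satisfies, and the totals are then normalized against the best node onto a 0-10 scale as 10 * counts[node] / maxCount, truncated to an int. A standalone sketch of just that arithmetic, using the 2/4/5 weights exercised by the test file below:

package main

import "fmt"

func main() {
	// Hypothetical per-node sums of matched preference weights, mirroring the
	// 2/4/5-weight terms in the test below: machine1 matches only the weight-2 term,
	// machine2 only the weight-4 term, machine5 all three.
	counts := map[string]int{"machine1": 2, "machine2": 4, "machine5": 2 + 4 + 5}

	maxCount := 0
	for _, c := range counts {
		if c > maxCount {
			maxCount = c
		}
	}

	for _, name := range []string{"machine1", "machine2", "machine5"} {
		fScore := 10 * (float64(counts[name]) / float64(maxCount))
		fmt.Printf("%s -> %d\n", name, int(fScore)) // machine1 -> 1, machine2 -> 3, machine5 -> 10
	}
}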
167
plugin/pkg/scheduler/algorithm/priorities/node_affinity_test.go
Normal file
@@ -0,0 +1,167 @@
/*
Copyright 2015 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package priorities

import (
	"reflect"
	"testing"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm"
	schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api"
)

func TestNodeAffinityPriority(t *testing.T) {
	label1 := map[string]string{"foo": "bar"}
	label2 := map[string]string{"key": "value"}
	label3 := map[string]string{"az": "az1"}
	label4 := map[string]string{"abc": "az11", "def": "az22"}
	label5 := map[string]string{"foo": "bar", "key": "value", "az": "az1"}

	affinity1 := map[string]string{
		api.AffinityAnnotationKey: `
		{"nodeAffinity": {"preferredDuringSchedulingIgnoredDuringExecution": [
			{
				"weight": 2,
				"preference": {
					"matchExpressions": [
						{
							"key": "foo",
							"operator": "In", "values": ["bar"]
						}
					]
				}
			}
		]}}`,
	}

	affinity2 := map[string]string{
		api.AffinityAnnotationKey: `
		{"nodeAffinity": {"preferredDuringSchedulingIgnoredDuringExecution": [
			{
				"weight": 2,
				"preference": {"matchExpressions": [
					{
						"key": "foo",
						"operator": "In", "values": ["bar"]
					}
				]}
			},
			{
				"weight": 4,
				"preference": {"matchExpressions": [
					{
						"key": "key",
						"operator": "In", "values": ["value"]
					}
				]}
			},
			{
				"weight": 5,
				"preference": {"matchExpressions": [
					{
						"key": "foo",
						"operator": "In", "values": ["bar"]
					},
					{
						"key": "key",
						"operator": "In", "values": ["value"]
					},
					{
						"key": "az",
						"operator": "In", "values": ["az1"]
					}
				]}
			}
		]}}`,
	}

	tests := []struct {
		pod          *api.Pod
		nodes        []api.Node
		expectedList schedulerapi.HostPriorityList
		test         string
	}{
		{
			pod: &api.Pod{
				ObjectMeta: api.ObjectMeta{
					Annotations: map[string]string{},
				},
			},
			nodes: []api.Node{
				{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: label1}},
				{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: label2}},
				{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: label3}},
			},
			expectedList: []schedulerapi.HostPriority{{"machine1", 0}, {"machine2", 0}, {"machine3", 0}},
			test:         "all machines are same priority as NodeAffinity is nil",
		},
		{
			pod: &api.Pod{
				ObjectMeta: api.ObjectMeta{
					Annotations: affinity1,
				},
			},
			nodes: []api.Node{
				{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: label4}},
				{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: label2}},
				{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: label3}},
			},
			expectedList: []schedulerapi.HostPriority{{"machine1", 0}, {"machine2", 0}, {"machine3", 0}},
			test:         "no machine matches the preferred scheduling requirements in NodeAffinity of the pod, so all machines' priority is zero",
		},
		{
			pod: &api.Pod{
				ObjectMeta: api.ObjectMeta{
					Annotations: affinity1,
				},
			},
			nodes: []api.Node{
				{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: label1}},
				{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: label2}},
				{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: label3}},
			},
			expectedList: []schedulerapi.HostPriority{{"machine1", 10}, {"machine2", 0}, {"machine3", 0}},
			test:         "only machine1 matches the preferred scheduling requirements of pod",
		},
		{
			pod: &api.Pod{
				ObjectMeta: api.ObjectMeta{
					Annotations: affinity2,
				},
			},
			nodes: []api.Node{
				{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: label1}},
				{ObjectMeta: api.ObjectMeta{Name: "machine5", Labels: label5}},
				{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: label2}},
			},
			expectedList: []schedulerapi.HostPriority{{"machine1", 1}, {"machine5", 10}, {"machine2", 3}},
			test:         "all machines match the preferred scheduling requirements of the pod but with different priorities",
		},
	}

	for _, test := range tests {
		nodeAffinity := NodeAffinity{nodeLister: algorithm.FakeNodeLister(api.NodeList{Items: test.nodes})}
		list, err := nodeAffinity.CalculateNodeAffinityPriority(test.pod, nil, nil, algorithm.FakeNodeLister(api.NodeList{Items: test.nodes}))
		if err != nil {
			t.Errorf("unexpected error: %v", err)
		}
		if !reflect.DeepEqual(test.expectedList, list) {
			t.Errorf("%s: \nexpected %#v, \ngot %#v", test.test, test.expectedList, list)
		}
	}
}
@@ -99,5 +99,14 @@ func defaultPriorities() sets.String {
				Weight: 1,
			},
		),
+		factory.RegisterPriorityConfigFactory(
+			"NodeAffinityPriority",
+			factory.PriorityConfigFactory{
+				Function: func(args factory.PluginFactoryArgs) algorithm.PriorityFunction {
+					return priorities.NewNodeAffinityPriority(args.NodeLister)
+				},
+				Weight: 1,
+			},
+		),
	)
}
@@ -18,9 +18,11 @@ package e2e
import (
	"fmt"
+	"path/filepath"
	"time"

	"k8s.io/kubernetes/pkg/api"
+	"k8s.io/kubernetes/pkg/api/errors"
	"k8s.io/kubernetes/pkg/api/resource"
	"k8s.io/kubernetes/pkg/api/unversioned"
	client "k8s.io/kubernetes/pkg/client/unversioned"
@@ -29,6 +31,7 @@ import (

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
+	_ "github.com/stretchr/testify/assert"
)

// Returns a number of currently scheduled and not scheduled Pods.
@@ -392,6 +395,49 @@ var _ = Describe("SchedulerPredicates [Serial]", func() {
		cleanupPods(c, ns)
	})

+	It("validates that a pod with an invalid Affinity is rejected [Conformance]", func() {
+
+		By("Trying to launch a pod with invalid Affinity data.")
+		podName := "without-label"
+		_, err := c.Pods(ns).Create(&api.Pod{
+			TypeMeta: unversioned.TypeMeta{
+				Kind: "Pod",
+			},
+			ObjectMeta: api.ObjectMeta{
+				Name: podName,
+				Annotations: map[string]string{
+					api.AffinityAnnotationKey: `
+					{"nodeAffinity": {
+						"requiredDuringSchedulingRequiredDuringExecution": {
+							"nodeSelectorTerms": [{
+								"matchExpressions": []
+							}]
+						},
+					}}`,
+				},
+			},
+			Spec: api.PodSpec{
+				Containers: []api.Container{
+					{
+						Name:  podName,
+						Image: "gcr.io/google_containers/pause:2.0",
+					},
+				},
+			},
+		})
+
+		if err == nil || !errors.IsInvalid(err) {
+			Failf("Expect error of invalid, got : %v", err)
+		}
+
+		// Wait a bit to allow the scheduler to do its thing if the pod is not rejected.
+		// TODO: this is brittle; there's no guarantee the scheduler will have run in 10 seconds.
+		Logf("Sleeping 10 seconds and crossing our fingers that scheduler will run in that time.")
+		time.Sleep(10 * time.Second)
+
+		cleanupPods(c, ns)
+	})
+
	It("validates that NodeSelector is respected if matching [Conformance]", func() {
		// launch a pod to find a node which can launch a pod. We intentionally do
		// not just take the node list and choose the first of them. Depending on the
@@ -470,4 +516,217 @@ var _ = Describe("SchedulerPredicates [Serial]", func() {
		expectNoError(err)
		Expect(labelPod.Spec.NodeName).To(Equal(nodeName))
	})

+	// Test nodes do not have any of the labels requested below, hence it should be impossible
+	// to schedule a Pod with a non-nil NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.
+	It("validates that NodeAffinity is respected if not matching [Conformance]", func() {
+		By("Trying to schedule Pod with nonempty NodeSelector.")
+		podName := "restricted-pod"
+
+		waitForStableCluster(c)
+
+		_, err := c.Pods(ns).Create(&api.Pod{
+			TypeMeta: unversioned.TypeMeta{
+				Kind: "Pod",
+			},
+			ObjectMeta: api.ObjectMeta{
+				Name:   podName,
+				Labels: map[string]string{"name": "restricted"},
+				Annotations: map[string]string{
+					"scheduler.alpha.kubernetes.io/affinity": `
+					{"nodeAffinity": { "requiredDuringSchedulingIgnoredDuringExecution": {
+						"nodeSelectorTerms": [
+							{
+								"matchExpressions": [{
+									"key": "foo",
+									"operator": "In",
+									"values": ["bar", "value2"]
+								}]
+							},
+							{
+								"matchExpressions": [{
+									"key": "diffkey",
+									"operator": "In",
+									"values": ["wrong", "value2"]
+								}]
+							}
+						]
+					}}}`,
+				},
+			},
+			Spec: api.PodSpec{
+				Containers: []api.Container{
+					{
+						Name:  podName,
+						Image: "gcr.io/google_containers/pause:2.0",
+					},
+				},
+			},
+		})
+		expectNoError(err)
+		// Wait a bit to allow the scheduler to do its thing
+		// TODO: this is brittle; there's no guarantee the scheduler will have run in 10 seconds.
+		Logf("Sleeping 10 seconds and crossing our fingers that scheduler will run in that time.")
+		time.Sleep(10 * time.Second)
+
+		verifyResult(c, podName, ns)
+		cleanupPods(c, ns)
+	})
+
+	// Keep the same steps as the NodeSelector test,
+	// but specify Affinity in Pod.Annotations, instead of NodeSelector.
+	It("validates that required NodeAffinity setting is respected if matching [Conformance]", func() {
+		// launch a pod to find a node which can launch a pod. We intentionally do
+		// not just take the node list and choose the first of them. Depending on the
+		// cluster and the scheduler it might be that a "normal" pod cannot be
+		// scheduled onto it.
+		By("Trying to launch a pod without a label to get a node which can launch it.")
+		podName := "without-label"
+		_, err := c.Pods(ns).Create(&api.Pod{
+			TypeMeta: unversioned.TypeMeta{
+				Kind: "Pod",
+			},
+			ObjectMeta: api.ObjectMeta{
+				Name: podName,
+			},
+			Spec: api.PodSpec{
+				Containers: []api.Container{
+					{
+						Name:  podName,
+						Image: "gcr.io/google_containers/pause:2.0",
+					},
+				},
+			},
+		})
+		expectNoError(err)
+		expectNoError(waitForPodRunningInNamespace(c, podName, ns))
+		pod, err := c.Pods(ns).Get(podName)
+		expectNoError(err)
+
+		nodeName := pod.Spec.NodeName
+		err = c.Pods(ns).Delete(podName, api.NewDeleteOptions(0))
+		expectNoError(err)
+
+		By("Trying to apply a random label on the found node.")
+		k := fmt.Sprintf("kubernetes.io/e2e-%s", string(util.NewUUID()))
+		v := "42"
+		patch := fmt.Sprintf(`{"metadata":{"labels":{"%s":"%s"}}}`, k, v)
+		err = c.Patch(api.MergePatchType).Resource("nodes").Name(nodeName).Body([]byte(patch)).Do().Error()
+		expectNoError(err)
+
+		node, err := c.Nodes().Get(nodeName)
+		expectNoError(err)
+		Expect(node.Labels[k]).To(Equal(v))
+
+		By("Trying to relaunch the pod, now with labels.")
+		labelPodName := "with-labels"
+		_, err = c.Pods(ns).Create(&api.Pod{
+			TypeMeta: unversioned.TypeMeta{
+				Kind: "Pod",
+			},
+			ObjectMeta: api.ObjectMeta{
+				Name: labelPodName,
+				Annotations: map[string]string{
+					"scheduler.alpha.kubernetes.io/affinity": `
+					{"nodeAffinity": { "requiredDuringSchedulingIgnoredDuringExecution": {
+						"nodeSelectorTerms": [
+							{
+								"matchExpressions": [{
+									"key": "kubernetes.io/hostname",
+									"operator": "In",
+									"values": ["` + nodeName + `"]
+								},{
+									"key": "` + k + `",
+									"operator": "In",
+									"values": ["` + v + `"]
+								}]
+							}
+						]
+					}}}`,
+				},
+			},
+			Spec: api.PodSpec{
+				Containers: []api.Container{
+					{
+						Name:  labelPodName,
+						Image: "gcr.io/google_containers/pause:2.0",
+					},
+				},
+			},
+		})
+		expectNoError(err)
+		defer c.Pods(ns).Delete(labelPodName, api.NewDeleteOptions(0))
+
+		// check that pod got scheduled. We intentionally DO NOT check that the
+		// pod is running because this will create a race condition with the
+		// kubelet and the scheduler: the scheduler might have scheduled a pod
+		// already when the kubelet does not know about its new label yet. The
+		// kubelet will then refuse to launch the pod.
+		expectNoError(waitForPodNotPending(c, ns, labelPodName))
+		labelPod, err := c.Pods(ns).Get(labelPodName)
+		expectNoError(err)
+		Expect(labelPod.Spec.NodeName).To(Equal(nodeName))
+	})
+
+	// Verify that an escaped JSON string of NodeAffinity in a YAML PodSpec works.
+	It("validates that embedding the JSON NodeAffinity setting as a string in the annotation value works [Conformance]", func() {
+		// launch a pod to find a node which can launch a pod. We intentionally do
+		// not just take the node list and choose the first of them. Depending on the
+		// cluster and the scheduler it might be that a "normal" pod cannot be
+		// scheduled onto it.
+		By("Trying to launch a pod without a label to get a node which can launch it.")
+		podName := "without-label"
+		_, err := c.Pods(ns).Create(&api.Pod{
+			TypeMeta: unversioned.TypeMeta{
+				Kind: "Pod",
+			},
+			ObjectMeta: api.ObjectMeta{
+				Name: podName,
+			},
+			Spec: api.PodSpec{
+				Containers: []api.Container{
+					{
+						Name:  podName,
+						Image: "gcr.io/google_containers/pause:2.0",
+					},
+				},
+			},
+		})
+		expectNoError(err)
+		expectNoError(waitForPodRunningInNamespace(c, podName, ns))
+		pod, err := c.Pods(ns).Get(podName)
+		expectNoError(err)
+
+		nodeName := pod.Spec.NodeName
+		err = c.Pods(ns).Delete(podName, api.NewDeleteOptions(0))
+		expectNoError(err)
+
+		By("Trying to apply a label with fake az info on the found node.")
+		k := "kubernetes.io/e2e-az-name"
+		v := "e2e-az1"
+		patch := fmt.Sprintf(`{"metadata":{"labels":{"%s":"%s"}}}`, k, v)
+		err = c.Patch(api.MergePatchType).Resource("nodes").Name(nodeName).Body([]byte(patch)).Do().Error()
+		expectNoError(err)
+
+		node, err := c.Nodes().Get(nodeName)
+		expectNoError(err)
+		Expect(node.Labels[k]).To(Equal(v))
+
+		By("Trying to launch a pod with a NodeAffinity setting embedded as a JSON string in the annotation value.")
+		labelPodName := "with-labels"
+		nodeSelectionRoot := filepath.Join(testContext.RepoRoot, "docs/user-guide/node-selection")
+		testPodPath := filepath.Join(nodeSelectionRoot, "pod-with-node-affinity.yaml")
+		runKubectlOrDie("create", "-f", testPodPath, fmt.Sprintf("--namespace=%v", ns))
+		defer c.Pods(ns).Delete(labelPodName, api.NewDeleteOptions(0))
+
+		// check that pod got scheduled. We intentionally DO NOT check that the
+		// pod is running because this will create a race condition with the
+		// kubelet and the scheduler: the scheduler might have scheduled a pod
+		// already when the kubelet does not know about its new label yet. The
+		// kubelet will then refuse to launch the pod.
+		expectNoError(waitForPodNotPending(c, ns, labelPodName))
+		labelPod, err := c.Pods(ns).Get(labelPodName)
+		expectNoError(err)
+		Expect(labelPod.Spec.NodeName).To(Equal(nodeName))
+	})
})