Add support for pre-allocated hugepages with 2 sizes
Remove the node-level validation of pre-allocated hugepages. This validation is currently the only thing preventing pre-allocated huge pages from being used in more than one size, and we now have quite a few reports from real users that this feature would be welcome.
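As a rough sketch of what this enables (illustrative only, written in the style of the added tests and assuming the same package context and imports they use — core, metav1, resource; the node and names below are made up for the example), a node that reports pre-allocated hugepages in two sizes is rejected only while the new ValidateSingleHugePageResource option is enabled:

// Illustrative sketch, not part of this commit.
node := core.Node{
	ObjectMeta: metav1.ObjectMeta{Name: "hugepage-node"},
	Status: core.NodeStatus{
		Capacity: core.ResourceList{
			// Pre-allocated hugepages in two different sizes.
			core.ResourceName("hugepages-2Mi"): resource.MustParse("1Gi"),
			core.ResourceName("hugepages-1Gi"): resource.MustParse("2Gi"),
		},
	},
}

// With the single-size check disabled, the two-size node is accepted.
if errs := ValidateNode(&node, NodeValidationOptions{ValidateSingleHugePageResource: false}); len(errs) != 0 {
	// not expected
}

// With the check enabled (the pre-existing behaviour), the same node is rejected
// with "may not have pre-allocated hugepages for multiple page sizes".
if errs := ValidateNode(&node, NodeValidationOptions{ValidateSingleHugePageResource: true}); len(errs) == 0 {
	// not expected
}

Callers that want the old behaviour keep the option enabled; the check itself is unchanged and only moves behind the option.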
@@ -4424,8 +4424,15 @@ func ValidateNodeSpecificAnnotations(annotations map[string]string, fldPath *fie
 	return allErrs
 }
 
+// NodeValidationOptions contains the different settings for node validation
+type NodeValidationOptions struct {
+	// Should node a spec containing more than one huge page resource (with different sizes)
+	// with pre-allocated memory trigger validation errors
+	ValidateSingleHugePageResource bool
+}
+
 // ValidateNode tests if required fields in the node are set.
-func ValidateNode(node *core.Node) field.ErrorList {
+func ValidateNode(node *core.Node, opts NodeValidationOptions) field.ErrorList {
 	fldPath := field.NewPath("metadata")
 	allErrs := ValidateObjectMeta(&node.ObjectMeta, false, ValidateNodeName, fldPath)
 	allErrs = append(allErrs, ValidateNodeSpecificAnnotations(node.ObjectMeta.Annotations, fldPath.Child("annotations"))...)
@@ -4436,7 +4443,7 @@ func ValidateNode(node *core.Node) field.ErrorList {
 	// Only validate spec.
 	// All status fields are optional and can be updated later.
 	// That said, if specified, we need to ensure they are valid.
-	allErrs = append(allErrs, ValidateNodeResources(node)...)
+	allErrs = append(allErrs, ValidateNodeResources(node, opts)...)
 
 	// validate PodCIDRS only if we need to
 	if len(node.Spec.PodCIDRs) > 0 {
@@ -4476,13 +4483,33 @@ func ValidateNode(node *core.Node) field.ErrorList {
 }
 
 // ValidateNodeResources is used to make sure a node has valid capacity and allocatable values.
-func ValidateNodeResources(node *core.Node) field.ErrorList {
+func ValidateNodeResources(node *core.Node, opts NodeValidationOptions) field.ErrorList {
+	allErrs := field.ErrorList{}
+	if opts.ValidateSingleHugePageResource {
+		allErrs = append(allErrs, ValidateNodeSingleHugePageResources(node)...)
+	}
+
+	// Validate resource quantities in capacity.
+	for k, v := range node.Status.Capacity {
+		resPath := field.NewPath("status", "capacity", string(k))
+		allErrs = append(allErrs, ValidateResourceQuantityValue(string(k), v, resPath)...)
+	}
+
+	// Validate resource quantities in allocatable.
+	for k, v := range node.Status.Allocatable {
+		resPath := field.NewPath("status", "allocatable", string(k))
+		allErrs = append(allErrs, ValidateResourceQuantityValue(string(k), v, resPath)...)
+	}
+	return allErrs
+}
+
+// ValidateNodeHugePageResources is used to make sure a node has valid capacity and allocatable values for the huge page resources.
+func ValidateNodeSingleHugePageResources(node *core.Node) field.ErrorList {
 	allErrs := field.ErrorList{}
 	// Validate resource quantities in capacity.
 	hugePageSizes := sets.NewString()
 	for k, v := range node.Status.Capacity {
 		resPath := field.NewPath("status", "capacity", string(k))
-		allErrs = append(allErrs, ValidateResourceQuantityValue(string(k), v, resPath)...)
 		// track any huge page size that has a positive value
 		if helper.IsHugePageResourceName(k) && v.Value() > int64(0) {
 			hugePageSizes.Insert(string(k))
@@ -4495,7 +4522,6 @@ func ValidateNodeResources(node *core.Node) field.ErrorList {
 	hugePageSizes = sets.NewString()
 	for k, v := range node.Status.Allocatable {
 		resPath := field.NewPath("status", "allocatable", string(k))
-		allErrs = append(allErrs, ValidateResourceQuantityValue(string(k), v, resPath)...)
 		// track any huge page size that has a positive value
 		if helper.IsHugePageResourceName(k) && v.Value() > int64(0) {
 			hugePageSizes.Insert(string(k))
@@ -4508,7 +4534,7 @@ func ValidateNodeResources(node *core.Node) field.ErrorList {
 }
 
 // ValidateNodeUpdate tests to make sure a node update can be applied. Modifies oldNode.
-func ValidateNodeUpdate(node, oldNode *core.Node) field.ErrorList {
+func ValidateNodeUpdate(node, oldNode *core.Node, opts NodeValidationOptions) field.ErrorList {
 	fldPath := field.NewPath("metadata")
 	allErrs := ValidateObjectMetaUpdate(&node.ObjectMeta, &oldNode.ObjectMeta, fldPath)
 	allErrs = append(allErrs, ValidateNodeSpecificAnnotations(node.ObjectMeta.Annotations, fldPath.Child("annotations"))...)
@@ -4519,7 +4545,7 @@ func ValidateNodeUpdate(node, oldNode *core.Node) field.ErrorList {
 	// allErrs = append(allErrs, field.Invalid("status", node.Status, "must be empty"))
 	// }
 
-	allErrs = append(allErrs, ValidateNodeResources(node)...)
+	allErrs = append(allErrs, ValidateNodeResources(node, opts)...)
 
 	// Validate no duplicate addresses in node status.
 	addresses := make(map[core.NodeAddress]bool)
@@ -10812,6 +10812,9 @@ func TestValidateReplicationController(t *testing.T) {
 }
 
 func TestValidateNode(t *testing.T) {
+	opts := NodeValidationOptions{
+		ValidateSingleHugePageResource: true,
+	}
 	validSelector := map[string]string{"a": "b"}
 	invalidSelector := map[string]string{"NoUppercaseOrSpecialCharsLike=Equals": "b"}
 	successCases := []core.Node{
@@ -10918,7 +10921,7 @@ func TestValidateNode(t *testing.T) {
 		},
 	}
 	for _, successCase := range successCases {
-		if errs := ValidateNode(&successCase); len(errs) != 0 {
+		if errs := ValidateNode(&successCase, opts); len(errs) != 0 {
 			t.Errorf("expected success: %v", errs)
 		}
 	}
@@ -11142,7 +11145,7 @@ func TestValidateNode(t *testing.T) {
 		},
 	}
 	for k, v := range errorCases {
-		errs := ValidateNode(&v)
+		errs := ValidateNode(&v, opts)
 		if len(errs) == 0 {
 			t.Errorf("expected failure for %s", k)
 		}
@@ -11169,7 +11172,134 @@ func TestValidateNode(t *testing.T) {
 	}
 }
 
+func TestNodeValidationOptions(t *testing.T) {
+	updateTests := []struct {
+		oldNode core.Node
+		node    core.Node
+		opts    NodeValidationOptions
+		valid   bool
+	}{
+		{core.Node{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: "validate-single-hugepages",
+			},
+			Status: core.NodeStatus{
+				Capacity: core.ResourceList{
+					core.ResourceName("hugepages-2Mi"): resource.MustParse("1Gi"),
+					core.ResourceName("hugepages-1Gi"): resource.MustParse("0"),
+				},
+			},
+		}, core.Node{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: "validate-single-hugepages",
+			},
+			Status: core.NodeStatus{
+				Capacity: core.ResourceList{
+					core.ResourceName("hugepages-2Mi"): resource.MustParse("1Gi"),
+					core.ResourceName("hugepages-1Gi"): resource.MustParse("2Gi"),
+				},
+			},
+		}, NodeValidationOptions{ValidateSingleHugePageResource: true}, false},
+		{core.Node{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: "validate-single-hugepages",
+			},
+			Status: core.NodeStatus{
+				Capacity: core.ResourceList{
+					core.ResourceName("hugepages-2Mi"): resource.MustParse("1Gi"),
+					core.ResourceName("hugepages-1Gi"): resource.MustParse("1Gi"),
+				},
+			},
+		}, core.Node{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: "validate-single-hugepages",
+			},
+			Status: core.NodeStatus{
+				Capacity: core.ResourceList{
+					core.ResourceName("hugepages-2Mi"): resource.MustParse("1Gi"),
+					core.ResourceName("hugepages-1Gi"): resource.MustParse("1Gi"),
+				},
+			},
+		}, NodeValidationOptions{ValidateSingleHugePageResource: true}, false},
+		{core.Node{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: "not-validate-single-hugepages",
+			},
+			Status: core.NodeStatus{
+				Capacity: core.ResourceList{
+					core.ResourceName("hugepages-2Mi"): resource.MustParse("1Gi"),
+					core.ResourceName("hugepages-1Gi"): resource.MustParse("0"),
+				},
+			},
+		}, core.Node{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: "not-validate-single-hugepages",
+			},
+			Status: core.NodeStatus{
+				Capacity: core.ResourceList{
+					core.ResourceName("hugepages-2Mi"): resource.MustParse("1Gi"),
+					core.ResourceName("hugepages-1Gi"): resource.MustParse("2Gi"),
+				},
+			},
+		}, NodeValidationOptions{ValidateSingleHugePageResource: false}, true},
+	}
+	for i, test := range updateTests {
+		test.oldNode.ObjectMeta.ResourceVersion = "1"
+		test.node.ObjectMeta.ResourceVersion = "1"
+		errs := ValidateNodeUpdate(&test.node, &test.oldNode, test.opts)
+		if test.valid && len(errs) > 0 {
+			t.Errorf("%d: Unexpected error: %v", i, errs)
+			t.Logf("%#v vs %#v", test.oldNode.ObjectMeta, test.node.ObjectMeta)
+		}
+		if !test.valid && len(errs) == 0 {
+			t.Errorf("%d: Unexpected non-error", i)
+			t.Logf("%#v vs %#v", test.oldNode.ObjectMeta, test.node.ObjectMeta)
+		}
+	}
+	nodeTests := []struct {
+		node  core.Node
+		opts  NodeValidationOptions
+		valid bool
+	}{
+		{core.Node{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: "validate-single-hugepages",
+			},
+			Status: core.NodeStatus{
+				Capacity: core.ResourceList{
+					core.ResourceName("hugepages-2Mi"): resource.MustParse("1Gi"),
+					core.ResourceName("hugepages-1Gi"): resource.MustParse("2Gi"),
+				},
+			},
+		}, NodeValidationOptions{ValidateSingleHugePageResource: true}, false},
+		{core.Node{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: "not-validate-single-hugepages",
+			},
+			Status: core.NodeStatus{
+				Capacity: core.ResourceList{
+					core.ResourceName("hugepages-2Mi"):  resource.MustParse("1Gi"),
+					core.ResourceName("hugepages-1Gi"):  resource.MustParse("2Gi"),
+					core.ResourceName("hugepages-64Ki"): resource.MustParse("2Gi"),
+				},
+			},
+		}, NodeValidationOptions{ValidateSingleHugePageResource: false}, true},
+	}
+	for i, test := range nodeTests {
+		test.node.ObjectMeta.ResourceVersion = "1"
+		errs := ValidateNode(&test.node, test.opts)
+		if test.valid && len(errs) > 0 {
+			t.Errorf("%d: Unexpected error: %v", i, errs)
+		}
+		if !test.valid && len(errs) == 0 {
+			t.Errorf("%d: Unexpected non-error", i)
+		}
+	}
+}
 func TestValidateNodeUpdate(t *testing.T) {
+	opts := NodeValidationOptions{
+		ValidateSingleHugePageResource: true,
+	}
 	tests := []struct {
 		oldNode core.Node
 		node    core.Node
@@ -11593,7 +11723,7 @@ func TestValidateNodeUpdate(t *testing.T) {
 	for i, test := range tests {
 		test.oldNode.ObjectMeta.ResourceVersion = "1"
 		test.node.ObjectMeta.ResourceVersion = "1"
-		errs := ValidateNodeUpdate(&test.node, &test.oldNode)
+		errs := ValidateNodeUpdate(&test.node, &test.oldNode, opts)
 		if test.valid && len(errs) > 0 {
 			t.Errorf("%d: Unexpected error: %v", i, errs)
 			t.Logf("%#v vs %#v", test.oldNode.ObjectMeta, test.node.ObjectMeta)
@@ -14779,6 +14909,9 @@ func makeNode(nodeName string, podCIDRs []string) core.Node {
 	}
 }
 func TestValidateNodeCIDRs(t *testing.T) {
+	opts := NodeValidationOptions{
+		ValidateSingleHugePageResource: true,
+	}
 	testCases := []struct {
 		expectError bool
 		node        core.Node
@@ -14839,7 +14972,7 @@ func TestValidateNodeCIDRs(t *testing.T) {
 		},
 	}
 	for _, testCase := range testCases {
-		errs := ValidateNode(&testCase.node)
+		errs := ValidateNode(&testCase.node, opts)
 		if len(errs) == 0 && testCase.expectError {
 			t.Errorf("expected failure for %s, but there were none", testCase.node.Name)
 			return