explicit kubelet config key in Node.Spec.ConfigSource.ConfigMap
This makes the Kubelet config key in the ConfigMap an explicit part of the API, so we can stop using magic key names. As part of this change, we are retiring ConfigMapRef in favor of ConfigMap.
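For orientation, a minimal sketch of the shape this gives Node.Spec.ConfigSource. It assumes apiv1 aliases k8s.io/api/core/v1, as in the test file below; the package and helper names are illustrative, not part of this commit:

package config

import (
	apiv1 "k8s.io/api/core/v1"
)

// configMapSource builds the new, explicit form of Node.Spec.ConfigSource:
// the kubelet config key is named in the API instead of being implied by a
// magic key name inside the ConfigMap.
func configMapSource(cm *apiv1.ConfigMap) *apiv1.NodeConfigSource {
	return &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{
		UID:              cm.UID,
		Namespace:        cm.Namespace,
		Name:             cm.Name,
		KubeletConfigKey: "kubelet", // the entry in cm.Data holding the config
	}}
}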
@@ -84,10 +84,12 @@ var _ = framework.KubeDescribe("DynamicKubeletConfiguration [Feature:DynamicKube
 			// were initially set via the locally provisioned configuration.
 			// This is the same strategy several other e2e node tests use.
 			setAndTestKubeletConfigState(f, &configState{desc: "reset to original values",
-				configSource: &apiv1.NodeConfigSource{ConfigMapRef: &apiv1.ObjectReference{
-					UID:       originalConfigMap.UID,
-					Namespace: originalConfigMap.Namespace,
-					Name:      originalConfigMap.Name}},
+				configSource: &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{
+					UID:              originalConfigMap.UID,
+					Namespace:        originalConfigMap.Namespace,
+					Name:             originalConfigMap.Name,
+					KubeletConfigKey: "kubelet",
+				}},
 				expectConfigOk: &apiv1.NodeCondition{Type: apiv1.NodeKubeletConfigOk, Status: apiv1.ConditionTrue,
 					Message: fmt.Sprintf(status.CurRemoteMessageFmt, configMapAPIPath(originalConfigMap)),
 					Reason:  status.CurRemoteOkayReason},
@@ -123,8 +125,8 @@ var _ = framework.KubeDescribe("DynamicKubeletConfiguration [Feature:DynamicKube
 			framework.ExpectNoError(err)
 
 			states := []configState{
-				// Node.Spec.ConfigSource is nil
-				{desc: "Node.Spec.ConfigSource is nil",
+				{
+					desc: "Node.Spec.ConfigSource is nil",
 					configSource: nil,
 					expectConfigOk: &apiv1.NodeCondition{Type: apiv1.NodeKubeletConfigOk, Status: apiv1.ConditionTrue,
 						Message: status.CurLocalMessage,
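For readers following the cases below: configState is the test's own table-entry type. Its declaration isn't part of this diff, but its usage suggests roughly this shape (field types beyond those visible in the diff are assumptions):

// Inferred from usage in this file; a sketch, not the actual declaration.
type configState struct {
	desc           string                  // human-readable name for the case
	configSource   *apiv1.NodeConfigSource // value to set on Node.Spec.ConfigSource
	apierr         string                  // expected apiserver rejection, empty if the update should succeed
	expectConfigOk *apiv1.NodeCondition    // expected KubeletConfigOk node condition
	expectConfig   *kubeletconfig.KubeletConfiguration // expected configz contents, nil to skip the check
	event          bool                    // whether a config-change event is expected
}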
@@ -132,65 +134,107 @@ var _ = framework.KubeDescribe("DynamicKubeletConfiguration [Feature:DynamicKube
 					expectConfig: nil,
 					event:        true,
 				},
-
 				// Node.Spec.ConfigSource has all nil subfields
 				{desc: "Node.Spec.ConfigSource has all nil subfields",
 					configSource: &apiv1.NodeConfigSource{},
 					apierr:       "exactly one reference subfield must be non-nil",
 				},
-
-				// Node.Spec.ConfigSource.ConfigMapRef is partial
-				{desc: "Node.Spec.ConfigSource.ConfigMapRef is partial",
-					configSource: &apiv1.NodeConfigSource{ConfigMapRef: &apiv1.ObjectReference{
-						UID:  "foo",
-						Name: "bar"}}, // missing Namespace
-					apierr: "name, namespace, and UID must all be non-empty",
+				// Node.Spec.ConfigSource.ConfigMap is partial
+				{desc: "Node.Spec.ConfigSource.ConfigMap is partial",
+					configSource: &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{
+						UID:              "foo",
+						Name:             "bar",
+						KubeletConfigKey: "kubelet",
+					}}, // missing Namespace
+					apierr: "spec.configSource.configMap.namespace: Required value: namespace must be set in spec",
 				},
-
-				// Node.Spec.ConfigSource's UID does not align with namespace/name
-				{desc: "Node.Spec.ConfigSource.ConfigMapRef.UID does not align with Namespace/Name",
-					configSource: &apiv1.NodeConfigSource{ConfigMapRef: &apiv1.ObjectReference{UID: "foo",
-						Namespace: correctConfigMap.Namespace,
-						Name:      correctConfigMap.Name}},
+				{desc: "Node.Spec.ConfigSource.ConfigMap.ResourceVersion is illegally specified",
+					configSource: &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{
+						UID:              "foo",
+						Name:             "bar",
+						Namespace:        "baz",
+						ResourceVersion:  "1",
+						KubeletConfigKey: "kubelet",
+					}},
+					apierr: "spec.configSource.configMap.resourceVersion: Forbidden: resourceVersion must not be set in spec",
+				},
+				{desc: "Node.Spec.ConfigSource.ConfigMap has invalid namespace",
+					configSource: &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{
+						UID:              "foo",
+						Name:             "bar",
+						Namespace:        "../baz",
+						KubeletConfigKey: "kubelet",
+					}},
+					apierr: "spec.configSource.configMap.namespace: Invalid value",
+				},
+				{desc: "Node.Spec.ConfigSource.ConfigMap has invalid name",
+					configSource: &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{
+						UID:              "foo",
+						Name:             "../bar",
+						Namespace:        "baz",
+						KubeletConfigKey: "kubelet",
+					}},
+					apierr: "spec.configSource.configMap.name: Invalid value",
+				},
+				{desc: "Node.Spec.ConfigSource.ConfigMap has invalid kubeletConfigKey",
+					configSource: &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{
+						UID:              "foo",
+						Name:             "bar",
+						Namespace:        "baz",
+						KubeletConfigKey: "../qux",
+					}},
+					apierr: "spec.configSource.configMap.kubeletConfigKey: Invalid value",
+				},
+				{
+					desc: "Node.Spec.ConfigSource.ConfigMap.UID does not align with Namespace/Name",
+					configSource: &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{
+						UID:              "foo",
+						Namespace:        correctConfigMap.Namespace,
+						Name:             correctConfigMap.Name,
+						KubeletConfigKey: "kubelet",
+					}},
 					expectConfigOk: &apiv1.NodeCondition{Type: apiv1.NodeKubeletConfigOk, Status: apiv1.ConditionFalse,
 						Message: "",
 						Reason:  fmt.Sprintf(status.FailSyncReasonFmt, fmt.Sprintf(status.FailSyncReasonUIDMismatchFmt, "foo", configMapAPIPath(correctConfigMap), correctConfigMap.UID))},
 					expectConfig: nil,
 					event:        false,
 				},
-
-				// correct
-				{desc: "correct",
-					configSource: &apiv1.NodeConfigSource{ConfigMapRef: &apiv1.ObjectReference{
-						UID:       correctConfigMap.UID,
-						Namespace: correctConfigMap.Namespace,
-						Name:      correctConfigMap.Name}},
+				{
+					desc: "correct",
+					configSource: &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{
+						UID:              correctConfigMap.UID,
+						Namespace:        correctConfigMap.Namespace,
+						Name:             correctConfigMap.Name,
+						KubeletConfigKey: "kubelet",
+					}},
 					expectConfigOk: &apiv1.NodeCondition{Type: apiv1.NodeKubeletConfigOk, Status: apiv1.ConditionTrue,
 						Message: fmt.Sprintf(status.CurRemoteMessageFmt, configMapAPIPath(correctConfigMap)),
 						Reason:  status.CurRemoteOkayReason},
 					expectConfig: correctKC,
 					event:        true,
 				},
-
-				// fail-parse
-				{desc: "fail-parse",
-					configSource: &apiv1.NodeConfigSource{ConfigMapRef: &apiv1.ObjectReference{
-						UID:       failParseConfigMap.UID,
-						Namespace: failParseConfigMap.Namespace,
-						Name:      failParseConfigMap.Name}},
+				{
+					desc: "fail-parse",
+					configSource: &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{
+						UID:              failParseConfigMap.UID,
+						Namespace:        failParseConfigMap.Namespace,
+						Name:             failParseConfigMap.Name,
+						KubeletConfigKey: "kubelet",
+					}},
 					expectConfigOk: &apiv1.NodeCondition{Type: apiv1.NodeKubeletConfigOk, Status: apiv1.ConditionFalse,
 						Message: status.LkgLocalMessage,
 						Reason:  fmt.Sprintf(status.CurFailLoadReasonFmt, configMapAPIPath(failParseConfigMap))},
 					expectConfig: nil,
 					event:        true,
 				},
-
-				// fail-validate
-				{desc: "fail-validate",
-					configSource: &apiv1.NodeConfigSource{ConfigMapRef: &apiv1.ObjectReference{
-						UID:       failValidateConfigMap.UID,
-						Namespace: failValidateConfigMap.Namespace,
-						Name:      failValidateConfigMap.Name}},
+				{
+					desc: "fail-validate",
+					configSource: &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{
+						UID:              failValidateConfigMap.UID,
+						Namespace:        failValidateConfigMap.Namespace,
+						Name:             failValidateConfigMap.Name,
+						KubeletConfigKey: "kubelet",
+					}},
 					expectConfigOk: &apiv1.NodeCondition{Type: apiv1.NodeKubeletConfigOk, Status: apiv1.ConditionFalse,
 						Message: status.LkgLocalMessage,
 						Reason:  fmt.Sprintf(status.CurFailValidateReasonFmt, configMapAPIPath(failValidateConfigMap))},
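Taken together, the new cases pin down the apiserver's validation contract for spec.configSource.configMap: name, namespace, UID, and kubeletConfigKey are required and must be syntactically valid, while resourceVersion is forbidden in the spec. A sketch of how a caller would trip the resourceVersion rule (client-go wiring and names are illustrative; the Update signature matches client-go of this era):

package config

import (
	apiv1 "k8s.io/api/core/v1"
	clientset "k8s.io/client-go/kubernetes"
)

// updateWithForbiddenResourceVersion sets a config source that the apiserver
// should reject with "spec.configSource.configMap.resourceVersion: Forbidden".
func updateWithForbiddenResourceVersion(cs clientset.Interface, node *apiv1.Node) error {
	node.Spec.ConfigSource = &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{
		UID:              "foo",
		Name:             "bar",
		Namespace:        "baz",
		ResourceVersion:  "1", // forbidden: must not be set in spec
		KubeletConfigKey: "kubelet",
	}}
	_, err := cs.CoreV1().Nodes().Update(node)
	return err // expected to be non-nil, carrying the Forbidden message above
}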
@@ -229,10 +273,12 @@ var _ = framework.KubeDescribe("DynamicKubeletConfiguration [Feature:DynamicKube
 			states := []configState{
 				// intended lkg
 				{desc: "intended last-known-good",
-					configSource: &apiv1.NodeConfigSource{ConfigMapRef: &apiv1.ObjectReference{
-						UID:       lkgConfigMap.UID,
-						Namespace: lkgConfigMap.Namespace,
-						Name:      lkgConfigMap.Name}},
+					configSource: &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{
+						UID:              lkgConfigMap.UID,
+						Namespace:        lkgConfigMap.Namespace,
+						Name:             lkgConfigMap.Name,
+						KubeletConfigKey: "kubelet",
+					}},
 					expectConfigOk: &apiv1.NodeCondition{Type: apiv1.NodeKubeletConfigOk, Status: apiv1.ConditionTrue,
 						Message: fmt.Sprintf(status.CurRemoteMessageFmt, configMapAPIPath(lkgConfigMap)),
 						Reason:  status.CurRemoteOkayReason},
@@ -242,10 +288,12 @@ var _ = framework.KubeDescribe("DynamicKubeletConfiguration [Feature:DynamicKube
 
 				// bad config
 				{desc: "bad config",
-					configSource: &apiv1.NodeConfigSource{ConfigMapRef: &apiv1.ObjectReference{
-						UID:       badConfigMap.UID,
-						Namespace: badConfigMap.Namespace,
-						Name:      badConfigMap.Name}},
+					configSource: &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{
+						UID:              badConfigMap.UID,
+						Namespace:        badConfigMap.Namespace,
+						Name:             badConfigMap.Name,
+						KubeletConfigKey: "kubelet",
+					}},
 					expectConfigOk: &apiv1.NodeCondition{Type: apiv1.NodeKubeletConfigOk, Status: apiv1.ConditionFalse,
 						Message: fmt.Sprintf(status.LkgRemoteMessageFmt, configMapAPIPath(lkgConfigMap)),
 						Reason:  fmt.Sprintf(status.CurFailLoadReasonFmt, configMapAPIPath(badConfigMap))},
@@ -259,6 +307,55 @@ var _ = framework.KubeDescribe("DynamicKubeletConfiguration [Feature:DynamicKube
 		})
 	})
 
+	Context("When a remote config becomes the new last-known-good, and then Node.ConfigSource.ConfigMap.KubeletConfigKey is updated to use a new, bad config", func() {
+		It("the Kubelet should report a status and configz indicating that it rolled back to the new last-known-good", func() {
+			const badConfigKey = "bad"
+			var err error
+			// we base the "lkg" configmap off of the current configuration
+			lkgKC := originalKC.DeepCopy()
+			combinedConfigMap := newKubeletConfigMap("dynamic-kubelet-config-test-combined", lkgKC)
+			combinedConfigMap.Data[badConfigKey] = "{0xdeadbeef}"
+			combinedConfigMap, err = f.ClientSet.CoreV1().ConfigMaps("kube-system").Create(combinedConfigMap)
+			framework.ExpectNoError(err)
+
+			states := []configState{
+				// intended lkg
+				{desc: "intended last-known-good",
+					configSource: &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{
+						UID:              combinedConfigMap.UID,
+						Namespace:        combinedConfigMap.Namespace,
+						Name:             combinedConfigMap.Name,
+						KubeletConfigKey: "kubelet",
+					}},
+					expectConfigOk: &apiv1.NodeCondition{Type: apiv1.NodeKubeletConfigOk, Status: apiv1.ConditionTrue,
+						Message: fmt.Sprintf(status.CurRemoteMessageFmt, configMapAPIPath(combinedConfigMap)),
+						Reason:  status.CurRemoteOkayReason},
+					expectConfig: lkgKC,
+					event:        true,
+				},
+
+				// bad config
+				{desc: "bad config",
+					configSource: &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{
+						UID:              combinedConfigMap.UID,
+						Namespace:        combinedConfigMap.Namespace,
+						Name:             combinedConfigMap.Name,
+						KubeletConfigKey: badConfigKey,
+					}},
+					expectConfigOk: &apiv1.NodeCondition{Type: apiv1.NodeKubeletConfigOk, Status: apiv1.ConditionFalse,
+						// TODO(mtaufen): status should be more informative, and report the key being used
+						Message: fmt.Sprintf(status.LkgRemoteMessageFmt, configMapAPIPath(combinedConfigMap)),
+						Reason:  fmt.Sprintf(status.CurFailLoadReasonFmt, configMapAPIPath(combinedConfigMap))},
+					expectConfig: lkgKC,
+					event:        true,
+				},
+			}
+
+			// wait 12 minutes after setting the first config to ensure it has time to pass the trial duration
+			testBothDirections(f, &states[0], states[1:], 12*time.Minute)
+		})
+	})
+
 	// This stress test will help turn up resource leaks across kubelet restarts that can, over time,
 	// break our ability to dynamically update kubelet config
 	Context("When changing the configuration 100 times", func() {
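The new rollback test above hinges on one ConfigMap carrying both a good and a bad payload under different keys, so that only KubeletConfigKey changes between states while the ConfigMap itself, and therefore the last-known-good source, stays the same. A condensed sketch of that fixture (the test builds its good payload via its own newKubeletConfigMap helper; this version is illustrative):

package config

import (
	apiv1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// combinedConfigMap holds a valid config under "kubelet" and a corrupt payload
// under "bad"; flipping KubeletConfigKey between the two keys switches the
// effective config without touching the ConfigMap itself.
func combinedConfigMap(goodKubeletConfigYAML string) *apiv1.ConfigMap {
	return &apiv1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{Name: "dynamic-kubelet-config-test-combined"},
		Data: map[string]string{
			"kubelet": goodKubeletConfigYAML, // the intended last-known-good
			"bad":     "{0xdeadbeef}",        // fails to parse, forcing rollback
		},
	}
}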
@@ -335,7 +436,7 @@ func testBothDirections(f *framework.Framework, first *configState, states []con
 	}
 }
 
-// setAndTestKubeletConfigState tests that after setting the config source, the ConfigOk condition
+// setAndTestKubeletConfigState tests that after setting the config source, the KubeletConfigOk condition
 // and (if appropriate) configuration exposed via configz are as expected.
 // The configuration will be converted to the internal type prior to comparison.
 func setAndTestKubeletConfigState(f *framework.Framework, state *configState, expectEvent bool) {
@@ -479,8 +580,8 @@ func checkEvent(f *framework.Framework, desc string, expect *apiv1.NodeConfigSou
 	// ensure the message is what we expect (including the resource path)
 	expectMessage := fmt.Sprintf(controller.EventMessageFmt, controller.LocalConfigMessage)
 	if expect != nil {
-		if expect.ConfigMapRef != nil {
-			expectMessage = fmt.Sprintf(controller.EventMessageFmt, fmt.Sprintf("/api/v1/namespaces/%s/configmaps/%s", expect.ConfigMapRef.Namespace, expect.ConfigMapRef.Name))
+		if expect.ConfigMap != nil {
+			expectMessage = fmt.Sprintf(controller.EventMessageFmt, fmt.Sprintf("/api/v1/namespaces/%s/configmaps/%s", expect.ConfigMap.Namespace, expect.ConfigMap.Name))
 		}
 	}
 	if expectMessage != recent.Message {
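The event check in this last hunk builds the expected message from the ConfigMap's REST path, a format visible in the diff itself. A minimal mirror of that path construction (helper name illustrative; the test uses its own configMapAPIPath):

package config

import (
	"fmt"

	apiv1 "k8s.io/api/core/v1"
)

// apiPathOf reproduces the path embedded in the expected event message for a
// ConfigMap-based config source.
func apiPathOf(src *apiv1.ConfigMapNodeConfigSource) string {
	return fmt.Sprintf("/api/v1/namespaces/%s/configmaps/%s", src.Namespace, src.Name)
}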