Improve dynamic kubelet config e2e node test and fix bugs
Rather than just changing the config once to see if dynamic kubelet config at-least-sort-of-works, this extends the test to check that the Kubelet reports the expected Node condition and the expected configuration values after several possible state transitions.

Additionally, this adds a stress test that changes the configuration 100 times. It is possible for resource leaks across Kubelet restarts to eventually prevent the Kubelet from restarting. For example, this test revealed that cAdvisor's leaking journalctl processes (see https://github.com/google/cadvisor/issues/1725) could break dynamic kubelet config. This test will help reveal such problems earlier.

This commit also makes better use of const strings and fixes a few bugs that the new testing turned up.

Related issue: #50217
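To make the stress behavior concrete, the core of such a test is a loop roughly like the following (a minimal sketch, not the committed test code: the function name stressKubeletConfig and the two-config alternation are illustrative, and it assumes the setKubeletConfiguration helper from the diff below):

// Flip between two KubeletConfigurations many times, requiring the Kubelet
// to come back healthy after every restart. Leaks that accumulate across
// restarts (e.g. cAdvisor's leaked journalctl processes) eventually surface
// as a timeout or error inside setKubeletConfiguration.
func stressKubeletConfig(f *framework.Framework, a, b *kubeletconfig.KubeletConfiguration) {
	for i := 0; i < 50; i++ { // 2 changes per iteration = 100 config changes
		for _, cfg := range []*kubeletconfig.KubeletConfiguration{a, b} {
			framework.ExpectNoError(setKubeletConfiguration(f, cfg))
		}
	}
}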
@@ -29,10 +29,9 @@ import (
 	"github.com/golang/glog"

-	"k8s.io/api/core/v1"
+	apiv1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
-	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/client-go/kubernetes/scheme"
 	"k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig"
@@ -133,6 +132,7 @@ func isKubeletConfigEnabled(f *framework.Framework) (bool, error) {
 // Creates or updates the configmap for KubeletConfiguration, waits for the Kubelet to restart
 // with the new configuration. Returns an error if the configuration after waiting for restartGap
 // doesn't match what you attempted to set, or if the dynamic configuration feature is disabled.
+// You should only call this from serial tests.
 func setKubeletConfiguration(f *framework.Framework, kubeCfg *kubeletconfig.KubeletConfiguration) error {
 	const (
 		restartGap = 40 * time.Second
@@ -140,43 +140,36 @@ func setKubeletConfiguration(f *framework.Framework, kubeCfg *kubeletconfig.Kube
 	)

 	// make sure Dynamic Kubelet Configuration feature is enabled on the Kubelet we are about to reconfigure
-	configEnabled, err := isKubeletConfigEnabled(f)
-	if err != nil {
-		return fmt.Errorf("could not determine whether 'DynamicKubeletConfig' feature is enabled, err: %v", err)
-	}
-	if !configEnabled {
+	if configEnabled, err := isKubeletConfigEnabled(f); err != nil {
+		return err
+	} else if !configEnabled {
 		return fmt.Errorf("The Dynamic Kubelet Configuration feature is not enabled.\n" +
 			"Pass --feature-gates=DynamicKubeletConfig=true to the Kubelet to enable this feature.\n" +
 			"For `make test-e2e-node`, you can set `TEST_ARGS='--feature-gates=DynamicKubeletConfig=true'`.")
 	}
-
-	nodeclient := f.ClientSet.CoreV1().Nodes()

 	// create the ConfigMap with the new configuration
 	cm, err := createConfigMap(f, kubeCfg)
 	if err != nil {
 		return err
 	}

-	// create the correct reference object
-	src := &v1.NodeConfigSource{
-		ConfigMapRef: &v1.ObjectReference{
+	// create the reference and set Node.Spec.ConfigSource
+	src := &apiv1.NodeConfigSource{
+		ConfigMapRef: &apiv1.ObjectReference{
 			Namespace: "kube-system",
 			Name:      cm.Name,
 			UID:       cm.UID,
 		},
 	}

-	// serialize the new node config source
-	raw, err := json.Marshal(src)
-	framework.ExpectNoError(err)
-	data := []byte(fmt.Sprintf(`{"spec":{"configSource":%s}}`, raw))
-
-	// patch the node
-	_, err = nodeclient.Patch(framework.TestContext.NodeName,
-		types.StrategicMergePatchType,
-		data)
-	framework.ExpectNoError(err)
+	// set the source, retry a few times in case we are competing with other writers
+	Eventually(func() error {
+		if err := setNodeConfigSource(f, src); err != nil {
+			return err
+		}
+		return nil
+	}, time.Minute, time.Second).Should(BeNil())

 	// poll for new config, for a maximum wait of restartGap
 	Eventually(func() error {
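For context, a call site for the rewritten helper might look like the following (hypothetical usage, not part of this diff; oldCfg stands for the configuration currently in effect, and MaxPods is just an easily observable field to change):

// Mutate a copy of the current config and round-trip it through the helper.
newCfg := *oldCfg                   // hypothetical: copy of the current KubeletConfiguration
newCfg.MaxPods = oldCfg.MaxPods + 1 // illustrative change; any observable field works
framework.ExpectNoError(setKubeletConfiguration(f, &newCfg))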
@@ -194,6 +187,41 @@ func setKubeletConfiguration(f *framework.Framework, kubeCfg *kubeletconfig.Kube
 		return nil
 	}

+// sets the current node's configSource, this should only be called from Serial tests
+func setNodeConfigSource(f *framework.Framework, source *apiv1.NodeConfigSource) error {
+	// since this is a serial test, we just get the node, change the source, and then update it
+	// this prevents any issues with the patch API from affecting the test results
+	nodeclient := f.ClientSet.CoreV1().Nodes()
+
+	// get the node
+	node, err := nodeclient.Get(framework.TestContext.NodeName, metav1.GetOptions{})
+	if err != nil {
+		return err
+	}
+
+	// set the new source
+	node.Spec.ConfigSource = source
+
+	// update the node with the new source
+	_, err = nodeclient.Update(node)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// getConfigOKCondition returns the first NodeCondition in `cs` with Type == apiv1.NodeConfigOK,
+// or nil if no such condition exists.
+func getConfigOKCondition(cs []apiv1.NodeCondition) *apiv1.NodeCondition {
+	for i := range cs {
+		if cs[i].Type == apiv1.NodeConfigOK {
+			return &cs[i]
+		}
+	}
+	return nil
+}
+
 // Causes the test to fail, or returns a status 200 response from the /configz endpoint
 func pollConfigz(timeout time.Duration, pollInterval time.Duration) *http.Response {
 	endpoint := fmt.Sprintf("http://127.0.0.1:8080/api/v1/proxy/nodes/%s/configz", framework.TestContext.NodeName)
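A typical assertion against the new getConfigOKCondition helper could then be written in the same Eventually style the file already uses (a sketch; the timeouts and message formatting are illustrative, not part of this diff):

// Poll until the node reports a ConfigOK condition with Status == True.
Eventually(func() error {
	node, err := f.ClientSet.CoreV1().Nodes().Get(framework.TestContext.NodeName, metav1.GetOptions{})
	if err != nil {
		return err
	}
	cond := getConfigOKCondition(node.Status.Conditions)
	if cond == nil {
		return fmt.Errorf("node %q has no %s condition", node.Name, apiv1.NodeConfigOK)
	}
	if cond.Status != apiv1.ConditionTrue {
		return fmt.Errorf("%s condition is %q: %s", apiv1.NodeConfigOK, cond.Status, cond.Message)
	}
	return nil
}, time.Minute, time.Second).Should(BeNil())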
@@ -248,8 +276,8 @@ func decodeConfigz(resp *http.Response) (*kubeletconfig.KubeletConfiguration, er
 }

 // creates a configmap containing kubeCfg in kube-system namespace
-func createConfigMap(f *framework.Framework, internalKC *kubeletconfig.KubeletConfiguration) (*v1.ConfigMap, error) {
-	cmap := makeKubeletConfigMap(internalKC)
+func createConfigMap(f *framework.Framework, internalKC *kubeletconfig.KubeletConfiguration) (*apiv1.ConfigMap, error) {
+	cmap := newKubeletConfigMap("testcfg", internalKC)
 	cmap, err := f.ClientSet.Core().ConfigMaps("kube-system").Create(cmap)
 	if err != nil {
 		return nil, err
@@ -257,8 +285,8 @@ func createConfigMap(f *framework.Framework, internalKC *kubeletconfig.KubeletCo
 	return cmap, nil
 }

-// constructs a ConfigMap, populating one of its keys with the KubeletConfiguration. Uses GenerateName.
-func makeKubeletConfigMap(internalKC *kubeletconfig.KubeletConfiguration) *v1.ConfigMap {
+// constructs a ConfigMap, populating one of its keys with the KubeletConfiguration. Always uses GenerateName to generate a suffix.
+func newKubeletConfigMap(name string, internalKC *kubeletconfig.KubeletConfiguration) *apiv1.ConfigMap {
 	scheme, _, err := kubeletscheme.NewSchemeAndCodecs()
 	framework.ExpectNoError(err)
@@ -272,8 +300,8 @@ func makeKubeletConfigMap(internalKC *kubeletconfig.KubeletConfiguration) *v1.Co
 	data, err := runtime.Encode(encoder, versioned)
 	framework.ExpectNoError(err)

-	cmap := &v1.ConfigMap{
-		ObjectMeta: metav1.ObjectMeta{GenerateName: "testcfg"},
+	cmap := &apiv1.ConfigMap{
+		ObjectMeta: metav1.ObjectMeta{GenerateName: name},
 		Data: map[string]string{
 			"kubelet": string(data),
 		},
@@ -293,7 +321,7 @@ func logNodeEvents(f *framework.Framework) {
 	framework.ExpectNoError(err)
 }

-func getLocalNode(f *framework.Framework) *v1.Node {
+func getLocalNode(f *framework.Framework) *apiv1.Node {
 	nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
 	Expect(len(nodeList.Items)).To(Equal(1), "Unexpected number of node objects for node e2e. Expects only one node.")
 	return &nodeList.Items[0]