Merge pull request #125434 from ffromani/e2e-node-annotate-pods
e2e: node: add feature to annotate pods with owning code
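
For illustration only (not part of this patch): once owner tracking is enabled, every pod created through the framework's PodClient carries an annotation mapping it back to the test code that created it. A minimal sketch of what a test author writes, assuming a typical node e2e test; the function, pod name, and image below are hypothetical:

package demo // hypothetical example, not part of the patch

import (
	"context"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"k8s.io/kubernetes/test/e2e/framework"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

// createTrackedPod creates a pod through the framework client; with owner
// tracking enabled, the created pod carries the "owner.test" annotation
// holding a "sourcecodepath:linenumber" value identifying the owning test.
func createTrackedPod(ctx context.Context, f *framework.Framework) *v1.Pod {
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "owner-demo"},
		Spec: v1.PodSpec{
			Containers: []v1.Container{{Name: "c", Image: "registry.k8s.io/pause:3.9"}},
		},
	}
	return e2epod.NewPodClient(f).Create(ctx, pod)
}
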
test/e2e/framework/pod/pod_client.go:

@@ -36,6 +36,7 @@ import (
 	"k8s.io/kubectl/pkg/util/podutils"
 
 	"github.com/onsi/ginkgo/v2"
+	ginkgotypes "github.com/onsi/ginkgo/v2/types"
 	"github.com/onsi/gomega"
 
 	"k8s.io/kubernetes/test/e2e/framework"
@@ -56,6 +57,19 @@ const (
 
 	// it is copied from k8s.io/kubernetes/pkg/kubelet/sysctl
 	forbiddenReason = "SysctlForbidden"
+
+	// which test created this pod?
+	AnnotationTestOwner = "owner.test"
+)
+
+// global flags so we can enable features per-suite instead of per-client.
+var (
+	// GlobalOwnerTracking controls whether newly created PodClients should automatically
+	// annotate the pod with the owning test, identified by "sourcecodepath:linenumber".
+	// Annotating pods this way is useful for troubleshooting tests which do insufficient cleanup.
+	// Default is false to maximize backward compatibility.
+	// See also: WithOwnerTracking, AnnotationTestOwner
+	GlobalOwnerTracking bool
 )
 
 // ImagePrePullList is the images used in the current test suite. It should be initialized in test suite and
@@ -68,9 +82,10 @@ var ImagePrePullList sets.String
 // node e2e pod scheduling.
 func NewPodClient(f *framework.Framework) *PodClient {
 	return &PodClient{
-		f:            f,
-		PodInterface: f.ClientSet.CoreV1().Pods(f.Namespace.Name),
-		namespace:    f.Namespace.Name,
+		f:             f,
+		PodInterface:  f.ClientSet.CoreV1().Pods(f.Namespace.Name),
+		namespace:     f.Namespace.Name,
+		ownerTracking: GlobalOwnerTracking,
 	}
 }
@@ -79,9 +94,10 @@ func NewPodClient(f *framework.Framework) *PodClient {
 // node e2e pod scheduling.
 func PodClientNS(f *framework.Framework, namespace string) *PodClient {
 	return &PodClient{
-		f:            f,
-		PodInterface: f.ClientSet.CoreV1().Pods(namespace),
-		namespace:    namespace,
+		f:             f,
+		PodInterface:  f.ClientSet.CoreV1().Pods(namespace),
+		namespace:     namespace,
+		ownerTracking: GlobalOwnerTracking,
 	}
 }
@@ -89,19 +105,34 @@ func PodClientNS(f *framework.Framework, namespace string) *PodClient {
 type PodClient struct {
 	f *framework.Framework
 	v1core.PodInterface
-	namespace string
+	namespace     string
+	ownerTracking bool
+}
+
+// WithOwnerTracking controls the automatic addition of an annotation recording the code
+// location which created a pod. This is helpful when troubleshooting e2e tests (like e2e_node)
+// which leak pods because of insufficient cleanup.
+// Note the value receiver: the method mutates a shallow clone, not the original client.
+// The default is the value of GlobalOwnerTracking *when the client was created*.
+func (c PodClient) WithOwnerTracking(value bool) *PodClient {
+	c.ownerTracking = value
+	return &c
 }
 
 // Create creates a new pod according to the framework specifications (don't wait for it to start).
 func (c *PodClient) Create(ctx context.Context, pod *v1.Pod) *v1.Pod {
+	ginkgo.GinkgoHelper()
 	c.mungeSpec(pod)
+	c.setOwnerAnnotation(pod)
 	p, err := c.PodInterface.Create(ctx, pod, metav1.CreateOptions{})
 	framework.ExpectNoError(err, "Error creating Pod")
 	return p
 
 }
 
 // CreateSync creates a new pod according to the framework specifications, and wait for it to start and be running and ready.
 func (c *PodClient) CreateSync(ctx context.Context, pod *v1.Pod) *v1.Pod {
+	ginkgo.GinkgoHelper()
 	p := c.Create(ctx, pod)
 	framework.ExpectNoError(WaitTimeoutForPodReadyInNamespace(ctx, c.f.ClientSet, p.Name, c.namespace, framework.PodStartTimeout))
 	// Get the newest pod after it becomes running and ready, some status may change after pod created, such as pod ip.
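
Because WithOwnerTracking returns a fresh copy, a single test can override the suite-wide default without side effects on other clients. A hypothetical usage sketch (the helper name is illustrative, not from the patch):

package demo // hypothetical example

import (
	"k8s.io/kubernetes/test/e2e/framework"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

// newUntrackedPodClient opts this one client out of owner tracking,
// regardless of the e2epod.GlobalOwnerTracking suite default.
func newUntrackedPodClient(f *framework.Framework) *e2epod.PodClient {
	return e2epod.NewPodClient(f).WithOwnerTracking(false)
}
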
@@ -112,6 +143,7 @@ func (c *PodClient) CreateSync(ctx context.Context, pod *v1.Pod) *v1.Pod {
 
 // CreateBatch create a batch of pods. All pods are created before waiting.
 func (c *PodClient) CreateBatch(ctx context.Context, pods []*v1.Pod) []*v1.Pod {
+	ginkgo.GinkgoHelper()
 	ps := make([]*v1.Pod, len(pods))
 	var wg sync.WaitGroup
 	for i, pod := range pods {
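
The ginkgo.GinkgoHelper() calls added to Create, CreateSync, and CreateBatch mark these methods as helpers, so Ginkgo attributes a failure inside them to the calling test rather than to the helper's own line. A standalone sketch of that mechanism, with hypothetical names:

package demo_test // hypothetical example

import (
	"testing"

	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"
)

// expectPositive is a test helper: GinkgoHelper makes a failing assertion
// below report the caller's file and line, not this function's.
func expectPositive(n int) {
	ginkgo.GinkgoHelper()
	gomega.Expect(n).To(gomega.BeNumerically(">", 0))
}

var _ = ginkgo.It("reports failures at the call site", func() {
	expectPositive(42)
})

func TestDemo(t *testing.T) {
	gomega.RegisterFailHandler(ginkgo.Fail)
	ginkgo.RunSpecs(t, "Demo Suite")
}
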
@@ -192,6 +224,19 @@ func (c *PodClient) DeleteSync(ctx context.Context, name string, options metav1.
 	framework.ExpectNoError(WaitForPodNotFoundInNamespace(ctx, c.f.ClientSet, name, c.namespace, timeout), "wait for pod %q to disappear", name)
 }
 
+// setOwnerAnnotation adds an annotation to help identify tests which incorrectly leak pods because of insufficient cleanup.
+func (c *PodClient) setOwnerAnnotation(pod *v1.Pod) {
+	if !c.ownerTracking {
+		return
+	}
+	ginkgo.GinkgoHelper()
+	location := ginkgotypes.NewCodeLocation(0)
+	if pod.Annotations == nil {
+		pod.Annotations = make(map[string]string)
+	}
+	pod.Annotations[AnnotationTestOwner] = fmt.Sprintf("%s:%d", location.FileName, location.LineNumber)
+}
+
 // mungeSpec apply test-suite specific transformations to the pod spec.
 func (c *PodClient) mungeSpec(pod *v1.Pod) {
 	if !framework.TestContext.NodeE2E {
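
Note the unusual value receiver on WithOwnerTracking above: taking PodClient by value means the method mutates a copy, which is what makes the shallow-clone behavior work. A self-contained illustration (types and names are illustrative, not from the patch):

package main

import "fmt"

// client stands in for PodClient; illustration only.
type client struct {
	ownerTracking bool
}

// withOwnerTracking receives c by value, so the assignment below changes
// only the local copy; the caller's client is untouched.
func (c client) withOwnerTracking(v bool) *client {
	c.ownerTracking = v
	return &c // address of the copy, an independent client
}

func main() {
	base := client{ownerTracking: true}
	derived := base.withOwnerTracking(false)
	fmt.Println(base.ownerTracking, derived.ownerTracking) // prints: true false
}
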
test/e2e_node/e2e_node_suite_test.go:

@@ -44,6 +44,7 @@ import (
 	commontest "k8s.io/kubernetes/test/e2e/common"
 	"k8s.io/kubernetes/test/e2e/framework"
 	e2econfig "k8s.io/kubernetes/test/e2e/framework/config"
+	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
 	e2etestfiles "k8s.io/kubernetes/test/e2e/framework/testfiles"
 	e2etestingmanifests "k8s.io/kubernetes/test/e2e/testing-manifests"
@@ -213,6 +214,11 @@ func TestE2eNode(t *testing.T) {
 			klog.Errorf("Failed creating report directory: %v", err)
 		}
 	}
+
+	// annotate created pods with source code location to make it easier to find tests
+	// which do insufficient cleanup and pollute the node state with lingering pods
+	e2epod.GlobalOwnerTracking = true
+
 	suiteConfig, reporterConfig := framework.CreateGinkgoConfig()
 	ginkgo.RunSpecs(t, "E2eNode Suite", suiteConfig, reporterConfig)
 }
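
With the node suite opting in globally, any pod left behind on a node can be mapped back to its creator. A hypothetical troubleshooting helper (the function is illustrative, not part of the patch; annotations are not filterable server-side, hence the client-side loop):

package main // hypothetical example

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"

	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

// printPodOwners lists every pod carrying the owner annotation, mapping
// lingering pods back to the test which created them.
func printPodOwners(ctx context.Context, cs kubernetes.Interface) error {
	pods, err := cs.CoreV1().Pods(metav1.NamespaceAll).List(ctx, metav1.ListOptions{})
	if err != nil {
		return err
	}
	for _, pod := range pods.Items {
		if owner, ok := pod.Annotations[e2epod.AnnotationTestOwner]; ok {
			fmt.Printf("%s/%s owned by %s\n", pod.Namespace, pod.Name, owner)
		}
	}
	return nil
}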