Fix up pod hostIPs e2e
- The feature is GA, so there's no feature gate, so it doesn't need any special label now.
- The test is not dual-stack-specific, so it shouldn't claim to be.
- It asserted node-IP-assigning behavior that is not guaranteed to work on all clouds. (Among other things: that there are no "extra" InternalIPs, and that there are InternalIPs of every supported IP family, rather than there only being ExternalIPs of some families.) See the sketch below for the behavior the test relies on instead.
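What makes the rewritten assertion portable is utilnode.GetNodeHostIPs (from k8s.io/kubernetes/pkg/util/node), which the new genHostIPsForNode helper wraps: it returns the node's primary IP (the first InternalIP, falling back to the first ExternalIP) plus at most one IP of the other family, rather than every InternalIP the cloud happens to report. A minimal, hypothetical sketch of that behavior (the main wrapper and the sample addresses are invented for illustration; GetNodeHostIPs and v1.HostIP are the real APIs the commit uses):

// Hypothetical sketch, not part of the commit: what GetNodeHostIPs returns
// for a node that reports an "extra" InternalIP.
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	utilnode "k8s.io/kubernetes/pkg/util/node"
)

func main() {
	// Invented addresses: two IPv4 InternalIPs plus one IPv6 InternalIP.
	node := &v1.Node{
		Status: v1.NodeStatus{
			Addresses: []v1.NodeAddress{
				{Type: v1.NodeInternalIP, Address: "10.0.0.1"},
				{Type: v1.NodeInternalIP, Address: "fd00::1"},
				{Type: v1.NodeInternalIP, Address: "10.0.0.2"}, // "extra" IP
			},
		},
	}

	// GetNodeHostIPs returns the primary IP plus at most one IP of the
	// other family; the extra 10.0.0.2 is dropped.
	nodeIPs, err := utilnode.GetNodeHostIPs(node)
	if err != nil {
		panic(err)
	}

	// Convert to the shape of pod.Status.HostIPs, as the commit's new
	// genHostIPsForNode helper does.
	hostIPs := []v1.HostIP{}
	for _, ip := range nodeIPs {
		hostIPs = append(hostIPs, v1.HostIP{IP: ip.String()})
	}
	fmt.Println(hostIPs) // [{10.0.0.1} {fd00::1}]
}

Because GetNodeHostIPs also falls back to ExternalIPs when a family has no InternalIP, the comparison no longer assumes that every cloud assigns InternalIPs of every supported family.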
test/e2e/feature/feature.go:

@@ -229,9 +229,6 @@ var (
 	// TODO: document the feature (owning SIG, when to use this feature for a test)
 	PodGarbageCollector = framework.WithFeature(framework.ValidFeatures.Add("PodGarbageCollector"))
 
-	// TODO: document the feature (owning SIG, when to use this feature for a test)
-	PodHostIPs = framework.WithFeature(framework.ValidFeatures.Add("PodHostIPs"))
-
 	// TODO: document the feature (owning SIG, when to use this feature for a test)
 	PodLifecycleSleepAction = framework.WithFeature(framework.ValidFeatures.Add("PodLifecycleSleepAction"))
 
test/e2e/nodefeature/nodefeature.go:

@@ -85,9 +85,6 @@ var (
 	// TODO: document the feature (owning SIG, when to use this feature for a test)
 	PodDisruptionConditions = framework.WithNodeFeature(framework.ValidNodeFeatures.Add("PodDisruptionConditions"))
 
-	// TODO: document the feature (owning SIG, when to use this feature for a test)
-	PodHostIPs = framework.WithNodeFeature(framework.ValidNodeFeatures.Add("PodHostIPs"))
-
 	// TODO: document the feature (owning SIG, when to use this feature for a test)
 	PodResources = framework.WithNodeFeature(framework.ValidNodeFeatures.Add("PodResources"))
 
test/e2e/network/dual_stack.go:

@@ -31,7 +31,7 @@ import (
 
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	kubefeatures "k8s.io/kubernetes/pkg/features"
-	"k8s.io/kubernetes/test/e2e/feature"
+	utilnode "k8s.io/kubernetes/pkg/util/node"
 	"k8s.io/kubernetes/test/e2e/framework"
 	e2enetwork "k8s.io/kubernetes/test/e2e/framework/network"
 	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
@@ -39,18 +39,16 @@ import (
 	e2epodoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
 	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
 	"k8s.io/kubernetes/test/e2e/network/common"
-	"k8s.io/kubernetes/test/e2e/nodefeature"
 	imageutils "k8s.io/kubernetes/test/utils/image"
 	admissionapi "k8s.io/pod-security-admission/api"
 )
 
-var _ = common.SIGDescribe("DualStack Host IP", framework.WithSerial(), nodefeature.PodHostIPs, feature.PodHostIPs, func() {
-	f := framework.NewDefaultFramework("dualstack")
+var _ = common.SIGDescribe("Pod Host IPs", framework.WithSerial(), func() {
+	f := framework.NewDefaultFramework("host-ips")
 	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
 
 	ginkgo.Context("when creating a Pod", func() {
-		ginkgo.It("should create pod, add ipv6 and ipv4 ip to host ips", func(ctx context.Context) {
-
+		ginkgo.It("should add node IPs of all supported families to hostIPs of pod-network pod", func(ctx context.Context) {
 			podName := "pod-dualstack-host-ips"
 
 			pod := genPodHostIPs(podName+string(uuid.NewUUID()), false)
@@ -71,28 +69,17 @@ var _ = common.SIGDescribe("DualStack Host IP", framework.WithSerial(), nodefeat
 				}
 			}
 
-			nodeList, err := e2enode.GetReadySchedulableNodes(ctx, f.ClientSet)
-			framework.ExpectNoError(err)
-			for _, node := range nodeList.Items {
-				if node.Name == p.Spec.NodeName {
-					nodeIPs := []v1.HostIP{}
-					for _, address := range node.Status.Addresses {
-						if address.Type == v1.NodeInternalIP {
-							nodeIPs = append(nodeIPs, v1.HostIP{IP: address.Address})
-						}
-					}
-					gomega.Expect(p.Status.HostIPs).Should(gomega.Equal(nodeIPs))
-					break
-				}
-			}
+			ginkgo.By("comparing pod.Status.HostIPs against node.Status.Addresses")
+			hostIPs, err := genHostIPsForNode(ctx, f, p.Spec.NodeName)
+			framework.ExpectNoError(err, "failed to fetch node IPs")
+			gomega.Expect(p.Status.HostIPs).Should(gomega.Equal(hostIPs))
 
 			ginkgo.By("deleting the pod")
 			err = podClient.Delete(ctx, pod.Name, *metav1.NewDeleteOptions(1))
 			framework.ExpectNoError(err, "failed to delete pod")
 		})
 
-		ginkgo.It("should create pod with hostNetwork, add ipv6 and ipv4 ip to host ips", func(ctx context.Context) {
-
+		ginkgo.It("should add node IPs of all supported families to hostIPs of host-network pod", func(ctx context.Context) {
 			podName := "pod-dualstack-host-ips"
 
 			pod := genPodHostIPs(podName+string(uuid.NewUUID()), true)
@@ -113,20 +100,10 @@ var _ = common.SIGDescribe("DualStack Host IP", framework.WithSerial(), nodefeat
 				}
 			}
 
-			nodeList, err := e2enode.GetReadySchedulableNodes(ctx, f.ClientSet)
-			framework.ExpectNoError(err)
-			for _, node := range nodeList.Items {
-				if node.Name == p.Spec.NodeName {
-					nodeIPs := []v1.HostIP{}
-					for _, address := range node.Status.Addresses {
-						if address.Type == v1.NodeInternalIP {
-							nodeIPs = append(nodeIPs, v1.HostIP{IP: address.Address})
-						}
-					}
-					gomega.Expect(p.Status.HostIPs).Should(gomega.Equal(nodeIPs))
-					break
-				}
-			}
+			ginkgo.By("comparing pod.Status.HostIPs against node.Status.Addresses")
+			hostIPs, err := genHostIPsForNode(ctx, f, p.Spec.NodeName)
+			framework.ExpectNoError(err, "failed to fetch node IPs")
+			gomega.Expect(p.Status.HostIPs).Should(gomega.Equal(hostIPs))
 
 			ginkgo.By("deleting the pod")
 			err = podClient.Delete(ctx, pod.Name, *metav1.NewDeleteOptions(1))
@@ -179,6 +156,27 @@ func genPodHostIPs(podName string, hostNetwork bool) *v1.Pod {
 	}
 }
 
+func genHostIPsForNode(ctx context.Context, f *framework.Framework, nodeName string) ([]v1.HostIP, error) {
+	nodeList, err := e2enode.GetReadySchedulableNodes(ctx, f.ClientSet)
+	if err != nil {
+		return nil, err
+	}
+	for _, node := range nodeList.Items {
+		if node.Name == nodeName {
+			nodeIPs, err := utilnode.GetNodeHostIPs(&node)
+			if err != nil {
+				return nil, err
+			}
+			hostIPs := []v1.HostIP{}
+			for _, ip := range nodeIPs {
+				hostIPs = append(hostIPs, v1.HostIP{IP: ip.String()})
+			}
+			return hostIPs, nil
+		}
+	}
+	return nil, fmt.Errorf("no such node %q", nodeName)
+}
+
 func testDownwardAPI(ctx context.Context, f *framework.Framework, podName string, env []v1.EnvVar, expectations []string) {
 	pod := &v1.Pod{
 		ObjectMeta: metav1.ObjectMeta{