e2e framework: eliminate interim sub packages

The "todo" packages were necessary while moving code around to avoid hitting
cyclic dependencies. Now that any sub package can depend on the framework, they
are no longer needed and the code can be moved into the normal sub packages.
Patrick Ohly 2022-08-25 20:56:04 +02:00
parent 2d21acb1be
commit 5614a9d064
53 changed files with 249 additions and 282 deletions
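The change is a mechanical import and call-site substitution, repeated across the 53 touched files shown below. A minimal sketch of the pattern in an illustrative test file (the package name, function names, and the example pod are assumptions; only the import paths and the e2epod/e2epodoutput calls come from the diff):

package node

import (
	v1 "k8s.io/api/core/v1"

	"k8s.io/kubernetes/test/e2e/framework"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"              // previously e2etodopod ".../framework/todo/pod"
	e2epodoutput "k8s.io/kubernetes/test/e2e/framework/pod/output" // output matching, split out of the interim package
)

// Pod lifecycle helpers: e2etodopod.NewPodClient(f) becomes e2epod.NewPodClient(f),
// and the client type is now *e2epod.PodClient.
func createPodExample(f *framework.Framework, pod *v1.Pod) *v1.Pod {
	podClient := e2epod.NewPodClient(f)
	return podClient.CreateSync(pod)
}

// Output matching: e2etodopod.TestContainerOutput becomes
// e2epodoutput.TestContainerOutput with an unchanged signature.
func checkOutputExample(f *framework.Framework, pod *v1.Pod) {
	e2epodoutput.TestContainerOutput(f, "consume configMaps", pod, 0, []string{
		"CONFIG_DATA_1=value-1",
	})
}

The exec helpers (ExecCommandInContainer, ExecShellInPodWithFullOutput), DefaultPodDeletionTimeout, and the PodClient type move to e2epod in the same way, while TestContainerOutputRegexp and MatchContainerOutput move to e2epodoutput.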

View File

@ -26,7 +26,7 @@ import (
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
e2etodopod "k8s.io/kubernetes/test/e2e/framework/todo/pod"
e2epodoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
imageutils "k8s.io/kubernetes/test/utils/image"
admissionapi "k8s.io/pod-security-admission/api"
@ -80,7 +80,7 @@ var _ = SIGDescribe("ConfigMap", func() {
},
}
e2etodopod.TestContainerOutput(f, "consume configMaps", pod, 0, []string{
e2epodoutput.TestContainerOutput(f, "consume configMaps", pod, 0, []string{
"CONFIG_DATA_1=value-1",
})
})
@ -124,7 +124,7 @@ var _ = SIGDescribe("ConfigMap", func() {
},
}
e2etodopod.TestContainerOutput(f, "consume configMaps", pod, 0, []string{
e2epodoutput.TestContainerOutput(f, "consume configMaps", pod, 0, []string{
"data-1=value-1", "data-2=value-2", "data-3=value-3",
"p-data-1=value-1", "p-data-2=value-2", "p-data-3=value-3",
})

View File

@ -26,7 +26,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
e2etodopod "k8s.io/kubernetes/test/e2e/framework/todo/pod"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)
const (
@ -44,7 +44,7 @@ type ConformanceContainer struct {
Volumes []v1.Volume
ImagePullSecrets []string
PodClient *e2etodopod.PodClient
PodClient *e2epod.PodClient
podName string
PodSecurityContext *v1.PodSecurityContext
}

View File

@ -40,7 +40,6 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
e2eevents "k8s.io/kubernetes/test/e2e/framework/events"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2etodopod "k8s.io/kubernetes/test/e2e/framework/todo/pod"
testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
admissionapi "k8s.io/pod-security-admission/api"
@ -58,11 +57,11 @@ const (
var _ = SIGDescribe("Probing container", func() {
f := framework.NewDefaultFramework("container-probe")
f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline
var podClient *e2etodopod.PodClient
var podClient *e2epod.PodClient
probe := webserverProbeBuilder{}
ginkgo.BeforeEach(func() {
podClient = e2etodopod.NewPodClient(f)
podClient = e2epod.NewPodClient(f)
})
/*
@ -562,7 +561,7 @@ var _ = SIGDescribe("Probing container", func() {
ginkgo.It("should mark readiness on pods to false while pod is in progress of terminating when a pod has a readiness probe", func() {
podName := "probe-test-" + string(uuid.NewUUID())
podClient := e2etodopod.NewPodClient(f)
podClient := e2epod.NewPodClient(f)
terminationGracePeriod := int64(30)
script := `
_term() {
@ -626,7 +625,7 @@ done
ginkgo.It("should mark readiness on pods to false and disable liveness probes while pod is in progress of terminating", func() {
podName := "probe-test-" + string(uuid.NewUUID())
podClient := e2etodopod.NewPodClient(f)
podClient := e2epod.NewPodClient(f)
terminationGracePeriod := int64(30)
script := `
_term() {
@ -938,7 +937,7 @@ func (b webserverProbeBuilder) build() *v1.Probe {
// RunLivenessTest verifies the number of restarts for pod with given expected number of restarts
func RunLivenessTest(f *framework.Framework, pod *v1.Pod, expectNumRestarts int, timeout time.Duration) {
podClient := e2etodopod.NewPodClient(f)
podClient := e2epod.NewPodClient(f)
ns := f.Namespace.Name
gomega.Expect(pod.Spec.Containers).NotTo(gomega.BeEmpty())
containerName := pod.Spec.Containers[0].Name
@ -998,7 +997,7 @@ func RunLivenessTest(f *framework.Framework, pod *v1.Pod, expectNumRestarts int,
}
func runReadinessFailTest(f *framework.Framework, pod *v1.Pod, notReadyUntil time.Duration) {
podClient := e2etodopod.NewPodClient(f)
podClient := e2epod.NewPodClient(f)
ns := f.Namespace.Name
gomega.Expect(pod.Spec.Containers).NotTo(gomega.BeEmpty())

View File

@ -23,7 +23,7 @@ import (
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2etodopod "k8s.io/kubernetes/test/e2e/framework/todo/pod"
e2epodoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
admissionapi "k8s.io/pod-security-admission/api"
)
@ -39,7 +39,7 @@ var _ = SIGDescribe("Containers", func() {
framework.ConformanceIt("should use the image defaults if command and args are blank [NodeConformance]", func() {
pod := entrypointTestPod(f.Namespace.Name)
pod.Spec.Containers[0].Args = nil
pod = e2etodopod.NewPodClient(f).Create(pod)
pod = e2epod.NewPodClient(f).Create(pod)
err := e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
framework.ExpectNoError(err, "Expected pod %q to be running, got error: %v", pod.Name, err)
pollLogs := func() (string, error) {
@ -58,7 +58,7 @@ var _ = SIGDescribe("Containers", func() {
*/
framework.ConformanceIt("should be able to override the image's default arguments (container cmd) [NodeConformance]", func() {
pod := entrypointTestPod(f.Namespace.Name, "entrypoint-tester", "override", "arguments")
e2etodopod.TestContainerOutput(f, "override arguments", pod, 0, []string{
e2epodoutput.TestContainerOutput(f, "override arguments", pod, 0, []string{
"[/agnhost entrypoint-tester override arguments]",
})
})
@ -74,7 +74,7 @@ var _ = SIGDescribe("Containers", func() {
pod := entrypointTestPod(f.Namespace.Name, "entrypoint-tester")
pod.Spec.Containers[0].Command = []string{"/agnhost-2"}
e2etodopod.TestContainerOutput(f, "override command", pod, 0, []string{
e2epodoutput.TestContainerOutput(f, "override command", pod, 0, []string{
"[/agnhost-2 entrypoint-tester]",
})
})
@ -88,7 +88,7 @@ var _ = SIGDescribe("Containers", func() {
pod := entrypointTestPod(f.Namespace.Name, "entrypoint-tester", "override", "arguments")
pod.Spec.Containers[0].Command = []string{"/agnhost-2"}
e2etodopod.TestContainerOutput(f, "override all", pod, 0, []string{
e2epodoutput.TestContainerOutput(f, "override all", pod, 0, []string{
"[/agnhost-2 entrypoint-tester override arguments]",
})
})

View File

@ -25,7 +25,7 @@ import (
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
e2enetwork "k8s.io/kubernetes/test/e2e/framework/network"
e2etodopod "k8s.io/kubernetes/test/e2e/framework/todo/pod"
e2epodoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
imageutils "k8s.io/kubernetes/test/utils/image"
admissionapi "k8s.io/pod-security-admission/api"
@ -419,5 +419,5 @@ func testDownwardAPI(f *framework.Framework, podName string, env []v1.EnvVar, ex
}
func testDownwardAPIUsingPod(f *framework.Framework, pod *v1.Pod, env []v1.EnvVar, expectations []string) {
e2etodopod.TestContainerOutputRegexp(f, "downward api env vars", pod, 0, expectations)
e2epodoutput.TestContainerOutputRegexp(f, "downward api env vars", pod, 0, expectations)
}

View File

@ -24,7 +24,6 @@ import (
"k8s.io/kubernetes/pkg/kubelet/util/format"
"k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2etodopod "k8s.io/kubernetes/test/e2e/framework/todo/pod"
imageutils "k8s.io/kubernetes/test/utils/image"
admissionapi "k8s.io/pod-security-admission/api"
@ -35,9 +34,9 @@ import (
var _ = SIGDescribe("Ephemeral Containers [NodeConformance]", func() {
f := framework.NewDefaultFramework("ephemeral-containers-test")
f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline
var podClient *e2etodopod.PodClient
var podClient *e2epod.PodClient
ginkgo.BeforeEach(func() {
podClient = e2etodopod.NewPodClient(f)
podClient = e2epod.NewPodClient(f)
})
// Release: 1.25
@ -75,7 +74,7 @@ var _ = SIGDescribe("Ephemeral Containers [NodeConformance]", func() {
ginkgo.By("checking pod container endpoints")
// Can't use anything depending on kubectl here because it's not available in the node test environment
output := e2etodopod.ExecCommandInContainer(f, pod.Name, ecName, "/bin/echo", "marco")
output := e2epod.ExecCommandInContainer(f, pod.Name, ecName, "/bin/echo", "marco")
gomega.Expect(output).To(gomega.ContainSubstring("marco"))
log, err := e2epod.GetPodLogs(f.ClientSet, pod.Namespace, pod.Name, ecName)
framework.ExpectNoError(err, "Failed to get logs for pod %q ephemeral container %q", format.Pod(pod), ecName)

View File

@ -22,7 +22,7 @@ import (
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2etodopod "k8s.io/kubernetes/test/e2e/framework/todo/pod"
e2epodoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
imageutils "k8s.io/kubernetes/test/utils/image"
admissionapi "k8s.io/pod-security-admission/api"
@ -58,7 +58,7 @@ var _ = SIGDescribe("Variable Expansion", func() {
}
pod := newPod([]string{"sh", "-c", "env"}, envVars, nil, nil)
e2etodopod.TestContainerOutput(f, "env composition", pod, 0, []string{
e2epodoutput.TestContainerOutput(f, "env composition", pod, 0, []string{
"FOO=foo-value",
"BAR=bar-value",
"FOOBAR=foo-value;;bar-value",
@ -79,7 +79,7 @@ var _ = SIGDescribe("Variable Expansion", func() {
}
pod := newPod([]string{"sh", "-c", "TEST_VAR=wrong echo \"$(TEST_VAR)\""}, envVars, nil, nil)
e2etodopod.TestContainerOutput(f, "substitution in container's command", pod, 0, []string{
e2epodoutput.TestContainerOutput(f, "substitution in container's command", pod, 0, []string{
"test-value",
})
})
@ -99,7 +99,7 @@ var _ = SIGDescribe("Variable Expansion", func() {
pod := newPod([]string{"sh", "-c"}, envVars, nil, nil)
pod.Spec.Containers[0].Args = []string{"TEST_VAR=wrong echo \"$(TEST_VAR)\""}
e2etodopod.TestContainerOutput(f, "substitution in container's args", pod, 0, []string{
e2epodoutput.TestContainerOutput(f, "substitution in container's args", pod, 0, []string{
"test-value",
})
})
@ -139,7 +139,7 @@ var _ = SIGDescribe("Variable Expansion", func() {
envVars[0].Value = pod.ObjectMeta.Name
pod.Spec.Containers[0].Command = []string{"sh", "-c", "test -d /testcontainer/" + pod.ObjectMeta.Name + ";echo $?"}
e2etodopod.TestContainerOutput(f, "substitution in volume subpath", pod, 0, []string{
e2epodoutput.TestContainerOutput(f, "substitution in volume subpath", pod, 0, []string{
"0",
})
})
@ -262,7 +262,7 @@ var _ = SIGDescribe("Variable Expansion", func() {
pod.ObjectMeta.Annotations = map[string]string{"notmysubpath": "mypath"}
ginkgo.By("creating the pod with failed condition")
podClient := e2etodopod.NewPodClient(f)
podClient := e2epod.NewPodClient(f)
pod = podClient.Create(pod)
err := e2epod.WaitTimeoutForPodRunningInNamespace(f.ClientSet, pod.Name, pod.Namespace, framework.PodStartShortTimeout)
@ -334,7 +334,7 @@ var _ = SIGDescribe("Variable Expansion", func() {
pod.ObjectMeta.Annotations = map[string]string{"mysubpath": "mypath"}
ginkgo.By("creating the pod")
podClient := e2etodopod.NewPodClient(f)
podClient := e2epod.NewPodClient(f)
pod = podClient.Create(pod)
ginkgo.By("waiting for pod running")
@ -343,14 +343,14 @@ var _ = SIGDescribe("Variable Expansion", func() {
ginkgo.By("creating a file in subpath")
cmd := "touch /volume_mount/mypath/foo/test.log"
_, _, err = e2etodopod.ExecShellInPodWithFullOutput(f, pod.Name, cmd)
_, _, err = e2epod.ExecShellInPodWithFullOutput(f, pod.Name, cmd)
if err != nil {
framework.Failf("expected to be able to write to subpath")
}
ginkgo.By("test for file in mounted path")
cmd = "test -f /subpath_mount/test.log"
_, _, err = e2etodopod.ExecShellInPodWithFullOutput(f, pod.Name, cmd)
_, _, err = e2epod.ExecShellInPodWithFullOutput(f, pod.Name, cmd)
if err != nil {
framework.Failf("expected to be able to verify file")
}
@ -371,7 +371,7 @@ var _ = SIGDescribe("Variable Expansion", func() {
})
func testPodFailSubpath(f *framework.Framework, pod *v1.Pod) {
podClient := e2etodopod.NewPodClient(f)
podClient := e2epod.NewPodClient(f)
pod = podClient.Create(pod)
defer func() {

View File

@ -39,7 +39,7 @@ import (
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/client/conditions"
"k8s.io/kubernetes/test/e2e/framework"
e2etodopod "k8s.io/kubernetes/test/e2e/framework/todo/pod"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
imageutils "k8s.io/kubernetes/test/utils/image"
admissionapi "k8s.io/pod-security-admission/api"
)
@ -161,9 +161,9 @@ func initContainersInvariants(pod *v1.Pod) error {
var _ = SIGDescribe("InitContainer [NodeConformance]", func() {
f := framework.NewDefaultFramework("init-container")
f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline
var podClient *e2etodopod.PodClient
var podClient *e2epod.PodClient
ginkgo.BeforeEach(func() {
podClient = e2etodopod.NewPodClient(f)
podClient = e2epod.NewPodClient(f)
})
/*

View File

@ -28,7 +28,6 @@ import (
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2etodopod "k8s.io/kubernetes/test/e2e/framework/todo/pod"
admissionapi "k8s.io/pod-security-admission/api"
"github.com/onsi/ginkgo/v2"
@ -38,9 +37,9 @@ import (
var _ = SIGDescribe("Kubelet", func() {
f := framework.NewDefaultFramework("kubelet-test")
f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline
var podClient *e2etodopod.PodClient
var podClient *e2epod.PodClient
ginkgo.BeforeEach(func() {
podClient = e2etodopod.NewPodClient(f)
podClient = e2epod.NewPodClient(f)
})
ginkgo.Context("when scheduling a busybox command in a pod", func() {
podName := "busybox-scheduling-" + string(uuid.NewUUID())

View File

@ -26,7 +26,6 @@ import (
"k8s.io/klog/v2"
"k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2etodopod "k8s.io/kubernetes/test/e2e/framework/todo/pod"
admissionapi "k8s.io/pod-security-admission/api"
)
@ -93,12 +92,12 @@ func (config *KubeletManagedHostConfig) setup() {
func (config *KubeletManagedHostConfig) createPodWithoutHostNetwork() {
podSpec := config.createPodSpec(etcHostsPodName)
config.pod = e2etodopod.NewPodClient(config.f).CreateSync(podSpec)
config.pod = e2epod.NewPodClient(config.f).CreateSync(podSpec)
}
func (config *KubeletManagedHostConfig) createPodWithHostNetwork() {
podSpec := config.createPodSpecWithHostNetwork(etcHostsHostNetworkPodName)
config.hostNetworkPod = e2etodopod.NewPodClient(config.f).CreateSync(podSpec)
config.hostNetworkPod = e2epod.NewPodClient(config.f).CreateSync(podSpec)
}
func assertManagedStatus(
@ -149,7 +148,7 @@ func assertManagedStatus(
}
func (config *KubeletManagedHostConfig) getFileContents(podName, containerName, path string) string {
return e2etodopod.ExecCommandInContainer(config.f, podName, containerName, "cat", path)
return e2epod.ExecCommandInContainer(config.f, podName, containerName, "cat", path)
}
func (config *KubeletManagedHostConfig) createPodSpec(podName string) *v1.Pod {

View File

@ -27,7 +27,6 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2etodopod "k8s.io/kubernetes/test/e2e/framework/todo/pod"
imageutils "k8s.io/kubernetes/test/utils/image"
admissionapi "k8s.io/pod-security-admission/api"
@ -38,7 +37,7 @@ import (
var _ = SIGDescribe("Container Lifecycle Hook", func() {
f := framework.NewDefaultFramework("container-lifecycle-hook")
f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline
var podClient *e2etodopod.PodClient
var podClient *e2epod.PodClient
const (
podCheckInterval = 1 * time.Second
postStartWaitTimeout = 2 * time.Minute
@ -61,7 +60,7 @@ var _ = SIGDescribe("Container Lifecycle Hook", func() {
e2epod.SetAffinity(&nodeSelection, targetNode)
e2epod.SetNodeSelection(&podHandleHookRequest.Spec, nodeSelection)
podClient = e2etodopod.NewPodClient(f)
podClient = e2epod.NewPodClient(f)
ginkgo.By("create the container to handle the HTTPGet hook request.")
newPod := podClient.CreateSync(podHandleHookRequest)
targetIP = newPod.Status.PodIP
@ -81,7 +80,7 @@ var _ = SIGDescribe("Container Lifecycle Hook", func() {
}, postStartWaitTimeout, podCheckInterval).Should(gomega.BeNil())
}
ginkgo.By("delete the pod with lifecycle hook")
podClient.DeleteSync(podWithHook.Name, *metav1.NewDeleteOptions(15), e2etodopod.DefaultPodDeletionTimeout)
podClient.DeleteSync(podWithHook.Name, *metav1.NewDeleteOptions(15), e2epod.DefaultPodDeletionTimeout)
if podWithHook.Spec.Containers[0].Lifecycle.PreStop != nil {
ginkgo.By("check prestop hook")
gomega.Eventually(func() error {

View File

@ -28,7 +28,6 @@ import (
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
e2etodopod "k8s.io/kubernetes/test/e2e/framework/todo/pod"
imageutils "k8s.io/kubernetes/test/utils/image"
admissionapi "k8s.io/pod-security-admission/api"
)
@ -58,7 +57,7 @@ var _ = SIGDescribe("PodOSRejection [NodeConformance]", func() {
NodeName: linuxNode.Name, // Set the node to an node which doesn't support
},
}
pod = e2etodopod.NewPodClient(f).Create(pod)
pod = e2epod.NewPodClient(f).Create(pod)
// Check the pod is still not running
err = e2epod.WaitForPodFailedReason(f.ClientSet, pod, "PodOSNotSupported", f.Timeouts.PodStartShort)
framework.ExpectNoError(err)

View File

@ -51,7 +51,7 @@ import (
"k8s.io/kubernetes/pkg/kubelet"
"k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2etodopod "k8s.io/kubernetes/test/e2e/framework/todo/pod"
e2epodoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
e2ewebsocket "k8s.io/kubernetes/test/e2e/framework/websocket"
imageutils "k8s.io/kubernetes/test/utils/image"
admissionapi "k8s.io/pod-security-admission/api"
@ -69,7 +69,7 @@ const (
)
// testHostIP tests that a pod gets a host IP
func testHostIP(podClient *e2etodopod.PodClient, pod *v1.Pod) {
func testHostIP(podClient *e2epod.PodClient, pod *v1.Pod) {
ginkgo.By("creating pod")
podClient.CreateSync(pod)
@ -92,7 +92,7 @@ func testHostIP(podClient *e2etodopod.PodClient, pod *v1.Pod) {
}
}
func startPodAndGetBackOffs(podClient *e2etodopod.PodClient, pod *v1.Pod, sleepAmount time.Duration) (time.Duration, time.Duration) {
func startPodAndGetBackOffs(podClient *e2epod.PodClient, pod *v1.Pod, sleepAmount time.Duration) (time.Duration, time.Duration) {
podClient.CreateSync(pod)
time.Sleep(sleepAmount)
gomega.Expect(pod.Spec.Containers).NotTo(gomega.BeEmpty())
@ -119,7 +119,7 @@ func startPodAndGetBackOffs(podClient *e2etodopod.PodClient, pod *v1.Pod, sleepA
return delay1, delay2
}
func getRestartDelay(podClient *e2etodopod.PodClient, podName string, containerName string) (time.Duration, error) {
func getRestartDelay(podClient *e2epod.PodClient, podName string, containerName string) (time.Duration, error) {
beginTime := time.Now()
var previousRestartCount int32 = -1
var previousFinishedAt time.Time
@ -188,11 +188,11 @@ func expectNoErrorWithRetries(fn func() error, maxRetries int, explain ...interf
var _ = SIGDescribe("Pods", func() {
f := framework.NewDefaultFramework("pods")
f.NamespacePodSecurityEnforceLevel = admissionapi.LevelRestricted
var podClient *e2etodopod.PodClient
var podClient *e2epod.PodClient
var dc dynamic.Interface
ginkgo.BeforeEach(func() {
podClient = e2etodopod.NewPodClient(f)
podClient = e2epod.NewPodClient(f)
dc = f.DynamicClient
})
@ -306,7 +306,7 @@ var _ = SIGDescribe("Pods", func() {
ginkgo.By("verifying pod deletion was observed")
deleted := false
var lastPod *v1.Pod
timer := time.After(e2etodopod.DefaultPodDeletionTimeout)
timer := time.After(e2epod.DefaultPodDeletionTimeout)
for !deleted {
select {
case event := <-w.ResultChan():
@ -523,7 +523,7 @@ var _ = SIGDescribe("Pods", func() {
"FOOSERVICE_PORT_8765_TCP_ADDR=",
}
expectNoErrorWithRetries(func() error {
return e2etodopod.MatchContainerOutput(f, pod, containerName, expectedVars, gomega.ContainSubstring)
return e2epodoutput.MatchContainerOutput(f, pod, containerName, expectedVars, gomega.ContainSubstring)
}, maxRetries, "Container should have service environment variables set")
})
@ -808,7 +808,7 @@ var _ = SIGDescribe("Pods", func() {
}
ginkgo.By("submitting the pod to kubernetes")
e2etodopod.NewPodClient(f).Create(pod)
e2epod.NewPodClient(f).Create(pod)
e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
if podClient.PodIsReady(podName) {
framework.Failf("Expect pod(%s/%s)'s Ready condition to be false initially.", f.Namespace.Name, pod.Name)

View File

@ -23,7 +23,7 @@ import (
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/test/e2e/framework"
e2etodopod "k8s.io/kubernetes/test/e2e/framework/todo/pod"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
imageutils "k8s.io/kubernetes/test/utils/image"
admissionapi "k8s.io/pod-security-admission/api"
)
@ -67,7 +67,7 @@ func (c *PrivilegedPodTestConfig) run(containerName string, expectSuccess bool)
cmd := []string{"ip", "link", "add", "dummy1", "type", "dummy"}
reverseCmd := []string{"ip", "link", "del", "dummy1"}
stdout, stderr, err := e2etodopod.ExecCommandInContainerWithFullOutput(
stdout, stderr, err := e2epod.ExecCommandInContainerWithFullOutput(
c.f, c.privilegedPod, containerName, cmd...)
msg := fmt.Sprintf("cmd %v, stdout %q, stderr %q", cmd, stdout, stderr)
@ -75,7 +75,7 @@ func (c *PrivilegedPodTestConfig) run(containerName string, expectSuccess bool)
framework.ExpectNoError(err, msg)
// We need to clean up the dummy link that was created, as it
// leaks out into the node level -- yuck.
_, _, err := e2etodopod.ExecCommandInContainerWithFullOutput(
_, _, err := e2epod.ExecCommandInContainerWithFullOutput(
c.f, c.privilegedPod, containerName, reverseCmd...)
framework.ExpectNoError(err,
fmt.Sprintf("could not remove dummy1 link: %v", err))
@ -116,5 +116,5 @@ func (c *PrivilegedPodTestConfig) createPodsSpec() *v1.Pod {
func (c *PrivilegedPodTestConfig) createPods() {
podSpec := c.createPodsSpec()
c.pod = e2etodopod.NewPodClient(c.f).CreateSync(podSpec)
c.pod = e2epod.NewPodClient(c.f).CreateSync(podSpec)
}

View File

@ -28,7 +28,7 @@ import (
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/pkg/kubelet/images"
"k8s.io/kubernetes/test/e2e/framework"
e2etodopod "k8s.io/kubernetes/test/e2e/framework/todo/pod"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
imageutils "k8s.io/kubernetes/test/utils/image"
admissionapi "k8s.io/pod-security-admission/api"
@ -99,7 +99,7 @@ while true; do sleep 1; done
testContainer.Name = testCase.Name
testContainer.Command = []string{"sh", "-c", tmpCmd}
terminateContainer := ConformanceContainer{
PodClient: e2etodopod.NewPodClient(f),
PodClient: e2epod.NewPodClient(f),
Container: testContainer,
RestartPolicy: testCase.RestartPolicy,
Volumes: testVolumes,
@ -144,7 +144,7 @@ while true; do sleep 1; done
matchTerminationMessage := func(container v1.Container, expectedPhase v1.PodPhase, expectedMsg gomegatypes.GomegaMatcher) {
container.Name = "termination-message-container"
c := ConformanceContainer{
PodClient: e2etodopod.NewPodClient(f),
PodClient: e2epod.NewPodClient(f),
Container: container,
RestartPolicy: v1.RestartPolicyNever,
}
@ -269,7 +269,7 @@ while true; do sleep 1; done
command = []string{"ping", "-t", "localhost"}
}
container := ConformanceContainer{
PodClient: e2etodopod.NewPodClient(f),
PodClient: e2epod.NewPodClient(f),
Container: v1.Container{
Name: "image-pull-test",
Image: image,

View File

@ -38,7 +38,6 @@ import (
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
e2etodopod "k8s.io/kubernetes/test/e2e/framework/todo/pod"
admissionapi "k8s.io/pod-security-admission/api"
"github.com/onsi/ginkgo/v2"
@ -63,7 +62,7 @@ var _ = SIGDescribe("RuntimeClass", func() {
handler := f.Namespace.Name + "-handler"
rcName := createRuntimeClass(f, "unconfigured-handler", handler, nil)
defer deleteRuntimeClass(f, rcName)
pod := e2etodopod.NewPodClient(f).Create(e2enode.NewRuntimeClassPod(rcName))
pod := e2epod.NewPodClient(f).Create(e2enode.NewRuntimeClassPod(rcName))
eventSelector := fields.Set{
"involvedObject.kind": "Pod",
"involvedObject.name": pod.Name,
@ -90,7 +89,7 @@ var _ = SIGDescribe("RuntimeClass", func() {
rcName := createRuntimeClass(f, "preconfigured-handler", e2enode.PreconfiguredRuntimeClassHandler, nil)
defer deleteRuntimeClass(f, rcName)
pod := e2etodopod.NewPodClient(f).Create(e2enode.NewRuntimeClassPod(rcName))
pod := e2epod.NewPodClient(f).Create(e2enode.NewRuntimeClassPod(rcName))
expectPodSuccess(f, pod)
})
@ -105,7 +104,7 @@ var _ = SIGDescribe("RuntimeClass", func() {
framework.ConformanceIt("should schedule a Pod requesting a RuntimeClass without PodOverhead [NodeConformance]", func() {
rcName := createRuntimeClass(f, "preconfigured-handler", e2enode.PreconfiguredRuntimeClassHandler, nil)
defer deleteRuntimeClass(f, rcName)
pod := e2etodopod.NewPodClient(f).Create(e2enode.NewRuntimeClassPod(rcName))
pod := e2epod.NewPodClient(f).Create(e2enode.NewRuntimeClassPod(rcName))
// there is only one pod in the namespace
label := labels.SelectorFromSet(labels.Set(map[string]string{}))
pods, err := e2epod.WaitForPodsWithLabelScheduled(f.ClientSet, f.Namespace.Name, label)
@ -135,7 +134,7 @@ var _ = SIGDescribe("RuntimeClass", func() {
},
})
defer deleteRuntimeClass(f, rcName)
pod := e2etodopod.NewPodClient(f).Create(e2enode.NewRuntimeClassPod(rcName))
pod := e2epod.NewPodClient(f).Create(e2enode.NewRuntimeClassPod(rcName))
// there is only one pod in the namespace
label := labels.SelectorFromSet(labels.Set(map[string]string{}))
pods, err := e2epod.WaitForPodsWithLabelScheduled(f.ClientSet, f.Namespace.Name, label)

View File

@ -29,7 +29,7 @@ import (
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
e2etodopod "k8s.io/kubernetes/test/e2e/framework/todo/pod"
e2epodoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
imageutils "k8s.io/kubernetes/test/utils/image"
admissionapi "k8s.io/pod-security-admission/api"
)
@ -82,7 +82,7 @@ var _ = SIGDescribe("Secrets", func() {
},
}
e2etodopod.TestContainerOutput(f, "consume secrets", pod, 0, []string{
e2epodoutput.TestContainerOutput(f, "consume secrets", pod, 0, []string{
"SECRET_DATA=value-1",
})
})
@ -126,7 +126,7 @@ var _ = SIGDescribe("Secrets", func() {
},
}
e2etodopod.TestContainerOutput(f, "consume secrets", pod, 0, []string{
e2epodoutput.TestContainerOutput(f, "consume secrets", pod, 0, []string{
"data-1=value-1", "data-2=value-2", "data-3=value-3",
"p-data-1=value-1", "p-data-2=value-2", "p-data-3=value-3",
})

View File

@ -28,8 +28,8 @@ import (
"k8s.io/kubernetes/pkg/kubelet/events"
"k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2epodoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
e2etodopod "k8s.io/kubernetes/test/e2e/framework/todo/pod"
imageutils "k8s.io/kubernetes/test/utils/image"
admissionapi "k8s.io/pod-security-admission/api"
"k8s.io/utils/pointer"
@ -46,9 +46,9 @@ var (
var _ = SIGDescribe("Security Context", func() {
f := framework.NewDefaultFramework("security-context-test")
f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged
var podClient *e2etodopod.PodClient
var podClient *e2epod.PodClient
ginkgo.BeforeEach(func() {
podClient = e2etodopod.NewPodClient(f)
podClient = e2epod.NewPodClient(f)
})
ginkgo.Context("When creating a pod with HostUsers", func() {
@ -74,14 +74,14 @@ var _ = SIGDescribe("Security Context", func() {
ginkgo.It("must create the user namespace if set to false [LinuxOnly] [Feature:UserNamespacesStatelessPodsSupport]", func() {
// with hostUsers=false the pod must use a new user namespace
podClient := e2etodopod.PodClientNS(f, f.Namespace.Name)
podClient := e2epod.PodClientNS(f, f.Namespace.Name)
createdPod1 := podClient.Create(makePod(false))
createdPod2 := podClient.Create(makePod(false))
defer func() {
ginkgo.By("delete the pods")
podClient.DeleteSync(createdPod1.Name, metav1.DeleteOptions{}, e2etodopod.DefaultPodDeletionTimeout)
podClient.DeleteSync(createdPod2.Name, metav1.DeleteOptions{}, e2etodopod.DefaultPodDeletionTimeout)
podClient.DeleteSync(createdPod1.Name, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout)
podClient.DeleteSync(createdPod2.Name, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout)
}()
getLogs := func(pod *v1.Pod) (string, error) {
err := e2epod.WaitForPodSuccessInNamespaceTimeout(f.ClientSet, createdPod1.Name, f.Namespace.Name, f.Timeouts.PodStart)
@ -116,7 +116,7 @@ var _ = SIGDescribe("Security Context", func() {
// When running in the host's user namespace, the /proc/self/uid_map file content looks like:
// 0 0 4294967295
// Verify the value 4294967295 is present in the output.
e2etodopod.TestContainerOutput(f, "read namespace", pod, 0, []string{
e2epodoutput.TestContainerOutput(f, "read namespace", pod, 0, []string{
"4294967295",
})
})
@ -240,7 +240,7 @@ var _ = SIGDescribe("Security Context", func() {
// Each line should be "=0" that means root inside the container is the owner of the file.
downwardAPIVolFiles := 1
projectedFiles := len(secret.Data) + downwardAPIVolFiles
e2etodopod.TestContainerOutput(f, "check file permissions", pod, 0, []string{
e2epodoutput.TestContainerOutput(f, "check file permissions", pod, 0, []string{
strings.Repeat("=0\n", len(secret.Data)+len(configMap.Data)+downwardAPIVolFiles+projectedFiles),
})
})
@ -300,7 +300,7 @@ var _ = SIGDescribe("Security Context", func() {
// Expect one line for each file on all the volumes.
// Each line should be "=200" (fsGroup) that means it was mapped to the
// right user inside the container.
e2etodopod.TestContainerOutput(f, "check FSGroup is mapped correctly", pod, 0, []string{
e2epodoutput.TestContainerOutput(f, "check FSGroup is mapped correctly", pod, 0, []string{
strings.Repeat(fmt.Sprintf("=%v\n", fsGroup), len(configMap.Data)),
})
})

View File

@ -25,7 +25,6 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
e2etodopod "k8s.io/kubernetes/test/e2e/framework/todo/pod"
imageutils "k8s.io/kubernetes/test/utils/image"
admissionapi "k8s.io/pod-security-admission/api"
@ -42,7 +41,7 @@ var _ = SIGDescribe("Sysctls [LinuxOnly] [NodeConformance]", func() {
f := framework.NewDefaultFramework("sysctl")
f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged
var podClient *e2etodopod.PodClient
var podClient *e2epod.PodClient
testPod := func() *v1.Pod {
podName := "sysctl-" + string(uuid.NewUUID())
@ -66,7 +65,7 @@ var _ = SIGDescribe("Sysctls [LinuxOnly] [NodeConformance]", func() {
}
ginkgo.BeforeEach(func() {
podClient = e2etodopod.NewPodClient(f)
podClient = e2epod.NewPodClient(f)
})
/*
@ -94,7 +93,7 @@ var _ = SIGDescribe("Sysctls [LinuxOnly] [NodeConformance]", func() {
// watch for events instead of termination of pod because the kubelet deletes
// failed pods without running containers. This would create a race as the pod
// might have already been deleted here.
ev, err := e2etodopod.NewPodClient(f).WaitForErrorEventOrSuccess(pod)
ev, err := e2epod.NewPodClient(f).WaitForErrorEventOrSuccess(pod)
framework.ExpectNoError(err)
gomega.Expect(ev).To(gomega.BeNil())
@ -202,7 +201,7 @@ var _ = SIGDescribe("Sysctls [LinuxOnly] [NodeConformance]", func() {
// watch for events instead of termination of pod because the kubelet deletes
// failed pods without running containers. This would create a race as the pod
// might have already been deleted here.
ev, err := e2etodopod.NewPodClient(f).WaitForErrorEventOrSuccess(pod)
ev, err := e2epod.NewPodClient(f).WaitForErrorEventOrSuccess(pod)
framework.ExpectNoError(err)
gomega.Expect(ev).To(gomega.BeNil())

View File

@ -29,8 +29,8 @@ import (
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2epodoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
e2etodopod "k8s.io/kubernetes/test/e2e/framework/todo/pod"
imageutils "k8s.io/kubernetes/test/utils/image"
admissionapi "k8s.io/pod-security-admission/api"
)
@ -149,7 +149,7 @@ var _ = SIGDescribe("ConfigMap", func() {
"--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/configmap-volume/data-1")
ginkgo.By("Creating the pod")
e2etodopod.NewPodClient(f).CreateSync(pod)
e2epod.NewPodClient(f).CreateSync(pod)
pollLogs := func() (string, error) {
return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name)
@ -216,7 +216,7 @@ var _ = SIGDescribe("ConfigMap", func() {
})
ginkgo.By("Creating the pod")
e2etodopod.NewPodClient(f).Create(pod)
e2epod.NewPodClient(f).Create(pod)
e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
pollLogs1 := func() (string, error) {
@ -375,7 +375,7 @@ var _ = SIGDescribe("ConfigMap", func() {
},
}
ginkgo.By("Creating the pod")
e2etodopod.NewPodClient(f).CreateSync(pod)
e2epod.NewPodClient(f).CreateSync(pod)
pollCreateLogs := func() (string, error) {
return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, createContainerName)
@ -486,7 +486,7 @@ var _ = SIGDescribe("ConfigMap", func() {
},
}
e2etodopod.TestContainerOutput(f, "consume configMaps", pod, 0, []string{
e2epodoutput.TestContainerOutput(f, "consume configMaps", pod, 0, []string{
"content of file \"/etc/configmap-volume/data-1\": value-1",
})
@ -622,7 +622,7 @@ func doConfigMapE2EWithoutMappings(f *framework.Framework, asUser bool, fsGroup
"content of file \"/etc/configmap-volume/data-1\": value-1",
fileModeRegexp,
}
e2etodopod.TestContainerOutputRegexp(f, "consume configMaps", pod, 0, output)
e2epodoutput.TestContainerOutputRegexp(f, "consume configMaps", pod, 0, output)
}
func doConfigMapE2EWithMappings(f *framework.Framework, asUser bool, fsGroup int64, itemMode *int32) {
@ -674,7 +674,7 @@ func doConfigMapE2EWithMappings(f *framework.Framework, asUser bool, fsGroup int
fileModeRegexp := getFileModeRegex("/etc/configmap-volume/path/to/data-2", itemMode)
output = append(output, fileModeRegexp)
}
e2etodopod.TestContainerOutputRegexp(f, "consume configMaps", pod, 0, output)
e2epodoutput.TestContainerOutputRegexp(f, "consume configMaps", pod, 0, output)
}
func createNonOptionalConfigMapPod(f *framework.Framework, volumeMountPath string) (*v1.Pod, error) {
@ -691,7 +691,7 @@ func createNonOptionalConfigMapPod(f *framework.Framework, volumeMountPath strin
pod.Spec.Volumes[0].VolumeSource.ConfigMap.Optional = &falseValue
ginkgo.By("Creating the pod")
pod = e2etodopod.NewPodClient(f).Create(pod)
pod = e2epod.NewPodClient(f).Create(pod)
return pod, e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
}
@ -721,7 +721,7 @@ func createNonOptionalConfigMapPodWithConfig(f *framework.Framework, volumeMount
}
ginkgo.By("Creating the pod")
pod = e2etodopod.NewPodClient(f).Create(pod)
pod = e2epod.NewPodClient(f).Create(pod)
return pod, e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
}

View File

@ -24,7 +24,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
e2etodopod "k8s.io/kubernetes/test/e2e/framework/todo/pod"
e2epodoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
imageutils "k8s.io/kubernetes/test/utils/image"
admissionapi "k8s.io/pod-security-admission/api"
@ -134,5 +134,5 @@ func testDownwardAPIForEphemeralStorage(f *framework.Framework, podName string,
}
func testDownwardAPIUsingPod(f *framework.Framework, pod *v1.Pod, env []v1.EnvVar, expectations []string) {
e2etodopod.TestContainerOutputRegexp(f, "downward api env vars", pod, 0, expectations)
e2epodoutput.TestContainerOutputRegexp(f, "downward api env vars", pod, 0, expectations)
}

View File

@ -26,8 +26,8 @@ import (
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2epodoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
e2etodopod "k8s.io/kubernetes/test/e2e/framework/todo/pod"
imageutils "k8s.io/kubernetes/test/utils/image"
admissionapi "k8s.io/pod-security-admission/api"
@ -40,9 +40,9 @@ var _ = SIGDescribe("Downward API volume", func() {
const podLogTimeout = 3 * time.Minute
f := framework.NewDefaultFramework("downward-api")
f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline
var podClient *e2etodopod.PodClient
var podClient *e2epod.PodClient
ginkgo.BeforeEach(func() {
podClient = e2etodopod.NewPodClient(f)
podClient = e2epod.NewPodClient(f)
})
/*
@ -54,7 +54,7 @@ var _ = SIGDescribe("Downward API volume", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
pod := downwardAPIVolumePodForSimpleTest(podName, "/etc/podinfo/podname")
e2etodopod.TestContainerOutput(f, "downward API volume plugin", pod, 0, []string{
e2epodoutput.TestContainerOutput(f, "downward API volume plugin", pod, 0, []string{
fmt.Sprintf("%s\n", podName),
})
})
@ -70,7 +70,7 @@ var _ = SIGDescribe("Downward API volume", func() {
defaultMode := int32(0400)
pod := downwardAPIVolumePodForModeTest(podName, "/etc/podinfo/podname", nil, &defaultMode)
e2etodopod.TestContainerOutput(f, "downward API volume plugin", pod, 0, []string{
e2epodoutput.TestContainerOutput(f, "downward API volume plugin", pod, 0, []string{
"mode of file \"/etc/podinfo/podname\": -r--------",
})
})
@ -86,7 +86,7 @@ var _ = SIGDescribe("Downward API volume", func() {
mode := int32(0400)
pod := downwardAPIVolumePodForModeTest(podName, "/etc/podinfo/podname", &mode, nil)
e2etodopod.TestContainerOutput(f, "downward API volume plugin", pod, 0, []string{
e2epodoutput.TestContainerOutput(f, "downward API volume plugin", pod, 0, []string{
"mode of file \"/etc/podinfo/podname\": -r--------",
})
})
@ -101,7 +101,7 @@ var _ = SIGDescribe("Downward API volume", func() {
FSGroup: &gid,
}
setPodNonRootUser(pod)
e2etodopod.TestContainerOutput(f, "downward API volume plugin", pod, 0, []string{
e2epodoutput.TestContainerOutput(f, "downward API volume plugin", pod, 0, []string{
fmt.Sprintf("%s\n", podName),
})
})
@ -117,7 +117,7 @@ var _ = SIGDescribe("Downward API volume", func() {
FSGroup: &gid,
}
setPodNonRootUser(pod)
e2etodopod.TestContainerOutput(f, "downward API volume plugin", pod, 0, []string{
e2epodoutput.TestContainerOutput(f, "downward API volume plugin", pod, 0, []string{
"mode of file \"/etc/podinfo/podname\": -r--r-----",
})
})
@ -194,7 +194,7 @@ var _ = SIGDescribe("Downward API volume", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
pod := downwardAPIVolumeForContainerResources(podName, "/etc/podinfo/cpu_limit")
e2etodopod.TestContainerOutput(f, "downward API volume plugin", pod, 0, []string{
e2epodoutput.TestContainerOutput(f, "downward API volume plugin", pod, 0, []string{
fmt.Sprintf("2\n"),
})
})
@ -208,7 +208,7 @@ var _ = SIGDescribe("Downward API volume", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
pod := downwardAPIVolumeForContainerResources(podName, "/etc/podinfo/memory_limit")
e2etodopod.TestContainerOutput(f, "downward API volume plugin", pod, 0, []string{
e2epodoutput.TestContainerOutput(f, "downward API volume plugin", pod, 0, []string{
fmt.Sprintf("67108864\n"),
})
})
@ -222,7 +222,7 @@ var _ = SIGDescribe("Downward API volume", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
pod := downwardAPIVolumeForContainerResources(podName, "/etc/podinfo/cpu_request")
e2etodopod.TestContainerOutput(f, "downward API volume plugin", pod, 0, []string{
e2epodoutput.TestContainerOutput(f, "downward API volume plugin", pod, 0, []string{
fmt.Sprintf("1\n"),
})
})
@ -236,7 +236,7 @@ var _ = SIGDescribe("Downward API volume", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
pod := downwardAPIVolumeForContainerResources(podName, "/etc/podinfo/memory_request")
e2etodopod.TestContainerOutput(f, "downward API volume plugin", pod, 0, []string{
e2epodoutput.TestContainerOutput(f, "downward API volume plugin", pod, 0, []string{
fmt.Sprintf("33554432\n"),
})
})
@ -250,7 +250,7 @@ var _ = SIGDescribe("Downward API volume", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
pod := downwardAPIVolumeForDefaultContainerResources(podName, "/etc/podinfo/cpu_limit")
e2etodopod.TestContainerOutputRegexp(f, "downward API volume plugin", pod, 0, []string{"[1-9]"})
e2epodoutput.TestContainerOutputRegexp(f, "downward API volume plugin", pod, 0, []string{"[1-9]"})
})
/*
@ -262,7 +262,7 @@ var _ = SIGDescribe("Downward API volume", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
pod := downwardAPIVolumeForDefaultContainerResources(podName, "/etc/podinfo/memory_limit")
e2etodopod.TestContainerOutputRegexp(f, "downward API volume plugin", pod, 0, []string{"[1-9]"})
e2epodoutput.TestContainerOutputRegexp(f, "downward API volume plugin", pod, 0, []string{"[1-9]"})
})
})

View File

@ -28,8 +28,8 @@ import (
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2epodoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
e2etodopod "k8s.io/kubernetes/test/e2e/framework/todo/pod"
imageutils "k8s.io/kubernetes/test/utils/image"
admissionapi "k8s.io/pod-security-admission/api"
)
@ -283,11 +283,11 @@ var _ = SIGDescribe("EmptyDir volumes", func() {
}
ginkgo.By("Creating Pod")
e2etodopod.NewPodClient(f).Create(pod)
e2epod.NewPodClient(f).Create(pod)
e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
ginkgo.By("Reading file content from the nginx-container")
result := e2etodopod.ExecShellInContainer(f, pod.Name, busyBoxMainContainerName, fmt.Sprintf("cat %s", busyBoxMainVolumeFilePath))
result := e2epod.ExecShellInContainer(f, pod.Name, busyBoxMainContainerName, fmt.Sprintf("cat %s", busyBoxMainVolumeFilePath))
framework.ExpectEqual(result, message, "failed to match expected string %s with %s", message, resultString)
})
@ -343,18 +343,18 @@ var _ = SIGDescribe("EmptyDir volumes", func() {
var err error
ginkgo.By("Creating Pod")
pod = e2etodopod.NewPodClient(f).CreateSync(pod)
pod = e2epod.NewPodClient(f).CreateSync(pod)
ginkgo.By("Waiting for the pod running")
err = e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
framework.ExpectNoError(err, "failed to deploy pod %s", pod.Name)
ginkgo.By("Getting the pod")
pod, err = e2etodopod.NewPodClient(f).Get(context.TODO(), pod.Name, metav1.GetOptions{})
pod, err = e2epod.NewPodClient(f).Get(context.TODO(), pod.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "failed to get pod %s", pod.Name)
ginkgo.By("Reading empty dir size")
result := e2etodopod.ExecShellInContainer(f, pod.Name, busyBoxMainContainerName, fmt.Sprintf("df | grep %s | awk '{print $2}'", busyBoxMainVolumeMountPath))
result := e2epod.ExecShellInContainer(f, pod.Name, busyBoxMainContainerName, fmt.Sprintf("df | grep %s | awk '{print $2}'", busyBoxMainVolumeMountPath))
framework.ExpectEqual(result, expectedResult, "failed to match expected string %s with %s", expectedResult, result)
})
})
@ -391,7 +391,7 @@ func doTestSetgidFSGroup(f *framework.Framework, uid int64, medium v1.StorageMed
if medium == v1.StorageMediumMemory {
out = append(out, "mount type of \"/test-volume\": tmpfs")
}
e2etodopod.TestContainerOutput(f, msg, pod, 0, out)
e2epodoutput.TestContainerOutput(f, msg, pod, 0, out)
}
func doTestSubPathFSGroup(f *framework.Framework, uid int64, medium v1.StorageMedium) {
@ -424,7 +424,7 @@ func doTestSubPathFSGroup(f *framework.Framework, uid int64, medium v1.StorageMe
if medium == v1.StorageMediumMemory {
out = append(out, "mount type of \"/test-volume\": tmpfs")
}
e2etodopod.TestContainerOutput(f, msg, pod, 0, out)
e2epodoutput.TestContainerOutput(f, msg, pod, 0, out)
}
func doTestVolumeModeFSGroup(f *framework.Framework, uid int64, medium v1.StorageMedium) {
@ -449,7 +449,7 @@ func doTestVolumeModeFSGroup(f *framework.Framework, uid int64, medium v1.Storag
if medium == v1.StorageMediumMemory {
out = append(out, "mount type of \"/test-volume\": tmpfs")
}
e2etodopod.TestContainerOutput(f, msg, pod, 0, out)
e2epodoutput.TestContainerOutput(f, msg, pod, 0, out)
}
func doTest0644FSGroup(f *framework.Framework, uid int64, medium v1.StorageMedium) {
@ -477,7 +477,7 @@ func doTest0644FSGroup(f *framework.Framework, uid int64, medium v1.StorageMediu
if medium == v1.StorageMediumMemory {
out = append(out, "mount type of \"/test-volume\": tmpfs")
}
e2etodopod.TestContainerOutput(f, msg, pod, 0, out)
e2epodoutput.TestContainerOutput(f, msg, pod, 0, out)
}
func doTestVolumeMode(f *framework.Framework, uid int64, medium v1.StorageMedium) {
@ -499,7 +499,7 @@ func doTestVolumeMode(f *framework.Framework, uid int64, medium v1.StorageMedium
if medium == v1.StorageMediumMemory {
out = append(out, "mount type of \"/test-volume\": tmpfs")
}
e2etodopod.TestContainerOutput(f, msg, pod, 0, out)
e2epodoutput.TestContainerOutput(f, msg, pod, 0, out)
}
func doTest0644(f *framework.Framework, uid int64, medium v1.StorageMedium) {
@ -524,7 +524,7 @@ func doTest0644(f *framework.Framework, uid int64, medium v1.StorageMedium) {
if medium == v1.StorageMediumMemory {
out = append(out, "mount type of \"/test-volume\": tmpfs")
}
e2etodopod.TestContainerOutput(f, msg, pod, 0, out)
e2epodoutput.TestContainerOutput(f, msg, pod, 0, out)
}
func doTest0666(f *framework.Framework, uid int64, medium v1.StorageMedium) {
@ -549,7 +549,7 @@ func doTest0666(f *framework.Framework, uid int64, medium v1.StorageMedium) {
if medium == v1.StorageMediumMemory {
out = append(out, "mount type of \"/test-volume\": tmpfs")
}
e2etodopod.TestContainerOutput(f, msg, pod, 0, out)
e2epodoutput.TestContainerOutput(f, msg, pod, 0, out)
}
func doTest0777(f *framework.Framework, uid int64, medium v1.StorageMedium) {
@ -574,7 +574,7 @@ func doTest0777(f *framework.Framework, uid int64, medium v1.StorageMedium) {
if medium == v1.StorageMediumMemory {
out = append(out, "mount type of \"/test-volume\": tmpfs")
}
e2etodopod.TestContainerOutput(f, msg, pod, 0, out)
e2epodoutput.TestContainerOutput(f, msg, pod, 0, out)
}
func formatMedium(medium v1.StorageMedium) string {

View File

@ -24,7 +24,7 @@ import (
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/test/e2e/framework"
e2etodopod "k8s.io/kubernetes/test/e2e/framework/todo/pod"
e2epodoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
imageutils "k8s.io/kubernetes/test/utils/image"
admissionapi "k8s.io/pod-security-admission/api"
@ -59,7 +59,7 @@ var _ = SIGDescribe("HostPath", func() {
fmt.Sprintf("--fs_type=%v", volumePath),
fmt.Sprintf("--file_mode=%v", volumePath),
}
e2etodopod.TestContainerOutputRegexp(f, "hostPath mode", pod, 0, []string{
e2epodoutput.TestContainerOutputRegexp(f, "hostPath mode", pod, 0, []string{
"mode of file \"/test-volume\": dg?trwxrwx", // we expect the sticky bit (mode flag t) to be set for the dir
})
})
@ -88,7 +88,7 @@ var _ = SIGDescribe("HostPath", func() {
}
//Read the content of the file with the second container to
//verify volumes being shared properly among containers within the pod.
e2etodopod.TestContainerOutput(f, "hostPath r/w", pod, 1, []string{
e2epodoutput.TestContainerOutput(f, "hostPath r/w", pod, 1, []string{
"content of file \"/test-volume/test-file\": mount-tester new file",
})
})
@ -125,7 +125,7 @@ var _ = SIGDescribe("HostPath", func() {
fmt.Sprintf("--retry_time=%d", retryDuration),
}
e2etodopod.TestContainerOutput(f, "hostPath subPath", pod, 1, []string{
e2epodoutput.TestContainerOutput(f, "hostPath subPath", pod, 1, []string{
"content of file \"" + filePathInReader + "\": mount-tester new file",
})
})

View File

@ -24,7 +24,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
e2etodopod "k8s.io/kubernetes/test/e2e/framework/todo/pod"
e2epodoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
imageutils "k8s.io/kubernetes/test/utils/image"
admissionapi "k8s.io/pod-security-admission/api"
@ -89,7 +89,7 @@ var _ = SIGDescribe("Projected combined", func() {
},
},
}
e2etodopod.TestContainerOutput(f, "Check all projections for projected volume plugin", pod, 0, []string{
e2epodoutput.TestContainerOutput(f, "Check all projections for projected volume plugin", pod, 0, []string{
podName,
"secret-value-1",
"configmap-value-1",

View File

@ -26,8 +26,8 @@ import (
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2epodoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
e2etodopod "k8s.io/kubernetes/test/e2e/framework/todo/pod"
imageutils "k8s.io/kubernetes/test/utils/image"
admissionapi "k8s.io/pod-security-admission/api"
@ -148,7 +148,7 @@ var _ = SIGDescribe("Projected configMap", func() {
"--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/projected-configmap-volume/data-1")
ginkgo.By("Creating the pod")
e2etodopod.NewPodClient(f).CreateSync(pod)
e2epod.NewPodClient(f).CreateSync(pod)
pollLogs := func() (string, error) {
return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name)
@ -327,7 +327,7 @@ var _ = SIGDescribe("Projected configMap", func() {
},
}
ginkgo.By("Creating the pod")
e2etodopod.NewPodClient(f).CreateSync(pod)
e2epod.NewPodClient(f).CreateSync(pod)
pollCreateLogs := func() (string, error) {
return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, createContainerName)
@ -451,7 +451,7 @@ var _ = SIGDescribe("Projected configMap", func() {
},
}
e2etodopod.TestContainerOutput(f, "consume configMaps", pod, 0, []string{
e2epodoutput.TestContainerOutput(f, "consume configMaps", pod, 0, []string{
"content of file \"/etc/projected-configmap-volume/data-1\": value-1",
})
@ -513,7 +513,7 @@ func doProjectedConfigMapE2EWithoutMappings(f *framework.Framework, asUser bool,
"content of file \"/etc/projected-configmap-volume/data-1\": value-1",
fileModeRegexp,
}
e2etodopod.TestContainerOutputRegexp(f, "consume configMaps", pod, 0, output)
e2epodoutput.TestContainerOutputRegexp(f, "consume configMaps", pod, 0, output)
}
func doProjectedConfigMapE2EWithMappings(f *framework.Framework, asUser bool, fsGroup int64, itemMode *int32) {
@ -564,7 +564,7 @@ func doProjectedConfigMapE2EWithMappings(f *framework.Framework, asUser bool, fs
fileModeRegexp := getFileModeRegex("/etc/projected-configmap-volume/path/to/data-2", itemMode)
output = append(output, fileModeRegexp)
}
e2etodopod.TestContainerOutputRegexp(f, "consume configMaps", pod, 0, output)
e2epodoutput.TestContainerOutputRegexp(f, "consume configMaps", pod, 0, output)
}
func createProjectedConfigMapMounttestPod(namespace, volumeName, referenceName, mountPath string, mounttestArgs ...string) *v1.Pod {

View File

@ -25,8 +25,8 @@ import (
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2epodoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
e2etodopod "k8s.io/kubernetes/test/e2e/framework/todo/pod"
imageutils "k8s.io/kubernetes/test/utils/image"
admissionapi "k8s.io/pod-security-admission/api"
@ -40,9 +40,9 @@ var _ = SIGDescribe("Projected downwardAPI", func() {
// How long to wait for a log pod to be displayed
const podLogTimeout = 2 * time.Minute
var podClient *e2etodopod.PodClient
var podClient *e2epod.PodClient
ginkgo.BeforeEach(func() {
podClient = e2etodopod.NewPodClient(f)
podClient = e2epod.NewPodClient(f)
})
/*
@ -54,7 +54,7 @@ var _ = SIGDescribe("Projected downwardAPI", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
pod := downwardAPIVolumePodForSimpleTest(podName, "/etc/podinfo/podname")
e2etodopod.TestContainerOutput(f, "downward API volume plugin", pod, 0, []string{
e2epodoutput.TestContainerOutput(f, "downward API volume plugin", pod, 0, []string{
fmt.Sprintf("%s\n", podName),
})
})
@ -70,7 +70,7 @@ var _ = SIGDescribe("Projected downwardAPI", func() {
defaultMode := int32(0400)
pod := projectedDownwardAPIVolumePodForModeTest(podName, "/etc/podinfo/podname", nil, &defaultMode)
e2etodopod.TestContainerOutput(f, "downward API volume plugin", pod, 0, []string{
e2epodoutput.TestContainerOutput(f, "downward API volume plugin", pod, 0, []string{
"mode of file \"/etc/podinfo/podname\": -r--------",
})
})
@ -86,7 +86,7 @@ var _ = SIGDescribe("Projected downwardAPI", func() {
mode := int32(0400)
pod := projectedDownwardAPIVolumePodForModeTest(podName, "/etc/podinfo/podname", &mode, nil)
e2etodopod.TestContainerOutput(f, "downward API volume plugin", pod, 0, []string{
e2epodoutput.TestContainerOutput(f, "downward API volume plugin", pod, 0, []string{
"mode of file \"/etc/podinfo/podname\": -r--------",
})
})
@ -101,7 +101,7 @@ var _ = SIGDescribe("Projected downwardAPI", func() {
FSGroup: &gid,
}
setPodNonRootUser(pod)
e2etodopod.TestContainerOutput(f, "downward API volume plugin", pod, 0, []string{
e2epodoutput.TestContainerOutput(f, "downward API volume plugin", pod, 0, []string{
fmt.Sprintf("%s\n", podName),
})
})
@ -117,7 +117,7 @@ var _ = SIGDescribe("Projected downwardAPI", func() {
FSGroup: &gid,
}
setPodNonRootUser(pod)
e2etodopod.TestContainerOutput(f, "downward API volume plugin", pod, 0, []string{
e2epodoutput.TestContainerOutput(f, "downward API volume plugin", pod, 0, []string{
"mode of file \"/etc/podinfo/podname\": -r--r-----",
})
})
@ -194,7 +194,7 @@ var _ = SIGDescribe("Projected downwardAPI", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
pod := downwardAPIVolumeForContainerResources(podName, "/etc/podinfo/cpu_limit")
e2etodopod.TestContainerOutput(f, "downward API volume plugin", pod, 0, []string{
e2epodoutput.TestContainerOutput(f, "downward API volume plugin", pod, 0, []string{
fmt.Sprintf("2\n"),
})
})
@ -208,7 +208,7 @@ var _ = SIGDescribe("Projected downwardAPI", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
pod := downwardAPIVolumeForContainerResources(podName, "/etc/podinfo/memory_limit")
e2etodopod.TestContainerOutput(f, "downward API volume plugin", pod, 0, []string{
e2epodoutput.TestContainerOutput(f, "downward API volume plugin", pod, 0, []string{
fmt.Sprintf("67108864\n"),
})
})
@ -222,7 +222,7 @@ var _ = SIGDescribe("Projected downwardAPI", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
pod := downwardAPIVolumeForContainerResources(podName, "/etc/podinfo/cpu_request")
e2etodopod.TestContainerOutput(f, "downward API volume plugin", pod, 0, []string{
e2epodoutput.TestContainerOutput(f, "downward API volume plugin", pod, 0, []string{
fmt.Sprintf("1\n"),
})
})
@ -236,7 +236,7 @@ var _ = SIGDescribe("Projected downwardAPI", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
pod := downwardAPIVolumeForContainerResources(podName, "/etc/podinfo/memory_request")
e2etodopod.TestContainerOutput(f, "downward API volume plugin", pod, 0, []string{
e2epodoutput.TestContainerOutput(f, "downward API volume plugin", pod, 0, []string{
fmt.Sprintf("33554432\n"),
})
})
@ -250,7 +250,7 @@ var _ = SIGDescribe("Projected downwardAPI", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
pod := downwardAPIVolumeForDefaultContainerResources(podName, "/etc/podinfo/cpu_limit")
e2etodopod.TestContainerOutputRegexp(f, "downward API volume plugin", pod, 0, []string{"[1-9]"})
e2epodoutput.TestContainerOutputRegexp(f, "downward API volume plugin", pod, 0, []string{"[1-9]"})
})
/*
@ -262,7 +262,7 @@ var _ = SIGDescribe("Projected downwardAPI", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
pod := downwardAPIVolumeForDefaultContainerResources(podName, "/etc/podinfo/memory_limit")
e2etodopod.TestContainerOutputRegexp(f, "downward API volume plugin", pod, 0, []string{"[1-9]"})
e2epodoutput.TestContainerOutputRegexp(f, "downward API volume plugin", pod, 0, []string{"[1-9]"})
})
})

View File

@ -26,7 +26,7 @@ import (
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2etodopod "k8s.io/kubernetes/test/e2e/framework/todo/pod"
e2epodoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
imageutils "k8s.io/kubernetes/test/utils/image"
admissionapi "k8s.io/pod-security-admission/api"
@ -201,7 +201,7 @@ var _ = SIGDescribe("Projected secret", func() {
}
fileModeRegexp := getFileModeRegex("/etc/projected-secret-volume/data-1", nil)
e2etodopod.TestContainerOutputRegexp(f, "consume secrets", pod, 0, []string{
e2epodoutput.TestContainerOutputRegexp(f, "consume secrets", pod, 0, []string{
"content of file \"/etc/projected-secret-volume/data-1\": value-1",
fileModeRegexp,
})
@ -368,7 +368,7 @@ var _ = SIGDescribe("Projected secret", func() {
},
}
ginkgo.By("Creating the pod")
e2etodopod.NewPodClient(f).CreateSync(pod)
e2epod.NewPodClient(f).CreateSync(pod)
pollCreateLogs := func() (string, error) {
return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, createContainerName)
@ -505,7 +505,7 @@ func doProjectedSecretE2EWithoutMapping(f *framework.Framework, defaultMode *int
fileModeRegexp,
}
e2etodopod.TestContainerOutputRegexp(f, "consume secrets", pod, 0, expectedOutput)
e2epodoutput.TestContainerOutputRegexp(f, "consume secrets", pod, 0, expectedOutput)
}
func doProjectedSecretE2EWithMapping(f *framework.Framework, mode *int32) {
@ -582,5 +582,5 @@ func doProjectedSecretE2EWithMapping(f *framework.Framework, mode *int32) {
fileModeRegexp,
}
e2etodopod.TestContainerOutputRegexp(f, "consume secrets", pod, 0, expectedOutput)
e2epodoutput.TestContainerOutputRegexp(f, "consume secrets", pod, 0, expectedOutput)
}

View File

@ -27,7 +27,7 @@ import (
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2etodopod "k8s.io/kubernetes/test/e2e/framework/todo/pod"
e2epodoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
imageutils "k8s.io/kubernetes/test/utils/image"
admissionapi "k8s.io/pod-security-admission/api"
@ -191,7 +191,7 @@ var _ = SIGDescribe("Secrets", func() {
}
fileModeRegexp := getFileModeRegex("/etc/secret-volume/data-1", nil)
e2etodopod.TestContainerOutputRegexp(f, "consume secrets", pod, 0, []string{
e2epodoutput.TestContainerOutputRegexp(f, "consume secrets", pod, 0, []string{
"content of file \"/etc/secret-volume/data-1\": value-1",
fileModeRegexp,
})
@ -334,7 +334,7 @@ var _ = SIGDescribe("Secrets", func() {
},
}
ginkgo.By("Creating the pod")
e2etodopod.NewPodClient(f).CreateSync(pod)
e2epod.NewPodClient(f).CreateSync(pod)
pollCreateLogs := func() (string, error) {
return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, createContainerName)
@ -535,7 +535,7 @@ func doSecretE2EWithoutMapping(f *framework.Framework, defaultMode *int32, secre
fileModeRegexp,
}
e2etodopod.TestContainerOutputRegexp(f, "consume secrets", pod, 0, expectedOutput)
e2epodoutput.TestContainerOutputRegexp(f, "consume secrets", pod, 0, expectedOutput)
}
func doSecretE2EWithMapping(f *framework.Framework, mode *int32) {
@ -603,7 +603,7 @@ func doSecretE2EWithMapping(f *framework.Framework, mode *int32) {
fileModeRegexp,
}
e2etodopod.TestContainerOutputRegexp(f, "consume secrets", pod, 0, expectedOutput)
e2epodoutput.TestContainerOutputRegexp(f, "consume secrets", pod, 0, expectedOutput)
}
func createNonOptionalSecretPod(f *framework.Framework, volumeMountPath, podName string) error {
@ -650,7 +650,7 @@ func createNonOptionalSecretPod(f *framework.Framework, volumeMountPath, podName
},
}
ginkgo.By("Creating the pod")
pod = e2etodopod.NewPodClient(f).Create(pod)
pod = e2epod.NewPodClient(f).Create(pod)
return e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
}
@ -711,6 +711,6 @@ func createNonOptionalSecretPodWithSecret(f *framework.Framework, volumeMountPat
},
}
ginkgo.By("Creating the pod")
pod = e2etodopod.NewPodClient(f).Create(pod)
pod = e2epod.NewPodClient(f).Create(pod)
return e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
}
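Illustrative sketch (not part of the diff): PodClient creation moves from framework/todo/pod to framework/pod, while the wait helpers it is combined with keep their names. Helper name below is hypothetical.

package example

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/kubernetes/test/e2e/framework"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

// createAndAwaitPod is a hypothetical helper mirroring the updated call sites above.
func createAndAwaitPod(f *framework.Framework, pod *v1.Pod) error {
	// NewPodClient now lives in framework/pod; the methods are unchanged.
	pod = e2epod.NewPodClient(f).Create(pod)
	return e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
}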

View File

@ -65,8 +65,8 @@ import (
// reconfigure framework
_ "k8s.io/kubernetes/test/e2e/framework/debug/init"
_ "k8s.io/kubernetes/test/e2e/framework/todo/metrics/init"
_ "k8s.io/kubernetes/test/e2e/framework/todo/node/init"
_ "k8s.io/kubernetes/test/e2e/framework/metrics/init"
_ "k8s.io/kubernetes/test/e2e/framework/node/init"
)
// handleFlags sets up all flags and parses the command line.

View File

@ -59,9 +59,9 @@ import (
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/kubernetes/test/e2e/framework"
e2edeployment "k8s.io/kubernetes/test/e2e/framework/deployment"
e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
e2etestfiles "k8s.io/kubernetes/test/e2e/framework/testfiles"
e2etodokubectl "k8s.io/kubernetes/test/e2e/framework/todo/kubectl"
testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
@ -466,10 +466,10 @@ func (j *TestJig) CreateIngress(manifestPath, ns string, ingAnnotations map[stri
}
j.Logger.Infof("creating replication controller")
e2etodokubectl.RunKubectlOrDieInput(ns, read("rc.yaml"), "create", "-f", "-")
e2ekubectl.RunKubectlOrDieInput(ns, read("rc.yaml"), "create", "-f", "-")
j.Logger.Infof("creating service")
e2etodokubectl.RunKubectlOrDieInput(ns, read("svc.yaml"), "create", "-f", "-")
e2ekubectl.RunKubectlOrDieInput(ns, read("svc.yaml"), "create", "-f", "-")
if len(svcAnnotations) > 0 {
svcList, err := j.Client.CoreV1().Services(ns).List(context.TODO(), metav1.ListOptions{})
framework.ExpectNoError(err)
@ -482,7 +482,7 @@ func (j *TestJig) CreateIngress(manifestPath, ns string, ingAnnotations map[stri
if exists("secret.yaml") {
j.Logger.Infof("creating secret")
e2etodokubectl.RunKubectlOrDieInput(ns, read("secret.yaml"), "create", "-f", "-")
e2ekubectl.RunKubectlOrDieInput(ns, read("secret.yaml"), "create", "-f", "-")
}
j.Logger.Infof("Parsing ingress from %v", filepath.Join(manifestPath, "ing.yaml"))
@ -551,7 +551,7 @@ func (j *TestJig) runCreate(ing *networkingv1.Ingress) (*networkingv1.Ingress, e
if err := ingressToManifest(ing, filePath); err != nil {
return nil, err
}
_, err := e2etodokubectl.RunKubemciWithKubeconfig("create", ing.Name, fmt.Sprintf("--ingress=%s", filePath))
_, err := e2ekubectl.RunKubemciWithKubeconfig("create", ing.Name, fmt.Sprintf("--ingress=%s", filePath))
return ing, err
}
@ -566,14 +566,14 @@ func (j *TestJig) runUpdate(ing *networkingv1.Ingress) (*networkingv1.Ingress, e
if err := ingressToManifest(ing, filePath); err != nil {
return nil, err
}
_, err := e2etodokubectl.RunKubemciWithKubeconfig("create", ing.Name, fmt.Sprintf("--ingress=%s", filePath), "--force")
_, err := e2ekubectl.RunKubemciWithKubeconfig("create", ing.Name, fmt.Sprintf("--ingress=%s", filePath), "--force")
return ing, err
}
// DescribeIng describes information of ingress by running kubectl describe ing.
func DescribeIng(ns string) {
framework.Logf("\nOutput of kubectl describe ing:\n")
desc, _ := e2etodokubectl.RunKubectl(
desc, _ := e2ekubectl.RunKubectl(
ns, "describe", "ing")
framework.Logf(desc)
}
@ -681,7 +681,7 @@ func (j *TestJig) runDelete(ing *networkingv1.Ingress) error {
if err := ingressToManifest(ing, filePath); err != nil {
return err
}
_, err := e2etodokubectl.RunKubemciWithKubeconfig("delete", ing.Name, fmt.Sprintf("--ingress=%s", filePath))
_, err := e2ekubectl.RunKubemciWithKubeconfig("delete", ing.Name, fmt.Sprintf("--ingress=%s", filePath))
return err
}
@ -689,7 +689,7 @@ func (j *TestJig) runDelete(ing *networkingv1.Ingress) error {
// TODO(nikhiljindal): Update this to be able to return hostname as well.
func getIngressAddressFromKubemci(name string) ([]string, error) {
var addresses []string
out, err := e2etodokubectl.RunKubemciCmd("get-status", name)
out, err := e2ekubectl.RunKubemciCmd("get-status", name)
if err != nil {
return addresses, err
}
@ -1033,7 +1033,7 @@ func (cont *NginxIngressController) Init() {
}
framework.Logf("initializing nginx ingress controller")
e2etodokubectl.RunKubectlOrDieInput(cont.Ns, read("rc.yaml"), "create", "-f", "-")
e2ekubectl.RunKubectlOrDieInput(cont.Ns, read("rc.yaml"), "create", "-f", "-")
rc, err := cont.Client.CoreV1().ReplicationControllers(cont.Ns).Get(context.TODO(), "nginx-ingress-controller", metav1.GetOptions{})
framework.ExpectNoError(err)

View File

@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package pod
package kubectl
import (
"bytes"
@ -31,7 +31,6 @@ import (
uexec "k8s.io/utils/exec"
"k8s.io/kubernetes/test/e2e/framework"
e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
)
// KubectlBuilder is used to build, customize and execute a kubectl Command.
@ -44,7 +43,7 @@ type KubectlBuilder struct {
// NewKubectlCommand returns a KubectlBuilder for running kubectl.
func NewKubectlCommand(namespace string, args ...string) *KubectlBuilder {
b := new(KubectlBuilder)
tk := e2ekubectl.NewTestKubeconfig(framework.TestContext.CertDir, framework.TestContext.Host, framework.TestContext.KubeConfig, framework.TestContext.KubeContext, framework.TestContext.KubectlPath, namespace)
tk := NewTestKubeconfig(framework.TestContext.CertDir, framework.TestContext.Host, framework.TestContext.KubeConfig, framework.TestContext.KubeContext, framework.TestContext.KubectlPath, namespace)
b.cmd = tk.KubectlCmd(args...)
return b
}
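Illustrative sketch (not part of the diff): with the builder folded into framework/kubectl, external tests keep using the e2ekubectl alias; only code that imported framework/todo/kubectl changes its path. Helper name below is hypothetical.

package example

import (
	"k8s.io/kubernetes/test/e2e/framework"
	e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
)

// describeIngresses is a hypothetical caller of the relocated kubectl helpers.
func describeIngresses(ns string) {
	desc, err := e2ekubectl.RunKubectl(ns, "describe", "ing")
	framework.ExpectNoError(err)
	framework.Logf("%s", desc)
}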

View File

@ -20,17 +20,16 @@ import (
"github.com/onsi/ginkgo/v2"
"k8s.io/kubernetes/test/e2e/framework"
e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
)
func GrabBeforeEach(f *framework.Framework) (result *e2emetrics.Collection) {
func GrabBeforeEach(f *framework.Framework) (result *Collection) {
gatherMetricsAfterTest := framework.TestContext.GatherMetricsAfterTest == "true" || framework.TestContext.GatherMetricsAfterTest == "master"
if !gatherMetricsAfterTest || !framework.TestContext.IncludeClusterAutoscalerMetrics {
return nil
}
ginkgo.By("Gathering metrics before test", func() {
grabber, err := e2emetrics.NewMetricsGrabber(f.ClientSet, f.KubemarkExternalClusterClientSet, f.ClientConfig(), !framework.ProviderIs("kubemark"), false, false, false, framework.TestContext.IncludeClusterAutoscalerMetrics, false)
grabber, err := NewMetricsGrabber(f.ClientSet, f.KubemarkExternalClusterClientSet, f.ClientConfig(), !framework.ProviderIs("kubemark"), false, false, false, framework.TestContext.IncludeClusterAutoscalerMetrics, false)
if err != nil {
framework.Logf("Failed to create MetricsGrabber (skipping ClusterAutoscaler metrics gathering before test): %v", err)
return
@ -47,7 +46,7 @@ func GrabBeforeEach(f *framework.Framework) (result *e2emetrics.Collection) {
return
}
func GrabAfterEach(f *framework.Framework, before *e2emetrics.Collection) {
func GrabAfterEach(f *framework.Framework, before *Collection) {
if framework.TestContext.GatherMetricsAfterTest == "false" {
return
}
@ -55,7 +54,7 @@ func GrabAfterEach(f *framework.Framework, before *e2emetrics.Collection) {
ginkgo.By("Gathering metrics after test", func() {
// Grab apiserver, scheduler, controller-manager metrics and (optionally) nodes' kubelet metrics.
grabMetricsFromKubelets := framework.TestContext.GatherMetricsAfterTest != "master" && !framework.ProviderIs("kubemark")
grabber, err := e2emetrics.NewMetricsGrabber(f.ClientSet, f.KubemarkExternalClusterClientSet, f.ClientConfig(), grabMetricsFromKubelets, true, true, true, framework.TestContext.IncludeClusterAutoscalerMetrics, false)
grabber, err := NewMetricsGrabber(f.ClientSet, f.KubemarkExternalClusterClientSet, f.ClientConfig(), grabMetricsFromKubelets, true, true, true, framework.TestContext.IncludeClusterAutoscalerMetrics, false)
if err != nil {
framework.Logf("Failed to create MetricsGrabber (skipping metrics gathering): %v", err)
return
@ -66,9 +65,9 @@ func GrabAfterEach(f *framework.Framework, before *e2emetrics.Collection) {
return
}
if before == nil {
before = &e2emetrics.Collection{}
before = &Collection{}
}
(*e2emetrics.ComponentCollection)(&received).ComputeClusterAutoscalerMetricsDelta(*before)
f.TestSummaries = append(f.TestSummaries, (*e2emetrics.ComponentCollection)(&received))
(*ComponentCollection)(&received).ComputeClusterAutoscalerMetricsDelta(*before)
f.TestSummaries = append(f.TestSummaries, (*ComponentCollection)(&received))
})
}

View File

@ -22,16 +22,16 @@ import (
"github.com/onsi/ginkgo/v2"
"k8s.io/kubernetes/test/e2e/framework"
e2etodometrics "k8s.io/kubernetes/test/e2e/framework/todo/metrics"
e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
)
func init() {
framework.NewFrameworkExtensions = append(framework.NewFrameworkExtensions,
func(f *framework.Framework) {
ginkgo.BeforeEach(func() {
metrics := e2etodometrics.GrabBeforeEach(f)
metrics := e2emetrics.GrabBeforeEach(f)
ginkgo.DeferCleanup(func() {
e2etodometrics.GrabAfterEach(f, metrics)
e2emetrics.GrabAfterEach(f, metrics)
})
})
},

View File

@ -41,13 +41,12 @@ import (
clientset "k8s.io/client-go/kubernetes"
coreclientset "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/kubernetes/test/e2e/framework"
e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2epodoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
e2etodokubectl "k8s.io/kubernetes/test/e2e/framework/todo/kubectl"
e2etodonode "k8s.io/kubernetes/test/e2e/framework/todo/node"
e2etodopod "k8s.io/kubernetes/test/e2e/framework/todo/pod"
imageutils "k8s.io/kubernetes/test/utils/image"
netutils "k8s.io/utils/net"
)
@ -177,7 +176,7 @@ type NetworkingTestConfig struct {
// 1 pod per node running the netexecImage.
EndpointPods []*v1.Pod
f *framework.Framework
podClient *e2etodopod.PodClient
podClient *e2epod.PodClient
// NodePortService is a Service with Type=NodePort spanning over all
// endpointPods.
NodePortService *v1.Service
@ -251,7 +250,7 @@ func (config *NetworkingTestConfig) diagnoseMissingEndpoints(foundEndpoints sets
continue
}
framework.Logf("\nOutput of kubectl describe pod %v/%v:\n", e.Namespace, e.Name)
desc, _ := e2etodokubectl.RunKubectl(
desc, _ := e2ekubectl.RunKubectl(
e.Namespace, "describe", "pod", e.Name, fmt.Sprintf("--namespace=%v", e.Namespace))
framework.Logf(desc)
}
@ -359,7 +358,7 @@ func (config *NetworkingTestConfig) GetEndpointsFromContainer(protocol, containe
eps := sets.NewString()
for i := 0; i < tries; i++ {
stdout, stderr, err := e2etodopod.ExecShellInPodWithFullOutput(config.f, config.TestContainerPod.Name, cmd)
stdout, stderr, err := e2epod.ExecShellInPodWithFullOutput(config.f, config.TestContainerPod.Name, cmd)
if err != nil {
// A failure to kubectl exec counts as a try, not a hard fail.
// Also note that we will keep failing for maxTries in tests where
@ -394,7 +393,7 @@ func (config *NetworkingTestConfig) GetResponseFromContainer(protocol, dialComma
ipPort := net.JoinHostPort(containerIP, strconv.Itoa(containerHTTPPort))
cmd := makeCURLDialCommand(ipPort, dialCommand, protocol, targetIP, targetPort)
stdout, stderr, err := e2etodopod.ExecShellInPodWithFullOutput(config.f, config.TestContainerPod.Name, cmd)
stdout, stderr, err := e2epod.ExecShellInPodWithFullOutput(config.f, config.TestContainerPod.Name, cmd)
if err != nil {
return NetexecDialResponse{}, fmt.Errorf("failed to execute %q: %v, stdout: %q, stderr: %q", cmd, err, stdout, stderr)
}
@ -418,7 +417,7 @@ func (config *NetworkingTestConfig) GetHTTPCodeFromTestContainer(path, targetIP
targetIP,
targetPort,
path)
stdout, stderr, err := e2etodopod.ExecShellInPodWithFullOutput(config.f, config.TestContainerPod.Name, cmd)
stdout, stderr, err := e2epod.ExecShellInPodWithFullOutput(config.f, config.TestContainerPod.Name, cmd)
// We only care about the status code reported by curl,
// and want to return any other errors, such as cannot execute command in the Pod.
// If curl failed to connect to host, it would exit with code 7, which makes `ExecShellInPodWithFullOutput`
@ -466,7 +465,7 @@ func (config *NetworkingTestConfig) DialFromNode(protocol, targetIP string, targ
filterCmd := fmt.Sprintf("%s | grep -v '^\\s*$'", cmd)
framework.Logf("Going to poll %v on port %v at least %v times, with a maximum of %v tries before failing", targetIP, targetPort, minTries, maxTries)
for i := 0; i < maxTries; i++ {
stdout, stderr, err := e2etodopod.ExecShellInPodWithFullOutput(config.f, config.HostTestContainerPod.Name, filterCmd)
stdout, stderr, err := e2epod.ExecShellInPodWithFullOutput(config.f, config.HostTestContainerPod.Name, filterCmd)
if err != nil || len(stderr) > 0 {
// A failure to exec command counts as a try, not a hard fail.
// Also note that we will keep failing for maxTries in tests where
@ -522,7 +521,7 @@ func (config *NetworkingTestConfig) executeCurlCmd(cmd string, expected string)
podName := config.HostTestContainerPod.Name
var msg string
if pollErr := wait.PollImmediate(retryInterval, retryTimeout, func() (bool, error) {
stdout, err := e2etodopod.RunHostCmd(config.Namespace, podName, cmd)
stdout, err := e2epodoutput.RunHostCmd(config.Namespace, podName, cmd)
if err != nil {
msg = fmt.Sprintf("failed executing cmd %v in %v/%v: %v", cmd, config.Namespace, podName, err)
framework.Logf(msg)
@ -536,7 +535,7 @@ func (config *NetworkingTestConfig) executeCurlCmd(cmd string, expected string)
return true, nil
}); pollErr != nil {
framework.Logf("\nOutput of kubectl describe pod %v/%v:\n", config.Namespace, podName)
desc, _ := e2etodokubectl.RunKubectl(
desc, _ := e2ekubectl.RunKubectl(
config.Namespace, "describe", "pod", podName, fmt.Sprintf("--namespace=%v", config.Namespace))
framework.Logf("%s", desc)
framework.Failf("Timed out in %v: %v", retryTimeout, msg)
@ -779,7 +778,7 @@ func (config *NetworkingTestConfig) setup(selector map[string]string) {
config.setupCore(selector)
ginkgo.By("Getting node addresses")
framework.ExpectNoError(e2etodonode.WaitForAllNodesSchedulable(config.f.ClientSet, 10*time.Minute))
framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(config.f.ClientSet, 10*time.Minute))
nodeList, err := e2enode.GetReadySchedulableNodes(config.f.ClientSet)
framework.ExpectNoError(err)
@ -839,7 +838,7 @@ func (config *NetworkingTestConfig) setup(selector map[string]string) {
}
func (config *NetworkingTestConfig) createNetProxyPods(podName string, selector map[string]string) []*v1.Pod {
framework.ExpectNoError(e2etodonode.WaitForAllNodesSchedulable(config.f.ClientSet, 10*time.Minute))
framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(config.f.ClientSet, 10*time.Minute))
nodeList, err := e2enode.GetBoundedReadySchedulableNodes(config.f.ClientSet, maxNetProxyPodsCount)
framework.ExpectNoError(err)
nodes := nodeList.Items
@ -896,9 +895,9 @@ func (config *NetworkingTestConfig) createPod(pod *v1.Pod) *v1.Pod {
return config.getPodClient().Create(pod)
}
func (config *NetworkingTestConfig) getPodClient() *e2etodopod.PodClient {
func (config *NetworkingTestConfig) getPodClient() *e2epod.PodClient {
if config.podClient == nil {
config.podClient = e2etodopod.NewPodClient(config.f)
config.podClient = e2epod.NewPodClient(config.f)
}
return config.podClient
}
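Illustrative sketch (not part of the diff): the exec helpers used by the networking test config are now plain framework/pod functions. Helper name and command below are hypothetical.

package example

import (
	"fmt"

	"k8s.io/kubernetes/test/e2e/framework"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

// curlFromTestPod mirrors the updated call sites: ExecShellInPodWithFullOutput
// moved from framework/todo/pod into framework/pod.
func curlFromTestPod(f *framework.Framework, podName, url string) (string, error) {
	cmd := "curl -g -q -s " + url
	stdout, stderr, err := e2epod.ExecShellInPodWithFullOutput(f, podName, cmd)
	if err != nil {
		return "", fmt.Errorf("failed to execute %q: %v, stdout: %q, stderr: %q", cmd, err, stdout, stderr)
	}
	return stdout, nil
}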

View File

@ -30,7 +30,6 @@ import (
testutils "k8s.io/kubernetes/test/utils"
"k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
)
const (
@ -49,7 +48,7 @@ func WaitForAllNodesSchedulable(c clientset.Interface, timeout time.Duration) er
return wait.PollImmediate(
30*time.Second,
timeout,
e2enode.CheckReadyForTests(c, framework.TestContext.NonblockingTaints, framework.TestContext.AllowedNotReadyNodes, largeClusterThreshold),
CheckReadyForTests(c, framework.TestContext.NonblockingTaints, framework.TestContext.AllowedNotReadyNodes, largeClusterThreshold),
)
}
@ -129,7 +128,7 @@ func allNodesReady(c clientset.Interface, timeout time.Duration) error {
}
for i := range nodes.Items {
node := &nodes.Items[i]
if !e2enode.IsConditionSetAsExpected(node, v1.NodeReady, true) {
if !IsConditionSetAsExpected(node, v1.NodeReady, true) {
notReady = append(notReady, node)
}
}

View File

@ -23,14 +23,14 @@ import (
"github.com/onsi/ginkgo/v2"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/framework/todo/node"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
)
func init() {
framework.NewFrameworkExtensions = append(framework.NewFrameworkExtensions,
func(f *framework.Framework) {
ginkgo.AfterEach(func() {
node.AllNodesReady(f.ClientSet, 3*time.Minute)
e2enode.AllNodesReady(f.ClientSet, 3*time.Minute)
})
},
)

View File

@ -27,7 +27,6 @@ import (
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
)
@ -55,11 +54,11 @@ func (k *NodeKiller) Run(stopCh <-chan struct{}) {
}
func (k *NodeKiller) pickNodes() []v1.Node {
nodes, err := e2enode.GetReadySchedulableNodes(k.client)
nodes, err := GetReadySchedulableNodes(k.client)
framework.ExpectNoError(err)
numNodes := int(k.config.FailureRatio * float64(len(nodes.Items)))
nodes, err = e2enode.GetBoundedReadySchedulableNodes(k.client, numNodes)
nodes, err = GetBoundedReadySchedulableNodes(k.client, numNodes)
framework.ExpectNoError(err)
return nodes.Items
}
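Illustrative sketch (not part of the diff): external callers keep the e2enode alias, while code inside framework/node (such as the NodeKiller above) now calls the helpers without it. Helper name below is hypothetical.

package example

import (
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
)

// countSchedulableNodes is a hypothetical external caller of the node helpers.
func countSchedulableNodes(c clientset.Interface) int {
	nodes, err := e2enode.GetReadySchedulableNodes(c)
	framework.ExpectNoError(err)
	return len(nodes.Items)
}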

View File

@ -887,13 +887,3 @@ func verifyThatTaintIsGone(c clientset.Interface, nodeName string, taint *v1.Tai
framework.Failf("Failed removing taint " + taint.ToString() + " of the node " + nodeName)
}
}
// taintExists checks if the given taint exists in list of taints. Returns true if exists false otherwise.
func taintExists(taints []v1.Taint, taintToFind *v1.Taint) bool {
for _, taint := range taints {
if taint.MatchTaint(taintToFind) {
return true
}
}
return false
}

View File

@ -21,22 +21,22 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/framework"
e2etodokubectl "k8s.io/kubernetes/test/e2e/framework/todo/kubectl"
e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
)
// WaitForSSHTunnels waits for establishing SSH tunnel to busybox pod.
func WaitForSSHTunnels(namespace string) {
framework.Logf("Waiting for SSH tunnels to establish")
e2etodokubectl.RunKubectl(namespace, "run", "ssh-tunnel-test",
e2ekubectl.RunKubectl(namespace, "run", "ssh-tunnel-test",
"--image=busybox",
"--restart=Never",
"--command", "--",
"echo", "Hello")
defer e2etodokubectl.RunKubectl(namespace, "delete", "pod", "ssh-tunnel-test")
defer e2ekubectl.RunKubectl(namespace, "delete", "pod", "ssh-tunnel-test")
// allow up to a minute for new ssh tunnels to establish
wait.PollImmediate(5*time.Second, time.Minute, func() (bool, error) {
_, err := e2etodokubectl.RunKubectl(namespace, "logs", "ssh-tunnel-test")
_, err := e2ekubectl.RunKubectl(namespace, "logs", "ssh-tunnel-test")
return err == nil, nil
})
}

View File

@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package pod
package output
import (
"context"
@ -32,8 +32,8 @@ import (
clientset "k8s.io/client-go/kubernetes"
apiv1pod "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/test/e2e/framework"
e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2etodokubectl "k8s.io/kubernetes/test/e2e/framework/todo/kubectl"
)
// DEPRECATED constants. Use the timeouts in framework.Framework instead.
@ -58,7 +58,7 @@ func LookForStringInPodExecToContainer(ns, podName, containerName string, comman
}
args = append(args, "--")
args = append(args, command...)
return e2etodokubectl.RunKubectlOrDie(ns, args...)
return e2ekubectl.RunKubectlOrDie(ns, args...)
})
}
@ -79,13 +79,13 @@ func lookForString(expectedString string, timeout time.Duration, fn func() strin
// RunHostCmd runs the given cmd in the context of the given pod using `kubectl exec`
// inside of a shell.
func RunHostCmd(ns, name, cmd string) (string, error) {
return e2etodokubectl.RunKubectl(ns, "exec", name, "--", "/bin/sh", "-x", "-c", cmd)
return e2ekubectl.RunKubectl(ns, "exec", name, "--", "/bin/sh", "-x", "-c", cmd)
}
// RunHostCmdWithFullOutput runs the given cmd in the context of the given pod using `kubectl exec`
// inside of a shell. It will also return the command's stderr.
func RunHostCmdWithFullOutput(ns, name, cmd string) (string, string, error) {
return e2etodokubectl.RunKubectlWithFullOutput(ns, "exec", name, "--", "/bin/sh", "-x", "-c", cmd)
return e2ekubectl.RunKubectlWithFullOutput(ns, "exec", name, "--", "/bin/sh", "-x", "-c", cmd)
}
// RunHostCmdOrDie calls RunHostCmd and dies on error.
@ -117,13 +117,13 @@ func RunHostCmdWithRetries(ns, name, cmd string, interval, timeout time.Duration
// LookForStringInLog looks for the given string in the log of a specific pod container
func LookForStringInLog(ns, podName, container, expectedString string, timeout time.Duration) (result string, err error) {
return lookForString(expectedString, timeout, func() string {
return e2etodokubectl.RunKubectlOrDie(ns, "logs", podName, container)
return e2ekubectl.RunKubectlOrDie(ns, "logs", podName, container)
})
}
// CreateEmptyFileOnPod creates empty file at given path on the pod.
func CreateEmptyFileOnPod(namespace string, podName string, filePath string) error {
_, err := e2etodokubectl.RunKubectl(namespace, "exec", podName, "--", "/bin/sh", "-c", fmt.Sprintf("touch %s", filePath))
_, err := e2ekubectl.RunKubectl(namespace, "exec", podName, "--", "/bin/sh", "-c", fmt.Sprintf("touch %s", filePath))
return err
}
@ -131,10 +131,10 @@ func CreateEmptyFileOnPod(namespace string, podName string, filePath string) err
func DumpDebugInfo(c clientset.Interface, ns string) {
sl, _ := c.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{LabelSelector: labels.Everything().String()})
for _, s := range sl.Items {
desc, _ := e2etodokubectl.RunKubectl(ns, "describe", "po", s.Name)
desc, _ := e2ekubectl.RunKubectl(ns, "describe", "po", s.Name)
framework.Logf("\nOutput of kubectl describe %v:\n%v", s.Name, desc)
l, _ := e2etodokubectl.RunKubectl(ns, "logs", s.Name, "--tail=100")
l, _ := e2ekubectl.RunKubectl(ns, "logs", s.Name, "--tail=100")
framework.Logf("\nLast 100 log lines of %v:\n%v", s.Name, l)
}
}
@ -151,12 +151,12 @@ func MatchContainerOutput(
if ns == "" {
ns = f.Namespace.Name
}
podClient := PodClientNS(f, ns)
podClient := e2epod.PodClientNS(f, ns)
createdPod := podClient.Create(pod)
defer func() {
ginkgo.By("delete the pod")
podClient.DeleteSync(createdPod.Name, metav1.DeleteOptions{}, DefaultPodDeletionTimeout)
podClient.DeleteSync(createdPod.Name, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout)
}()
// Wait for client pod to complete.

View File

@ -41,7 +41,6 @@ import (
"k8s.io/kubernetes/pkg/kubelet/util/format"
"k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)
const (
@ -104,7 +103,7 @@ func (c *PodClient) Create(pod *v1.Pod) *v1.Pod {
func (c *PodClient) CreateSync(pod *v1.Pod) *v1.Pod {
namespace := c.f.Namespace.Name
p := c.Create(pod)
framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(c.f.ClientSet, p.Name, namespace, framework.PodStartTimeout))
framework.ExpectNoError(WaitTimeoutForPodReadyInNamespace(c.f.ClientSet, p.Name, namespace, framework.PodStartTimeout))
// Get the newest pod after it becomes running and ready, some status may change after pod created, such as pod ip.
p, err := c.Get(context.TODO(), p.Name, metav1.GetOptions{})
framework.ExpectNoError(err)
@ -170,7 +169,7 @@ func (c *PodClient) AddEphemeralContainerSync(pod *v1.Pod, ec *v1.EphemeralConta
return err
}
framework.ExpectNoError(e2epod.WaitForContainerRunning(c.f.ClientSet, namespace, pod.Name, ec.Name, timeout))
framework.ExpectNoError(WaitForContainerRunning(c.f.ClientSet, namespace, pod.Name, ec.Name, timeout))
return nil
}
@ -182,7 +181,7 @@ func (c *PodClient) DeleteSync(name string, options metav1.DeleteOptions, timeou
if err != nil && !apierrors.IsNotFound(err) {
framework.Failf("Failed to delete pod %q: %v", name, err)
}
gomega.Expect(e2epod.WaitForPodToDisappear(c.f.ClientSet, namespace, name, labels.Everything(),
gomega.Expect(WaitForPodToDisappear(c.f.ClientSet, namespace, name, labels.Everything(),
2*time.Second, timeout)).To(gomega.Succeed(), "wait for pod %q to disappear", name)
}
@ -226,7 +225,7 @@ func (c *PodClient) mungeSpec(pod *v1.Pod) {
// TODO(random-liu): Move pod wait function into this file
func (c *PodClient) WaitForSuccess(name string, timeout time.Duration) {
f := c.f
gomega.Expect(e2epod.WaitForPodCondition(f.ClientSet, f.Namespace.Name, name, fmt.Sprintf("%s or %s", v1.PodSucceeded, v1.PodFailed), timeout,
gomega.Expect(WaitForPodCondition(f.ClientSet, f.Namespace.Name, name, fmt.Sprintf("%s or %s", v1.PodSucceeded, v1.PodFailed), timeout,
func(pod *v1.Pod) (bool, error) {
switch pod.Status.Phase {
case v1.PodFailed:
@ -243,7 +242,7 @@ func (c *PodClient) WaitForSuccess(name string, timeout time.Duration) {
// WaitForFinish waits for pod to finish running, regardless of success or failure.
func (c *PodClient) WaitForFinish(name string, timeout time.Duration) {
f := c.f
gomega.Expect(e2epod.WaitForPodCondition(f.ClientSet, f.Namespace.Name, name, fmt.Sprintf("%s or %s", v1.PodSucceeded, v1.PodFailed), timeout,
gomega.Expect(WaitForPodCondition(f.ClientSet, f.Namespace.Name, name, fmt.Sprintf("%s or %s", v1.PodSucceeded, v1.PodFailed), timeout,
func(pod *v1.Pod) (bool, error) {
switch pod.Status.Phase {
case v1.PodFailed:
@ -260,7 +259,7 @@ func (c *PodClient) WaitForFinish(name string, timeout time.Duration) {
// WaitForErrorEventOrSuccess waits for pod to succeed or an error event for that pod.
func (c *PodClient) WaitForErrorEventOrSuccess(pod *v1.Pod) (*v1.Event, error) {
var ev *v1.Event
err := wait.Poll(Poll, framework.PodStartTimeout, func() (bool, error) {
err := wait.Poll(framework.Poll, framework.PodStartTimeout, func() (bool, error) {
evnts, err := c.f.ClientSet.CoreV1().Events(pod.Namespace).Search(scheme.Scheme, pod)
if err != nil {
return false, fmt.Errorf("error in listing events: %s", err)
@ -284,7 +283,7 @@ func (c *PodClient) WaitForErrorEventOrSuccess(pod *v1.Pod) (*v1.Event, error) {
// MatchContainerOutput gets output of a container and match expected regexp in the output.
func (c *PodClient) MatchContainerOutput(name string, containerName string, expectedRegexp string) error {
f := c.f
output, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, name, containerName)
output, err := GetPodLogs(f.ClientSet, f.Namespace.Name, name, containerName)
if err != nil {
return fmt.Errorf("failed to get output for container %q of pod %q", containerName, name)
}

View File

@ -22,7 +22,7 @@ import (
"path"
"k8s.io/kubernetes/test/e2e/framework"
e2etodonode "k8s.io/kubernetes/test/e2e/framework/todo/node"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
)
const etcdImage = "3.5.5-0"
@ -76,7 +76,7 @@ func MasterUpgradeGKE(namespace string, v string) error {
return err
}
e2etodonode.WaitForSSHTunnels(namespace)
e2enode.WaitForSSHTunnels(namespace)
return nil
}

View File

@ -26,7 +26,6 @@ import (
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2etodopod "k8s.io/kubernetes/test/e2e/framework/todo/pod"
imageutils "k8s.io/kubernetes/test/utils/image"
)
@ -48,7 +47,7 @@ func LoadAppArmorProfiles(nsName string, clientset clientset.Interface) {
// CreateAppArmorTestPod creates a pod that tests apparmor profile enforcement. The pod exits with
// an error code if the profile is incorrectly enforced. If runOnce is true the pod will exit after
// a single test, otherwise it will repeat the test every 1 second until failure.
func CreateAppArmorTestPod(nsName string, clientset clientset.Interface, podClient *e2etodopod.PodClient, unconfined bool, runOnce bool) *v1.Pod {
func CreateAppArmorTestPod(nsName string, clientset clientset.Interface, podClient *e2epod.PodClient, unconfined bool, runOnce bool) *v1.Pod {
profile := "localhost/" + appArmorProfilePrefix + nsName
testCmd := fmt.Sprintf(`
if touch %[1]s; then

View File

@ -45,8 +45,8 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2epodoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
e2erc "k8s.io/kubernetes/test/e2e/framework/rc"
e2etodopod "k8s.io/kubernetes/test/e2e/framework/todo/pod"
testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
netutils "k8s.io/utils/net"
@ -911,7 +911,7 @@ func testEndpointReachability(endpoint string, port int32, protocol v1.Protocol,
}
err := wait.PollImmediate(1*time.Second, ServiceReachabilityShortPollTimeout, func() (bool, error) {
_, err := e2etodopod.RunHostCmd(execPod.Namespace, execPod.Name, cmd)
_, err := e2epodoutput.RunHostCmd(execPod.Namespace, execPod.Name, cmd)
if err != nil {
framework.Logf("Service reachability failing with error: %v\nRetrying...", err)
return false, nil
@ -1003,7 +1003,7 @@ func (j *TestJig) checkExternalServiceReachability(svc *v1.Service, pod *v1.Pod)
// Service must resolve to IP
cmd := fmt.Sprintf("nslookup %s", svcName)
return wait.PollImmediate(framework.Poll, ServiceReachabilityShortPollTimeout, func() (done bool, err error) {
_, stderr, err := e2etodopod.RunHostCmdWithFullOutput(pod.Namespace, pod.Name, cmd)
_, stderr, err := e2epodoutput.RunHostCmdWithFullOutput(pod.Namespace, pod.Name, cmd)
// NOTE(claudiub): nslookup may return 0 on Windows, even though the DNS name was not found. In this case,
// we can check stderr for the error.
if err != nil || (framework.NodeOSDistroIs("windows") && strings.Contains(stderr, fmt.Sprintf("can't find %s", svcName))) {

View File

@ -30,7 +30,7 @@ import (
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubectl/pkg/util/podutils"
"k8s.io/kubernetes/test/e2e/framework"
e2etodopod "k8s.io/kubernetes/test/e2e/framework/todo/pod"
e2epodoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
imageutils "k8s.io/kubernetes/test/utils/image"
)
@ -166,7 +166,7 @@ func ResumeNextPod(c clientset.Interface, ss *appsv1.StatefulSet) {
if resumedPod != "" {
framework.Failf("Found multiple paused stateful pods: %v and %v", pod.Name, resumedPod)
}
_, err := e2etodopod.RunHostCmdWithRetries(pod.Namespace, pod.Name, "dd if=/dev/zero of=/data/statefulset-continue bs=1 count=1 conv=fsync", StatefulSetPoll, StatefulPodTimeout)
_, err := e2epodoutput.RunHostCmdWithRetries(pod.Namespace, pod.Name, "dd if=/dev/zero of=/data/statefulset-continue bs=1 count=1 conv=fsync", StatefulSetPoll, StatefulPodTimeout)
framework.ExpectNoError(err)
framework.Logf("Resumed pod %v", pod.Name)
resumedPod = pod.Name

View File

@ -34,7 +34,7 @@ import (
"k8s.io/kubectl/pkg/util/podutils"
"k8s.io/kubernetes/test/e2e/framework"
e2emanifest "k8s.io/kubernetes/test/e2e/framework/manifest"
e2etodopod "k8s.io/kubernetes/test/e2e/framework/todo/pod"
e2epodoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
)
// CreateStatefulSet creates a StatefulSet from the manifest at manifestPath in the Namespace ns using kubectl create.
@ -193,7 +193,7 @@ func CheckHostname(c clientset.Interface, ss *appsv1.StatefulSet) error {
cmd := "printf $(hostname)"
podList := GetPodList(c, ss)
for _, statefulPod := range podList.Items {
hostname, err := e2etodopod.RunHostCmdWithRetries(statefulPod.Namespace, statefulPod.Name, cmd, StatefulSetPoll, StatefulPodTimeout)
hostname, err := e2epodoutput.RunHostCmdWithRetries(statefulPod.Namespace, statefulPod.Name, cmd, StatefulSetPoll, StatefulPodTimeout)
if err != nil {
return err
}
@ -237,7 +237,7 @@ func CheckServiceName(ss *appsv1.StatefulSet, expectedServiceName string) error
func ExecInStatefulPods(c clientset.Interface, ss *appsv1.StatefulSet, cmd string) error {
podList := GetPodList(c, ss)
for _, statefulPod := range podList.Items {
stdout, err := e2etodopod.RunHostCmdWithRetries(statefulPod.Namespace, statefulPod.Name, cmd, StatefulSetPoll, StatefulPodTimeout)
stdout, err := e2epodoutput.RunHostCmdWithRetries(statefulPod.Namespace, statefulPod.Name, cmd, StatefulSetPoll, StatefulPodTimeout)
framework.Logf("stdout of %v on %v: %v", cmd, statefulPod.Name, stdout)
if err != nil {
return err

View File

@ -1,7 +0,0 @@
This directory holds sub packages which, in contrast to other sub packages
under test/e2e/framework, may use test/e2e/framework because that is not
depending on them.
This is an interim solution for moving code without causing cyclic
dependencies. All code will be moved from here into the normal sub packages
when the refactoring is done.

View File

@ -56,9 +56,9 @@ import (
clientset "k8s.io/client-go/kubernetes"
clientexec "k8s.io/client-go/util/exec"
"k8s.io/kubernetes/test/e2e/framework"
e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2etodokubectl "k8s.io/kubernetes/test/e2e/framework/todo/kubectl"
e2etodopod "k8s.io/kubernetes/test/e2e/framework/todo/pod"
e2epodoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
imageutils "k8s.io/kubernetes/test/utils/image"
uexec "k8s.io/utils/exec"
@ -356,7 +356,7 @@ func startVolumeServer(client clientset.Interface, config TestConfig) *v1.Pod {
}
}
if config.ServerReadyMessage != "" {
_, err := e2etodopod.LookForStringInLog(pod.Namespace, pod.Name, serverPodName, config.ServerReadyMessage, VolumeServerPodStartupTimeout)
_, err := e2epodoutput.LookForStringInLog(pod.Namespace, pod.Name, serverPodName, config.ServerReadyMessage, VolumeServerPodStartupTimeout)
framework.ExpectNoError(err, "Failed to find %q in pod logs: %s", config.ServerReadyMessage, err)
}
return pod
@ -477,7 +477,7 @@ func testVolumeContent(f *framework.Framework, pod *v1.Pod, containerName string
// Block: check content
deviceName := fmt.Sprintf("/opt/%d", i)
commands := GenerateReadBlockCmd(deviceName, len(test.ExpectedContent))
_, err := e2etodopod.LookForStringInPodExecToContainer(pod.Namespace, pod.Name, containerName, commands, test.ExpectedContent, time.Minute)
_, err := e2epodoutput.LookForStringInPodExecToContainer(pod.Namespace, pod.Name, containerName, commands, test.ExpectedContent, time.Minute)
framework.ExpectNoError(err, "failed: finding the contents of the block device %s.", deviceName)
// Check that it's a real block device
@ -486,7 +486,7 @@ func testVolumeContent(f *framework.Framework, pod *v1.Pod, containerName string
// Filesystem: check content
fileName := fmt.Sprintf("/opt/%d/%s", i, test.File)
commands := GenerateReadFileCmd(fileName)
_, err := e2etodopod.LookForStringInPodExecToContainer(pod.Namespace, pod.Name, containerName, commands, test.ExpectedContent, time.Minute)
_, err := e2epodoutput.LookForStringInPodExecToContainer(pod.Namespace, pod.Name, containerName, commands, test.ExpectedContent, time.Minute)
framework.ExpectNoError(err, "failed: finding the contents of the mounted file %s.", fileName)
// Check that a directory has been mounted
@ -497,14 +497,14 @@ func testVolumeContent(f *framework.Framework, pod *v1.Pod, containerName string
// Filesystem: check fsgroup
if fsGroup != nil {
ginkgo.By("Checking fsGroup is correct.")
_, err = e2etodopod.LookForStringInPodExecToContainer(pod.Namespace, pod.Name, containerName, []string{"ls", "-ld", dirName}, strconv.Itoa(int(*fsGroup)), time.Minute)
_, err = e2epodoutput.LookForStringInPodExecToContainer(pod.Namespace, pod.Name, containerName, []string{"ls", "-ld", dirName}, strconv.Itoa(int(*fsGroup)), time.Minute)
framework.ExpectNoError(err, "failed: getting the right privileges in the file %v", int(*fsGroup))
}
// Filesystem: check fsType
if fsType != "" {
ginkgo.By("Checking fsType is correct.")
_, err = e2etodopod.LookForStringInPodExecToContainer(pod.Namespace, pod.Name, containerName, []string{"grep", " " + dirName + " ", "/proc/mounts"}, fsType, time.Minute)
_, err = e2epodoutput.LookForStringInPodExecToContainer(pod.Namespace, pod.Name, containerName, []string{"grep", " " + dirName + " ", "/proc/mounts"}, fsType, time.Minute)
framework.ExpectNoError(err, "failed: getting the right fsType %s", fsType)
}
}
@ -551,7 +551,7 @@ func testVolumeClient(f *framework.Framework, config TestConfig, fsGroup *int64,
}
ec.Resources = v1.ResourceRequirements{}
ec.Name = "volume-ephemeral-container"
err = e2etodopod.NewPodClient(f).AddEphemeralContainerSync(clientPod, ec, timeouts.PodStart)
err = e2epod.NewPodClient(f).AddEphemeralContainerSync(clientPod, ec, timeouts.PodStart)
// The API server will return NotFound for the subresource when the feature is disabled
framework.ExpectNoError(err, "failed to add ephemeral container for re-test")
testVolumeContent(f, clientPod, ec.Name, fsGroup, fsType, tests)
@ -589,7 +589,7 @@ func InjectContent(f *framework.Framework, config TestConfig, fsGroup *int64, fs
fileName := fmt.Sprintf("/opt/%d/%s", i, test.File)
commands = append(commands, generateWriteFileCmd(test.ExpectedContent, fileName)...)
}
out, err := e2etodokubectl.RunKubectl(injectorPod.Namespace, commands...)
out, err := e2ekubectl.RunKubectl(injectorPod.Namespace, commands...)
framework.ExpectNoError(err, "failed: writing the contents: %s", out)
}
@ -650,7 +650,7 @@ func CheckVolumeModeOfPath(f *framework.Framework, pod *v1.Pod, volMode v1.Persi
// TODO: put this under e2epod once https://github.com/kubernetes/kubernetes/issues/81245
// is resolved. Otherwise there will be dependency issue.
func PodExec(f *framework.Framework, pod *v1.Pod, shExec string) (string, string, error) {
return e2etodopod.ExecCommandInContainerWithFullOutput(f, pod.Name, pod.Spec.Containers[0].Name, "/bin/sh", "-c", shExec)
return e2epod.ExecCommandInContainerWithFullOutput(f, pod.Name, pod.Spec.Containers[0].Name, "/bin/sh", "-c", shExec)
}
// VerifyExecInPodSucceed verifies shell cmd in target pod succeed

View File

@ -25,7 +25,6 @@ import (
"k8s.io/client-go/util/exec"
"k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2etodopod "k8s.io/kubernetes/test/e2e/framework/todo/pod"
)
// Result holds the execution result of remote execution command.
@ -150,7 +149,7 @@ func (h *hostExecutor) exec(cmd string, node *v1.Node) (Result, error) {
}
containerName := pod.Spec.Containers[0].Name
var err error
result.Stdout, result.Stderr, err = e2etodopod.ExecWithOptions(h.Framework, e2etodopod.ExecOptions{
result.Stdout, result.Stderr, err = e2epod.ExecWithOptions(h.Framework, e2epod.ExecOptions{
Command: args,
Namespace: pod.Namespace,
PodName: pod.Name,
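Illustrative sketch (not part of the diff): ExecWithOptions and ExecOptions move from framework/todo/pod into framework/pod. The fields beyond Command, Namespace, and PodName are assumptions for illustration, not taken from the hunk above; the helper name is hypothetical.

package example

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/kubernetes/test/e2e/framework"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

// execOnPod is a hypothetical wrapper around the relocated ExecWithOptions.
func execOnPod(f *framework.Framework, pod *v1.Pod, args []string) (string, string, error) {
	return e2epod.ExecWithOptions(f, e2epod.ExecOptions{
		Command:       args,
		Namespace:     pod.Namespace,
		PodName:       pod.Name,
		ContainerName: pod.Spec.Containers[0].Name, // assumed single-container pod
		CaptureStdout: true,
		CaptureStderr: true,
	})
}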

View File

@ -30,8 +30,8 @@ import (
// reconfigure framework
_ "k8s.io/kubernetes/test/e2e/framework/debug/init"
_ "k8s.io/kubernetes/test/e2e/framework/todo/metrics/init"
_ "k8s.io/kubernetes/test/e2e/framework/todo/node/init"
_ "k8s.io/kubernetes/test/e2e/framework/metrics/init"
_ "k8s.io/kubernetes/test/e2e/framework/node/init"
)
func TestMain(m *testing.M) {

View File

@ -55,8 +55,8 @@ import (
// reconfigure framework
_ "k8s.io/kubernetes/test/e2e/framework/debug/init"
_ "k8s.io/kubernetes/test/e2e/framework/todo/metrics/init"
_ "k8s.io/kubernetes/test/e2e/framework/todo/node/init"
_ "k8s.io/kubernetes/test/e2e/framework/metrics/init"
_ "k8s.io/kubernetes/test/e2e/framework/node/init"
"github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega"