From dfdf88d4faafa6fd39988832ea0ef6d668f490e9 Mon Sep 17 00:00:00 2001
From: Patrick Ohly
Date: Thu, 8 Sep 2022 16:04:17 +0200
Subject: [PATCH] e2e: adapt to moved code

This is the result of automatically editing source files like this:

    go install golang.org/x/tools/cmd/goimports@latest
    find ./test/e2e* -name "*.go" | xargs env PATH=$GOPATH/bin:$PATH ./e2e-framework-sed.sh

with e2e-framework-sed.sh containing this:

sed -i \
    -e "s/\(f\|fr\|\w\w*\.[fF]\w*\)\.ExecCommandInContainer(/e2epod.ExecCommandInContainer(\1, /" \
    -e "s/\(f\|fr\|\w\w*\.[fF]\w*\)\.ExecCommandInContainerWithFullOutput(/e2epod.ExecCommandInContainerWithFullOutput(\1, /" \
    -e "s/\(f\|fr\|\w\w*\.[fF]\w*\)\.ExecShellInContainer(/e2epod.ExecShellInContainer(\1, /" \
    -e "s/\(f\|fr\|\w\w*\.[fF]\w*\)\.ExecShellInPod(/e2epod.ExecShellInPod(\1, /" \
    -e "s/\(f\|fr\|\w\w*\.[fF]\w*\)\.ExecShellInPodWithFullOutput(/e2epod.ExecShellInPodWithFullOutput(\1, /" \
    -e "s/\(f\|fr\|\w\w*\.[fF]\w*\)\.ExecWithOptions(/e2epod.ExecWithOptions(\1, /" \
    -e "s/\(f\|fr\|\w\w*\.[fF]\w*\)\.MatchContainerOutput(/e2eoutput.MatchContainerOutput(\1, /" \
    -e "s/\(f\|fr\|\w\w*\.[fF]\w*\)\.PodClient(/e2epod.NewPodClient(\1, /" \
    -e "s/\(f\|fr\|\w\w*\.[fF]\w*\)\.PodClientNS(/e2epod.PodClientNS(\1, /" \
    -e "s/\(f\|fr\|\w\w*\.[fF]\w*\)\.TestContainerOutput(/e2eoutput.TestContainerOutput(\1, /" \
    -e "s/\(f\|fr\|\w\w*\.[fF]\w*\)\.TestContainerOutputRegexp(/e2eoutput.TestContainerOutputRegexp(\1, /" \
    -e "s/framework.AddOrUpdateLabelOnNode\b/e2enode.AddOrUpdateLabelOnNode/" \
    -e "s/framework.AllNodes\b/e2edebug.AllNodes/" \
    -e "s/framework.AllNodesReady\b/e2enode.AllNodesReady/" \
    -e "s/framework.ContainerResourceGatherer\b/e2edebug.ContainerResourceGatherer/" \
    -e "s/framework.ContainerResourceUsage\b/e2edebug.ContainerResourceUsage/" \
    -e "s/framework.CreateEmptyFileOnPod\b/e2eoutput.CreateEmptyFileOnPod/" \
    -e "s/framework.DefaultPodDeletionTimeout\b/e2epod.DefaultPodDeletionTimeout/" \
    -e "s/framework.DumpAllNamespaceInfo\b/e2edebug.DumpAllNamespaceInfo/" \
    -e "s/framework.DumpDebugInfo\b/e2eoutput.DumpDebugInfo/" \
    -e "s/framework.DumpNodeDebugInfo\b/e2edebug.DumpNodeDebugInfo/" \
    -e "s/framework.EtcdUpgrade\b/e2eproviders.EtcdUpgrade/" \
    -e "s/framework.EventsLister\b/e2edebug.EventsLister/" \
    -e "s/framework.ExecOptions\b/e2epod.ExecOptions/" \
    -e "s/framework.ExpectNodeHasLabel\b/e2enode.ExpectNodeHasLabel/" \
    -e "s/framework.ExpectNodeHasTaint\b/e2enode.ExpectNodeHasTaint/" \
    -e "s/framework.GCEUpgradeScript\b/e2eproviders.GCEUpgradeScript/" \
    -e "s/framework.ImagePrePullList\b/e2epod.ImagePrePullList/" \
    -e "s/framework.KubectlBuilder\b/e2ekubectl.KubectlBuilder/" \
    -e "s/framework.LocationParamGKE\b/e2eproviders.LocationParamGKE/" \
    -e "s/framework.LogSizeDataTimeseries\b/e2edebug.LogSizeDataTimeseries/" \
    -e "s/framework.LogSizeGatherer\b/e2edebug.LogSizeGatherer/" \
    -e "s/framework.LogsSizeData\b/e2edebug.LogsSizeData/" \
    -e "s/framework.LogsSizeDataSummary\b/e2edebug.LogsSizeDataSummary/" \
    -e "s/framework.LogsSizeVerifier\b/e2edebug.LogsSizeVerifier/" \
    -e "s/framework.LookForStringInLog\b/e2eoutput.LookForStringInLog/" \
    -e "s/framework.LookForStringInPodExec\b/e2eoutput.LookForStringInPodExec/" \
    -e "s/framework.LookForStringInPodExecToContainer\b/e2eoutput.LookForStringInPodExecToContainer/" \
    -e "s/framework.MasterAndDNSNodes\b/e2edebug.MasterAndDNSNodes/" \
    -e "s/framework.MasterNodes\b/e2edebug.MasterNodes/" \
    -e "s/framework.MasterUpgradeGKE\b/e2eproviders.MasterUpgradeGKE/" \
    -e "s/framework.NewKubectlCommand\b/e2ekubectl.NewKubectlCommand/" \
    -e "s/framework.NewLogsVerifier\b/e2edebug.NewLogsVerifier/" \
    -e "s/framework.NewNodeKiller\b/e2enode.NewNodeKiller/" \
    -e "s/framework.NewResourceUsageGatherer\b/e2edebug.NewResourceUsageGatherer/" \
    -e "s/framework.NodeHasTaint\b/e2enode.NodeHasTaint/" \
    -e "s/framework.NodeKiller\b/e2enode.NodeKiller/" \
    -e "s/framework.NodesSet\b/e2edebug.NodesSet/" \
    -e "s/framework.PodClient\b/e2epod.PodClient/" \
    -e "s/framework.RemoveLabelOffNode\b/e2enode.RemoveLabelOffNode/" \
    -e "s/framework.ResourceConstraint\b/e2edebug.ResourceConstraint/" \
    -e "s/framework.ResourceGathererOptions\b/e2edebug.ResourceGathererOptions/" \
    -e "s/framework.ResourceUsagePerContainer\b/e2edebug.ResourceUsagePerContainer/" \
    -e "s/framework.ResourceUsageSummary\b/e2edebug.ResourceUsageSummary/" \
    -e "s/framework.RunHostCmd\b/e2eoutput.RunHostCmd/" \
    -e "s/framework.RunHostCmdOrDie\b/e2eoutput.RunHostCmdOrDie/" \
    -e "s/framework.RunHostCmdWithFullOutput\b/e2eoutput.RunHostCmdWithFullOutput/" \
    -e "s/framework.RunHostCmdWithRetries\b/e2eoutput.RunHostCmdWithRetries/" \
    -e "s/framework.RunKubectl\b/e2ekubectl.RunKubectl/" \
    -e "s/framework.RunKubectlInput\b/e2ekubectl.RunKubectlInput/" \
    -e "s/framework.RunKubectlOrDie\b/e2ekubectl.RunKubectlOrDie/" \
    -e "s/framework.RunKubectlOrDieInput\b/e2ekubectl.RunKubectlOrDieInput/" \
    -e "s/framework.RunKubectlWithFullOutput\b/e2ekubectl.RunKubectlWithFullOutput/" \
    -e "s/framework.RunKubemciCmd\b/e2ekubectl.RunKubemciCmd/" \
    -e "s/framework.RunKubemciWithKubeconfig\b/e2ekubectl.RunKubemciWithKubeconfig/" \
    -e "s/framework.SingleContainerSummary\b/e2edebug.SingleContainerSummary/" \
    -e "s/framework.SingleLogSummary\b/e2edebug.SingleLogSummary/" \
    -e "s/framework.TimestampedSize\b/e2edebug.TimestampedSize/" \
    -e "s/framework.WaitForAllNodesSchedulable\b/e2enode.WaitForAllNodesSchedulable/" \
    -e "s/framework.WaitForSSHTunnels\b/e2enode.WaitForSSHTunnels/" \
    -e "s/framework.WorkItem\b/e2edebug.WorkItem/" \
    "$@"

for i in "$@"; do
    # Import all sub packages and let goimports figure out which of those
    # are redundant (= already imported) or not needed.
sed -i -e '/"k8s.io.kubernetes.test.e2e.framework"/a e2edebug "k8s.io/kubernetes/test/e2e/framework/debug"' "$i" sed -i -e '/"k8s.io.kubernetes.test.e2e.framework"/a e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"' "$i" sed -i -e '/"k8s.io.kubernetes.test.e2e.framework"/a e2enode "k8s.io/kubernetes/test/e2e/framework/node"' "$i" sed -i -e '/"k8s.io.kubernetes.test.e2e.framework"/a e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"' "$i" sed -i -e '/"k8s.io.kubernetes.test.e2e.framework"/a e2epod "k8s.io/kubernetes/test/e2e/framework/pod"' "$i" sed -i -e '/"k8s.io.kubernetes.test.e2e.framework"/a e2eproviders "k8s.io/kubernetes/test/e2e/framework/providers"' "$i" goimports -w "$i" done --- test/e2e/apimachinery/crd_publish_openapi.go | 47 ++-- test/e2e/apimachinery/webhook.go | 3 +- test/e2e/apps/daemon_restart.go | 3 +- test/e2e/apps/daemon_set.go | 3 +- test/e2e/apps/disruption.go | 3 +- test/e2e/apps/job.go | 4 +- test/e2e/apps/rc.go | 2 +- test/e2e/apps/replica_set.go | 2 +- test/e2e/apps/statefulset.go | 30 ++- test/e2e/architecture/conformance.go | 2 +- test/e2e/auth/node_authn.go | 7 +- test/e2e/auth/node_authz.go | 3 +- test/e2e/auth/service_accounts.go | 5 +- .../cluster_autoscaler_scalability.go | 2 +- .../autoscaling/cluster_size_autoscaling.go | 15 +- .../e2e/cloud/gcp/common/upgrade_mechanics.go | 15 +- test/e2e/cloud/gcp/ha_master.go | 4 +- test/e2e/e2e.go | 7 +- test/e2e/framework/job/fixtures.go | 2 +- test/e2e/framework/job/rest.go | 1 + test/e2e/framework/job/wait.go | 2 +- test/e2e/framework/manifest/manifest.go | 2 +- test/e2e/framework/timer/timer.go | 3 +- .../instrumentation/logging/generic_soak.go | 5 +- .../logging/utils/logging_pod.go | 4 +- .../instrumentation/monitoring/accelerator.go | 3 +- .../monitoring/custom_metrics_deployments.go | 9 +- .../monitoring/custom_metrics_stackdriver.go | 2 +- .../monitoring/stackdriver_metadata_agent.go | 2 +- test/e2e/kubectl/kubectl.go | 248 +++++++++--------- test/e2e/network/conntrack.go | 24 +- test/e2e/network/dns.go | 6 +- test/e2e/network/dns_common.go | 2 +- test/e2e/network/dns_scale_records.go | 2 +- test/e2e/network/dual_stack.go | 7 +- test/e2e/network/endpointslice.go | 5 +- test/e2e/network/example_cluster_dns.go | 12 +- test/e2e/network/fixture.go | 1 + test/e2e/network/funny_ips.go | 5 +- test/e2e/network/hostport.go | 8 +- test/e2e/network/kube_proxy.go | 11 +- test/e2e/network/loadbalancer.go | 5 +- test/e2e/network/netpol/kubemanager.go | 2 +- test/e2e/network/netpol/network_legacy.go | 7 +- test/e2e/network/netpol/reachability.go | 3 +- test/e2e/network/networking_perf.go | 5 +- .../network/scale/localrun/ingress_scale.go | 2 +- test/e2e/network/service.go | 62 ++--- test/e2e/network/topology_hints.go | 2 +- test/e2e/network/util.go | 8 +- test/e2e/node/apparmor.go | 5 +- test/e2e/node/examples.go | 18 +- test/e2e/node/kubelet.go | 8 +- test/e2e/node/mount_propagation.go | 11 +- test/e2e/node/pods.go | 16 +- test/e2e/node/pre_stop.go | 4 +- test/e2e/node/runtimeclass.go | 19 +- test/e2e/node/security_context.go | 21 +- test/e2e/node/taints.go | 14 +- test/e2e/scheduling/nvidia-gpus.go | 15 +- test/e2e/scheduling/predicates.go | 52 ++-- test/e2e/scheduling/preemption.go | 4 +- test/e2e/scheduling/priorities.go | 18 +- test/e2e/storage/drivers/proxy/io.go | 3 +- test/e2e/storage/empty_dir_wrapper.go | 4 +- .../flexvolume_mounted_volume_resize.go | 6 +- test/e2e/storage/flexvolume_online_resize.go | 6 +- test/e2e/storage/gke_local_ssd.go | 5 +- test/e2e/storage/host_path_type.go | 10 +- 
test/e2e/storage/mounted_volume_resize.go | 6 +- test/e2e/storage/pd.go | 2 +- test/e2e/storage/persistent_volumes-local.go | 3 +- test/e2e/storage/pv_protection.go | 3 +- test/e2e/storage/pvc_protection.go | 3 +- test/e2e/storage/testsuites/multivolume.go | 5 +- test/e2e/storage/testsuites/snapshottable.go | 5 +- test/e2e/storage/testsuites/subpath.go | 10 +- test/e2e/storage/testsuites/volumes.go | 3 +- test/e2e/storage/utils/local.go | 2 +- test/e2e/storage/volume_limits.go | 4 +- test/e2e/storage/vsphere/bootstrap.go | 3 +- test/e2e/storage/vsphere/pv_reclaimpolicy.go | 3 +- .../e2e/storage/vsphere/pvc_label_selector.go | 3 +- test/e2e/storage/vsphere/vsphere_scale.go | 4 +- test/e2e/storage/vsphere/vsphere_utils.go | 12 +- .../vsphere/vsphere_volume_cluster_ds.go | 1 + .../vsphere/vsphere_volume_diskformat.go | 5 +- .../storage/vsphere/vsphere_volume_fstype.go | 3 +- .../vsphere/vsphere_volume_master_restart.go | 6 +- .../vsphere/vsphere_volume_node_delete.go | 2 +- .../vsphere/vsphere_volume_node_poweroff.go | 2 +- .../vsphere/vsphere_volume_placement.go | 15 +- .../vsphere/vsphere_volume_vpxd_restart.go | 4 +- test/e2e/upgrades/apps/cassandra.go | 3 +- test/e2e/upgrades/apps/daemonsets.go | 1 + test/e2e/upgrades/apps/etcd.go | 3 +- test/e2e/upgrades/apps/mysql.go | 3 +- test/e2e/upgrades/apps/replicasets.go | 1 + test/e2e/upgrades/apps/statefulset.go | 3 +- test/e2e/upgrades/node/apparmor.go | 8 +- test/e2e/upgrades/node/configmaps.go | 7 +- test/e2e/upgrades/node/secrets.go | 7 +- test/e2e/upgrades/node/sysctl.go | 14 +- .../upgrades/storage/persistent_volumes.go | 3 +- test/e2e/windows/cpu_limits.go | 8 +- test/e2e/windows/density.go | 4 +- test/e2e/windows/device_plugin.go | 9 +- test/e2e/windows/dns.go | 6 +- test/e2e/windows/gmsa_full.go | 17 +- test/e2e/windows/gmsa_kubelet.go | 6 +- test/e2e/windows/host_process.go | 24 +- test/e2e/windows/hybrid_network.go | 11 +- test/e2e/windows/kubelet_stats.go | 4 +- test/e2e/windows/reboot_node.go | 13 +- test/e2e/windows/security_context.go | 21 +- test/e2e/windows/service.go | 3 +- test/e2e/windows/volumes.go | 13 +- test/e2e_kubeadm/bootstrap_token_test.go | 1 + test/e2e_kubeadm/controlplane_nodes_test.go | 3 +- test/e2e_kubeadm/util.go | 1 + test/e2e_node/apparmor_test.go | 8 +- test/e2e_node/benchmark_util.go | 2 +- test/e2e_node/checkpoint_container.go | 3 +- test/e2e_node/container_log_rotation_test.go | 3 +- test/e2e_node/container_manager_test.go | 7 +- test/e2e_node/cpu_manager_test.go | 44 ++-- test/e2e_node/critical_pod_test.go | 13 +- test/e2e_node/density_test.go | 9 +- test/e2e_node/device_manager_test.go | 8 +- test/e2e_node/device_plugin_test.go | 32 +-- test/e2e_node/eviction_test.go | 7 +- test/e2e_node/garbage_collector_test.go | 5 +- test/e2e_node/hugepages_test.go | 8 +- test/e2e_node/image_credential_provider.go | 5 +- test/e2e_node/image_id_test.go | 7 +- test/e2e_node/image_list.go | 18 +- test/e2e_node/log_path_test.go | 4 +- test/e2e_node/memory_manager_test.go | 28 +- test/e2e_node/node_perf_test.go | 4 +- test/e2e_node/node_problem_detector_linux.go | 4 +- test/e2e_node/node_shutdown_linux_test.go | 15 +- test/e2e_node/numa_alignment.go | 5 +- test/e2e_node/os_label_rename_test.go | 11 +- test/e2e_node/pids_test.go | 4 +- test/e2e_node/pod_conditions_test.go | 8 +- test/e2e_node/pod_hostnamefqdn_test.go | 13 +- test/e2e_node/podresources_test.go | 2 +- test/e2e_node/pods_container_manager_test.go | 28 +- test/e2e_node/resource_collector.go | 2 +- test/e2e_node/resource_metrics_test.go | 7 +- 
test/e2e_node/resource_usage_test.go | 5 +- test/e2e_node/restart_test.go | 3 +- test/e2e_node/runtime_conformance_test.go | 5 +- test/e2e_node/runtimeclass_test.go | 4 +- test/e2e_node/seccompdefault_test.go | 5 +- test/e2e_node/security_context_test.go | 18 +- test/e2e_node/summary_test.go | 3 +- test/e2e_node/system_node_critical_test.go | 2 +- test/e2e_node/topology_manager_test.go | 6 +- test/e2e_node/volume_manager_test.go | 8 +- 160 files changed, 822 insertions(+), 715 deletions(-) diff --git a/test/e2e/apimachinery/crd_publish_openapi.go b/test/e2e/apimachinery/crd_publish_openapi.go index f8ca3d64a32..e5b89c61b86 100644 --- a/test/e2e/apimachinery/crd_publish_openapi.go +++ b/test/e2e/apimachinery/crd_publish_openapi.go @@ -42,6 +42,7 @@ import ( "k8s.io/client-go/rest" "k8s.io/kube-openapi/pkg/validation/spec" "k8s.io/kubernetes/test/e2e/framework" + e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl" "k8s.io/kubernetes/test/utils/crd" admissionapi "k8s.io/pod-security-admission/api" ) @@ -76,22 +77,22 @@ var _ = SIGDescribe("CustomResourcePublishOpenAPI [Privileged:ClusterAdmin]", fu ginkgo.By("kubectl validation (kubectl create and apply) allows request with known and required properties") validCR := fmt.Sprintf(`{%s,"spec":{"bars":[{"name":"test-bar"}]}}`, meta) - if _, err := framework.RunKubectlInput(f.Namespace.Name, validCR, ns, "create", "-f", "-"); err != nil { + if _, err := e2ekubectl.RunKubectlInput(f.Namespace.Name, validCR, ns, "create", "-f", "-"); err != nil { framework.Failf("failed to create valid CR %s: %v", validCR, err) } - if _, err := framework.RunKubectl(f.Namespace.Name, ns, "delete", crd.Crd.Spec.Names.Plural, "test-foo"); err != nil { + if _, err := e2ekubectl.RunKubectl(f.Namespace.Name, ns, "delete", crd.Crd.Spec.Names.Plural, "test-foo"); err != nil { framework.Failf("failed to delete valid CR: %v", err) } - if _, err := framework.RunKubectlInput(f.Namespace.Name, validCR, ns, "apply", "-f", "-"); err != nil { + if _, err := e2ekubectl.RunKubectlInput(f.Namespace.Name, validCR, ns, "apply", "-f", "-"); err != nil { framework.Failf("failed to apply valid CR %s: %v", validCR, err) } - if _, err := framework.RunKubectl(f.Namespace.Name, ns, "delete", crd.Crd.Spec.Names.Plural, "test-foo"); err != nil { + if _, err := e2ekubectl.RunKubectl(f.Namespace.Name, ns, "delete", crd.Crd.Spec.Names.Plural, "test-foo"); err != nil { framework.Failf("failed to delete valid CR: %v", err) } ginkgo.By("kubectl validation (kubectl create and apply) rejects request with value outside defined enum values") badEnumValueCR := fmt.Sprintf(`{%s,"spec":{"bars":[{"name":"test-bar", "feeling":"NonExistentValue"}]}}`, meta) - if _, err := framework.RunKubectlInput(f.Namespace.Name, badEnumValueCR, ns, "create", "-f", "-"); err == nil || !strings.Contains(err.Error(), `Unsupported value: "NonExistentValue"`) { + if _, err := e2ekubectl.RunKubectlInput(f.Namespace.Name, badEnumValueCR, ns, "create", "-f", "-"); err == nil || !strings.Contains(err.Error(), `Unsupported value: "NonExistentValue"`) { framework.Failf("unexpected no error when creating CR with unknown enum value: %v", err) } @@ -99,20 +100,20 @@ var _ = SIGDescribe("CustomResourcePublishOpenAPI [Privileged:ClusterAdmin]", fu // Because server-side is default in beta but not GA yet, we will produce different behaviors in the default vs GA only conformance tests. We have made the error generic enough to pass both, but should go back and make the error more specific once server-side validation goes GA. 
ginkgo.By("kubectl validation (kubectl create and apply) rejects request with unknown properties when disallowed by the schema") unknownCR := fmt.Sprintf(`{%s,"spec":{"foo":true}}`, meta) - if _, err := framework.RunKubectlInput(f.Namespace.Name, unknownCR, ns, "create", "-f", "-"); err == nil || (!strings.Contains(err.Error(), `unknown field "foo"`) && !strings.Contains(err.Error(), `unknown field "spec.foo"`)) { + if _, err := e2ekubectl.RunKubectlInput(f.Namespace.Name, unknownCR, ns, "create", "-f", "-"); err == nil || (!strings.Contains(err.Error(), `unknown field "foo"`) && !strings.Contains(err.Error(), `unknown field "spec.foo"`)) { framework.Failf("unexpected no error when creating CR with unknown field: %v", err) } - if _, err := framework.RunKubectlInput(f.Namespace.Name, unknownCR, ns, "apply", "-f", "-"); err == nil || (!strings.Contains(err.Error(), `unknown field "foo"`) && !strings.Contains(err.Error(), `unknown field "spec.foo"`)) { + if _, err := e2ekubectl.RunKubectlInput(f.Namespace.Name, unknownCR, ns, "apply", "-f", "-"); err == nil || (!strings.Contains(err.Error(), `unknown field "foo"`) && !strings.Contains(err.Error(), `unknown field "spec.foo"`)) { framework.Failf("unexpected no error when applying CR with unknown field: %v", err) } // TODO: see above note, we should check the value of the error once server-side validation is GA. ginkgo.By("kubectl validation (kubectl create and apply) rejects request without required properties") noRequireCR := fmt.Sprintf(`{%s,"spec":{"bars":[{"age":"10"}]}}`, meta) - if _, err := framework.RunKubectlInput(f.Namespace.Name, noRequireCR, ns, "create", "-f", "-"); err == nil || (!strings.Contains(err.Error(), `missing required field "name"`) && !strings.Contains(err.Error(), `spec.bars[0].name: Required value`)) { + if _, err := e2ekubectl.RunKubectlInput(f.Namespace.Name, noRequireCR, ns, "create", "-f", "-"); err == nil || (!strings.Contains(err.Error(), `missing required field "name"`) && !strings.Contains(err.Error(), `spec.bars[0].name: Required value`)) { framework.Failf("unexpected no error when creating CR without required field: %v", err) } - if _, err := framework.RunKubectlInput(f.Namespace.Name, noRequireCR, ns, "apply", "-f", "-"); err == nil || (!strings.Contains(err.Error(), `missing required field "name"`) && !strings.Contains(err.Error(), `spec.bars[0].name: Required value`)) { + if _, err := e2ekubectl.RunKubectlInput(f.Namespace.Name, noRequireCR, ns, "apply", "-f", "-"); err == nil || (!strings.Contains(err.Error(), `missing required field "name"`) && !strings.Contains(err.Error(), `spec.bars[0].name: Required value`)) { framework.Failf("unexpected no error when applying CR without required field: %v", err) } @@ -133,7 +134,7 @@ var _ = SIGDescribe("CustomResourcePublishOpenAPI [Privileged:ClusterAdmin]", fu } ginkgo.By("kubectl explain works to return error when explain is called on property that doesn't exist") - if _, err := framework.RunKubectl(f.Namespace.Name, "explain", crd.Crd.Spec.Names.Plural+".spec.bars2"); err == nil || !strings.Contains(err.Error(), `field "bars2" does not exist`) { + if _, err := e2ekubectl.RunKubectl(f.Namespace.Name, "explain", crd.Crd.Spec.Names.Plural+".spec.bars2"); err == nil || !strings.Contains(err.Error(), `field "bars2" does not exist`) { framework.Failf("unexpected no error when explaining property that doesn't exist: %v", err) } @@ -160,16 +161,16 @@ var _ = SIGDescribe("CustomResourcePublishOpenAPI [Privileged:ClusterAdmin]", fu ginkgo.By("kubectl validation (kubectl 
create and apply) allows request with any unknown properties") randomCR := fmt.Sprintf(`{%s,"a":{"b":[{"c":"d"}]}}`, meta) - if _, err := framework.RunKubectlInput(f.Namespace.Name, randomCR, ns, "create", "-f", "-"); err != nil { + if _, err := e2ekubectl.RunKubectlInput(f.Namespace.Name, randomCR, ns, "create", "-f", "-"); err != nil { framework.Failf("failed to create random CR %s for CRD without schema: %v", randomCR, err) } - if _, err := framework.RunKubectl(f.Namespace.Name, ns, "delete", crd.Crd.Spec.Names.Plural, "test-cr"); err != nil { + if _, err := e2ekubectl.RunKubectl(f.Namespace.Name, ns, "delete", crd.Crd.Spec.Names.Plural, "test-cr"); err != nil { framework.Failf("failed to delete random CR: %v", err) } - if _, err := framework.RunKubectlInput(f.Namespace.Name, randomCR, ns, "apply", "-f", "-"); err != nil { + if _, err := e2ekubectl.RunKubectlInput(f.Namespace.Name, randomCR, ns, "apply", "-f", "-"); err != nil { framework.Failf("failed to apply random CR %s for CRD without schema: %v", randomCR, err) } - if _, err := framework.RunKubectl(f.Namespace.Name, ns, "delete", crd.Crd.Spec.Names.Plural, "test-cr"); err != nil { + if _, err := e2ekubectl.RunKubectl(f.Namespace.Name, ns, "delete", crd.Crd.Spec.Names.Plural, "test-cr"); err != nil { framework.Failf("failed to delete random CR: %v", err) } @@ -201,16 +202,16 @@ var _ = SIGDescribe("CustomResourcePublishOpenAPI [Privileged:ClusterAdmin]", fu ginkgo.By("kubectl validation (kubectl create and apply) allows request with any unknown properties") randomCR := fmt.Sprintf(`{%s,"a":{"b":[{"c":"d"}]}}`, meta) - if _, err := framework.RunKubectlInput(f.Namespace.Name, randomCR, ns, "create", "-f", "-"); err != nil { + if _, err := e2ekubectl.RunKubectlInput(f.Namespace.Name, randomCR, ns, "create", "-f", "-"); err != nil { framework.Failf("failed to create random CR %s for CRD that allows unknown properties at the root: %v", randomCR, err) } - if _, err := framework.RunKubectl(f.Namespace.Name, ns, "delete", crd.Crd.Spec.Names.Plural, "test-cr"); err != nil { + if _, err := e2ekubectl.RunKubectl(f.Namespace.Name, ns, "delete", crd.Crd.Spec.Names.Plural, "test-cr"); err != nil { framework.Failf("failed to delete random CR: %v", err) } - if _, err := framework.RunKubectlInput(f.Namespace.Name, randomCR, ns, "apply", "-f", "-"); err != nil { + if _, err := e2ekubectl.RunKubectlInput(f.Namespace.Name, randomCR, ns, "apply", "-f", "-"); err != nil { framework.Failf("failed to apply random CR %s for CRD without schema: %v", randomCR, err) } - if _, err := framework.RunKubectl(f.Namespace.Name, ns, "delete", crd.Crd.Spec.Names.Plural, "test-cr"); err != nil { + if _, err := e2ekubectl.RunKubectl(f.Namespace.Name, ns, "delete", crd.Crd.Spec.Names.Plural, "test-cr"); err != nil { framework.Failf("failed to delete random CR: %v", err) } @@ -243,16 +244,16 @@ var _ = SIGDescribe("CustomResourcePublishOpenAPI [Privileged:ClusterAdmin]", fu ginkgo.By("kubectl validation (kubectl create and apply) allows request with any unknown properties") randomCR := fmt.Sprintf(`{%s,"spec":{"a":null,"b":[{"c":"d"}]}}`, meta) - if _, err := framework.RunKubectlInput(f.Namespace.Name, randomCR, ns, "create", "-f", "-"); err != nil { + if _, err := e2ekubectl.RunKubectlInput(f.Namespace.Name, randomCR, ns, "create", "-f", "-"); err != nil { framework.Failf("failed to create random CR %s for CRD that allows unknown properties in a nested object: %v", randomCR, err) } - if _, err := framework.RunKubectl(f.Namespace.Name, ns, "delete", 
crd.Crd.Spec.Names.Plural, "test-cr"); err != nil { + if _, err := e2ekubectl.RunKubectl(f.Namespace.Name, ns, "delete", crd.Crd.Spec.Names.Plural, "test-cr"); err != nil { framework.Failf("failed to delete random CR: %v", err) } - if _, err := framework.RunKubectlInput(f.Namespace.Name, randomCR, ns, "apply", "-f", "-"); err != nil { + if _, err := e2ekubectl.RunKubectlInput(f.Namespace.Name, randomCR, ns, "apply", "-f", "-"); err != nil { framework.Failf("failed to apply random CR %s for CRD without schema: %v", randomCR, err) } - if _, err := framework.RunKubectl(f.Namespace.Name, ns, "delete", crd.Crd.Spec.Names.Plural, "test-cr"); err != nil { + if _, err := e2ekubectl.RunKubectl(f.Namespace.Name, ns, "delete", crd.Crd.Spec.Names.Plural, "test-cr"); err != nil { framework.Failf("failed to delete random CR: %v", err) } @@ -715,7 +716,7 @@ func dropDefaults(s *spec.Schema) { } func verifyKubectlExplain(ns, name, pattern string) error { - result, err := framework.RunKubectl(ns, "explain", name) + result, err := e2ekubectl.RunKubectl(ns, "explain", name) if err != nil { return fmt.Errorf("failed to explain %s: %v", name, err) } diff --git a/test/e2e/apimachinery/webhook.go b/test/e2e/apimachinery/webhook.go index 87e0fbbf9ef..c6205f521f0 100644 --- a/test/e2e/apimachinery/webhook.go +++ b/test/e2e/apimachinery/webhook.go @@ -44,6 +44,7 @@ import ( "k8s.io/client-go/util/retry" "k8s.io/kubernetes/test/e2e/framework" e2edeployment "k8s.io/kubernetes/test/e2e/framework/deployment" + e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" "k8s.io/kubernetes/test/utils/crd" imageutils "k8s.io/kubernetes/test/utils/image" @@ -1183,7 +1184,7 @@ func testAttachingPodWebhook(f *framework.Framework) { ginkgo.By("'kubectl attach' the pod, should be denied by the webhook") timer := time.NewTimer(30 * time.Second) defer timer.Stop() - _, err = framework.NewKubectlCommand(f.Namespace.Name, "attach", fmt.Sprintf("--namespace=%v", f.Namespace.Name), pod.Name, "-i", "-c=container1").WithTimeout(timer.C).Exec() + _, err = e2ekubectl.NewKubectlCommand(f.Namespace.Name, "attach", fmt.Sprintf("--namespace=%v", f.Namespace.Name), pod.Name, "-i", "-c=container1").WithTimeout(timer.C).Exec() framework.ExpectError(err, "'kubectl attach' the pod, should be denied by the webhook") if e, a := "attaching to pod 'to-be-attached-pod' is not allowed", err.Error(); !strings.Contains(a, e) { framework.Failf("unexpected 'kubectl attach' error message. 
expected to contain %q, got %q", e, a) diff --git a/test/e2e/apps/daemon_restart.go b/test/e2e/apps/daemon_restart.go index 98a4a51ecbc..ea21f24f8b5 100644 --- a/test/e2e/apps/daemon_restart.go +++ b/test/e2e/apps/daemon_restart.go @@ -35,6 +35,7 @@ import ( "k8s.io/kubernetes/pkg/cluster/ports" kubeschedulerconfig "k8s.io/kubernetes/pkg/scheduler/apis/config" "k8s.io/kubernetes/test/e2e/framework" + e2edebug "k8s.io/kubernetes/test/e2e/framework/debug" e2enode "k8s.io/kubernetes/test/e2e/framework/node" e2erc "k8s.io/kubernetes/test/e2e/framework/rc" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" @@ -335,7 +336,7 @@ var _ = SIGDescribe("DaemonRestart [Disruptive]", func() { } postRestarts, badNodes := getContainerRestarts(f.ClientSet, ns, labelSelector) if postRestarts != preRestarts { - framework.DumpNodeDebugInfo(f.ClientSet, badNodes, framework.Logf) + e2edebug.DumpNodeDebugInfo(f.ClientSet, badNodes, framework.Logf) framework.Failf("Net container restart count went from %v -> %v after kubelet restart on nodes %v \n\n %+v", preRestarts, postRestarts, badNodes, tracker) } }) diff --git a/test/e2e/apps/daemon_set.go b/test/e2e/apps/daemon_set.go index fddbdb7430b..35a430cfa50 100644 --- a/test/e2e/apps/daemon_set.go +++ b/test/e2e/apps/daemon_set.go @@ -53,6 +53,7 @@ import ( "k8s.io/kubernetes/pkg/controller/daemon" "k8s.io/kubernetes/test/e2e/framework" e2edaemonset "k8s.io/kubernetes/test/e2e/framework/daemonset" + e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl" e2enode "k8s.io/kubernetes/test/e2e/framework/node" e2eresource "k8s.io/kubernetes/test/e2e/framework/resource" admissionapi "k8s.io/pod-security-admission/api" @@ -770,7 +771,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { return pod.DeletionTimestamp == nil && oldVersion == pod.Spec.Containers[0].Env[0].Value }); pod != nil { // make the /tmp/ready file read only, which will cause readiness to fail - if _, err := framework.RunKubectl(pod.Namespace, "exec", "-c", pod.Spec.Containers[0].Name, pod.Name, "--", "/bin/sh", "-ec", "echo 0 > /var/tmp/ready"); err != nil { + if _, err := e2ekubectl.RunKubectl(pod.Namespace, "exec", "-c", pod.Spec.Containers[0].Name, pod.Name, "--", "/bin/sh", "-ec", "echo 0 > /var/tmp/ready"); err != nil { framework.Logf("Failed to mark pod %s as unready via exec: %v", pod.Name, err) } else { framework.Logf("Marked old pod %s as unready", pod.Name) diff --git a/test/e2e/apps/disruption.go b/test/e2e/apps/disruption.go index e43932d5230..082380b87d9 100644 --- a/test/e2e/apps/disruption.go +++ b/test/e2e/apps/disruption.go @@ -19,10 +19,11 @@ package apps import ( "context" "fmt" - "github.com/onsi/gomega" "strings" "time" + "github.com/onsi/gomega" + jsonpatch "github.com/evanphx/json-patch" "github.com/onsi/ginkgo/v2" diff --git a/test/e2e/apps/job.go b/test/e2e/apps/job.go index 13c9840f1fd..bc47c2c311b 100644 --- a/test/e2e/apps/job.go +++ b/test/e2e/apps/job.go @@ -351,7 +351,7 @@ var _ = SIGDescribe("Job", func() { framework.ExpectNoError(err, "failed to get PodList for job %s in namespace: %s", job.Name, f.Namespace.Name) gomega.Expect(pods.Items).To(gomega.HaveLen(int(parallelism))) pod := pods.Items[0] - f.PodClient().Update(pod.Name, func(pod *v1.Pod) { + e2epod.NewPodClient(f).Update(pod.Name, func(pod *v1.Pod) { pod.OwnerReferences = nil }) @@ -370,7 +370,7 @@ var _ = SIGDescribe("Job", func() { )).To(gomega.Succeed(), "wait for pod %q to be readopted", pod.Name) ginkgo.By("Removing the labels from the Job's Pod") - f.PodClient().Update(pod.Name, func(pod 
*v1.Pod) { + e2epod.NewPodClient(f).Update(pod.Name, func(pod *v1.Pod) { pod.Labels = nil }) diff --git a/test/e2e/apps/rc.go b/test/e2e/apps/rc.go index c6163462134..d07dc2a7d79 100644 --- a/test/e2e/apps/rc.go +++ b/test/e2e/apps/rc.go @@ -596,7 +596,7 @@ func testReplicationControllerConditionCheck(f *framework.Framework) { func testRCAdoptMatchingOrphans(f *framework.Framework) { name := "pod-adoption" ginkgo.By(fmt.Sprintf("Given a Pod with a 'name' label %s is created", name)) - p := f.PodClient().CreateSync(&v1.Pod{ + p := e2epod.NewPodClient(f).CreateSync(&v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: name, Labels: map[string]string{ diff --git a/test/e2e/apps/replica_set.go b/test/e2e/apps/replica_set.go index 0a5d0adce9e..c16ac899f72 100644 --- a/test/e2e/apps/replica_set.go +++ b/test/e2e/apps/replica_set.go @@ -323,7 +323,7 @@ func testReplicaSetConditionCheck(f *framework.Framework) { func testRSAdoptMatchingAndReleaseNotMatching(f *framework.Framework) { name := "pod-adoption-release" ginkgo.By(fmt.Sprintf("Given a Pod with a 'name' label %s is created", name)) - p := f.PodClient().CreateSync(&v1.Pod{ + p := e2epod.NewPodClient(f).CreateSync(&v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: name, Labels: map[string]string{ diff --git a/test/e2e/apps/statefulset.go b/test/e2e/apps/statefulset.go index a67559b56aa..29dfb423d93 100644 --- a/test/e2e/apps/statefulset.go +++ b/test/e2e/apps/statefulset.go @@ -45,8 +45,10 @@ import ( watchtools "k8s.io/client-go/tools/watch" "k8s.io/client-go/util/retry" "k8s.io/kubernetes/test/e2e/framework" + e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl" e2enode "k8s.io/kubernetes/test/e2e/framework/node" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" + e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output" e2epv "k8s.io/kubernetes/test/e2e/framework/pv" e2eservice "k8s.io/kubernetes/test/e2e/framework/service" e2estatefulset "k8s.io/kubernetes/test/e2e/framework/statefulset" @@ -121,7 +123,7 @@ var _ = SIGDescribe("StatefulSet", func() { ginkgo.AfterEach(func() { if ginkgo.CurrentSpecReport().Failed() { - framework.DumpDebugInfo(c, ns) + e2eoutput.DumpDebugInfo(c, ns) } framework.Logf("Deleting all statefulset in ns %v", ns) e2estatefulset.DeleteAllStatefulSets(c, ns) @@ -195,7 +197,7 @@ var _ = SIGDescribe("StatefulSet", func() { framework.ExpectEqual(controllerRef.UID, ss.UID) ginkgo.By("Orphaning one of the stateful set's pods") - f.PodClient().Update(pod.Name, func(pod *v1.Pod) { + e2epod.NewPodClient(f).Update(pod.Name, func(pod *v1.Pod) { pod.OwnerReferences = nil }) @@ -215,7 +217,7 @@ var _ = SIGDescribe("StatefulSet", func() { ginkgo.By("Removing the labels from one of the stateful set's pods") prevLabels := pod.Labels - f.PodClient().Update(pod.Name, func(pod *v1.Pod) { + e2epod.NewPodClient(f).Update(pod.Name, func(pod *v1.Pod) { pod.Labels = nil }) @@ -232,7 +234,7 @@ var _ = SIGDescribe("StatefulSet", func() { // If we don't do this, the test leaks the Pod and PVC. 
ginkgo.By("Readding labels to the stateful set's pod") - f.PodClient().Update(pod.Name, func(pod *v1.Pod) { + e2epod.NewPodClient(f).Update(pod.Name, func(pod *v1.Pod) { pod.Labels = prevLabels }) @@ -1108,7 +1110,7 @@ var _ = SIGDescribe("StatefulSet", func() { ginkgo.AfterEach(func() { if ginkgo.CurrentSpecReport().Failed() { - framework.DumpDebugInfo(c, ns) + e2eoutput.DumpDebugInfo(c, ns) } framework.Logf("Deleting all statefulset in ns %v", ns) e2estatefulset.DeleteAllStatefulSets(c, ns) @@ -1201,7 +1203,7 @@ var _ = SIGDescribe("StatefulSet", func() { e2estatefulset.WaitForStatusAvailableReplicas(c, ss, 2) ginkgo.By("check availableReplicas are shown in status") - out, err := framework.RunKubectl(ns, "get", "statefulset", ss.Name, "-o=yaml") + out, err := e2ekubectl.RunKubectl(ns, "get", "statefulset", ss.Name, "-o=yaml") framework.ExpectNoError(err) if !strings.Contains(out, "availableReplicas: 2") { framework.Failf("invalid number of availableReplicas: expected=%v received=%v", 2, out) @@ -1231,7 +1233,7 @@ var _ = SIGDescribe("StatefulSet", func() { ginkgo.AfterEach(func() { if ginkgo.CurrentSpecReport().Failed() { - framework.DumpDebugInfo(c, ns) + e2eoutput.DumpDebugInfo(c, ns) } framework.Logf("Deleting all statefulset in ns %v", ns) e2estatefulset.DeleteAllStatefulSets(c, ns) @@ -1350,7 +1352,7 @@ var _ = SIGDescribe("StatefulSet", func() { func kubectlExecWithRetries(ns string, args ...string) (out string) { var err error for i := 0; i < 3; i++ { - if out, err = framework.RunKubectl(ns, args...); err == nil { + if out, err = e2ekubectl.RunKubectl(ns, args...); err == nil { return } framework.Logf("Retrying %v:\nerror %v\nstdout %v", args, err, out) @@ -1414,14 +1416,14 @@ func (z *zookeeperTester) write(statefulPodIndex int, kv map[string]string) { name := fmt.Sprintf("%v-%d", z.ss.Name, statefulPodIndex) for k, v := range kv { cmd := fmt.Sprintf("/opt/zookeeper/bin/zkCli.sh create /%v %v", k, v) - framework.Logf(framework.RunKubectlOrDie(z.ss.Namespace, "exec", name, "--", "/bin/sh", "-c", cmd)) + framework.Logf(e2ekubectl.RunKubectlOrDie(z.ss.Namespace, "exec", name, "--", "/bin/sh", "-c", cmd)) } } func (z *zookeeperTester) read(statefulPodIndex int, key string) string { name := fmt.Sprintf("%v-%d", z.ss.Name, statefulPodIndex) cmd := fmt.Sprintf("/opt/zookeeper/bin/zkCli.sh get /%v", key) - return lastLine(framework.RunKubectlOrDie(z.ss.Namespace, "exec", name, "--", "/bin/sh", "-c", cmd)) + return lastLine(e2ekubectl.RunKubectlOrDie(z.ss.Namespace, "exec", name, "--", "/bin/sh", "-c", cmd)) } type mysqlGaleraTester struct { @@ -1478,7 +1480,7 @@ func (m *redisTester) name() string { func (m *redisTester) redisExec(cmd, ns, podName string) string { cmd = fmt.Sprintf("/opt/redis/redis-cli -h %v %v", podName, cmd) - return framework.RunKubectlOrDie(ns, "exec", podName, "--", "/bin/sh", "-c", cmd) + return e2ekubectl.RunKubectlOrDie(ns, "exec", podName, "--", "/bin/sh", "-c", cmd) } func (m *redisTester) deploy(ns string) *appsv1.StatefulSet { @@ -1509,7 +1511,7 @@ func (c *cockroachDBTester) name() string { func (c *cockroachDBTester) cockroachDBExec(cmd, ns, podName string) string { cmd = fmt.Sprintf("/cockroach/cockroach sql --insecure --host %s.cockroachdb -e \"%v\"", podName, cmd) - return framework.RunKubectlOrDie(ns, "exec", podName, "--", "/bin/sh", "-c", cmd) + return e2ekubectl.RunKubectlOrDie(ns, "exec", podName, "--", "/bin/sh", "-c", cmd) } func (c *cockroachDBTester) deploy(ns string) *appsv1.StatefulSet { @@ -1710,7 +1712,7 @@ func breakPodHTTPProbe(ss 
*appsv1.StatefulSet, pod *v1.Pod) error { } // Ignore 'mv' errors to make this idempotent. cmd := fmt.Sprintf("mv -v /usr/local/apache2/htdocs%v /tmp/ || true", path) - stdout, err := framework.RunHostCmdWithRetries(pod.Namespace, pod.Name, cmd, statefulSetPoll, statefulPodTimeout) + stdout, err := e2eoutput.RunHostCmdWithRetries(pod.Namespace, pod.Name, cmd, statefulSetPoll, statefulPodTimeout) framework.Logf("stdout of %v on %v: %v", cmd, pod.Name, stdout) return err } @@ -1734,7 +1736,7 @@ func restorePodHTTPProbe(ss *appsv1.StatefulSet, pod *v1.Pod) error { } // Ignore 'mv' errors to make this idempotent. cmd := fmt.Sprintf("mv -v /tmp%v /usr/local/apache2/htdocs/ || true", path) - stdout, err := framework.RunHostCmdWithRetries(pod.Namespace, pod.Name, cmd, statefulSetPoll, statefulPodTimeout) + stdout, err := e2eoutput.RunHostCmdWithRetries(pod.Namespace, pod.Name, cmd, statefulSetPoll, statefulPodTimeout) framework.Logf("stdout of %v on %v: %v", cmd, pod.Name, stdout) return err } diff --git a/test/e2e/architecture/conformance.go b/test/e2e/architecture/conformance.go index 95caf42222e..5bfdaaefb30 100644 --- a/test/e2e/architecture/conformance.go +++ b/test/e2e/architecture/conformance.go @@ -37,7 +37,7 @@ var _ = SIGDescribe("Conformance Tests", func() { */ framework.ConformanceIt("should have at least two untainted nodes", func() { ginkgo.By("Getting node addresses") - framework.ExpectNoError(framework.WaitForAllNodesSchedulable(f.ClientSet, 10*time.Minute)) + framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(f.ClientSet, 10*time.Minute)) nodeList, err := e2enode.GetReadySchedulableNodes(f.ClientSet) framework.ExpectNoError(err) if len(nodeList.Items) < 2 { diff --git a/test/e2e/auth/node_authn.go b/test/e2e/auth/node_authn.go index 65ac92f8391..f9a84e08732 100644 --- a/test/e2e/auth/node_authn.go +++ b/test/e2e/auth/node_authn.go @@ -28,6 +28,7 @@ import ( "k8s.io/kubernetes/test/e2e/framework" e2enode "k8s.io/kubernetes/test/e2e/framework/node" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" + e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output" admissionapi "k8s.io/pod-security-admission/api" "github.com/onsi/ginkgo/v2" @@ -60,7 +61,7 @@ var _ = SIGDescribe("[Feature:NodeAuthenticator]", func() { for _, nodeIP := range nodeIPs { // Anonymous authentication is disabled by default host := net.JoinHostPort(nodeIP, strconv.Itoa(ports.KubeletPort)) - result := framework.RunHostCmdOrDie(ns, pod.Name, fmt.Sprintf("curl -sIk -o /dev/null -w '%s' https://%s/metrics", "%{http_code}", host)) + result := e2eoutput.RunHostCmdOrDie(ns, pod.Name, fmt.Sprintf("curl -sIk -o /dev/null -w '%s' https://%s/metrics", "%{http_code}", host)) gomega.Expect(result).To(gomega.Or(gomega.Equal("401"), gomega.Equal("403")), "the kubelet's main port 10250 should reject requests with no credentials") } }) @@ -82,7 +83,7 @@ var _ = SIGDescribe("[Feature:NodeAuthenticator]", func() { for _, nodeIP := range nodeIPs { host := net.JoinHostPort(nodeIP, strconv.Itoa(ports.KubeletPort)) - result := framework.RunHostCmdOrDie(ns, + result := e2eoutput.RunHostCmdOrDie(ns, pod.Name, fmt.Sprintf("curl -sIk -o /dev/null -w '%s' --header \"Authorization: Bearer `%s`\" https://%s/metrics", "%{http_code}", @@ -96,5 +97,5 @@ var _ = SIGDescribe("[Feature:NodeAuthenticator]", func() { func createNodeAuthTestPod(f *framework.Framework) *v1.Pod { pod := e2epod.NewAgnhostPod(f.Namespace.Name, "agnhost-pod", nil, nil, nil) pod.ObjectMeta.GenerateName = "test-node-authn-" - return f.PodClient().CreateSync(pod) + 
return e2epod.NewPodClient(f).CreateSync(pod) } diff --git a/test/e2e/auth/node_authz.go b/test/e2e/auth/node_authz.go index b02a9a3e211..8336ba9bd0c 100644 --- a/test/e2e/auth/node_authz.go +++ b/test/e2e/auth/node_authz.go @@ -19,9 +19,10 @@ package auth import ( "context" "fmt" - apierrors "k8s.io/apimachinery/pkg/api/errors" "time" + apierrors "k8s.io/apimachinery/pkg/api/errors" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" diff --git a/test/e2e/auth/service_accounts.go b/test/e2e/auth/service_accounts.go index 6be558595fb..b15d29aec82 100644 --- a/test/e2e/auth/service_accounts.go +++ b/test/e2e/auth/service_accounts.go @@ -41,6 +41,7 @@ import ( "k8s.io/kubernetes/test/e2e/framework" e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" + e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" imageutils "k8s.io/kubernetes/test/utils/image" admissionapi "k8s.io/pod-security-admission/api" @@ -316,7 +317,7 @@ var _ = SIGDescribe("ServiceAccounts", func() { fmt.Sprintf("content of file \"%v\": %s", tokenVolumePath, `[A-Za-z0-9-_=]+\.[A-Za-z0-9-_=]+\.?[A-Za-z0-9-_.+/=]*`), } - f.TestContainerOutputRegexp("service account token: ", pod, 0, output) + e2eoutput.TestContainerOutputRegexp(f, "service account token: ", pod, 0, output) }) /* @@ -424,7 +425,7 @@ var _ = SIGDescribe("ServiceAccounts", func() { fmt.Sprintf("owner UID of \"%v\": %d", tokenVolumePath, tc.wantUID), fmt.Sprintf("owner GID of \"%v\": %d", tokenVolumePath, tc.wantGID), } - f.TestContainerOutputRegexp("service account token: ", pod, 0, output) + e2eoutput.TestContainerOutputRegexp(f, "service account token: ", pod, 0, output) } }) diff --git a/test/e2e/autoscaling/cluster_autoscaler_scalability.go b/test/e2e/autoscaling/cluster_autoscaler_scalability.go index 55c56045490..fce86e13040 100644 --- a/test/e2e/autoscaling/cluster_autoscaler_scalability.go +++ b/test/e2e/autoscaling/cluster_autoscaler_scalability.go @@ -24,7 +24,7 @@ import ( "strings" "time" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/types" diff --git a/test/e2e/autoscaling/cluster_size_autoscaling.go b/test/e2e/autoscaling/cluster_size_autoscaling.go index 6b71f83645f..71252cb86fe 100644 --- a/test/e2e/autoscaling/cluster_size_autoscaling.go +++ b/test/e2e/autoscaling/cluster_size_autoscaling.go @@ -44,6 +44,7 @@ import ( clientset "k8s.io/client-go/kubernetes" "k8s.io/klog/v2" "k8s.io/kubernetes/test/e2e/framework" + e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl" e2emanifest "k8s.io/kubernetes/test/e2e/framework/manifest" e2enetwork "k8s.io/kubernetes/test/e2e/framework/network" e2enode "k8s.io/kubernetes/test/e2e/framework/node" @@ -381,7 +382,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { framework.ExpectNoError(e2enode.WaitForReadyNodes(c, nodeCount+extraNodes, resizeTimeout)) // We wait for nodes to become schedulable to make sure the new nodes // will be returned by getPoolNodes below. 
- framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, resizeTimeout)) + framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(c, resizeTimeout)) klog.Infof("Not enabling cluster autoscaler for the node pool (on purpose).") ginkgo.By("Getting memory available on new nodes, so we can account for it when creating RC") @@ -564,7 +565,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { removeLabels := func(nodesToClean sets.String) { ginkgo.By("Removing labels from nodes") for node := range nodesToClean { - framework.RemoveLabelOffNode(c, node, labelKey) + e2enode.RemoveLabelOffNode(c, node, labelKey) } } @@ -575,7 +576,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { ginkgo.By(fmt.Sprintf("Annotating nodes of the smallest MIG(%s): %v", minMig, nodes)) for node := range nodesSet { - framework.AddOrUpdateLabelOnNode(c, node, labelKey, labelValue) + e2enode.AddOrUpdateLabelOnNode(c, node, labelKey, labelValue) } err = scheduling.CreateNodeSelectorPods(f, "node-selector", minSize+1, map[string]string{labelKey: labelValue}, false) @@ -593,7 +594,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { if len(newNodesSet) > 1 { ginkgo.By(fmt.Sprintf("Spotted following new nodes in %s: %v", minMig, newNodesSet)) klog.Infof("Usually only 1 new node is expected, investigating") - klog.Infof("Kubectl:%s\n", framework.RunKubectlOrDie(f.Namespace.Name, "get", "nodes", "-o", "json")) + klog.Infof("Kubectl:%s\n", e2ekubectl.RunKubectlOrDie(f.Namespace.Name, "get", "nodes", "-o", "json")) if output, err := exec.Command("gcloud", "compute", "instances", "list", "--project="+framework.TestContext.CloudConfig.ProjectID, "--zone="+framework.TestContext.CloudConfig.Zone).Output(); err == nil { @@ -629,7 +630,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { } ginkgo.By(fmt.Sprintf("Setting labels for registered new nodes: %v", registeredNodes.List())) for node := range registeredNodes { - framework.AddOrUpdateLabelOnNode(c, node, labelKey, labelValue) + e2enode.AddOrUpdateLabelOnNode(c, node, labelKey, labelValue) } defer removeLabels(registeredNodes) @@ -1416,8 +1417,8 @@ func waitForCaPodsReadyInNamespace(f *framework.Framework, c clientset.Interface klog.Infof("Too many pods are not ready yet: %v", notready) } klog.Info("Timeout on waiting for pods being ready") - klog.Info(framework.RunKubectlOrDie(f.Namespace.Name, "get", "pods", "-o", "json", "--all-namespaces")) - klog.Info(framework.RunKubectlOrDie(f.Namespace.Name, "get", "nodes", "-o", "json")) + klog.Info(e2ekubectl.RunKubectlOrDie(f.Namespace.Name, "get", "pods", "-o", "json", "--all-namespaces")) + klog.Info(e2ekubectl.RunKubectlOrDie(f.Namespace.Name, "get", "nodes", "-o", "json")) // Some pods are still not running. 
return fmt.Errorf("Too many pods are still not running: %v", notready) diff --git a/test/e2e/cloud/gcp/common/upgrade_mechanics.go b/test/e2e/cloud/gcp/common/upgrade_mechanics.go index 10b2d65f4f7..dc829cc48fe 100644 --- a/test/e2e/cloud/gcp/common/upgrade_mechanics.go +++ b/test/e2e/cloud/gcp/common/upgrade_mechanics.go @@ -28,6 +28,7 @@ import ( clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" e2enode "k8s.io/kubernetes/test/e2e/framework/node" + e2eproviders "k8s.io/kubernetes/test/e2e/framework/providers" "k8s.io/kubernetes/test/e2e/upgrades" "k8s.io/kubernetes/test/utils/junit" ) @@ -80,7 +81,7 @@ func controlPlaneUpgrade(f *framework.Framework, v string, extraEnvs []string) e case "gce": return controlPlaneUpgradeGCE(v, extraEnvs) case "gke": - return framework.MasterUpgradeGKE(f.Namespace.Name, v) + return e2eproviders.MasterUpgradeGKE(f.Namespace.Name, v) default: return fmt.Errorf("controlPlaneUpgrade() is not implemented for provider %s", framework.TestContext.Provider) } @@ -101,7 +102,7 @@ func controlPlaneUpgradeGCE(rawV string, extraEnvs []string) error { } v := "v" + rawV - _, _, err := framework.RunCmdEnv(env, framework.GCEUpgradeScript(), "-M", v) + _, _, err := framework.RunCmdEnv(env, e2eproviders.GCEUpgradeScript(), "-M", v) return err } @@ -172,10 +173,10 @@ func nodeUpgradeGCE(rawV, img string, extraEnvs []string) error { env := append(os.Environ(), extraEnvs...) if img != "" { env = append(env, "KUBE_NODE_OS_DISTRIBUTION="+img) - _, _, err := framework.RunCmdEnv(env, framework.GCEUpgradeScript(), "-N", "-o", v) + _, _, err := framework.RunCmdEnv(env, e2eproviders.GCEUpgradeScript(), "-N", "-o", v) return err } - _, _, err := framework.RunCmdEnv(env, framework.GCEUpgradeScript(), "-N", v) + _, _, err := framework.RunCmdEnv(env, e2eproviders.GCEUpgradeScript(), "-N", v) return err } @@ -191,7 +192,7 @@ func nodeUpgradeGKE(namespace string, v string, img string) error { "container", "clusters", fmt.Sprintf("--project=%s", framework.TestContext.CloudConfig.ProjectID), - framework.LocationParamGKE(), + e2eproviders.LocationParamGKE(), "upgrade", framework.TestContext.CloudConfig.Cluster, fmt.Sprintf("--node-pool=%s", np), @@ -207,7 +208,7 @@ func nodeUpgradeGKE(namespace string, v string, img string) error { return err } - framework.WaitForSSHTunnels(namespace) + e2enode.WaitForSSHTunnels(namespace) } return nil } @@ -217,7 +218,7 @@ func nodePoolsGKE() ([]string, error) { "container", "node-pools", fmt.Sprintf("--project=%s", framework.TestContext.CloudConfig.ProjectID), - framework.LocationParamGKE(), + e2eproviders.LocationParamGKE(), "list", fmt.Sprintf("--cluster=%s", framework.TestContext.CloudConfig.Cluster), "--format=get(name)", diff --git a/test/e2e/cloud/gcp/ha_master.go b/test/e2e/cloud/gcp/ha_master.go index 13bbdf0ccaf..299715a635b 100644 --- a/test/e2e/cloud/gcp/ha_master.go +++ b/test/e2e/cloud/gcp/ha_master.go @@ -183,7 +183,7 @@ var _ = SIGDescribe("HA-master [Feature:HAMaster]", func() { for _, zone := range additionalNodesZones { removeWorkerNodes(zone) } - framework.ExpectNoError(framework.AllNodesReady(c, 5*time.Minute)) + framework.ExpectNoError(e2enode.AllNodesReady(c, 5*time.Minute)) // Clean-up additional master replicas if the test execution was broken. 
for _, zone := range additionalReplicaZones { @@ -218,7 +218,7 @@ var _ = SIGDescribe("HA-master [Feature:HAMaster]", func() { additionalNodesZones = removeZoneFromZones(additionalNodesZones, zone) } framework.ExpectNoError(waitForMasters(framework.TestContext.CloudConfig.MasterName, c, len(additionalReplicaZones)+1, 10*time.Minute)) - framework.ExpectNoError(framework.AllNodesReady(c, 5*time.Minute)) + framework.ExpectNoError(e2enode.AllNodesReady(c, 5*time.Minute)) // Verify that API server works correctly with HA master. rcName := "ha-master-" + strconv.Itoa(len(existingRCs)) diff --git a/test/e2e/e2e.go b/test/e2e/e2e.go index 626ddd46a72..afa7d9e7e9c 100644 --- a/test/e2e/e2e.go +++ b/test/e2e/e2e.go @@ -41,6 +41,7 @@ import ( commontest "k8s.io/kubernetes/test/e2e/common" "k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework/daemonset" + e2edebug "k8s.io/kubernetes/test/e2e/framework/debug" e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl" e2enode "k8s.io/kubernetes/test/e2e/framework/node" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" @@ -223,7 +224,7 @@ func setupSuite() { // In large clusters we may get to this point but still have a bunch // of nodes without Routes created. Since this would make a node // unschedulable, we need to wait until all of them are schedulable. - framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout)) + framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout)) // If NumNodes is not specified then auto-detect how many are scheduleable and not tainted if framework.TestContext.CloudConfig.NumNodes == framework.DefaultNumNodes { @@ -242,7 +243,7 @@ func setupSuite() { // wasting the whole run), we allow for some not-ready pods (with the // number equal to the number of allowed not-ready nodes). 
if err := e2epod.WaitForPodsRunningReady(c, metav1.NamespaceSystem, int32(framework.TestContext.MinStartupPods), int32(framework.TestContext.AllowedNotReadyNodes), podStartupTimeout, map[string]string{}); err != nil { - framework.DumpAllNamespaceInfo(c, metav1.NamespaceSystem) + e2edebug.DumpAllNamespaceInfo(c, metav1.NamespaceSystem) e2ekubectl.LogFailedContainers(c, metav1.NamespaceSystem, framework.Logf) framework.Failf("Error waiting for all pods to be running and ready: %v", err) } @@ -270,7 +271,7 @@ func setupSuite() { } if framework.TestContext.NodeKiller.Enabled { - nodeKiller := framework.NewNodeKiller(framework.TestContext.NodeKiller, c, framework.TestContext.Provider) + nodeKiller := e2enode.NewNodeKiller(framework.TestContext.NodeKiller, c, framework.TestContext.Provider) go nodeKiller.Run(framework.TestContext.NodeKiller.NodeKillerStopCh) } } diff --git a/test/e2e/framework/job/fixtures.go b/test/e2e/framework/job/fixtures.go index 0ee29bd1d05..d07286d3151 100644 --- a/test/e2e/framework/job/fixtures.go +++ b/test/e2e/framework/job/fixtures.go @@ -18,7 +18,7 @@ package job import ( batchv1 "k8s.io/api/batch/v1" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/rand" "k8s.io/kubernetes/test/e2e/framework" diff --git a/test/e2e/framework/job/rest.go b/test/e2e/framework/job/rest.go index 913c61e9f4b..f653c34cc8a 100644 --- a/test/e2e/framework/job/rest.go +++ b/test/e2e/framework/job/rest.go @@ -18,6 +18,7 @@ package job import ( "context" + batchv1 "k8s.io/api/batch/v1" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" diff --git a/test/e2e/framework/job/wait.go b/test/e2e/framework/job/wait.go index 563f1781289..96bd4230427 100644 --- a/test/e2e/framework/job/wait.go +++ b/test/e2e/framework/job/wait.go @@ -21,7 +21,7 @@ import ( "time" batchv1 "k8s.io/api/batch/v1" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" diff --git a/test/e2e/framework/manifest/manifest.go b/test/e2e/framework/manifest/manifest.go index 4e694c3aeb6..556de474cb2 100644 --- a/test/e2e/framework/manifest/manifest.go +++ b/test/e2e/framework/manifest/manifest.go @@ -23,7 +23,7 @@ import ( "time" appsv1 "k8s.io/api/apps/v1" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" utilyaml "k8s.io/apimachinery/pkg/util/yaml" diff --git a/test/e2e/framework/timer/timer.go b/test/e2e/framework/timer/timer.go index 9620ae021c5..fee604006c2 100644 --- a/test/e2e/framework/timer/timer.go +++ b/test/e2e/framework/timer/timer.go @@ -22,9 +22,10 @@ import ( "bytes" "fmt" + "sync" + "k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/perftype" - "sync" ) var now = time.Now diff --git a/test/e2e/instrumentation/logging/generic_soak.go b/test/e2e/instrumentation/logging/generic_soak.go index d9d427d270a..588c582c422 100644 --- a/test/e2e/instrumentation/logging/generic_soak.go +++ b/test/e2e/instrumentation/logging/generic_soak.go @@ -23,10 +23,11 @@ import ( "sync" "time" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" "k8s.io/kubernetes/test/e2e/framework" e2econfig "k8s.io/kubernetes/test/e2e/framework/config" e2enode "k8s.io/kubernetes/test/e2e/framework/node" + e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output" instrumentation "k8s.io/kubernetes/test/e2e/instrumentation/common" imageutils 
"k8s.io/kubernetes/test/utils/image" admissionapi "k8s.io/pod-security-admission/api" @@ -118,7 +119,7 @@ func RunLogPodsWithSleepOf(f *framework.Framework, sleep time.Duration, podname // we don't validate total log data, since there is no guarantee all logs will be stored forever. // instead, we just validate that some logs are being created in std out. Verify: func(p v1.Pod) (bool, error) { - s, err := framework.LookForStringInLog(f.Namespace.Name, p.Name, "logging-soak", "logs-123", 1*time.Second) + s, err := e2eoutput.LookForStringInLog(f.Namespace.Name, p.Name, "logging-soak", "logs-123", 1*time.Second) return s != "", err }, }, diff --git a/test/e2e/instrumentation/logging/utils/logging_pod.go b/test/e2e/instrumentation/logging/utils/logging_pod.go index 7c34bf47164..04ff9722897 100644 --- a/test/e2e/instrumentation/logging/utils/logging_pod.go +++ b/test/e2e/instrumentation/logging/utils/logging_pod.go @@ -95,7 +95,7 @@ func (p *loadLoggingPod) Name() string { func (p *loadLoggingPod) Start(f *framework.Framework) error { framework.Logf("Starting load logging pod %s", p.name) - f.PodClient().Create(&v1.Pod{ + e2epod.NewPodClient(f).Create(&v1.Pod{ ObjectMeta: meta_v1.ObjectMeta{ Name: p.name, }, @@ -161,7 +161,7 @@ func (p *execLoggingPod) Name() string { func (p *execLoggingPod) Start(f *framework.Framework) error { framework.Logf("Starting repeating logging pod %s", p.name) - f.PodClient().Create(&v1.Pod{ + e2epod.NewPodClient(f).Create(&v1.Pod{ ObjectMeta: meta_v1.ObjectMeta{ Name: p.name, }, diff --git a/test/e2e/instrumentation/monitoring/accelerator.go b/test/e2e/instrumentation/monitoring/accelerator.go index 1ab8a0d2b3f..2e95349f7e8 100644 --- a/test/e2e/instrumentation/monitoring/accelerator.go +++ b/test/e2e/instrumentation/monitoring/accelerator.go @@ -27,6 +27,7 @@ import ( "k8s.io/apimachinery/pkg/util/wait" "k8s.io/kubernetes/test/e2e/framework" e2egpu "k8s.io/kubernetes/test/e2e/framework/gpu" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" instrumentation "k8s.io/kubernetes/test/e2e/instrumentation/common" "k8s.io/kubernetes/test/e2e/scheduling" @@ -81,7 +82,7 @@ func testStackdriverAcceleratorMonitoring(f *framework.Framework) { scheduling.SetupNVIDIAGPUNode(f, false) - f.PodClient().Create(&v1.Pod{ + e2epod.NewPodClient(f).Create(&v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: rcName, }, diff --git a/test/e2e/instrumentation/monitoring/custom_metrics_deployments.go b/test/e2e/instrumentation/monitoring/custom_metrics_deployments.go index 78ece15e3a9..77f6b883fe5 100644 --- a/test/e2e/instrumentation/monitoring/custom_metrics_deployments.go +++ b/test/e2e/instrumentation/monitoring/custom_metrics_deployments.go @@ -27,6 +27,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/kubernetes/test/e2e/framework" e2edeployment "k8s.io/kubernetes/test/e2e/framework/deployment" + e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl" imageutils "k8s.io/kubernetes/test/utils/image" gcm "google.golang.org/api/monitoring/v3" @@ -234,7 +235,7 @@ func CreateAdapter(adapterDeploymentFile string) error { if err != nil { return err } - stat, err := framework.RunKubectl("", "apply", "-f", adapterURL) + stat, err := e2ekubectl.RunKubectl("", "apply", "-f", adapterURL) framework.Logf(stat) return err } @@ -247,7 +248,7 @@ func createClusterAdminBinding() error { } serviceAccount := strings.TrimSpace(stdout) framework.Logf("current service account: %q", serviceAccount) - stat, err := framework.RunKubectl("", 
"create", "clusterrolebinding", ClusterAdminBinding, "--clusterrole=cluster-admin", "--user="+serviceAccount) + stat, err := e2ekubectl.RunKubectl("", "create", "clusterrolebinding", ClusterAdminBinding, "--clusterrole=cluster-admin", "--user="+serviceAccount) framework.Logf(stat) return err } @@ -287,7 +288,7 @@ func CleanupDescriptors(service *gcm.Service, projectID string) { // CleanupAdapter deletes Custom Metrics - Stackdriver adapter deployments. func CleanupAdapter(adapterDeploymentFile string) { - stat, err := framework.RunKubectl("", "delete", "-f", adapterDeploymentFile) + stat, err := e2ekubectl.RunKubectl("", "delete", "-f", adapterDeploymentFile) framework.Logf(stat) if err != nil { framework.Logf("Failed to delete adapter deployments: %s", err) @@ -300,7 +301,7 @@ func CleanupAdapter(adapterDeploymentFile string) { } func cleanupClusterAdminBinding() { - stat, err := framework.RunKubectl("", "delete", "clusterrolebinding", ClusterAdminBinding) + stat, err := e2ekubectl.RunKubectl("", "delete", "clusterrolebinding", ClusterAdminBinding) framework.Logf(stat) if err != nil { framework.Logf("Failed to delete cluster admin binding: %s", err) diff --git a/test/e2e/instrumentation/monitoring/custom_metrics_stackdriver.go b/test/e2e/instrumentation/monitoring/custom_metrics_stackdriver.go index a95b7035041..3e9c2b9d7fb 100644 --- a/test/e2e/instrumentation/monitoring/custom_metrics_stackdriver.go +++ b/test/e2e/instrumentation/monitoring/custom_metrics_stackdriver.go @@ -21,7 +21,7 @@ import ( "time" gcm "google.golang.org/api/monitoring/v3" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime/schema" diff --git a/test/e2e/instrumentation/monitoring/stackdriver_metadata_agent.go b/test/e2e/instrumentation/monitoring/stackdriver_metadata_agent.go index 03288ccfee3..bdc37e6909c 100644 --- a/test/e2e/instrumentation/monitoring/stackdriver_metadata_agent.go +++ b/test/e2e/instrumentation/monitoring/stackdriver_metadata_agent.go @@ -24,7 +24,7 @@ import ( "reflect" "time" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" diff --git a/test/e2e/kubectl/kubectl.go b/test/e2e/kubectl/kubectl.go index 5a6561fb884..25a77a82ecd 100644 --- a/test/e2e/kubectl/kubectl.go +++ b/test/e2e/kubectl/kubectl.go @@ -62,11 +62,13 @@ import ( commonutils "k8s.io/kubernetes/test/e2e/common" "k8s.io/kubernetes/test/e2e/framework" e2eauth "k8s.io/kubernetes/test/e2e/framework/auth" + e2edebug "k8s.io/kubernetes/test/e2e/framework/debug" e2eendpoints "k8s.io/kubernetes/test/e2e/framework/endpoints" e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl" e2enetwork "k8s.io/kubernetes/test/e2e/framework/network" e2enode "k8s.io/kubernetes/test/e2e/framework/node" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" + e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output" e2eservice "k8s.io/kubernetes/test/e2e/framework/service" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" e2etestfiles "k8s.io/kubernetes/test/e2e/framework/testfiles" @@ -196,7 +198,7 @@ func cleanupKubectlInputs(fileContents string, ns string, selectors ...string) { ginkgo.By("using delete to clean up resources") // support backward compatibility : file paths or raw json - since we are removing file path // dependencies from this test. 
- framework.RunKubectlOrDieInput(ns, fileContents, "delete", "--grace-period=0", "--force", "-f", "-") + e2ekubectl.RunKubectlOrDieInput(ns, fileContents, "delete", "--grace-period=0", "--force", "-f", "-") assertCleanup(ns, selectors...) } @@ -206,12 +208,12 @@ func assertCleanup(ns string, selectors ...string) { verifyCleanupFunc := func() (bool, error) { e = nil for _, selector := range selectors { - resources := framework.RunKubectlOrDie(ns, "get", "rc,svc", "-l", selector, "--no-headers") + resources := e2ekubectl.RunKubectlOrDie(ns, "get", "rc,svc", "-l", selector, "--no-headers") if resources != "" { e = fmt.Errorf("Resources left running after stop:\n%s", resources) return false, nil } - pods := framework.RunKubectlOrDie(ns, "get", "pods", "-l", selector, "-o", "go-template={{ range .items }}{{ if not .metadata.deletionTimestamp }}{{ .metadata.name }}{{ \"\\n\" }}{{ end }}{{ end }}") + pods := e2ekubectl.RunKubectlOrDie(ns, "get", "pods", "-l", selector, "-o", "go-template={{ range .items }}{{ if not .metadata.deletionTimestamp }}{{ .metadata.name }}{{ \"\\n\" }}{{ end }}{{ end }}") if pods != "" { e = fmt.Errorf("Pods left unterminated after stop:\n%s", pods) return false, nil @@ -237,7 +239,7 @@ func runKubectlRetryOrDie(ns string, args ...string) string { var err error var output string for i := 0; i < 5; i++ { - output, err = framework.RunKubectl(ns, args...) + output, err = e2ekubectl.RunKubectl(ns, args...) if err == nil || (!strings.Contains(err.Error(), genericregistry.OptimisticLockErrorMsg) && !strings.Contains(err.Error(), "Operation cannot be fulfilled")) { break } @@ -282,7 +284,7 @@ var _ = SIGDescribe("Kubectl client", func() { pods, err := clusterState().WaitFor(atLeast, framework.PodStartTimeout) if err != nil || len(pods) < atLeast { // TODO: Generalize integrating debug info into these tests so we always get debug info when we need it - framework.DumpAllNamespaceInfo(f.ClientSet, ns) + e2edebug.DumpAllNamespaceInfo(f.ClientSet, ns) framework.Failf("Verified %d of %d pods , error: %v", len(pods), atLeast, err) } } @@ -338,7 +340,7 @@ var _ = SIGDescribe("Kubectl client", func() { defer cleanupKubectlInputs(nautilus, ns, updateDemoSelector) ginkgo.By("creating a replication controller") - framework.RunKubectlOrDieInput(ns, nautilus, "create", "-f", "-") + e2ekubectl.RunKubectlOrDieInput(ns, nautilus, "create", "-f", "-") validateController(c, nautilusImage, 2, "update-demo", updateDemoSelector, getUDData("nautilus.jpg", ns), ns) }) @@ -351,15 +353,15 @@ var _ = SIGDescribe("Kubectl client", func() { defer cleanupKubectlInputs(nautilus, ns, updateDemoSelector) ginkgo.By("creating a replication controller") - framework.RunKubectlOrDieInput(ns, nautilus, "create", "-f", "-") + e2ekubectl.RunKubectlOrDieInput(ns, nautilus, "create", "-f", "-") validateController(c, nautilusImage, 2, "update-demo", updateDemoSelector, getUDData("nautilus.jpg", ns), ns) ginkgo.By("scaling down the replication controller") debugDiscovery() - framework.RunKubectlOrDie(ns, "scale", "rc", "update-demo-nautilus", "--replicas=1", "--timeout=5m") + e2ekubectl.RunKubectlOrDie(ns, "scale", "rc", "update-demo-nautilus", "--replicas=1", "--timeout=5m") validateController(c, nautilusImage, 1, "update-demo", updateDemoSelector, getUDData("nautilus.jpg", ns), ns) ginkgo.By("scaling up the replication controller") debugDiscovery() - framework.RunKubectlOrDie(ns, "scale", "rc", "update-demo-nautilus", "--replicas=2", "--timeout=5m") + e2ekubectl.RunKubectlOrDie(ns, "scale", "rc", "update-demo-nautilus", 
"--replicas=2", "--timeout=5m") validateController(c, nautilusImage, 2, "update-demo", updateDemoSelector, getUDData("nautilus.jpg", ns), ns) }) }) @@ -396,7 +398,7 @@ var _ = SIGDescribe("Kubectl client", func() { ginkgo.By("creating all guestbook components") forEachGBFile(func(contents string) { framework.Logf(contents) - framework.RunKubectlOrDieInput(ns, contents, "create", "-f", "-") + e2ekubectl.RunKubectlOrDieInput(ns, contents, "create", "-f", "-") }) ginkgo.By("validating guestbook app") @@ -409,7 +411,7 @@ var _ = SIGDescribe("Kubectl client", func() { ginkgo.BeforeEach(func() { ginkgo.By(fmt.Sprintf("creating the pod from %v", podYaml)) podYaml = commonutils.SubstituteImageName(string(readTestFileOrDie("pod-with-readiness-probe.yaml.in"))) - framework.RunKubectlOrDieInput(ns, podYaml, "create", "-f", "-") + e2ekubectl.RunKubectlOrDieInput(ns, podYaml, "create", "-f", "-") framework.ExpectEqual(e2epod.CheckPodsRunningReady(c, ns, []string{simplePodName}, framework.PodStartTimeout), true) }) ginkgo.AfterEach(func() { @@ -418,7 +420,7 @@ var _ = SIGDescribe("Kubectl client", func() { ginkgo.It("should support exec", func() { ginkgo.By("executing a command in the container") - execOutput := framework.RunKubectlOrDie(ns, "exec", podRunningTimeoutArg, simplePodName, "--", "echo", "running", "in", "container") + execOutput := e2ekubectl.RunKubectlOrDie(ns, "exec", podRunningTimeoutArg, simplePodName, "--", "echo", "running", "in", "container") if e, a := "running in container", strings.TrimSpace(execOutput); e != a { framework.Failf("Unexpected kubectl exec output. Wanted %q, got %q", e, a) } @@ -428,11 +430,11 @@ var _ = SIGDescribe("Kubectl client", func() { for i := 0; i < len(veryLongData); i++ { veryLongData[i] = 'a' } - execOutput = framework.RunKubectlOrDie(ns, "exec", podRunningTimeoutArg, simplePodName, "--", "echo", string(veryLongData)) + execOutput = e2ekubectl.RunKubectlOrDie(ns, "exec", podRunningTimeoutArg, simplePodName, "--", "echo", string(veryLongData)) framework.ExpectEqual(string(veryLongData), strings.TrimSpace(execOutput), "Unexpected kubectl exec output") ginkgo.By("executing a command in the container with noninteractive stdin") - execOutput = framework.NewKubectlCommand(ns, "exec", "-i", podRunningTimeoutArg, simplePodName, "--", "cat"). + execOutput = e2ekubectl.NewKubectlCommand(ns, "exec", "-i", podRunningTimeoutArg, simplePodName, "--", "cat"). WithStdinData("abcd1234"). ExecOrDie(ns) if e, a := "abcd1234", execOutput; e != a { @@ -448,7 +450,7 @@ var _ = SIGDescribe("Kubectl client", func() { defer closer.Close() ginkgo.By("executing a command in the container with pseudo-interactive stdin") - execOutput = framework.NewKubectlCommand(ns, "exec", "-i", podRunningTimeoutArg, simplePodName, "--", "sh"). + execOutput = e2ekubectl.NewKubectlCommand(ns, "exec", "-i", podRunningTimeoutArg, simplePodName, "--", "sh"). WithStdinReader(r). 
ExecOrDie(ns) if e, a := "hi", strings.TrimSpace(execOutput); e != a { @@ -458,7 +460,7 @@ var _ = SIGDescribe("Kubectl client", func() { ginkgo.It("should support exec using resource/name", func() { ginkgo.By("executing a command in the container") - execOutput := framework.RunKubectlOrDie(ns, "exec", podRunningTimeoutArg, simplePodResourceName, "--", "echo", "running", "in", "container") + execOutput := e2ekubectl.RunKubectlOrDie(ns, "exec", podRunningTimeoutArg, simplePodResourceName, "--", "echo", "running", "in", "container") if e, a := "running in container", strings.TrimSpace(execOutput); e != a { framework.Failf("Unexpected kubectl exec output. Wanted %q, got %q", e, a) } @@ -478,7 +480,7 @@ var _ = SIGDescribe("Kubectl client", func() { for _, proxyVar := range []string{"https_proxy", "HTTPS_PROXY"} { proxyLogs.Reset() ginkgo.By("Running kubectl via an HTTP proxy using " + proxyVar) - output := framework.NewKubectlCommand(ns, "exec", podRunningTimeoutArg, "httpd", "--", "echo", "running", "in", "container"). + output := e2ekubectl.NewKubectlCommand(ns, "exec", podRunningTimeoutArg, "httpd", "--", "echo", "running", "in", "container"). WithEnv(append(os.Environ(), fmt.Sprintf("%s=%s", proxyVar, proxyAddr))). ExecOrDie(ns) @@ -512,7 +514,7 @@ var _ = SIGDescribe("Kubectl client", func() { //proxyLogs.Reset() host := fmt.Sprintf("--server=http://127.0.0.1:%d", port) ginkgo.By("Running kubectl via kubectl proxy using " + host) - output := framework.NewKubectlCommand( + output := e2ekubectl.NewKubectlCommand( ns, host, "exec", podRunningTimeoutArg, "httpd", "--", "echo", "running", "in", "container", ).ExecOrDie(ns) @@ -526,12 +528,12 @@ var _ = SIGDescribe("Kubectl client", func() { ginkgo.Context("should return command exit codes", func() { ginkgo.It("execing into a container with a successful command", func() { - _, err := framework.NewKubectlCommand(ns, "exec", "httpd", podRunningTimeoutArg, "--", "/bin/sh", "-c", "exit 0").Exec() + _, err := e2ekubectl.NewKubectlCommand(ns, "exec", "httpd", podRunningTimeoutArg, "--", "/bin/sh", "-c", "exit 0").Exec() framework.ExpectNoError(err) }) ginkgo.It("execing into a container with a failing command", func() { - _, err := framework.NewKubectlCommand(ns, "exec", "httpd", podRunningTimeoutArg, "--", "/bin/sh", "-c", "exit 42").Exec() + _, err := e2ekubectl.NewKubectlCommand(ns, "exec", "httpd", podRunningTimeoutArg, "--", "/bin/sh", "-c", "exit 42").Exec() ee, ok := err.(uexec.ExitError) if !ok { framework.Failf("Got unexpected error type, expected uexec.ExitError, got %T: %v", err, err) @@ -540,12 +542,12 @@ var _ = SIGDescribe("Kubectl client", func() { }) ginkgo.It("running a successful command", func() { - _, err := framework.NewKubectlCommand(ns, "run", "-i", "--image="+busyboxImage, "--restart=Never", podRunningTimeoutArg, "success", "--", "/bin/sh", "-c", "exit 0").Exec() + _, err := e2ekubectl.NewKubectlCommand(ns, "run", "-i", "--image="+busyboxImage, "--restart=Never", podRunningTimeoutArg, "success", "--", "/bin/sh", "-c", "exit 0").Exec() framework.ExpectNoError(err) }) ginkgo.It("running a failing command", func() { - _, err := framework.NewKubectlCommand(ns, "run", "-i", "--image="+busyboxImage, "--restart=Never", podRunningTimeoutArg, "failure-1", "--", "/bin/sh", "-c", "exit 42").Exec() + _, err := e2ekubectl.NewKubectlCommand(ns, "run", "-i", "--image="+busyboxImage, "--restart=Never", podRunningTimeoutArg, "failure-1", "--", "/bin/sh", "-c", "exit 42").Exec() ee, ok := err.(uexec.ExitError) if !ok { framework.Failf("Got 
unexpected error type, expected uexec.ExitError, got %T: %v", err, err) @@ -554,7 +556,7 @@ var _ = SIGDescribe("Kubectl client", func() { }) ginkgo.It("[Slow] running a failing command without --restart=Never", func() { - _, err := framework.NewKubectlCommand(ns, "run", "-i", "--image="+busyboxImage, "--restart=OnFailure", podRunningTimeoutArg, "failure-2", "--", "/bin/sh", "-c", "cat && exit 42"). + _, err := e2ekubectl.NewKubectlCommand(ns, "run", "-i", "--image="+busyboxImage, "--restart=OnFailure", podRunningTimeoutArg, "failure-2", "--", "/bin/sh", "-c", "cat && exit 42"). WithStdinData("abcd1234"). Exec() ee, ok := err.(uexec.ExitError) @@ -567,7 +569,7 @@ var _ = SIGDescribe("Kubectl client", func() { }) ginkgo.It("[Slow] running a failing command without --restart=Never, but with --rm", func() { - _, err := framework.NewKubectlCommand(ns, "run", "-i", "--image="+busyboxImage, "--restart=OnFailure", "--rm", podRunningTimeoutArg, "failure-3", "--", "/bin/sh", "-c", "cat && exit 42"). + _, err := e2ekubectl.NewKubectlCommand(ns, "run", "-i", "--image="+busyboxImage, "--restart=OnFailure", "--rm", podRunningTimeoutArg, "failure-3", "--", "/bin/sh", "-c", "cat && exit 42"). WithStdinData("abcd1234"). Exec() ee, ok := err.(uexec.ExitError) @@ -581,7 +583,7 @@ var _ = SIGDescribe("Kubectl client", func() { }) ginkgo.It("[Slow] running a failing command with --leave-stdin-open", func() { - _, err := framework.NewKubectlCommand(ns, "run", "-i", "--image="+busyboxImage, "--restart=Never", podRunningTimeoutArg, "failure-4", "--leave-stdin-open", "--", "/bin/sh", "-c", "exit 42"). + _, err := e2ekubectl.NewKubectlCommand(ns, "run", "-i", "--image="+busyboxImage, "--restart=Never", podRunningTimeoutArg, "failure-4", "--leave-stdin-open", "--", "/bin/sh", "-c", "exit 42"). WithStdinData("abcd1234"). Exec() framework.ExpectNoError(err) @@ -592,7 +594,7 @@ var _ = SIGDescribe("Kubectl client", func() { waitForStdinContent := func(pod, content string) string { var logOutput string err := wait.Poll(10*time.Second, 5*time.Minute, func() (bool, error) { - logOutput = framework.RunKubectlOrDie(ns, "logs", pod) + logOutput = e2ekubectl.RunKubectlOrDie(ns, "logs", pod) return strings.Contains(logOutput, content), nil }) @@ -602,7 +604,7 @@ var _ = SIGDescribe("Kubectl client", func() { ginkgo.By("executing a command with run and attach with stdin") // We wait for a non-empty line so we know kubectl has attached - framework.NewKubectlCommand(ns, "run", "run-test", "--image="+busyboxImage, "--restart=OnFailure", podRunningTimeoutArg, "--attach=true", "--stdin", "--", "sh", "-c", "echo -n read: && cat && echo 'stdin closed'"). + e2ekubectl.NewKubectlCommand(ns, "run", "run-test", "--image="+busyboxImage, "--restart=OnFailure", podRunningTimeoutArg, "--attach=true", "--stdin", "--", "sh", "-c", "echo -n read: && cat && echo 'stdin closed'"). WithStdinData("value\nabcd1234"). ExecOrDie(ns) @@ -619,7 +621,7 @@ var _ = SIGDescribe("Kubectl client", func() { // "stdin closed", but hasn't exited yet. // We wait 10 seconds before printing to give time to kubectl to attach // to the container, this does not solve the race though. - framework.NewKubectlCommand(ns, "run", "run-test-2", "--image="+busyboxImage, "--restart=OnFailure", podRunningTimeoutArg, "--attach=true", "--leave-stdin-open=true", "--", "sh", "-c", "cat && echo 'stdin closed'"). 
+ e2ekubectl.NewKubectlCommand(ns, "run", "run-test-2", "--image="+busyboxImage, "--restart=OnFailure", podRunningTimeoutArg, "--attach=true", "--leave-stdin-open=true", "--", "sh", "-c", "cat && echo 'stdin closed'"). WithStdinData("abcd1234"). ExecOrDie(ns) @@ -630,7 +632,7 @@ var _ = SIGDescribe("Kubectl client", func() { gomega.Expect(c.CoreV1().Pods(ns).Delete(context.TODO(), "run-test-2", metav1.DeleteOptions{})).To(gomega.BeNil()) ginkgo.By("executing a command with run and attach with stdin with open stdin should remain running") - framework.NewKubectlCommand(ns, "run", "run-test-3", "--image="+busyboxImage, "--restart=OnFailure", podRunningTimeoutArg, "--attach=true", "--leave-stdin-open=true", "--stdin", "--", "sh", "-c", "cat && echo 'stdin closed'"). + e2ekubectl.NewKubectlCommand(ns, "run", "run-test-3", "--image="+busyboxImage, "--restart=OnFailure", podRunningTimeoutArg, "--attach=true", "--leave-stdin-open=true", "--stdin", "--", "sh", "-c", "cat && echo 'stdin closed'"). WithStdinData("abcd1234\n"). ExecOrDie(ns) @@ -652,13 +654,13 @@ var _ = SIGDescribe("Kubectl client", func() { podName := "run-log-test" ginkgo.By("executing a command with run") - framework.RunKubectlOrDie(ns, "run", podName, "--image="+busyboxImage, "--restart=OnFailure", podRunningTimeoutArg, "--", "sh", "-c", "sleep 10; seq 100 | while read i; do echo $i; sleep 0.01; done; echo EOF") + e2ekubectl.RunKubectlOrDie(ns, "run", podName, "--image="+busyboxImage, "--restart=OnFailure", podRunningTimeoutArg, "--", "sh", "-c", "sleep 10; seq 100 | while read i; do echo $i; sleep 0.01; done; echo EOF") if !e2epod.CheckPodsRunningReadyOrSucceeded(c, ns, []string{podName}, framework.PodStartTimeout) { framework.Failf("Pod for run-log-test was not ready") } - logOutput := framework.RunKubectlOrDie(ns, "logs", "-f", "run-log-test") + logOutput := e2ekubectl.RunKubectlOrDie(ns, "logs", "-f", "run-log-test") gomega.Expect(logOutput).To(gomega.ContainSubstring("EOF")) }) @@ -711,11 +713,11 @@ var _ = SIGDescribe("Kubectl client", func() { framework.ExpectNoError(err) kubectlPath = strings.TrimSpace(string(kubectlPathNormalized)) - inClusterHost := strings.TrimSpace(framework.RunHostCmdOrDie(ns, simplePodName, "printenv KUBERNETES_SERVICE_HOST")) - inClusterPort := strings.TrimSpace(framework.RunHostCmdOrDie(ns, simplePodName, "printenv KUBERNETES_SERVICE_PORT")) + inClusterHost := strings.TrimSpace(e2eoutput.RunHostCmdOrDie(ns, simplePodName, "printenv KUBERNETES_SERVICE_HOST")) + inClusterPort := strings.TrimSpace(e2eoutput.RunHostCmdOrDie(ns, simplePodName, "printenv KUBERNETES_SERVICE_PORT")) inClusterURL := net.JoinHostPort(inClusterHost, inClusterPort) framework.Logf("copying %s to the %s pod", kubectlPath, simplePodName) - framework.RunKubectlOrDie(ns, "cp", kubectlPath, ns+"/"+simplePodName+":/tmp/") + e2ekubectl.RunKubectlOrDie(ns, "cp", kubectlPath, ns+"/"+simplePodName+":/tmp/") // Build a kubeconfig file that will make use of the injected ca and token, // but point at the DNS host and the default namespace @@ -745,7 +747,7 @@ users: tokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token `), os.FileMode(0755))) framework.Logf("copying override kubeconfig to the %s pod", simplePodName) - framework.RunKubectlOrDie(ns, "cp", filepath.Join(tmpDir, overrideKubeconfigName), ns+"/"+simplePodName+":/tmp/") + e2ekubectl.RunKubectlOrDie(ns, "cp", filepath.Join(tmpDir, overrideKubeconfigName), ns+"/"+simplePodName+":/tmp/") framework.ExpectNoError(os.WriteFile(filepath.Join(tmpDir, 
"invalid-configmap-with-namespace.yaml"), []byte(` kind: ConfigMap @@ -761,30 +763,30 @@ metadata: name: "configmap without namespace and invalid name" `), os.FileMode(0755))) framework.Logf("copying configmap manifests to the %s pod", simplePodName) - framework.RunKubectlOrDie(ns, "cp", filepath.Join(tmpDir, "invalid-configmap-with-namespace.yaml"), ns+"/"+simplePodName+":/tmp/") - framework.RunKubectlOrDie(ns, "cp", filepath.Join(tmpDir, "invalid-configmap-without-namespace.yaml"), ns+"/"+simplePodName+":/tmp/") + e2ekubectl.RunKubectlOrDie(ns, "cp", filepath.Join(tmpDir, "invalid-configmap-with-namespace.yaml"), ns+"/"+simplePodName+":/tmp/") + e2ekubectl.RunKubectlOrDie(ns, "cp", filepath.Join(tmpDir, "invalid-configmap-without-namespace.yaml"), ns+"/"+simplePodName+":/tmp/") ginkgo.By("getting pods with in-cluster configs") - execOutput := framework.RunHostCmdOrDie(ns, simplePodName, "/tmp/kubectl get pods --v=6 2>&1") + execOutput := e2eoutput.RunHostCmdOrDie(ns, simplePodName, "/tmp/kubectl get pods --v=6 2>&1") gomega.Expect(execOutput).To(gomega.MatchRegexp("httpd +1/1 +Running")) gomega.Expect(execOutput).To(gomega.ContainSubstring("Using in-cluster namespace")) gomega.Expect(execOutput).To(gomega.ContainSubstring("Using in-cluster configuration")) ginkgo.By("creating an object containing a namespace with in-cluster config") - _, err = framework.RunHostCmd(ns, simplePodName, "/tmp/kubectl create -f /tmp/invalid-configmap-with-namespace.yaml --v=6 2>&1") + _, err = e2eoutput.RunHostCmd(ns, simplePodName, "/tmp/kubectl create -f /tmp/invalid-configmap-with-namespace.yaml --v=6 2>&1") gomega.Expect(err).To(gomega.ContainSubstring("Using in-cluster namespace")) gomega.Expect(err).To(gomega.ContainSubstring("Using in-cluster configuration")) gomega.Expect(err).To(gomega.ContainSubstring(fmt.Sprintf("POST https://%s/api/v1/namespaces/configmap-namespace/configmaps", inClusterURL))) ginkgo.By("creating an object not containing a namespace with in-cluster config") - _, err = framework.RunHostCmd(ns, simplePodName, "/tmp/kubectl create -f /tmp/invalid-configmap-without-namespace.yaml --v=6 2>&1") + _, err = e2eoutput.RunHostCmd(ns, simplePodName, "/tmp/kubectl create -f /tmp/invalid-configmap-without-namespace.yaml --v=6 2>&1") gomega.Expect(err).To(gomega.ContainSubstring("Using in-cluster namespace")) gomega.Expect(err).To(gomega.ContainSubstring("Using in-cluster configuration")) gomega.Expect(err).To(gomega.ContainSubstring(fmt.Sprintf("POST https://%s/api/v1/namespaces/%s/configmaps", inClusterURL, f.Namespace.Name))) ginkgo.By("trying to use kubectl with invalid token") - _, err = framework.RunHostCmd(ns, simplePodName, "/tmp/kubectl get pods --token=invalid --v=7 2>&1") + _, err = e2eoutput.RunHostCmd(ns, simplePodName, "/tmp/kubectl get pods --token=invalid --v=7 2>&1") framework.Logf("got err %v", err) framework.ExpectError(err) gomega.Expect(err).To(gomega.ContainSubstring("Using in-cluster namespace")) @@ -792,21 +794,21 @@ metadata: gomega.Expect(err).To(gomega.ContainSubstring("Response Status: 401 Unauthorized")) ginkgo.By("trying to use kubectl with invalid server") - _, err = framework.RunHostCmd(ns, simplePodName, "/tmp/kubectl get pods --server=invalid --v=6 2>&1") + _, err = e2eoutput.RunHostCmd(ns, simplePodName, "/tmp/kubectl get pods --server=invalid --v=6 2>&1") framework.Logf("got err %v", err) framework.ExpectError(err) gomega.Expect(err).To(gomega.ContainSubstring("Unable to connect to the server")) gomega.Expect(err).To(gomega.ContainSubstring("GET 
http://invalid/api")) ginkgo.By("trying to use kubectl with invalid namespace") - execOutput = framework.RunHostCmdOrDie(ns, simplePodName, "/tmp/kubectl get pods --namespace=invalid --v=6 2>&1") + execOutput = e2eoutput.RunHostCmdOrDie(ns, simplePodName, "/tmp/kubectl get pods --namespace=invalid --v=6 2>&1") gomega.Expect(execOutput).To(gomega.ContainSubstring("No resources found")) gomega.Expect(execOutput).ToNot(gomega.ContainSubstring("Using in-cluster namespace")) gomega.Expect(execOutput).To(gomega.ContainSubstring("Using in-cluster configuration")) gomega.Expect(execOutput).To(gomega.MatchRegexp(fmt.Sprintf("GET http[s]?://[\\[]?%s[\\]]?:%s/api/v1/namespaces/invalid/pods", inClusterHost, inClusterPort))) ginkgo.By("trying to use kubectl with kubeconfig") - execOutput = framework.RunHostCmdOrDie(ns, simplePodName, "/tmp/kubectl get pods --kubeconfig=/tmp/"+overrideKubeconfigName+" --v=6 2>&1") + execOutput = e2eoutput.RunHostCmdOrDie(ns, simplePodName, "/tmp/kubectl get pods --kubeconfig=/tmp/"+overrideKubeconfigName+" --v=6 2>&1") gomega.Expect(execOutput).ToNot(gomega.ContainSubstring("Using in-cluster namespace")) gomega.Expect(execOutput).ToNot(gomega.ContainSubstring("Using in-cluster configuration")) gomega.Expect(execOutput).To(gomega.ContainSubstring("GET https://kubernetes.default.svc:443/api/v1/namespaces/default/pods")) @@ -821,7 +823,7 @@ metadata: */ framework.ConformanceIt("should check if v1 is in available api versions ", func() { ginkgo.By("validating api versions") - output := framework.RunKubectlOrDie(ns, "api-versions") + output := e2ekubectl.RunKubectlOrDie(ns, "api-versions") if !strings.Contains(output, "v1") { framework.Failf("No v1 in kubectl api-versions") } @@ -831,12 +833,12 @@ metadata: ginkgo.Describe("Kubectl get componentstatuses", func() { ginkgo.It("should get componentstatuses", func() { ginkgo.By("getting list of componentstatuses") - output := framework.RunKubectlOrDie(ns, "get", "componentstatuses", "-o", "jsonpath={.items[*].metadata.name}") + output := e2ekubectl.RunKubectlOrDie(ns, "get", "componentstatuses", "-o", "jsonpath={.items[*].metadata.name}") components := strings.Split(output, " ") ginkgo.By("getting details of componentstatuses") for _, component := range components { ginkgo.By("getting status of " + component) - framework.RunKubectlOrDie(ns, "get", "componentstatuses", component) + e2ekubectl.RunKubectlOrDie(ns, "get", "componentstatuses", component) } }) }) @@ -846,10 +848,10 @@ metadata: controllerJSON := commonutils.SubstituteImageName(string(readTestFileOrDie(agnhostControllerFilename))) ginkgo.By("creating Agnhost RC") - framework.RunKubectlOrDieInput(ns, controllerJSON, "create", "-f", "-") + e2ekubectl.RunKubectlOrDieInput(ns, controllerJSON, "create", "-f", "-") ginkgo.By("applying a modified configuration") stdin := modifyReplicationControllerConfiguration(controllerJSON) - framework.NewKubectlCommand(ns, "apply", "-f", "-"). + e2ekubectl.NewKubectlCommand(ns, "apply", "-f", "-"). WithStdinReader(stdin). 
ExecOrDie(ns) ginkgo.By("checking the result") @@ -859,16 +861,16 @@ metadata: serviceJSON := readTestFileOrDie(agnhostServiceFilename) ginkgo.By("creating Agnhost SVC") - framework.RunKubectlOrDieInput(ns, string(serviceJSON[:]), "create", "-f", "-") + e2ekubectl.RunKubectlOrDieInput(ns, string(serviceJSON[:]), "create", "-f", "-") ginkgo.By("getting the original port") - originalNodePort := framework.RunKubectlOrDie(ns, "get", "service", "agnhost-primary", "-o", "jsonpath={.spec.ports[0].port}") + originalNodePort := e2ekubectl.RunKubectlOrDie(ns, "get", "service", "agnhost-primary", "-o", "jsonpath={.spec.ports[0].port}") ginkgo.By("applying the same configuration") - framework.RunKubectlOrDieInput(ns, string(serviceJSON[:]), "apply", "-f", "-") + e2ekubectl.RunKubectlOrDieInput(ns, string(serviceJSON[:]), "apply", "-f", "-") ginkgo.By("getting the port after applying configuration") - currentNodePort := framework.RunKubectlOrDie(ns, "get", "service", "agnhost-primary", "-o", "jsonpath={.spec.ports[0].port}") + currentNodePort := e2ekubectl.RunKubectlOrDie(ns, "get", "service", "agnhost-primary", "-o", "jsonpath={.spec.ports[0].port}") ginkgo.By("checking the result") if originalNodePort != currentNodePort { @@ -882,20 +884,20 @@ metadata: deployment3Yaml := commonutils.SubstituteImageName(string(readTestFileOrDie(httpdDeployment3Filename))) ginkgo.By("deployment replicas number is 2") - framework.RunKubectlOrDieInput(ns, deployment1Yaml, "apply", "-f", "-") + e2ekubectl.RunKubectlOrDieInput(ns, deployment1Yaml, "apply", "-f", "-") ginkgo.By("check the last-applied matches expectations annotations") - output := framework.RunKubectlOrDieInput(ns, deployment1Yaml, "apply", "view-last-applied", "-f", "-", "-o", "json") + output := e2ekubectl.RunKubectlOrDieInput(ns, deployment1Yaml, "apply", "view-last-applied", "-f", "-", "-o", "json") requiredString := "\"replicas\": 2" if !strings.Contains(output, requiredString) { framework.Failf("Missing %s in kubectl view-last-applied", requiredString) } ginkgo.By("apply file doesn't have replicas") - framework.RunKubectlOrDieInput(ns, deployment2Yaml, "apply", "set-last-applied", "-f", "-") + e2ekubectl.RunKubectlOrDieInput(ns, deployment2Yaml, "apply", "set-last-applied", "-f", "-") ginkgo.By("check last-applied has been updated, annotations doesn't have replicas") - output = framework.RunKubectlOrDieInput(ns, deployment1Yaml, "apply", "view-last-applied", "-f", "-", "-o", "json") + output = e2ekubectl.RunKubectlOrDieInput(ns, deployment1Yaml, "apply", "view-last-applied", "-f", "-", "-o", "json") requiredString = "\"replicas\": 2" if strings.Contains(output, requiredString) { framework.Failf("Presenting %s in kubectl view-last-applied", requiredString) @@ -904,13 +906,13 @@ metadata: ginkgo.By("scale set replicas to 3") httpdDeploy := "httpd-deployment" debugDiscovery() - framework.RunKubectlOrDie(ns, "scale", "deployment", httpdDeploy, "--replicas=3") + e2ekubectl.RunKubectlOrDie(ns, "scale", "deployment", httpdDeploy, "--replicas=3") ginkgo.By("apply file doesn't have replicas but image changed") - framework.RunKubectlOrDieInput(ns, deployment3Yaml, "apply", "-f", "-") + e2ekubectl.RunKubectlOrDieInput(ns, deployment3Yaml, "apply", "-f", "-") ginkgo.By("verify replicas still is 3 and image has been updated") - output = framework.RunKubectlOrDieInput(ns, deployment3Yaml, "get", "-f", "-", "-o", "json") + output = e2ekubectl.RunKubectlOrDieInput(ns, deployment3Yaml, "get", "-f", "-", "-o", "json") requiredItems := []string{"\"replicas\": 3", 
imageutils.GetE2EImage(imageutils.Httpd)} for _, item := range requiredItems { if !strings.Contains(output, item) { @@ -929,14 +931,14 @@ metadata: framework.ConformanceIt("should check if kubectl diff finds a difference for Deployments", func() { ginkgo.By("create deployment with httpd image") deployment := commonutils.SubstituteImageName(string(readTestFileOrDie(httpdDeployment3Filename))) - framework.RunKubectlOrDieInput(ns, deployment, "create", "-f", "-") + e2ekubectl.RunKubectlOrDieInput(ns, deployment, "create", "-f", "-") ginkgo.By("verify diff finds difference between live and declared image") deployment = strings.Replace(deployment, httpdImage, busyboxImage, 1) if !strings.Contains(deployment, busyboxImage) { framework.Failf("Failed replacing image from %s to %s in:\n%s\n", httpdImage, busyboxImage, deployment) } - output, err := framework.RunKubectlInput(ns, deployment, "diff", "-f", "-") + output, err := e2ekubectl.RunKubectlInput(ns, deployment, "diff", "-f", "-") if err, ok := err.(*exec.ExitError); ok && err.ExitCode() == 1 { framework.Failf("Expected kubectl diff exit code of 1, but got %d: %v\n", err.ExitCode(), err) } @@ -947,7 +949,7 @@ metadata: } } - framework.RunKubectlOrDieInput(ns, deployment, "delete", "-f", "-") + e2ekubectl.RunKubectlOrDieInput(ns, deployment, "delete", "-f", "-") }) }) @@ -960,11 +962,11 @@ metadata: framework.ConformanceIt("should check if kubectl can dry-run update Pods", func() { ginkgo.By("running the image " + httpdImage) podName := "e2e-test-httpd-pod" - framework.RunKubectlOrDie(ns, "run", podName, "--image="+httpdImage, podRunningTimeoutArg, "--labels=run="+podName) + e2ekubectl.RunKubectlOrDie(ns, "run", podName, "--image="+httpdImage, podRunningTimeoutArg, "--labels=run="+podName) ginkgo.By("replace the image in the pod with server-side dry-run") specImage := fmt.Sprintf(`{"spec":{"containers":[{"name": "%s","image": "%s"}]}}`, podName, busyboxImage) - framework.RunKubectlOrDie(ns, "patch", "pod", podName, "-p", specImage, "--dry-run=server") + e2ekubectl.RunKubectlOrDie(ns, "patch", "pod", podName, "-p", specImage, "--dry-run=server") ginkgo.By("verifying the pod " + podName + " has the right image " + httpdImage) pod, err := c.CoreV1().Pods(ns).Get(context.TODO(), podName, metav1.GetOptions{}) @@ -976,7 +978,7 @@ metadata: framework.Failf("Failed creating pod with expected image %s", httpdImage) } - framework.RunKubectlOrDie(ns, "delete", "pods", podName) + e2ekubectl.RunKubectlOrDie(ns, "delete", "pods", podName) }) }) @@ -1166,7 +1168,7 @@ metadata: }` meta := unknownFieldMetadataJSON(gvk, "test-cr") unknownRootMetaCR := fmt.Sprintf(embeddedCRPattern, meta, "", ns) - _, err = framework.RunKubectlInput(ns, unknownRootMetaCR, "create", "--validate=true", "-f", "-") + _, err = e2ekubectl.RunKubectlInput(ns, unknownRootMetaCR, "create", "--validate=true", "-f", "-") if err == nil { framework.Failf("unexpected nil error when creating CR with unknown root metadata field") } @@ -1180,7 +1182,7 @@ metadata: ginkgo.By("attempting to create a CR with unknown metadata fields in the embedded object") metaEmbedded := fmt.Sprintf(metaPattern, testCRD.Crd.Spec.Names.Kind, testCRD.Crd.Spec.Group, testCRD.Crd.Spec.Versions[0].Name, "test-cr-embedded") unknownEmbeddedMetaCR := fmt.Sprintf(embeddedCRPattern, metaEmbedded, `"unknownMetaEmbedded": "bar",`, ns) - _, err = framework.RunKubectlInput(ns, unknownEmbeddedMetaCR, "create", "--validate=true", "-f", "-") + _, err = e2ekubectl.RunKubectlInput(ns, unknownEmbeddedMetaCR, "create", 
"--validate=true", "-f", "-") if err == nil { framework.Failf("unexpected nil error when creating CR with unknown embedded metadata field") } @@ -1225,7 +1227,7 @@ metadata: } } ` - _, err := framework.RunKubectlInput(ns, invalidMetaDeployment, "create", "-f", "-") + _, err := e2ekubectl.RunKubectlInput(ns, invalidMetaDeployment, "create", "-f", "-") if err == nil { framework.Failf("unexpected nil error when creating deployment with unknown metadata field") } @@ -1247,7 +1249,7 @@ metadata: */ framework.ConformanceIt("should check if Kubernetes control plane services is included in cluster-info ", func() { ginkgo.By("validating cluster-info") - output := framework.RunKubectlOrDie(ns, "cluster-info") + output := e2ekubectl.RunKubectlOrDie(ns, "cluster-info") // Can't check exact strings due to terminal control commands (colors) requiredItems := []string{"Kubernetes control plane", "is running at"} for _, item := range requiredItems { @@ -1261,7 +1263,7 @@ metadata: ginkgo.Describe("Kubectl cluster-info dump", func() { ginkgo.It("should check if cluster-info dump succeeds", func() { ginkgo.By("running cluster-info dump") - framework.RunKubectlOrDie(ns, "cluster-info", "dump") + e2ekubectl.RunKubectlOrDie(ns, "cluster-info", "dump") }) }) @@ -1275,15 +1277,15 @@ metadata: controllerJSON := commonutils.SubstituteImageName(string(readTestFileOrDie(agnhostControllerFilename))) serviceJSON := readTestFileOrDie(agnhostServiceFilename) - framework.RunKubectlOrDieInput(ns, controllerJSON, "create", "-f", "-") - framework.RunKubectlOrDieInput(ns, string(serviceJSON[:]), "create", "-f", "-") + e2ekubectl.RunKubectlOrDieInput(ns, controllerJSON, "create", "-f", "-") + e2ekubectl.RunKubectlOrDieInput(ns, string(serviceJSON[:]), "create", "-f", "-") ginkgo.By("Waiting for Agnhost primary to start.") waitForOrFailWithDebug(1) // Pod forEachPod(func(pod v1.Pod) { - output := framework.RunKubectlOrDie(ns, "describe", "pod", pod.Name) + output := e2ekubectl.RunKubectlOrDie(ns, "describe", "pod", pod.Name) requiredStrings := [][]string{ {"Name:", "agnhost-primary-"}, {"Namespace:", ns}, @@ -1317,7 +1319,7 @@ metadata: checkKubectlOutputWithRetry(ns, requiredStrings, "describe", "rc", "agnhost-primary") // Service - output := framework.RunKubectlOrDie(ns, "describe", "service", "agnhost-primary") + output := e2ekubectl.RunKubectlOrDie(ns, "describe", "service", "agnhost-primary") requiredStrings = [][]string{ {"Name:", "agnhost-primary"}, {"Namespace:", ns}, @@ -1337,7 +1339,7 @@ metadata: nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{}) framework.ExpectNoError(err) node := nodes.Items[0] - output = framework.RunKubectlOrDie(ns, "describe", "node", node.Name) + output = e2ekubectl.RunKubectlOrDie(ns, "describe", "node", node.Name) requiredStrings = [][]string{ {"Name:", node.Name}, {"Labels:"}, @@ -1357,7 +1359,7 @@ metadata: checkOutput(output, requiredStrings) // Namespace - output = framework.RunKubectlOrDie(ns, "describe", "namespace", ns) + output = e2ekubectl.RunKubectlOrDie(ns, "describe", "namespace", ns) requiredStrings = [][]string{ {"Name:", ns}, {"Labels:"}, @@ -1371,7 +1373,7 @@ metadata: ginkgo.It("should check if kubectl describe prints relevant information for cronjob", func() { ginkgo.By("creating a cronjob") cronjobYaml := commonutils.SubstituteImageName(string(readTestFileOrDie("busybox-cronjob.yaml.in"))) - framework.RunKubectlOrDieInput(ns, cronjobYaml, "create", "-f", "-") + e2ekubectl.RunKubectlOrDieInput(ns, cronjobYaml, "create", "-f", "-") ginkgo.By("waiting 
for cronjob to start.") err := wait.PollImmediate(time.Second, time.Minute, func() (bool, error) { @@ -1384,7 +1386,7 @@ metadata: framework.ExpectNoError(err) ginkgo.By("verifying kubectl describe prints") - output := framework.RunKubectlOrDie(ns, "describe", "cronjob", "cronjob-test") + output := e2ekubectl.RunKubectlOrDie(ns, "describe", "cronjob", "cronjob-test") requiredStrings := [][]string{ {"Name:", "cronjob-test"}, {"Namespace:", ns}, @@ -1418,14 +1420,14 @@ metadata: ginkgo.By("creating Agnhost RC") framework.Logf("namespace %v", ns) - framework.RunKubectlOrDieInput(ns, controllerJSON, "create", "-f", "-") + e2ekubectl.RunKubectlOrDieInput(ns, controllerJSON, "create", "-f", "-") // It may take a while for the pods to get registered in some cases, wait to be sure. ginkgo.By("Waiting for Agnhost primary to start.") waitForOrFailWithDebug(1) forEachPod(func(pod v1.Pod) { framework.Logf("wait on agnhost-primary startup in %v ", ns) - framework.LookForStringInLog(ns, pod.Name, "agnhost-primary", "Paused", framework.PodStartTimeout) + e2eoutput.LookForStringInLog(ns, pod.Name, "agnhost-primary", "Paused", framework.PodStartTimeout) }) validateService := func(name string, servicePort int, timeout time.Duration) { err := wait.Poll(framework.Poll, timeout, func() (bool, error) { @@ -1476,12 +1478,12 @@ metadata: } ginkgo.By("exposing RC") - framework.RunKubectlOrDie(ns, "expose", "rc", "agnhost-primary", "--name=rm2", "--port=1234", fmt.Sprintf("--target-port=%d", agnhostPort)) + e2ekubectl.RunKubectlOrDie(ns, "expose", "rc", "agnhost-primary", "--name=rm2", "--port=1234", fmt.Sprintf("--target-port=%d", agnhostPort)) e2enetwork.WaitForService(c, ns, "rm2", true, framework.Poll, framework.ServiceStartTimeout) validateService("rm2", 1234, framework.ServiceStartTimeout) ginkgo.By("exposing service") - framework.RunKubectlOrDie(ns, "expose", "service", "rm2", "--name=rm3", "--port=2345", fmt.Sprintf("--target-port=%d", agnhostPort)) + e2ekubectl.RunKubectlOrDie(ns, "expose", "service", "rm2", "--name=rm3", "--port=2345", fmt.Sprintf("--target-port=%d", agnhostPort)) e2enetwork.WaitForService(c, ns, "rm3", true, framework.Poll, framework.ServiceStartTimeout) validateService("rm3", 2345, framework.ServiceStartTimeout) }) @@ -1492,7 +1494,7 @@ metadata: ginkgo.BeforeEach(func() { ginkgo.By("creating the pod") podYaml = commonutils.SubstituteImageName(string(readTestFileOrDie("pause-pod.yaml.in"))) - framework.RunKubectlOrDieInput(ns, podYaml, "create", "-f", "-") + e2ekubectl.RunKubectlOrDieInput(ns, podYaml, "create", "-f", "-") framework.ExpectEqual(e2epod.CheckPodsRunningReady(c, ns, []string{pausePodName}, framework.PodStartTimeout), true) }) ginkgo.AfterEach(func() { @@ -1509,17 +1511,17 @@ metadata: labelValue := "testing-label-value" ginkgo.By("adding the label " + labelName + " with value " + labelValue + " to a pod") - framework.RunKubectlOrDie(ns, "label", "pods", pausePodName, labelName+"="+labelValue) + e2ekubectl.RunKubectlOrDie(ns, "label", "pods", pausePodName, labelName+"="+labelValue) ginkgo.By("verifying the pod has the label " + labelName + " with the value " + labelValue) - output := framework.RunKubectlOrDie(ns, "get", "pod", pausePodName, "-L", labelName) + output := e2ekubectl.RunKubectlOrDie(ns, "get", "pod", pausePodName, "-L", labelName) if !strings.Contains(output, labelValue) { framework.Failf("Failed updating label " + labelName + " to the pod " + pausePodName) } ginkgo.By("removing the label " + labelName + " of a pod") - framework.RunKubectlOrDie(ns, "label", 
"pods", pausePodName, labelName+"-") + e2ekubectl.RunKubectlOrDie(ns, "label", "pods", pausePodName, labelName+"-") ginkgo.By("verifying the pod doesn't have the label " + labelName) - output = framework.RunKubectlOrDie(ns, "get", "pod", pausePodName, "-L", labelName) + output = e2ekubectl.RunKubectlOrDie(ns, "get", "pod", pausePodName, "-L", labelName) if strings.Contains(output, labelValue) { framework.Failf("Failed removing label " + labelName + " of the pod " + pausePodName) } @@ -1531,7 +1533,7 @@ metadata: ginkgo.BeforeEach(func() { ginkgo.By("creating the pod") podYaml = commonutils.SubstituteImageName(string(readTestFileOrDie("busybox-pod.yaml.in"))) - framework.RunKubectlOrDieInput(ns, podYaml, "create", "-f", "-") + e2ekubectl.RunKubectlOrDieInput(ns, podYaml, "create", "-f", "-") framework.ExpectEqual(e2epod.CheckPodsRunningReady(c, ns, []string{busyboxPodName}, framework.PodStartTimeout), true) }) ginkgo.AfterEach(func() { @@ -1552,7 +1554,7 @@ metadata: } ginkgo.By("specifying a remote filepath " + podSource + " on the pod") - framework.RunKubectlOrDie(ns, "cp", podSource, tempDestination.Name()) + e2ekubectl.RunKubectlOrDie(ns, "cp", podSource, tempDestination.Name()) ginkgo.By("verifying that the contents of the remote file " + podSource + " have been copied to a local file " + tempDestination.Name()) localData, err := io.ReadAll(tempDestination) if err != nil { @@ -1570,10 +1572,10 @@ metadata: ginkgo.BeforeEach(func() { ginkgo.By("creating an pod") // Agnhost image generates logs for a total of 100 lines over 20s. - framework.RunKubectlOrDie(ns, "run", podName, "--image="+agnhostImage, "--restart=Never", podRunningTimeoutArg, "--", "logs-generator", "--log-lines-total", "100", "--run-duration", "20s") + e2ekubectl.RunKubectlOrDie(ns, "run", podName, "--image="+agnhostImage, "--restart=Never", podRunningTimeoutArg, "--", "logs-generator", "--log-lines-total", "100", "--run-duration", "20s") }) ginkgo.AfterEach(func() { - framework.RunKubectlOrDie(ns, "delete", "pod", podName) + e2ekubectl.RunKubectlOrDie(ns, "delete", "pod", podName) }) /* @@ -1600,23 +1602,23 @@ metadata: } ginkgo.By("checking for a matching strings") - _, err := framework.LookForStringInLog(ns, podName, containerName, "/api/v1/namespaces/kube-system", framework.PodStartTimeout) + _, err := e2eoutput.LookForStringInLog(ns, podName, containerName, "/api/v1/namespaces/kube-system", framework.PodStartTimeout) framework.ExpectNoError(err) ginkgo.By("limiting log lines") - out := framework.RunKubectlOrDie(ns, "logs", podName, containerName, "--tail=1") + out := e2ekubectl.RunKubectlOrDie(ns, "logs", podName, containerName, "--tail=1") framework.Logf("got output %q", out) gomega.Expect(len(out)).NotTo(gomega.BeZero()) framework.ExpectEqual(len(lines(out)), 1) ginkgo.By("limiting log bytes") - out = framework.RunKubectlOrDie(ns, "logs", podName, containerName, "--limit-bytes=1") + out = e2ekubectl.RunKubectlOrDie(ns, "logs", podName, containerName, "--limit-bytes=1") framework.Logf("got output %q", out) framework.ExpectEqual(len(lines(out)), 1) framework.ExpectEqual(len(out), 1) ginkgo.By("exposing timestamps") - out = framework.RunKubectlOrDie(ns, "logs", podName, containerName, "--tail=1", "--timestamps") + out = e2ekubectl.RunKubectlOrDie(ns, "logs", podName, containerName, "--tail=1", "--timestamps") framework.Logf("got output %q", out) l := lines(out) framework.ExpectEqual(len(l), 1) @@ -1633,9 +1635,9 @@ metadata: // because the granularity is only 1 second and // it could end up rounding the wrong way. 
time.Sleep(2500 * time.Millisecond) // ensure that startup logs on the node are seen as older than 1s - recentOut := framework.RunKubectlOrDie(ns, "logs", podName, containerName, "--since=1s") + recentOut := e2ekubectl.RunKubectlOrDie(ns, "logs", podName, containerName, "--since=1s") recent := len(strings.Split(recentOut, "\n")) - olderOut := framework.RunKubectlOrDie(ns, "logs", podName, containerName, "--since=24h") + olderOut := e2ekubectl.RunKubectlOrDie(ns, "logs", podName, containerName, "--since=24h") older := len(strings.Split(olderOut, "\n")) gomega.Expect(recent).To(gomega.BeNumerically("<", older), "expected recent(%v) to be less than older(%v)\nrecent lines:\n%v\nolder lines:\n%v\n", recent, older, recentOut, olderOut) }) @@ -1650,12 +1652,12 @@ metadata: framework.ConformanceIt("should add annotations for pods in rc ", func() { controllerJSON := commonutils.SubstituteImageName(string(readTestFileOrDie(agnhostControllerFilename))) ginkgo.By("creating Agnhost RC") - framework.RunKubectlOrDieInput(ns, controllerJSON, "create", "-f", "-") + e2ekubectl.RunKubectlOrDieInput(ns, controllerJSON, "create", "-f", "-") ginkgo.By("Waiting for Agnhost primary to start.") waitForOrFailWithDebug(1) ginkgo.By("patching all pods") forEachPod(func(pod v1.Pod) { - framework.RunKubectlOrDie(ns, "patch", "pod", pod.Name, "-p", "{\"metadata\":{\"annotations\":{\"x\":\"y\"}}}") + e2ekubectl.RunKubectlOrDie(ns, "patch", "pod", pod.Name, "-p", "{\"metadata\":{\"annotations\":{\"x\":\"y\"}}}") }) ginkgo.By("checking annotations") @@ -1681,7 +1683,7 @@ metadata: Description: The command 'kubectl version' MUST return the major, minor versions, GitCommit, etc of the Client and the Server that the kubectl is configured to connect to. */ framework.ConformanceIt("should check is all data is printed ", func() { - versionString := framework.RunKubectlOrDie(ns, "version") + versionString := e2ekubectl.RunKubectlOrDie(ns, "version") // we expect following values for: Major -> digit, Minor -> numeric followed by an optional '+', GitCommit -> alphanumeric requiredItems := []string{"Client Version: ", "Server Version: "} for _, item := range requiredItems { @@ -1700,7 +1702,7 @@ metadata: }) ginkgo.AfterEach(func() { - framework.RunKubectlOrDie(ns, "delete", "pods", podName) + e2ekubectl.RunKubectlOrDie(ns, "delete", "pods", podName) }) /* @@ -1710,7 +1712,7 @@ metadata: */ framework.ConformanceIt("should create a pod from an image when restart is Never ", func() { ginkgo.By("running the image " + httpdImage) - framework.RunKubectlOrDie(ns, "run", podName, "--restart=Never", podRunningTimeoutArg, "--image="+httpdImage) + e2ekubectl.RunKubectlOrDie(ns, "run", podName, "--restart=Never", podRunningTimeoutArg, "--image="+httpdImage) ginkgo.By("verifying the pod " + podName + " was created") pod, err := c.CoreV1().Pods(ns).Get(context.TODO(), podName, metav1.GetOptions{}) if err != nil { @@ -1734,7 +1736,7 @@ metadata: }) ginkgo.AfterEach(func() { - framework.RunKubectlOrDie(ns, "delete", "pods", podName) + e2ekubectl.RunKubectlOrDie(ns, "delete", "pods", podName) }) /* @@ -1744,7 +1746,7 @@ metadata: */ framework.ConformanceIt("should update a single-container pod's image ", func() { ginkgo.By("running the image " + httpdImage) - framework.RunKubectlOrDie(ns, "run", podName, "--image="+httpdImage, podRunningTimeoutArg, "--labels=run="+podName) + e2ekubectl.RunKubectlOrDie(ns, "run", podName, "--image="+httpdImage, podRunningTimeoutArg, "--labels=run="+podName) ginkgo.By("verifying the pod " + podName + " is running") 
label := labels.SelectorFromSet(labels.Set(map[string]string{"run": podName})) @@ -1754,14 +1756,14 @@ metadata: } ginkgo.By("verifying the pod " + podName + " was created") - podJSON := framework.RunKubectlOrDie(ns, "get", "pod", podName, "-o", "json") + podJSON := e2ekubectl.RunKubectlOrDie(ns, "get", "pod", podName, "-o", "json") if !strings.Contains(podJSON, podName) { framework.Failf("Failed to find pod %s in [%s]", podName, podJSON) } ginkgo.By("replace the image in the pod") podJSON = strings.Replace(podJSON, httpdImage, busyboxImage, 1) - framework.RunKubectlOrDieInput(ns, podJSON, "replace", "-f", "-") + e2ekubectl.RunKubectlOrDieInput(ns, podJSON, "replace", "-f", "-") ginkgo.By("verifying the pod " + podName + " has the right image " + busyboxImage) pod, err := c.CoreV1().Pods(ns).Get(context.TODO(), podName, metav1.GetOptions{}) @@ -1943,7 +1945,7 @@ metadata: ginkgo.It("should show event when pod is created ", func() { podName := "e2e-test-httpd-pod" ginkgo.By("running the image " + httpdImage) - framework.RunKubectlOrDie(ns, "run", podName, "--image="+httpdImage, podRunningTimeoutArg, "--labels=run="+podName) + e2ekubectl.RunKubectlOrDie(ns, "run", podName, "--image="+httpdImage, podRunningTimeoutArg, "--labels=run="+podName) ginkgo.By("verifying the pod " + podName + " is running") label := labels.SelectorFromSet(map[string]string{"run": podName}) @@ -1953,14 +1955,14 @@ metadata: } ginkgo.By("show started event for this pod") - events := framework.RunKubectlOrDie(ns, "alpha", "events", "--for=pod/"+podName) + events := e2ekubectl.RunKubectlOrDie(ns, "alpha", "events", "--for=pod/"+podName) if !strings.Contains(events, fmt.Sprintf("Normal Scheduled Pod/%s", podName)) { framework.Failf("failed to list expected event") } ginkgo.By("expect not showing any WARNING message") - events = framework.RunKubectlOrDie(ns, "alpha", "events", "--types=WARNING", "--for=pod/"+podName) + events = e2ekubectl.RunKubectlOrDie(ns, "alpha", "events", "--types=WARNING", "--for=pod/"+podName) if events != "" { framework.Failf("unexpected WARNING event fired") } @@ -1972,7 +1974,7 @@ metadata: quotaName := "million" ginkgo.By("calling kubectl quota") - framework.RunKubectlOrDie(ns, "create", "quota", quotaName, "--hard=pods=1000000,services=1000000") + e2ekubectl.RunKubectlOrDie(ns, "create", "quota", quotaName, "--hard=pods=1000000,services=1000000") ginkgo.By("verifying that the quota was created") quota, err := c.CoreV1().ResourceQuotas(ns).Get(context.TODO(), quotaName, metav1.GetOptions{}) @@ -2000,7 +2002,7 @@ metadata: quotaName := "scopes" ginkgo.By("calling kubectl quota") - framework.RunKubectlOrDie(ns, "create", "quota", quotaName, "--hard=pods=1000000", "--scopes=BestEffort,NotTerminating") + e2ekubectl.RunKubectlOrDie(ns, "create", "quota", quotaName, "--hard=pods=1000000", "--scopes=BestEffort,NotTerminating") ginkgo.By("verifying that the quota was created") quota, err := c.CoreV1().ResourceQuotas(ns).Get(context.TODO(), quotaName, metav1.GetOptions{}) @@ -2027,7 +2029,7 @@ metadata: quotaName := "scopes" ginkgo.By("calling kubectl quota") - out, err := framework.RunKubectl(ns, "create", "quota", quotaName, "--hard=hard=pods=1000000", "--scopes=Foo") + out, err := e2ekubectl.RunKubectl(ns, "create", "quota", quotaName, "--hard=hard=pods=1000000", "--scopes=Foo") if err == nil { framework.Failf("Expected kubectl to fail, but it succeeded: %s", out) } @@ -2037,8 +2039,8 @@ metadata: ginkgo.Describe("kubectl wait", func() { ginkgo.It("should ignore not found error with --for=delete", 
func() { ginkgo.By("calling kubectl wait --for=delete") - framework.RunKubectlOrDie(ns, "wait", "--for=delete", "pod/doesnotexist") - framework.RunKubectlOrDie(ns, "wait", "--for=delete", "pod", "--selector=app.kubernetes.io/name=noexist") + e2ekubectl.RunKubectlOrDie(ns, "wait", "--for=delete", "pod/doesnotexist") + e2ekubectl.RunKubectlOrDie(ns, "wait", "--for=delete", "pod", "--selector=app.kubernetes.io/name=noexist") }) }) }) @@ -2073,7 +2075,7 @@ func checkOutput(output string, required [][]string) { func checkKubectlOutputWithRetry(namespace string, required [][]string, args ...string) { var pollErr error wait.PollImmediate(time.Second, time.Minute, func() (bool, error) { - output := framework.RunKubectlOrDie(namespace, args...) + output := e2ekubectl.RunKubectlOrDie(namespace, args...) err := checkOutputReturnError(output, required) if err != nil { pollErr = err @@ -2342,17 +2344,17 @@ func startLocalProxy() (srv *httptest.Server, logs *bytes.Buffer) { // without being rejected by kubectl validation func createApplyCustomResource(resource, namespace, name string, crd *crd.TestCrd) error { ginkgo.By("successfully create CR") - if _, err := framework.RunKubectlInput(namespace, resource, "create", "--validate=true", "-f", "-"); err != nil { + if _, err := e2ekubectl.RunKubectlInput(namespace, resource, "create", "--validate=true", "-f", "-"); err != nil { return fmt.Errorf("failed to create CR %s in namespace %s: %v", resource, namespace, err) } - if _, err := framework.RunKubectl(namespace, "delete", crd.Crd.Spec.Names.Plural, name); err != nil { + if _, err := e2ekubectl.RunKubectl(namespace, "delete", crd.Crd.Spec.Names.Plural, name); err != nil { return fmt.Errorf("failed to delete CR %s: %v", name, err) } ginkgo.By("successfully apply CR") - if _, err := framework.RunKubectlInput(namespace, resource, "apply", "--validate=true", "-f", "-"); err != nil { + if _, err := e2ekubectl.RunKubectlInput(namespace, resource, "apply", "--validate=true", "-f", "-"); err != nil { return fmt.Errorf("failed to apply CR %s in namespace %s: %v", resource, namespace, err) } - if _, err := framework.RunKubectl(namespace, "delete", crd.Crd.Spec.Names.Plural, name); err != nil { + if _, err := e2ekubectl.RunKubectl(namespace, "delete", crd.Crd.Spec.Names.Plural, name); err != nil { return fmt.Errorf("failed to delete CR %s: %v", name, err) } return nil @@ -2387,7 +2389,7 @@ func validateController(c clientset.Interface, containerImage string, replicas i ginkgo.By(fmt.Sprintf("waiting for all containers in %s pods to come up.", testname)) //testname should be selector waitLoop: for start := time.Now(); time.Since(start) < framework.PodStartTimeout; time.Sleep(5 * time.Second) { - getPodsOutput := framework.RunKubectlOrDie(ns, "get", "pods", "-o", "template", getPodsTemplate, "-l", testname) + getPodsOutput := e2ekubectl.RunKubectlOrDie(ns, "get", "pods", "-o", "template", getPodsTemplate, "-l", testname) pods := strings.Fields(getPodsOutput) if numPods := len(pods); numPods != replicas { ginkgo.By(fmt.Sprintf("Replicas for %s: expected=%d actual=%d", testname, replicas, numPods)) @@ -2395,13 +2397,13 @@ waitLoop: } var runningPods []string for _, podID := range pods { - running := framework.RunKubectlOrDie(ns, "get", "pods", podID, "-o", "template", getContainerStateTemplate) + running := e2ekubectl.RunKubectlOrDie(ns, "get", "pods", podID, "-o", "template", getContainerStateTemplate) if running != "true" { framework.Logf("%s is created but not running", podID) continue waitLoop } - currentImage := 
framework.RunKubectlOrDie(ns, "get", "pods", podID, "-o", "template", getImageTemplate) + currentImage := e2ekubectl.RunKubectlOrDie(ns, "get", "pods", podID, "-o", "template", getImageTemplate) currentImage = trimDockerRegistry(currentImage) if currentImage != containerImage { framework.Logf("%s is created but running wrong image; expected: %s, actual: %s", podID, containerImage, currentImage) diff --git a/test/e2e/network/conntrack.go b/test/e2e/network/conntrack.go index 9a17796d620..69e1fc4d787 100644 --- a/test/e2e/network/conntrack.go +++ b/test/e2e/network/conntrack.go @@ -150,7 +150,7 @@ var _ = common.SIGDescribe("Conntrack", func() { cmd := fmt.Sprintf(`date; for i in $(seq 1 3000); do echo "$(date) Try: ${i}"; echo hostname | nc -u -w 5 -p %d %s %d; echo; done`, srcPort, serverNodeInfo.nodeIP, udpService.Spec.Ports[0].NodePort) clientPod.Spec.Containers[0].Command = []string{"/bin/sh", "-c", cmd} clientPod.Spec.Containers[0].Name = podClient - fr.PodClient().CreateSync(clientPod) + e2epod.NewPodClient(fr).CreateSync(clientPod) // Read the client pod logs logs, err := e2epod.GetPodLogs(cs, ns, podClient, podClient) @@ -163,7 +163,7 @@ var _ = common.SIGDescribe("Conntrack", func() { serverPod1.Labels = udpJig.Labels nodeSelection = e2epod.NodeSelection{Name: serverNodeInfo.name} e2epod.SetNodeSelection(&serverPod1.Spec, nodeSelection) - fr.PodClient().CreateSync(serverPod1) + e2epod.NewPodClient(fr).CreateSync(serverPod1) validateEndpointsPortsOrFail(cs, ns, serviceName, portsByPodName{podBackend1: {80}}) @@ -186,11 +186,11 @@ var _ = common.SIGDescribe("Conntrack", func() { serverPod2.Labels = udpJig.Labels nodeSelection = e2epod.NodeSelection{Name: serverNodeInfo.name} e2epod.SetNodeSelection(&serverPod2.Spec, nodeSelection) - fr.PodClient().CreateSync(serverPod2) + e2epod.NewPodClient(fr).CreateSync(serverPod2) // and delete the first pod framework.Logf("Cleaning up %s pod", podBackend1) - fr.PodClient().DeleteSync(podBackend1, metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout) + e2epod.NewPodClient(fr).DeleteSync(podBackend1, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout) validateEndpointsPortsOrFail(cs, ns, serviceName, portsByPodName{podBackend2: {80}}) @@ -226,7 +226,7 @@ var _ = common.SIGDescribe("Conntrack", func() { cmd := fmt.Sprintf(`date; for i in $(seq 1 3000); do echo "$(date) Try: ${i}"; echo hostname | nc -u -w 5 -p %d %s %d; echo; done`, srcPort, udpService.Spec.ClusterIP, udpService.Spec.Ports[0].Port) clientPod.Spec.Containers[0].Command = []string{"/bin/sh", "-c", cmd} clientPod.Spec.Containers[0].Name = podClient - fr.PodClient().CreateSync(clientPod) + e2epod.NewPodClient(fr).CreateSync(clientPod) // Read the client pod logs logs, err := e2epod.GetPodLogs(cs, ns, podClient, podClient) @@ -239,7 +239,7 @@ var _ = common.SIGDescribe("Conntrack", func() { serverPod1.Labels = udpJig.Labels nodeSelection = e2epod.NodeSelection{Name: serverNodeInfo.name} e2epod.SetNodeSelection(&serverPod1.Spec, nodeSelection) - fr.PodClient().CreateSync(serverPod1) + e2epod.NewPodClient(fr).CreateSync(serverPod1) validateEndpointsPortsOrFail(cs, ns, serviceName, portsByPodName{podBackend1: {80}}) @@ -262,11 +262,11 @@ var _ = common.SIGDescribe("Conntrack", func() { serverPod2.Labels = udpJig.Labels nodeSelection = e2epod.NodeSelection{Name: serverNodeInfo.name} e2epod.SetNodeSelection(&serverPod2.Spec, nodeSelection) - fr.PodClient().CreateSync(serverPod2) + e2epod.NewPodClient(fr).CreateSync(serverPod2) // and delete the first pod framework.Logf("Cleaning 
up %s pod", podBackend1) - fr.PodClient().DeleteSync(podBackend1, metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout) + e2epod.NewPodClient(fr).DeleteSync(podBackend1, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout) validateEndpointsPortsOrFail(cs, ns, serviceName, portsByPodName{podBackend2: {80}}) @@ -313,7 +313,7 @@ var _ = common.SIGDescribe("Conntrack", func() { cmd := fmt.Sprintf(`date; for i in $(seq 1 3000); do echo "$(date) Try: ${i}"; echo hostname | nc -u -w 5 -p %d %s %d; echo; done`, srcPort, udpService.Spec.ClusterIP, udpService.Spec.Ports[0].Port) clientPod.Spec.Containers[0].Command = []string{"/bin/sh", "-c", cmd} clientPod.Spec.Containers[0].Name = podClient - fr.PodClient().CreateSync(clientPod) + e2epod.NewPodClient(fr).CreateSync(clientPod) // Read the client pod logs logs, err := e2epod.GetPodLogs(cs, ns, podClient, podClient) @@ -334,7 +334,7 @@ var _ = common.SIGDescribe("Conntrack", func() { }, } e2epod.SetNodeSelection(&serverPod1.Spec, nodeSelection) - fr.PodClient().CreateSync(serverPod1) + e2epod.NewPodClient(fr).CreateSync(serverPod1) // wait until the endpoints are ready validateEndpointsPortsOrFail(cs, ns, serviceName, portsByPodName{podBackend1: {80}}) @@ -411,7 +411,7 @@ var _ = common.SIGDescribe("Conntrack", func() { } nodeSelection := e2epod.NodeSelection{Name: serverNodeInfo.name} e2epod.SetNodeSelection(&serverPod.Spec, nodeSelection) - fr.PodClient().CreateSync(serverPod) + e2epod.NewPodClient(fr).CreateSync(serverPod) ginkgo.By("Server pod created on node " + serverNodeInfo.name) svc := &v1.Service{ @@ -453,7 +453,7 @@ var _ = common.SIGDescribe("Conntrack", func() { nodeSelection = e2epod.NodeSelection{Name: clientNodeInfo.name} e2epod.SetNodeSelection(&pod.Spec, nodeSelection) - fr.PodClient().CreateSync(pod) + e2epod.NewPodClient(fr).CreateSync(pod) ginkgo.By("Client pod created") // The client will open connections against the server diff --git a/test/e2e/network/dns.go b/test/e2e/network/dns.go index f3788a49a1c..b348034de8a 100644 --- a/test/e2e/network/dns.go +++ b/test/e2e/network/dns.go @@ -432,7 +432,7 @@ var _ = common.SIGDescribe("DNS", func() { runCommand := func(arg string) string { cmd := []string{"/agnhost", arg} - stdout, stderr, err := f.ExecWithOptions(framework.ExecOptions{ + stdout, stderr, err := e2epod.ExecWithOptions(f, e2epod.ExecOptions{ Command: cmd, Namespace: f.Namespace.Name, PodName: testAgnhostPod.Name, @@ -524,7 +524,7 @@ var _ = common.SIGDescribe("DNS", func() { ginkgo.By("Verifying customized DNS option is configured on pod...") // TODO: Figure out a better way other than checking the actual resolv,conf file. cmd := []string{"cat", "/etc/resolv.conf"} - stdout, stderr, err := f.ExecWithOptions(framework.ExecOptions{ + stdout, stderr, err := e2epod.ExecWithOptions(f, e2epod.ExecOptions{ Command: cmd, Namespace: f.Namespace.Name, PodName: testUtilsPod.Name, @@ -544,7 +544,7 @@ var _ = common.SIGDescribe("DNS", func() { // - DNS query is sent to the specified server. 
cmd = []string{"dig", "+short", "+search", testDNSNameShort} digFunc := func() (bool, error) { - stdout, stderr, err := f.ExecWithOptions(framework.ExecOptions{ + stdout, stderr, err := e2epod.ExecWithOptions(f, e2epod.ExecOptions{ Command: cmd, Namespace: f.Namespace.Name, PodName: testUtilsPod.Name, diff --git a/test/e2e/network/dns_common.go b/test/e2e/network/dns_common.go index 1c568ef8325..9f3e0d13585 100644 --- a/test/e2e/network/dns_common.go +++ b/test/e2e/network/dns_common.go @@ -126,7 +126,7 @@ func (t *dnsTestCommon) runDig(dnsName, target string) []string { } cmd = append(cmd, dnsName) - stdout, stderr, err := t.f.ExecWithOptions(framework.ExecOptions{ + stdout, stderr, err := e2epod.ExecWithOptions(t.f, e2epod.ExecOptions{ Command: cmd, Namespace: t.f.Namespace.Name, PodName: t.utilPod.Name, diff --git a/test/e2e/network/dns_scale_records.go b/test/e2e/network/dns_scale_records.go index 24e5ef8249b..6b1d403054c 100644 --- a/test/e2e/network/dns_scale_records.go +++ b/test/e2e/network/dns_scale_records.go @@ -47,7 +47,7 @@ var _ = common.SIGDescribe("[Feature:PerformanceDNS][Serial]", func() { f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged ginkgo.BeforeEach(func() { - framework.ExpectNoError(framework.WaitForAllNodesSchedulable(f.ClientSet, framework.TestContext.NodeSchedulableTimeout)) + framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(f.ClientSet, framework.TestContext.NodeSchedulableTimeout)) e2enode.WaitForTotalHealthy(f.ClientSet, time.Minute) err := framework.CheckTestingNSDeletedExcept(f.ClientSet, f.Namespace.Name) diff --git a/test/e2e/network/dual_stack.go b/test/e2e/network/dual_stack.go index 458f242a131..5858487e14e 100644 --- a/test/e2e/network/dual_stack.go +++ b/test/e2e/network/dual_stack.go @@ -35,6 +35,7 @@ import ( e2edeployment "k8s.io/kubernetes/test/e2e/framework/deployment" e2enetwork "k8s.io/kubernetes/test/e2e/framework/network" e2enode "k8s.io/kubernetes/test/e2e/framework/node" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2eservice "k8s.io/kubernetes/test/e2e/framework/service" "k8s.io/kubernetes/test/e2e/network/common" imageutils "k8s.io/kubernetes/test/utils/image" @@ -48,11 +49,11 @@ var _ = common.SIGDescribe("[Feature:IPv6DualStack]", func() { f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged var cs clientset.Interface - var podClient *framework.PodClient + var podClient *e2epod.PodClient ginkgo.BeforeEach(func() { cs = f.ClientSet - podClient = f.PodClient() + podClient = e2epod.NewPodClient(f) }) ginkgo.It("should have ipv4 and ipv6 internal node ip", func() { @@ -763,7 +764,7 @@ func assertNetworkConnectivity(f *framework.Framework, serverPods v1.PodList, cl gomega.Consistently(func() error { ginkgo.By(fmt.Sprintf("checking connectivity from pod %s to serverIP: %s, port: %s", clientPod.Name, ip, port)) cmd := checkNetworkConnectivity(ip, port, timeout) - _, _, err := f.ExecCommandInContainerWithFullOutput(clientPod.Name, containerName, cmd...) + _, _, err := e2epod.ExecCommandInContainerWithFullOutput(f, clientPod.Name, containerName, cmd...) 
return err }, duration, pollInterval).ShouldNot(gomega.HaveOccurred()) } diff --git a/test/e2e/network/endpointslice.go b/test/e2e/network/endpointslice.go index 723a68251ae..8bcb4df6b4c 100644 --- a/test/e2e/network/endpointslice.go +++ b/test/e2e/network/endpointslice.go @@ -34,6 +34,7 @@ import ( clientset "k8s.io/client-go/kubernetes" "k8s.io/client-go/util/retry" "k8s.io/kubernetes/test/e2e/framework" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" "k8s.io/kubernetes/test/e2e/network/common" imageutils "k8s.io/kubernetes/test/utils/image" admissionapi "k8s.io/pod-security-admission/api" @@ -46,11 +47,11 @@ var _ = common.SIGDescribe("EndpointSlice", func() { f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline var cs clientset.Interface - var podClient *framework.PodClient + var podClient *e2epod.PodClient ginkgo.BeforeEach(func() { cs = f.ClientSet - podClient = f.PodClient() + podClient = e2epod.NewPodClient(f) }) /* diff --git a/test/e2e/network/example_cluster_dns.go b/test/e2e/network/example_cluster_dns.go index 35a4137db53..e2b4540e64d 100644 --- a/test/e2e/network/example_cluster_dns.go +++ b/test/e2e/network/example_cluster_dns.go @@ -32,8 +32,10 @@ import ( clientset "k8s.io/client-go/kubernetes" api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/test/e2e/framework" + e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl" e2enetwork "k8s.io/kubernetes/test/e2e/framework/network" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" + e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output" e2eresource "k8s.io/kubernetes/test/e2e/framework/resource" e2eservice "k8s.io/kubernetes/test/e2e/framework/service" e2etestfiles "k8s.io/kubernetes/test/e2e/framework/testfiles" @@ -95,11 +97,11 @@ var _ = common.SIGDescribe("ClusterDns [Feature:Example]", func() { } for _, ns := range namespaces { - framework.RunKubectlOrDieInput(ns.Name, read(filepath.Join(clusterDnsPath, "dns-backend-rc.yaml")), "create", "-f", "-") + e2ekubectl.RunKubectlOrDieInput(ns.Name, read(filepath.Join(clusterDnsPath, "dns-backend-rc.yaml")), "create", "-f", "-") } for _, ns := range namespaces { - framework.RunKubectlOrDieInput(ns.Name, read(filepath.Join(clusterDnsPath, "dns-backend-service.yaml")), "create", "-f", "-") + e2ekubectl.RunKubectlOrDieInput(ns.Name, read(filepath.Join(clusterDnsPath, "dns-backend-service.yaml")), "create", "-f", "-") } // wait for objects @@ -140,14 +142,14 @@ var _ = common.SIGDescribe("ClusterDns [Feature:Example]", func() { podName := pods.Items[0].Name queryDNS := fmt.Sprintf(queryDNSPythonTemplate, backendName+"."+namespaces[0].Name) - _, err = framework.LookForStringInPodExec(namespaces[0].Name, podName, []string{"python", "-c", queryDNS}, "ok", dnsReadyTimeout) + _, err = e2eoutput.LookForStringInPodExec(namespaces[0].Name, podName, []string{"python", "-c", queryDNS}, "ok", dnsReadyTimeout) framework.ExpectNoError(err, "waiting for output from pod exec") updatedPodYaml := strings.Replace(read(filepath.Join(clusterDnsPath, "dns-frontend-pod.yaml")), fmt.Sprintf("dns-backend.development.svc.%s", framework.TestContext.ClusterDNSDomain), fmt.Sprintf("dns-backend.%s.svc.%s", namespaces[0].Name, framework.TestContext.ClusterDNSDomain), 1) // create a pod in each namespace for _, ns := range namespaces { - framework.RunKubectlOrDieInput(ns.Name, updatedPodYaml, "create", "-f", "-") + e2ekubectl.RunKubectlOrDieInput(ns.Name, updatedPodYaml, "create", "-f", "-") } // wait until the pods have been scheduler, i.e. are not Pending anymore. 
Remember @@ -159,7 +161,7 @@ var _ = common.SIGDescribe("ClusterDns [Feature:Example]", func() { // wait for pods to print their result for _, ns := range namespaces { - _, err := framework.LookForStringInLog(ns.Name, frontendName, frontendName, podOutput, framework.PodStartTimeout) + _, err := e2eoutput.LookForStringInLog(ns.Name, frontendName, frontendName, podOutput, framework.PodStartTimeout) framework.ExpectNoError(err, "pod %s failed to print result in logs", frontendName) } }) diff --git a/test/e2e/network/fixture.go b/test/e2e/network/fixture.go index c44fb48e862..9dfa2b88fba 100644 --- a/test/e2e/network/fixture.go +++ b/test/e2e/network/fixture.go @@ -18,6 +18,7 @@ package network import ( "context" + v1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" diff --git a/test/e2e/network/funny_ips.go b/test/e2e/network/funny_ips.go index 24d0cba805a..282c3553786 100644 --- a/test/e2e/network/funny_ips.go +++ b/test/e2e/network/funny_ips.go @@ -27,6 +27,7 @@ import ( "k8s.io/kubernetes/test/e2e/framework" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" + e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output" e2eservice "k8s.io/kubernetes/test/e2e/framework/service" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" "k8s.io/kubernetes/test/e2e/network/common" @@ -117,7 +118,7 @@ var _ = common.SIGDescribe("CVE-2021-29923", func() { ip := netutils.ParseIPSloppy(clusterIPZero) cmd := fmt.Sprintf("echo hostName | nc -v -t -w 2 %s %v", ip.String(), servicePort) err = wait.PollImmediate(1*time.Second, e2eservice.ServiceReachabilityShortPollTimeout, func() (bool, error) { - stdout, err := framework.RunHostCmd(execPod.Namespace, execPod.Name, cmd) + stdout, err := e2eoutput.RunHostCmd(execPod.Namespace, execPod.Name, cmd) if err != nil { framework.Logf("Service reachability failing with error: %v\nRetrying...", err) return false, nil @@ -136,7 +137,7 @@ var _ = common.SIGDescribe("CVE-2021-29923", func() { // We have to check that the Service is not reachable in the address interpreted as decimal. cmd = fmt.Sprintf("echo hostName | nc -v -t -w 2 %s %v", clusterIPOctal, servicePort) err = wait.PollImmediate(1*time.Second, e2eservice.ServiceReachabilityShortPollTimeout, func() (bool, error) { - stdout, err := framework.RunHostCmd(execPod.Namespace, execPod.Name, cmd) + stdout, err := e2eoutput.RunHostCmd(execPod.Namespace, execPod.Name, cmd) if err != nil { framework.Logf("Service reachability failing with error: %v\nRetrying...", err) return false, nil diff --git a/test/e2e/network/hostport.go b/test/e2e/network/hostport.go index 34b24c8fd49..d6e048856a7 100644 --- a/test/e2e/network/hostport.go +++ b/test/e2e/network/hostport.go @@ -112,7 +112,7 @@ var _ = common.SIGDescribe("HostPort", func() { }, }, } - f.PodClient().CreateSync(hostExecPod) + e2epod.NewPodClient(f).CreateSync(hostExecPod) // use a 5 seconds timeout per connection timeout := 5 @@ -124,14 +124,14 @@ var _ = common.SIGDescribe("HostPort", func() { for i := 0; i < 5; i++ { // check pod1 ginkgo.By(fmt.Sprintf("checking connectivity from pod %s to serverIP: %s, port: %d", hostExecPod.Name, localhost, port)) - hostname1, _, err := f.ExecCommandInContainerWithFullOutput(hostExecPod.Name, "e2e-host-exec", cmdPod1...) + hostname1, _, err := e2epod.ExecCommandInContainerWithFullOutput(f, hostExecPod.Name, "e2e-host-exec", cmdPod1...) 
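For reference, the kubectl wrappers and the log-scanning helpers used in example_cluster_dns.go above move to e2ekubectl and e2eoutput respectively. A minimal sketch, illustrative only and not part of the diff; the YAML, pod and container names and the timeout are placeholders:

package example

import (
	"time"

	"k8s.io/kubernetes/test/e2e/framework"
	e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
	e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
)

// applyAndCheckLogs shows the replacement for framework.RunKubectlOrDieInput and
// framework.LookForStringInLog.
func applyAndCheckLogs(ns, podYaml, podName, containerName string) {
	e2ekubectl.RunKubectlOrDieInput(ns, podYaml, "create", "-f", "-") // was: framework.RunKubectlOrDieInput
	_, err := e2eoutput.LookForStringInLog(ns, podName, containerName, "ok", 2*time.Minute)
	framework.ExpectNoError(err, "waiting for expected string in pod logs")
}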
if err != nil { framework.Logf("Can not connect from %s to pod(pod1) to serverIP: %s, port: %d", hostExecPod.Name, localhost, port) continue } // check pod2 ginkgo.By(fmt.Sprintf("checking connectivity from pod %s to serverIP: %s, port: %d", hostExecPod.Name, hostIP, port)) - hostname2, _, err := f.ExecCommandInContainerWithFullOutput(hostExecPod.Name, "e2e-host-exec", cmdPod2...) + hostname2, _, err := e2epod.ExecCommandInContainerWithFullOutput(f, hostExecPod.Name, "e2e-host-exec", cmdPod2...) if err != nil { framework.Logf("Can not connect from %s to pod(pod2) to serverIP: %s, port: %d", hostExecPod.Name, hostIP, port) continue @@ -143,7 +143,7 @@ var _ = common.SIGDescribe("HostPort", func() { } // check pod3 ginkgo.By(fmt.Sprintf("checking connectivity from pod %s to serverIP: %s, port: %d UDP", hostExecPod.Name, hostIP, port)) - hostname3, _, err := f.ExecCommandInContainerWithFullOutput(hostExecPod.Name, "e2e-host-exec", cmdPod3...) + hostname3, _, err := e2epod.ExecCommandInContainerWithFullOutput(f, hostExecPod.Name, "e2e-host-exec", cmdPod3...) if err != nil { framework.Logf("Can not connect from %s to pod(pod2) to serverIP: %s, port: %d", hostExecPod.Name, hostIP, port) continue diff --git a/test/e2e/network/kube_proxy.go b/test/e2e/network/kube_proxy.go index a8a6e3bd5f3..0e301e46a14 100644 --- a/test/e2e/network/kube_proxy.go +++ b/test/e2e/network/kube_proxy.go @@ -31,6 +31,7 @@ import ( "k8s.io/kubernetes/test/e2e/framework" e2enode "k8s.io/kubernetes/test/e2e/framework/node" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" + e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" "k8s.io/kubernetes/test/e2e/network/common" imageutils "k8s.io/kubernetes/test/utils/image" @@ -116,7 +117,7 @@ var _ = common.SIGDescribe("KubeProxy", func() { }, }, } - fr.PodClient().CreateSync(hostExecPod) + e2epod.NewPodClient(fr).CreateSync(hostExecPod) // Create the client and server pods clientPodSpec := &v1.Pod{ @@ -184,7 +185,7 @@ var _ = common.SIGDescribe("KubeProxy", func() { serverNodeInfo.name, serverNodeInfo.nodeIP, kubeProxyE2eImage)) - fr.PodClient().CreateSync(serverPodSpec) + e2epod.NewPodClient(fr).CreateSync(serverPodSpec) // The server should be listening before spawning the client pod if readyErr := e2epod.WaitTimeoutForPodReadyInNamespace(fr.ClientSet, serverPodSpec.Name, fr.Namespace.Name, framework.PodStartTimeout); readyErr != nil { @@ -196,7 +197,7 @@ var _ = common.SIGDescribe("KubeProxy", func() { clientNodeInfo.name, clientNodeInfo.nodeIP, kubeProxyE2eImage)) - fr.PodClient().CreateSync(clientPodSpec) + e2epod.NewPodClient(fr).CreateSync(clientPodSpec) ginkgo.By("Checking conntrack entries for the timeout") // These must be synchronized from the default values set in @@ -217,7 +218,7 @@ var _ = common.SIGDescribe("KubeProxy", func() { "| grep -m 1 'CLOSE_WAIT.*dport=%v' ", ipFamily, ip, testDaemonTCPPort) if err := wait.PollImmediate(2*time.Second, epsilonSeconds*time.Second, func() (bool, error) { - result, err := framework.RunHostCmd(fr.Namespace.Name, "e2e-net-exec", cmd) + result, err := e2eoutput.RunHostCmd(fr.Namespace.Name, "e2e-net-exec", cmd) // retry if we can't obtain the conntrack entry if err != nil { framework.Logf("failed to obtain conntrack entry: %v %v", result, err) @@ -239,7 +240,7 @@ var _ = common.SIGDescribe("KubeProxy", func() { return false, fmt.Errorf("wrong TCP CLOSE_WAIT timeout: %v expected: %v", timeoutSeconds, expectedTimeoutSeconds) }); err != nil { // Dump all conntrack 
entries for debugging - result, err2 := framework.RunHostCmd(fr.Namespace.Name, "e2e-net-exec", "conntrack -L") + result, err2 := e2eoutput.RunHostCmd(fr.Namespace.Name, "e2e-net-exec", "conntrack -L") if err2 != nil { framework.Logf("failed to obtain conntrack entry: %v %v", result, err2) } diff --git a/test/e2e/network/loadbalancer.go b/test/e2e/network/loadbalancer.go index 521df2d45ba..0fbf4392035 100644 --- a/test/e2e/network/loadbalancer.go +++ b/test/e2e/network/loadbalancer.go @@ -38,6 +38,7 @@ import ( e2enetwork "k8s.io/kubernetes/test/e2e/framework/network" e2enode "k8s.io/kubernetes/test/e2e/framework/node" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" + e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output" "k8s.io/kubernetes/test/e2e/framework/providers/gce" e2erc "k8s.io/kubernetes/test/e2e/framework/rc" e2eservice "k8s.io/kubernetes/test/e2e/framework/service" @@ -628,7 +629,7 @@ var _ = common.SIGDescribe("LoadBalancers", func() { tcpIngressIP := e2eservice.GetIngressPoint(lbIngress) if pollErr := wait.PollImmediate(pollInterval, createTimeout, func() (bool, error) { cmd := fmt.Sprintf(`curl -m 5 'http://%v:%v/echo?msg=hello'`, tcpIngressIP, svcPort) - stdout, err := framework.RunHostCmd(hostExec.Namespace, hostExec.Name, cmd) + stdout, err := e2eoutput.RunHostCmd(hostExec.Namespace, hostExec.Name, cmd) if err != nil { framework.Logf("error curling; stdout: %v. err: %v", stdout, err) return false, nil @@ -1219,7 +1220,7 @@ var _ = common.SIGDescribe("LoadBalancers ESIPP [Slow]", func() { loadBalancerPropagationTimeout := e2eservice.GetServiceLoadBalancerPropagationTimeout(cs) ginkgo.By(fmt.Sprintf("Hitting external lb %v from pod %v on node %v", ingressIP, pausePod.Name, pausePod.Spec.NodeName)) if pollErr := wait.PollImmediate(framework.Poll, loadBalancerPropagationTimeout, func() (bool, error) { - stdout, err := framework.RunHostCmd(pausePod.Namespace, pausePod.Name, cmd) + stdout, err := e2eoutput.RunHostCmd(pausePod.Namespace, pausePod.Name, cmd) if err != nil { framework.Logf("got err: %v, retry until timeout", err) return false, nil diff --git a/test/e2e/network/netpol/kubemanager.go b/test/e2e/network/netpol/kubemanager.go index 9b7a85965be..35f23fdade3 100644 --- a/test/e2e/network/netpol/kubemanager.go +++ b/test/e2e/network/netpol/kubemanager.go @@ -191,7 +191,7 @@ func (k *kubeManager) probeConnectivity(args *probeConnectivityArgs) (bool, stri // executeRemoteCommand executes a remote shell command on the given pod. 
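For reference, RunHostCmd moves to e2eoutput while the polling shape used in the kube_proxy.go hunks above stays the same. A minimal sketch, illustrative only and not part of the diff; namespace, pod name, command and timings are placeholders:

package example

import (
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/kubernetes/test/e2e/framework"
	e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
)

// waitForHostCmd polls a command in a host-exec pod until it succeeds,
// using e2eoutput.RunHostCmd instead of framework.RunHostCmd.
func waitForHostCmd(ns, podName, cmd string) error {
	return wait.PollImmediate(2*time.Second, time.Minute, func() (bool, error) {
		stdout, err := e2eoutput.RunHostCmd(ns, podName, cmd)
		if err != nil {
			framework.Logf("command failed, retrying: %v (output: %q)", err, stdout)
			return false, nil
		}
		return true, nil
	})
}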
func (k *kubeManager) executeRemoteCommand(namespace string, pod string, containerName string, command []string) (string, string, error) { - return k.framework.ExecWithOptions(framework.ExecOptions{ + return e2epod.ExecWithOptions(k.framework, e2epod.ExecOptions{ Command: command, Namespace: namespace, PodName: pod, diff --git a/test/e2e/network/netpol/network_legacy.go b/test/e2e/network/netpol/network_legacy.go index a46ca8bdfcc..253d988e340 100644 --- a/test/e2e/network/netpol/network_legacy.go +++ b/test/e2e/network/netpol/network_legacy.go @@ -40,6 +40,7 @@ import ( "k8s.io/kubernetes/test/e2e/framework" e2enode "k8s.io/kubernetes/test/e2e/framework/node" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" + e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" "k8s.io/kubernetes/test/e2e/network/common" imageutils "k8s.io/kubernetes/test/utils/image" @@ -1934,7 +1935,7 @@ func checkConnectivity(f *framework.Framework, ns *v1.Namespace, podClient *v1.P err = e2epod.WaitForPodSuccessInNamespace(f.ClientSet, podClient.Name, ns.Name) if err != nil { // Dump debug information for the test namespace. - framework.DumpDebugInfo(f.ClientSet, f.Namespace.Name) + e2eoutput.DumpDebugInfo(f.ClientSet, f.Namespace.Name) pods, policies, logs := collectPodsAndNetworkPolicies(f, podClient) framework.Failf("Pod %s should be able to connect to service %s, but was not able to connect.\nPod logs:\n%s\n\n Current NetworkPolicies:\n\t%v\n\n Pods:\n\t%v\n\n", podClient.Name, service.Name, logs, policies.Items, pods) @@ -1950,7 +1951,7 @@ func checkNoConnectivity(f *framework.Framework, ns *v1.Namespace, podClient *v1 // Dump debug information if the error was nil. if err == nil { // Dump debug information for the test namespace. - framework.DumpDebugInfo(f.ClientSet, f.Namespace.Name) + e2eoutput.DumpDebugInfo(f.ClientSet, f.Namespace.Name) pods, policies, logs := collectPodsAndNetworkPolicies(f, podClient) framework.Failf("Pod %s should not be able to connect to service %s, but was able to connect.\nPod logs:\n%s\n\n Current NetworkPolicies:\n\t%v\n\n Pods:\n\t %v\n\n", podClient.Name, service.Name, logs, policies.Items, pods) @@ -1976,7 +1977,7 @@ func checkNoConnectivityByExitCode(f *framework.Framework, ns *v1.Namespace, pod framework.Failf("Pod %s should not be able to connect to service %s, but was able to connect.\nPod logs:\n%s\n\n Current NetworkPolicies:\n\t%v\n\n Pods:\n\t%v\n\n", podClient.Name, service.Name, logs, policies.Items, pods) // Dump debug information for the test namespace. 
- framework.DumpDebugInfo(f.ClientSet, f.Namespace.Name) + e2eoutput.DumpDebugInfo(f.ClientSet, f.Namespace.Name) } } diff --git a/test/e2e/network/netpol/reachability.go b/test/e2e/network/netpol/reachability.go index 672a1af0ca0..2f5a47f9be3 100644 --- a/test/e2e/network/netpol/reachability.go +++ b/test/e2e/network/netpol/reachability.go @@ -18,9 +18,10 @@ package netpol import ( "fmt" + "strings" + v1 "k8s.io/api/core/v1" "k8s.io/kubernetes/test/e2e/framework" - "strings" ) // TestCase describes the data for a netpol test diff --git a/test/e2e/network/networking_perf.go b/test/e2e/network/networking_perf.go index c6f2a7c8844..a91abb53c55 100644 --- a/test/e2e/network/networking_perf.go +++ b/test/e2e/network/networking_perf.go @@ -33,6 +33,7 @@ import ( e2edaemonset "k8s.io/kubernetes/test/e2e/framework/daemonset" e2edeployment "k8s.io/kubernetes/test/e2e/framework/deployment" e2enode "k8s.io/kubernetes/test/e2e/framework/node" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" "k8s.io/kubernetes/test/e2e/network/common" imageutils "k8s.io/kubernetes/test/utils/image" admissionapi "k8s.io/pod-security-admission/api" @@ -234,7 +235,7 @@ var _ = common.SIGDescribe("Networking IPerf2 [Feature:Networking-Performance]", podName := pod.Name nodeName := pod.Spec.NodeName - iperfVersion := f.ExecShellInPod(podName, "iperf -v || true") + iperfVersion := e2epod.ExecShellInPod(f, podName, "iperf -v || true") framework.Logf("iperf version: %s", iperfVersion) for try := 0; ; try++ { @@ -247,7 +248,7 @@ var _ = common.SIGDescribe("Networking IPerf2 [Feature:Networking-Performance]", */ command := fmt.Sprintf(`iperf %s -e -p %d --reportstyle C -i 1 -c %s && sleep 5`, familyStr, iperf2Port, serverServiceName) framework.Logf("attempting to run command '%s' in client pod %s (node %s)", command, podName, nodeName) - output := f.ExecShellInPod(podName, command) + output := e2epod.ExecShellInPod(f, podName, command) framework.Logf("output from exec on client pod %s (node %s): \n%s\n", podName, nodeName, output) results, err := ParseIPerf2EnhancedResultsFromCSV(output) diff --git a/test/e2e/network/scale/localrun/ingress_scale.go b/test/e2e/network/scale/localrun/ingress_scale.go index bea981c8888..6026de245e4 100644 --- a/test/e2e/network/scale/localrun/ingress_scale.go +++ b/test/e2e/network/scale/localrun/ingress_scale.go @@ -27,7 +27,7 @@ import ( "k8s.io/klog/v2" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" "k8s.io/client-go/tools/clientcmd" diff --git a/test/e2e/network/service.go b/test/e2e/network/service.go index 7a596eb14ab..d9576b1b9e2 100644 --- a/test/e2e/network/service.go +++ b/test/e2e/network/service.go @@ -64,6 +64,8 @@ import ( e2enetwork "k8s.io/kubernetes/test/e2e/framework/network" e2enode "k8s.io/kubernetes/test/e2e/framework/node" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" + e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output" + e2eproviders "k8s.io/kubernetes/test/e2e/framework/providers" e2erc "k8s.io/kubernetes/test/e2e/framework/rc" e2eservice "k8s.io/kubernetes/test/e2e/framework/service" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" @@ -134,7 +136,7 @@ func affinityCheckFromPod(execPod *v1.Pod, serviceIP string, servicePort int) (t curl := fmt.Sprintf(`curl -q -s --connect-timeout 2 http://%s/`, serviceIPPort) cmd := fmt.Sprintf("for i in $(seq 0 %d); do echo; %s ; done", AffinityConfirmCount, curl) getHosts := func() []string { - stdout, err := 
framework.RunHostCmd(execPod.Namespace, execPod.Name, cmd) + stdout, err := e2eoutput.RunHostCmd(execPod.Namespace, execPod.Name, cmd) if err != nil { framework.Logf("Failed to get response from %s. Retry until timeout", serviceIPPort) return nil @@ -343,7 +345,7 @@ func verifyServeHostnameServiceUp(c clientset.Interface, ns string, expectedPods 50*len(expectedPods), wgetCmd, serviceIPPort) framework.Logf("Executing cmd %q in pod %v/%v", cmd, ns, podName) // TODO: Use exec-over-http via the netexec pod instead of kubectl exec. - output, err := framework.RunHostCmd(ns, podName, cmd) + output, err := e2eoutput.RunHostCmd(ns, podName, cmd) if err != nil { framework.Logf("error while kubectl execing %q in pod %v/%v: %v\nOutput: %v", cmd, ns, podName, err, output) } @@ -406,7 +408,7 @@ func verifyServeHostnameServiceDown(c clientset.Interface, ns string, serviceIP "curl -g -s --connect-timeout 2 http://%s && echo service-down-failed", ipPort) for start := time.Now(); time.Since(start) < e2eservice.KubeProxyLagTimeout; time.Sleep(5 * time.Second) { - output, err := framework.RunHostCmd(ns, hostExecPod.Name, command) + output, err := e2eoutput.RunHostCmd(ns, hostExecPod.Name, command) if err != nil { framework.Logf("error while kubectl execing %q in pod %v/%v: %v\nOutput: %v", command, ns, hostExecPod.Name, err, output) } @@ -1762,7 +1764,7 @@ var _ = common.SIGDescribe("Services", func() { var stdout string if pollErr := wait.PollImmediate(framework.Poll, e2eservice.KubeProxyLagTimeout, func() (bool, error) { var err error - stdout, err = framework.RunHostCmd(hostExec.Namespace, hostExec.Name, cmd) + stdout, err = e2eoutput.RunHostCmd(hostExec.Namespace, hostExec.Name, cmd) if err != nil { framework.Logf("expected node port (%d) to not be in use, stdout: %v", nodePort, stdout) return false, nil @@ -1855,7 +1857,7 @@ var _ = common.SIGDescribe("Services", func() { var stdout string if pollErr := wait.PollImmediate(framework.Poll, e2eservice.KubeProxyLagTimeout, func() (bool, error) { var err error - stdout, err = framework.RunHostCmd(f.Namespace.Name, execPodName, cmd) + stdout, err = e2eoutput.RunHostCmd(f.Namespace.Name, execPodName, cmd) if err != nil { framework.Logf("expected un-ready endpoint for Service %v, stdout: %v, err %v", t.Name, stdout, err) return false, nil @@ -1878,7 +1880,7 @@ var _ = common.SIGDescribe("Services", func() { cmd = fmt.Sprintf("curl -q -s --connect-timeout 2 http://%s:%d/; test \"$?\" -ne \"0\"", svcName, port) if pollErr := wait.PollImmediate(framework.Poll, e2eservice.KubeProxyLagTimeout, func() (bool, error) { var err error - stdout, err = framework.RunHostCmd(f.Namespace.Name, execPodName, cmd) + stdout, err = e2eoutput.RunHostCmd(f.Namespace.Name, execPodName, cmd) if err != nil { framework.Logf("expected un-ready endpoint for Service %v, stdout: %v, err %v", t.Name, stdout, err) return false, nil @@ -1898,7 +1900,7 @@ var _ = common.SIGDescribe("Services", func() { cmd = fmt.Sprintf("curl -q -s --connect-timeout 2 http://%s:%d/", svcName, port) if pollErr := wait.PollImmediate(framework.Poll, e2eservice.KubeProxyLagTimeout, func() (bool, error) { var err error - stdout, err = framework.RunHostCmd(f.Namespace.Name, execPodName, cmd) + stdout, err = e2eoutput.RunHostCmd(f.Namespace.Name, execPodName, cmd) if err != nil { framework.Logf("expected un-ready endpoint for Service %v, stdout: %v, err %v", t.Name, stdout, err) return false, nil @@ -2153,7 +2155,7 @@ var _ = common.SIGDescribe("Services", func() { clusterIPAddress := 
net.JoinHostPort(svc.Spec.ClusterIP, strconv.Itoa(servicePort)) cmd := fmt.Sprintf(`curl -q -s --connect-timeout 5 %s/hostname`, clusterIPAddress) if pollErr := wait.PollImmediate(framework.Poll, e2eservice.KubeProxyEndpointLagTimeout, func() (bool, error) { - _, err := framework.RunHostCmd(pausePod1.Namespace, pausePod1.Name, cmd) + _, err := e2eoutput.RunHostCmd(pausePod1.Namespace, pausePod1.Name, cmd) if err != nil { return true, nil } @@ -2169,15 +2171,15 @@ var _ = common.SIGDescribe("Services", func() { // connect 3 times every 5 seconds to the Service and expect a failure for i := 0; i < 5; i++ { cmd = fmt.Sprintf(`curl -q -s --connect-timeout 5 %s/hostname`, clusterIPAddress) - _, err := framework.RunHostCmd(pausePod1.Namespace, pausePod1.Name, cmd) + _, err := e2eoutput.RunHostCmd(pausePod1.Namespace, pausePod1.Name, cmd) framework.ExpectError(err, "expected error when trying to connect to cluster IP") cmd = fmt.Sprintf(`curl -q -s --connect-timeout 5 %s/hostname`, nodePortAddress0) - _, err = framework.RunHostCmd(pausePod1.Namespace, pausePod1.Name, cmd) + _, err = e2eoutput.RunHostCmd(pausePod1.Namespace, pausePod1.Name, cmd) framework.ExpectError(err, "expected error when trying to connect to NodePort address") cmd = fmt.Sprintf(`curl -q -s --connect-timeout 5 %s/hostname`, nodePortAddress1) - _, err = framework.RunHostCmd(pausePod1.Namespace, pausePod1.Name, cmd) + _, err = e2eoutput.RunHostCmd(pausePod1.Namespace, pausePod1.Name, cmd) framework.ExpectError(err, "expected error when trying to connect to NodePort address") time.Sleep(5 * time.Second) @@ -2410,7 +2412,7 @@ var _ = common.SIGDescribe("Services", func() { ginkgo.By(fmt.Sprintf("hitting service %v from pod %v on node %v", serviceAddress, podName, nodeName)) expectedErr := "REFUSED" if pollErr := wait.PollImmediate(framework.Poll, e2eservice.KubeProxyEndpointLagTimeout, func() (bool, error) { - _, err := framework.RunHostCmd(execPod.Namespace, execPod.Name, cmd) + _, err := e2eoutput.RunHostCmd(execPod.Namespace, execPod.Name, cmd) if err != nil { if strings.Contains(err.Error(), expectedErr) { @@ -2452,7 +2454,7 @@ var _ = common.SIGDescribe("Services", func() { evictedPod.Spec.Containers[0].Resources = v1.ResourceRequirements{ Limits: v1.ResourceList{"ephemeral-storage": resource.MustParse("5Mi")}, } - f.PodClient().Create(evictedPod) + e2epod.NewPodClient(f).Create(evictedPod) err = e2epod.WaitForPodTerminatedInNamespace(f.ClientSet, evictedPod.Name, "Evicted", f.Namespace.Name) if err != nil { framework.Failf("error waiting for pod to be evicted: %v", err) @@ -2501,7 +2503,7 @@ var _ = common.SIGDescribe("Services", func() { ginkgo.By(fmt.Sprintf("hitting service %v from pod %v on node %v expected to be refused", serviceAddress, podName, nodeName)) expectedErr := "REFUSED" if pollErr := wait.PollImmediate(framework.Poll, e2eservice.KubeProxyEndpointLagTimeout, func() (bool, error) { - _, err := framework.RunHostCmd(execPod.Namespace, execPod.Name, cmd) + _, err := e2eoutput.RunHostCmd(execPod.Namespace, execPod.Name, cmd) if err != nil { if strings.Contains(err.Error(), expectedErr) { @@ -2590,7 +2592,7 @@ var _ = common.SIGDescribe("Services", func() { // the second pause pod is on a different node, so it should see a connection error every time cmd := fmt.Sprintf(`curl -q -s --connect-timeout 5 %s/hostname`, serviceAddress) - _, err := framework.RunHostCmd(pausePod1.Namespace, pausePod1.Name, cmd) + _, err := e2eoutput.RunHostCmd(pausePod1.Namespace, pausePod1.Name, cmd) framework.ExpectError(err, "expected 
error when trying to connect to cluster IP") } }) @@ -2670,7 +2672,7 @@ var _ = common.SIGDescribe("Services", func() { // the second pause pod is on a different node, so it should see a connection error every time cmd := fmt.Sprintf(`curl -q -s --connect-timeout 5 %s/hostname`, serviceAddress) - _, err := framework.RunHostCmd(pausePod1.Namespace, pausePod1.Name, cmd) + _, err := e2eoutput.RunHostCmd(pausePod1.Namespace, pausePod1.Name, cmd) framework.ExpectError(err, "expected error when trying to connect to cluster IP") } }) @@ -2753,7 +2755,7 @@ var _ = common.SIGDescribe("Services", func() { // the second pause pod is on a different node, so it should see a connection error every time cmd := fmt.Sprintf(`curl -q -s --connect-timeout 5 %s/hostname`, serviceAddress) - _, err := framework.RunHostCmd(pausePod1.Namespace, pausePod1.Name, cmd) + _, err := e2eoutput.RunHostCmd(pausePod1.Namespace, pausePod1.Name, cmd) framework.ExpectError(err, "expected error when trying to connect to cluster IP") } @@ -2782,7 +2784,7 @@ var _ = common.SIGDescribe("Services", func() { // the second pause pod is on a different node, so it should see a connection error every time cmd := fmt.Sprintf(`curl -q -s --connect-timeout 5 %s/hostname`, serviceAddress) - _, err := framework.RunHostCmd(pausePod3.Namespace, pausePod3.Name, cmd) + _, err := e2eoutput.RunHostCmd(pausePod3.Namespace, pausePod3.Name, cmd) framework.ExpectError(err, "expected error when trying to connect to cluster IP") } }) @@ -2847,7 +2849,7 @@ var _ = common.SIGDescribe("Services", func() { // validate that the health check node port from kube-proxy returns 200 when there are ready endpoints err = wait.PollImmediate(time.Second, time.Minute, func() (bool, error) { cmd := fmt.Sprintf(`curl -s -o /dev/null -w "%%{http_code}" --connect-timeout 5 http://%s/healthz`, healthCheckNodePortAddr) - out, err := framework.RunHostCmd(pausePod0.Namespace, pausePod0.Name, cmd) + out, err := e2eoutput.RunHostCmd(pausePod0.Namespace, pausePod0.Name, cmd) if err != nil { return false, err } @@ -2868,7 +2870,7 @@ var _ = common.SIGDescribe("Services", func() { // validate that the health check node port from kube-proxy returns 503 when there are no ready endpoints err = wait.PollImmediate(time.Second, time.Minute, func() (bool, error) { cmd := fmt.Sprintf(`curl -s -o /dev/null -w "%%{http_code}" --connect-timeout 5 http://%s/healthz`, healthCheckNodePortAddr) - out, err := framework.RunHostCmd(pausePod0.Namespace, pausePod0.Name, cmd) + out, err := e2eoutput.RunHostCmd(pausePod0.Namespace, pausePod0.Name, cmd) if err != nil { return false, err } @@ -3050,7 +3052,7 @@ var _ = common.SIGDescribe("Services", func() { execHostnameTest(*pausePod0, serviceAddress, webserverPod0.Name) cmd := fmt.Sprintf(`curl -q -s --connect-timeout 5 %s/hostname`, serviceAddress) - _, err := framework.RunHostCmd(pausePod1.Namespace, pausePod1.Name, cmd) + _, err := e2eoutput.RunHostCmd(pausePod1.Namespace, pausePod1.Name, cmd) framework.ExpectError(err, "expected error when trying to connect to cluster IP") time.Sleep(5 * time.Second) @@ -3226,7 +3228,7 @@ var _ = common.SIGDescribe("Services", func() { // pausePod0 -> node0 and pausePod1 -> node1 both succeed because pod-to-same-node-NodePort // connections are neither internal nor external and always get Cluster traffic policy. 
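For reference, the service.go "connection must fail" checks above keep their shape; only RunHostCmd's package changes. A minimal sketch, illustrative only and not part of the diff; the service address is a placeholder:

package example

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/kubernetes/test/e2e/framework"
	e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
)

// expectUnreachable asserts that a curl from the pause pod to the service address fails.
func expectUnreachable(pausePod *v1.Pod, serviceAddress string) {
	cmd := fmt.Sprintf(`curl -q -s --connect-timeout 5 %s/hostname`, serviceAddress)
	_, err := e2eoutput.RunHostCmd(pausePod.Namespace, pausePod.Name, cmd) // was: framework.RunHostCmd
	framework.ExpectError(err, "expected error when trying to connect to cluster IP")
}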
cmd := fmt.Sprintf(`curl -q -s --connect-timeout 5 %s/hostname`, nodePortAddress1) - _, err := framework.RunHostCmd(pausePod0.Namespace, pausePod0.Name, cmd) + _, err := e2eoutput.RunHostCmd(pausePod0.Namespace, pausePod0.Name, cmd) framework.ExpectError(err, "expected error when trying to connect to node port for pausePod0") execHostnameTest(*pausePod0, nodePortAddress0, webserverPod0.Name) @@ -3820,7 +3822,7 @@ func execAffinityTestForSessionAffinityTimeout(f *framework.Framework, cs client hosts := sets.NewString() cmd := fmt.Sprintf(`curl -q -s --connect-timeout 2 http://%s/`, net.JoinHostPort(svcIP, strconv.Itoa(servicePort))) for i := 0; i < 10; i++ { - hostname, err := framework.RunHostCmd(execPod.Namespace, execPod.Name, cmd) + hostname, err := e2eoutput.RunHostCmd(execPod.Namespace, execPod.Name, cmd) if err == nil { hosts.Insert(hostname) if hosts.Len() > 1 { @@ -3999,7 +4001,7 @@ func createPodOrFail(f *framework.Framework, ns, name string, labels map[string] // Add a dummy environment variable to work around a docker issue. // https://github.com/docker/docker/issues/14203 pod.Spec.Containers[0].Env = []v1.EnvVar{{Name: "FOO", Value: " "}} - f.PodClient().CreateSync(pod) + e2epod.NewPodClient(f).CreateSync(pod) } // launchHostExecPod launches a hostexec pod in the given namespace and waits @@ -4018,7 +4020,7 @@ func launchHostExecPod(client clientset.Interface, ns, name string) *v1.Pod { func checkReachabilityFromPod(expectToBeReachable bool, timeout time.Duration, namespace, pod, target string) { cmd := fmt.Sprintf("wget -T 5 -qO- %q", target) err := wait.PollImmediate(framework.Poll, timeout, func() (bool, error) { - _, err := framework.RunHostCmd(namespace, pod, cmd) + _, err := e2eoutput.RunHostCmd(namespace, pod, cmd) if expectToBeReachable && err != nil { framework.Logf("Expect target to be reachable. But got err: %v. 
Retry until timeout", err) return false, nil @@ -4037,11 +4039,11 @@ func checkReachabilityFromPod(expectToBeReachable bool, timeout time.Duration, n func proxyMode(f *framework.Framework) (string, error) { pod := e2epod.NewAgnhostPod(f.Namespace.Name, "kube-proxy-mode-detector", nil, nil, nil) pod.Spec.HostNetwork = true - f.PodClient().CreateSync(pod) - defer f.PodClient().DeleteSync(pod.Name, metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout) + e2epod.NewPodClient(f).CreateSync(pod) + defer e2epod.NewPodClient(f).DeleteSync(pod.Name, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout) cmd := "curl -q -s --connect-timeout 1 http://localhost:10249/proxyMode" - stdout, err := framework.RunHostCmd(pod.Namespace, pod.Name, cmd) + stdout, err := e2eoutput.RunHostCmd(pod.Namespace, pod.Name, cmd) if err != nil { return "", err } @@ -4159,7 +4161,7 @@ func restartApiserver(namespace string, cs clientset.Interface) error { if err != nil { return err } - return framework.MasterUpgradeGKE(namespace, v.GitVersion[1:]) // strip leading 'v' + return e2eproviders.MasterUpgradeGKE(namespace, v.GitVersion[1:]) // strip leading 'v' } return restartComponent(cs, kubeAPIServerLabelName, metav1.NamespaceSystem, map[string]string{clusterComponentKey: kubeAPIServerLabelName}) @@ -4266,7 +4268,7 @@ var _ = common.SIGDescribe("SCTP [LinuxOnly]", func() { e2epod.SetNodeSelection(&podSpec.Spec, nodeSelection) ginkgo.By(fmt.Sprintf("Launching the pod on node %v", node.Name)) - f.PodClient().CreateSync(podSpec) + e2epod.NewPodClient(f).CreateSync(podSpec) defer func() { err := cs.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), podName, metav1.DeleteOptions{}) framework.ExpectNoError(err, "failed to delete pod: %s in namespace: %s", podName, f.Namespace.Name) diff --git a/test/e2e/network/topology_hints.go b/test/e2e/network/topology_hints.go index ca436c3ae59..59c4014758e 100644 --- a/test/e2e/network/topology_hints.go +++ b/test/e2e/network/topology_hints.go @@ -184,7 +184,7 @@ var _ = common.SIGDescribe("[Feature:Topology Hints]", func() { cmd := fmt.Sprintf(`date; for i in $(seq 1 3000); do sleep 1; echo "Date: $(date) Try: ${i}"; curl -q -s --connect-timeout 2 http://%s:80/ ; echo; done`, svc.Name) clientPod.Spec.Containers[0].Command = []string{"/bin/sh", "-c", cmd} clientPod.Spec.Containers[0].Name = clientPod.Name - f.PodClient().CreateSync(clientPod) + e2epod.NewPodClient(f).CreateSync(clientPod) framework.Logf("Ensuring that requests from %s pod on %s node stay in %s zone", clientPod.Name, nodeName, fromZone) diff --git a/test/e2e/network/util.go b/test/e2e/network/util.go index 0818627ca3f..b67ec330d7c 100644 --- a/test/e2e/network/util.go +++ b/test/e2e/network/util.go @@ -29,7 +29,9 @@ import ( "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/kubernetes/test/e2e/framework" + e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl" e2enetwork "k8s.io/kubernetes/test/e2e/framework/network" + e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output" "k8s.io/kubernetes/test/e2e/storage/utils" ) @@ -73,7 +75,7 @@ func GetHTTPContentFromTestContainer(config *e2enetwork.NetworkingTestConfig, ho // DescribeSvc logs the output of kubectl describe svc for the given namespace func DescribeSvc(ns string) { framework.Logf("\nOutput of kubectl describe svc:\n") - desc, _ := framework.RunKubectl( + desc, _ := e2ekubectl.RunKubectl( ns, "describe", "svc", fmt.Sprintf("--namespace=%v", ns)) framework.Logf(desc) } @@ -117,7 +119,7 @@ func 
execSourceIPTest(sourcePod v1.Pod, targetAddr string) (string, string) { framework.Logf("Waiting up to %v to get response from %s", timeout, targetAddr) cmd := fmt.Sprintf(`curl -q -s --connect-timeout 30 %s/clientip`, targetAddr) for start := time.Now(); time.Since(start) < timeout; time.Sleep(2 * time.Second) { - stdout, err = framework.RunHostCmd(sourcePod.Namespace, sourcePod.Name, cmd) + stdout, err = e2eoutput.RunHostCmd(sourcePod.Namespace, sourcePod.Name, cmd) if err != nil { framework.Logf("got err: %v, retry until timeout", err) continue @@ -155,7 +157,7 @@ func execHostnameTest(sourcePod v1.Pod, targetAddr, targetHostname string) { framework.Logf("Waiting up to %v to get response from %s", timeout, targetAddr) cmd := fmt.Sprintf(`curl -q -s --connect-timeout 30 %s/hostname`, targetAddr) for start := time.Now(); time.Since(start) < timeout; time.Sleep(2 * time.Second) { - stdout, err = framework.RunHostCmd(sourcePod.Namespace, sourcePod.Name, cmd) + stdout, err = e2eoutput.RunHostCmd(sourcePod.Namespace, sourcePod.Name, cmd) if err != nil { framework.Logf("got err: %v, retry until timeout", err) continue diff --git a/test/e2e/node/apparmor.go b/test/e2e/node/apparmor.go index 4f731fa054d..2600704f67a 100644 --- a/test/e2e/node/apparmor.go +++ b/test/e2e/node/apparmor.go @@ -19,6 +19,7 @@ package node import ( "k8s.io/kubernetes/test/e2e/framework" e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2esecurity "k8s.io/kubernetes/test/e2e/framework/security" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" admissionapi "k8s.io/pod-security-admission/api" @@ -43,11 +44,11 @@ var _ = SIGDescribe("AppArmor", func() { }) ginkgo.It("should enforce an AppArmor profile", func() { - e2esecurity.CreateAppArmorTestPod(f.Namespace.Name, f.ClientSet, f.PodClient(), false, true) + e2esecurity.CreateAppArmorTestPod(f.Namespace.Name, f.ClientSet, e2epod.NewPodClient(f), false, true) }) ginkgo.It("can disable an AppArmor profile, using unconfined", func() { - e2esecurity.CreateAppArmorTestPod(f.Namespace.Name, f.ClientSet, f.PodClient(), true, true) + e2esecurity.CreateAppArmorTestPod(f.Namespace.Name, f.ClientSet, e2epod.NewPodClient(f), true, true) }) }) }) diff --git a/test/e2e/node/examples.go b/test/e2e/node/examples.go index 6a3856dd227..49e11b3df15 100644 --- a/test/e2e/node/examples.go +++ b/test/e2e/node/examples.go @@ -32,7 +32,9 @@ import ( commonutils "k8s.io/kubernetes/test/e2e/common" "k8s.io/kubernetes/test/e2e/framework" e2eauth "k8s.io/kubernetes/test/e2e/framework/auth" + e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" + e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output" e2etestfiles "k8s.io/kubernetes/test/e2e/framework/testfiles" admissionapi "k8s.io/pod-security-admission/api" @@ -71,8 +73,8 @@ var _ = SIGDescribe("[Feature:Example]", func() { execYaml := readFile(test, "exec-liveness.yaml.in") httpYaml := readFile(test, "http-liveness.yaml.in") - framework.RunKubectlOrDieInput(ns, execYaml, "create", "-f", "-") - framework.RunKubectlOrDieInput(ns, httpYaml, "create", "-f", "-") + e2ekubectl.RunKubectlOrDieInput(ns, execYaml, "create", "-f", "-") + e2ekubectl.RunKubectlOrDieInput(ns, httpYaml, "create", "-f", "-") // Since both containers start rapidly, we can easily run this test in parallel. 
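For reference, the shell helpers used in the networking_perf.go hunks above and the mount_propagation.go hunks below also move to e2epod and take the framework first. A minimal sketch, illustrative only and not part of the diff; the pod name and commands are placeholders:

package example

import (
	"k8s.io/kubernetes/test/e2e/framework"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

// runShellInPod shows the replacement for f.ExecShellInPod and
// f.ExecShellInPodWithFullOutput.
func runShellInPod(f *framework.Framework, podName string) {
	out := e2epod.ExecShellInPod(f, podName, "iperf -v || true")
	framework.Logf("shell output: %s", out)

	stdout, stderr, err := e2epod.ExecShellInPodWithFullOutput(f, podName, "cat /mnt/test/file")
	framework.Logf("stdout: %q, stderr: %q, err: %v", stdout, stderr, err)
}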
var wg sync.WaitGroup @@ -121,13 +123,13 @@ var _ = SIGDescribe("[Feature:Example]", func() { podName := "secret-test-pod" ginkgo.By("creating secret and pod") - framework.RunKubectlOrDieInput(ns, secretYaml, "create", "-f", "-") - framework.RunKubectlOrDieInput(ns, podYaml, "create", "-f", "-") + e2ekubectl.RunKubectlOrDieInput(ns, secretYaml, "create", "-f", "-") + e2ekubectl.RunKubectlOrDieInput(ns, podYaml, "create", "-f", "-") err := e2epod.WaitForPodNoLongerRunningInNamespace(c, podName, ns) framework.ExpectNoError(err) ginkgo.By("checking if secret was read correctly") - _, err = framework.LookForStringInLog(ns, "secret-test-pod", "test-container", "value-1", serverStartTimeout) + _, err = e2eoutput.LookForStringInLog(ns, "secret-test-pod", "test-container", "value-1", serverStartTimeout) framework.ExpectNoError(err) }) }) @@ -139,14 +141,14 @@ var _ = SIGDescribe("[Feature:Example]", func() { podName := "dapi-test-pod" ginkgo.By("creating the pod") - framework.RunKubectlOrDieInput(ns, podYaml, "create", "-f", "-") + e2ekubectl.RunKubectlOrDieInput(ns, podYaml, "create", "-f", "-") err := e2epod.WaitForPodNoLongerRunningInNamespace(c, podName, ns) framework.ExpectNoError(err) ginkgo.By("checking if name and namespace were passed correctly") - _, err = framework.LookForStringInLog(ns, podName, "test-container", fmt.Sprintf("MY_POD_NAMESPACE=%v", ns), serverStartTimeout) + _, err = e2eoutput.LookForStringInLog(ns, podName, "test-container", fmt.Sprintf("MY_POD_NAMESPACE=%v", ns), serverStartTimeout) framework.ExpectNoError(err) - _, err = framework.LookForStringInLog(ns, podName, "test-container", fmt.Sprintf("MY_POD_NAME=%v", podName), serverStartTimeout) + _, err = e2eoutput.LookForStringInLog(ns, podName, "test-container", fmt.Sprintf("MY_POD_NAME=%v", podName), serverStartTimeout) framework.ExpectNoError(err) }) }) diff --git a/test/e2e/node/kubelet.go b/test/e2e/node/kubelet.go index 3e011d36e30..6e8b070a574 100644 --- a/test/e2e/node/kubelet.go +++ b/test/e2e/node/kubelet.go @@ -110,7 +110,7 @@ func waitTillNPodsRunningOnNodes(c clientset.Interface, nodeNames sets.String, p func restartNfsServer(serverPod *v1.Pod) { const startcmd = "/usr/sbin/rpc.nfsd 1" ns := fmt.Sprintf("--namespace=%v", serverPod.Namespace) - framework.RunKubectlOrDie(ns, "exec", ns, serverPod.Name, "--", "/bin/sh", "-c", startcmd) + e2ekubectl.RunKubectlOrDie(ns, "exec", ns, serverPod.Name, "--", "/bin/sh", "-c", startcmd) } // Stop the passed-in nfs-server by issuing a `/usr/sbin/rpc.nfsd 0` command in the @@ -119,7 +119,7 @@ func restartNfsServer(serverPod *v1.Pod) { func stopNfsServer(serverPod *v1.Pod) { const stopcmd = "/usr/sbin/rpc.nfsd 0" ns := fmt.Sprintf("--namespace=%v", serverPod.Namespace) - framework.RunKubectlOrDie(ns, "exec", ns, serverPod.Name, "--", "/bin/sh", "-c", stopcmd) + e2ekubectl.RunKubectlOrDie(ns, "exec", ns, serverPod.Name, "--", "/bin/sh", "-c", stopcmd) } // Creates a pod that mounts an nfs volume that is served by the nfs-server pod. The container @@ -310,7 +310,7 @@ var _ = SIGDescribe("kubelet", func() { } for nodeName := range nodeNames { for k, v := range nodeLabels { - framework.AddOrUpdateLabelOnNode(c, nodeName, k, v) + e2enode.AddOrUpdateLabelOnNode(c, nodeName, k, v) } } @@ -334,7 +334,7 @@ var _ = SIGDescribe("kubelet", func() { // If we added labels to nodes in this test, remove them now. 
for nodeName := range nodeNames { for k := range nodeLabels { - framework.RemoveLabelOffNode(c, nodeName, k) + e2enode.RemoveLabelOffNode(c, nodeName, k) } } }) diff --git a/test/e2e/node/mount_propagation.go b/test/e2e/node/mount_propagation.go index 9670c854f34..5a9700d8069 100644 --- a/test/e2e/node/mount_propagation.go +++ b/test/e2e/node/mount_propagation.go @@ -25,6 +25,7 @@ import ( "k8s.io/apimachinery/pkg/util/sets" "k8s.io/kubernetes/test/e2e/framework" e2enode "k8s.io/kubernetes/test/e2e/framework/node" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" "k8s.io/kubernetes/test/e2e/storage/utils" imageutils "k8s.io/kubernetes/test/utils/image" admissionapi "k8s.io/pod-security-admission/api" @@ -111,7 +112,7 @@ var _ = SIGDescribe("Mount propagation", func() { hostExec.IssueCommand(cleanCmd, node) }() - podClient := f.PodClient() + podClient := e2epod.NewPodClient(f) bidirectional := v1.MountPropagationBidirectional master := podClient.CreateSync(preparePod("master", node, &bidirectional, hostDir)) @@ -128,18 +129,18 @@ var _ = SIGDescribe("Mount propagation", func() { for _, podName := range podNames { for _, dirName := range podNames { cmd := fmt.Sprintf("test -d /mnt/test/%s", dirName) - f.ExecShellInPod(podName, cmd) + e2epod.ExecShellInPod(f, podName, cmd) } } // Each pod mounts one tmpfs to /mnt/test/ and puts a file there. for _, podName := range podNames { cmd := fmt.Sprintf("mount -t tmpfs e2e-mount-propagation-%[1]s /mnt/test/%[1]s; echo %[1]s > /mnt/test/%[1]s/file", podName) - f.ExecShellInPod(podName, cmd) + e2epod.ExecShellInPod(f, podName, cmd) // unmount tmpfs when the test finishes cmd = fmt.Sprintf("umount /mnt/test/%s", podName) - defer f.ExecShellInPod(podName, cmd) + defer e2epod.ExecShellInPod(f, podName, cmd) } // The host mounts one tmpfs to testdir/host and puts a file there so we @@ -170,7 +171,7 @@ var _ = SIGDescribe("Mount propagation", func() { for podName, mounts := range expectedMounts { for _, mountName := range dirNames { cmd := fmt.Sprintf("cat /mnt/test/%s/file", mountName) - stdout, stderr, err := f.ExecShellInPodWithFullOutput(podName, cmd) + stdout, stderr, err := e2epod.ExecShellInPodWithFullOutput(f, podName, cmd) framework.Logf("pod %s mount %s: stdout: %q, stderr: %q error: %v", podName, mountName, stdout, stderr, err) msg := fmt.Sprintf("When checking pod %s and directory %s", podName, mountName) shouldBeVisible := mounts.Has(mountName) diff --git a/test/e2e/node/pods.go b/test/e2e/node/pods.go index 8999af61016..3d955d01637 100644 --- a/test/e2e/node/pods.go +++ b/test/e2e/node/pods.go @@ -55,9 +55,9 @@ var _ = SIGDescribe("Pods Extended", func() { f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline ginkgo.Describe("Delete Grace Period", func() { - var podClient *framework.PodClient + var podClient *e2epod.PodClient ginkgo.BeforeEach(func() { - podClient = f.PodClient() + podClient = e2epod.NewPodClient(f) }) /* @@ -148,9 +148,9 @@ var _ = SIGDescribe("Pods Extended", func() { }) ginkgo.Describe("Pods Set QOS Class", func() { - var podClient *framework.PodClient + var podClient *e2epod.PodClient ginkgo.BeforeEach(func() { - podClient = f.PodClient() + podClient = e2epod.NewPodClient(f) }) /* @@ -200,9 +200,9 @@ var _ = SIGDescribe("Pods Extended", func() { }) ginkgo.Describe("Pod Container Status", func() { - var podClient *framework.PodClient + var podClient *e2epod.PodClient ginkgo.BeforeEach(func() { - podClient = f.PodClient() + podClient = e2epod.NewPodClient(f) }) ginkgo.It("should never report success for a pending 
container", func() { @@ -224,9 +224,9 @@ var _ = SIGDescribe("Pods Extended", func() { }) ginkgo.Describe("Pod Container lifecycle", func() { - var podClient *framework.PodClient + var podClient *e2epod.PodClient ginkgo.BeforeEach(func() { - podClient = f.PodClient() + podClient = e2epod.NewPodClient(f) }) ginkgo.It("should not create extra sandbox if all containers are done", func() { diff --git a/test/e2e/node/pre_stop.go b/test/e2e/node/pre_stop.go index 77be36aca64..d1c3f94acd8 100644 --- a/test/e2e/node/pre_stop.go +++ b/test/e2e/node/pre_stop.go @@ -155,9 +155,9 @@ func testPreStop(c clientset.Interface, ns string) { var _ = SIGDescribe("PreStop", func() { f := framework.NewDefaultFramework("prestop") f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline - var podClient *framework.PodClient + var podClient *e2epod.PodClient ginkgo.BeforeEach(func() { - podClient = f.PodClient() + podClient = e2epod.NewPodClient(f) }) /* diff --git a/test/e2e/node/runtimeclass.go b/test/e2e/node/runtimeclass.go index c020c208922..3c4ea5d0abf 100644 --- a/test/e2e/node/runtimeclass.go +++ b/test/e2e/node/runtimeclass.go @@ -19,6 +19,7 @@ package node import ( "context" "fmt" + "k8s.io/pod-security-admission/api" v1 "k8s.io/api/core/v1" @@ -89,9 +90,9 @@ var _ = SIGDescribe("RuntimeClass", func() { ginkgo.By("Trying to apply a label on the found node.") for key, value := range nodeSelector { - framework.AddOrUpdateLabelOnNode(f.ClientSet, nodeName, key, value) - framework.ExpectNodeHasLabel(f.ClientSet, nodeName, key, value) - defer framework.RemoveLabelOffNode(f.ClientSet, nodeName, key) + e2enode.AddOrUpdateLabelOnNode(f.ClientSet, nodeName, key, value) + e2enode.ExpectNodeHasLabel(f.ClientSet, nodeName, key, value) + defer e2enode.RemoveLabelOffNode(f.ClientSet, nodeName, key) } ginkgo.By("Trying to apply taint on the found node.") @@ -101,7 +102,7 @@ var _ = SIGDescribe("RuntimeClass", func() { Effect: v1.TaintEffectNoSchedule, } e2enode.AddOrUpdateTaintOnNode(f.ClientSet, nodeName, taint) - framework.ExpectNodeHasTaint(f.ClientSet, nodeName, &taint) + e2enode.ExpectNodeHasTaint(f.ClientSet, nodeName, &taint) defer e2enode.RemoveTaintOffNode(f.ClientSet, nodeName, taint) ginkgo.By("Trying to create runtimeclass and pod") @@ -114,7 +115,7 @@ var _ = SIGDescribe("RuntimeClass", func() { pod.Spec.NodeSelector = map[string]string{ labelFooName: "bar", } - pod = f.PodClient().Create(pod) + pod = e2epod.NewPodClient(f).Create(pod) framework.ExpectNoError(e2epod.WaitForPodNotPending(f.ClientSet, f.Namespace.Name, pod.Name)) @@ -145,9 +146,9 @@ var _ = SIGDescribe("RuntimeClass", func() { ginkgo.By("Trying to apply a label on the found node.") for key, value := range nodeSelector { - framework.AddOrUpdateLabelOnNode(f.ClientSet, nodeName, key, value) - framework.ExpectNodeHasLabel(f.ClientSet, nodeName, key, value) - defer framework.RemoveLabelOffNode(f.ClientSet, nodeName, key) + e2enode.AddOrUpdateLabelOnNode(f.ClientSet, nodeName, key, value) + e2enode.ExpectNodeHasLabel(f.ClientSet, nodeName, key, value) + defer e2enode.RemoveLabelOffNode(f.ClientSet, nodeName, key) } ginkgo.By("Trying to create runtimeclass and pod") @@ -160,7 +161,7 @@ var _ = SIGDescribe("RuntimeClass", func() { pod.Spec.NodeSelector = map[string]string{ labelFooName: "bar", } - pod = f.PodClient().Create(pod) + pod = e2epod.NewPodClient(f).Create(pod) framework.ExpectNoError(e2epod.WaitForPodNotPending(f.ClientSet, f.Namespace.Name, pod.Name)) diff --git a/test/e2e/node/security_context.go 
b/test/e2e/node/security_context.go index 9f0a6aa140f..b18b003ab45 100644 --- a/test/e2e/node/security_context.go +++ b/test/e2e/node/security_context.go @@ -32,6 +32,7 @@ import ( "k8s.io/kubernetes/test/e2e/framework" e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" + e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output" imageutils "k8s.io/kubernetes/test/utils/image" admissionapi "k8s.io/pod-security-admission/api" @@ -73,7 +74,7 @@ var _ = SIGDescribe("Security Context", func() { pod.Spec.Containers[0].Command = []string{"id", "-G"} pod.Spec.SecurityContext.SupplementalGroups = []int64{1234, 5678} groups := []string{"1234", "5678"} - f.TestContainerOutput("pod.Spec.SecurityContext.SupplementalGroups", pod, 0, groups) + e2eoutput.TestContainerOutput(f, "pod.Spec.SecurityContext.SupplementalGroups", pod, 0, groups) }) ginkgo.It("should support pod.Spec.SecurityContext.RunAsUser [LinuxOnly]", func() { @@ -82,7 +83,7 @@ var _ = SIGDescribe("Security Context", func() { pod.Spec.SecurityContext.RunAsUser = &userID pod.Spec.Containers[0].Command = []string{"sh", "-c", "id"} - f.TestContainerOutput("pod.Spec.SecurityContext.RunAsUser", pod, 0, []string{ + e2eoutput.TestContainerOutput(f, "pod.Spec.SecurityContext.RunAsUser", pod, 0, []string{ fmt.Sprintf("uid=%v", userID), fmt.Sprintf("gid=%v", 0), }) @@ -102,7 +103,7 @@ var _ = SIGDescribe("Security Context", func() { pod.Spec.SecurityContext.RunAsGroup = &groupID pod.Spec.Containers[0].Command = []string{"sh", "-c", "id"} - f.TestContainerOutput("pod.Spec.SecurityContext.RunAsUser", pod, 0, []string{ + e2eoutput.TestContainerOutput(f, "pod.Spec.SecurityContext.RunAsUser", pod, 0, []string{ fmt.Sprintf("uid=%v", userID), fmt.Sprintf("gid=%v", groupID), }) @@ -117,7 +118,7 @@ var _ = SIGDescribe("Security Context", func() { pod.Spec.Containers[0].SecurityContext.RunAsUser = &overrideUserID pod.Spec.Containers[0].Command = []string{"sh", "-c", "id"} - f.TestContainerOutput("pod.Spec.SecurityContext.RunAsUser", pod, 0, []string{ + e2eoutput.TestContainerOutput(f, "pod.Spec.SecurityContext.RunAsUser", pod, 0, []string{ fmt.Sprintf("uid=%v", overrideUserID), fmt.Sprintf("gid=%v", 0), }) @@ -142,7 +143,7 @@ var _ = SIGDescribe("Security Context", func() { pod.Spec.Containers[0].SecurityContext.RunAsGroup = &overrideGroupID pod.Spec.Containers[0].Command = []string{"sh", "-c", "id"} - f.TestContainerOutput("pod.Spec.SecurityContext.RunAsUser", pod, 0, []string{ + e2eoutput.TestContainerOutput(f, "pod.Spec.SecurityContext.RunAsUser", pod, 0, []string{ fmt.Sprintf("uid=%v", overrideUserID), fmt.Sprintf("gid=%v", overrideGroupID), }) @@ -165,27 +166,27 @@ var _ = SIGDescribe("Security Context", func() { pod.Spec.Containers[0].SecurityContext = &v1.SecurityContext{SeccompProfile: &v1.SeccompProfile{Type: v1.SeccompProfileTypeUnconfined}} pod.Spec.SecurityContext = &v1.PodSecurityContext{SeccompProfile: &v1.SeccompProfile{Type: v1.SeccompProfileTypeRuntimeDefault}} pod.Spec.Containers[0].Command = []string{"grep", "ecc", "/proc/self/status"} - f.TestContainerOutput("seccomp unconfined container", pod, 0, []string{"0"}) // seccomp disabled + e2eoutput.TestContainerOutput(f, "seccomp unconfined container", pod, 0, []string{"0"}) // seccomp disabled }) ginkgo.It("should support seccomp unconfined on the pod [LinuxOnly]", func() { pod := scTestPod(false, false) pod.Spec.SecurityContext = &v1.PodSecurityContext{SeccompProfile: &v1.SeccompProfile{Type: v1.SeccompProfileTypeUnconfined}} 
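For reference, the container-output assertion used throughout security_context.go now lives in e2eoutput and takes the framework first. A minimal sketch, illustrative only and not part of the diff; it assumes a pod spec with at least one container, and the user ID is a placeholder:

package example

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/kubernetes/test/e2e/framework"
	e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
)

// checkRunAsUserOutput shows the replacement for f.TestContainerOutput(...).
func checkRunAsUserOutput(f *framework.Framework, pod *v1.Pod, userID int64) {
	pod.Spec.SecurityContext = &v1.PodSecurityContext{RunAsUser: &userID}
	pod.Spec.Containers[0].Command = []string{"sh", "-c", "id"}
	e2eoutput.TestContainerOutput(f, "pod.Spec.SecurityContext.RunAsUser", pod, 0, []string{
		fmt.Sprintf("uid=%v", userID),
	})
}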
pod.Spec.Containers[0].Command = []string{"grep", "ecc", "/proc/self/status"} - f.TestContainerOutput("seccomp unconfined pod", pod, 0, []string{"0"}) // seccomp disabled + e2eoutput.TestContainerOutput(f, "seccomp unconfined pod", pod, 0, []string{"0"}) // seccomp disabled }) ginkgo.It("should support seccomp runtime/default [LinuxOnly]", func() { pod := scTestPod(false, false) pod.Spec.Containers[0].SecurityContext = &v1.SecurityContext{SeccompProfile: &v1.SeccompProfile{Type: v1.SeccompProfileTypeRuntimeDefault}} pod.Spec.Containers[0].Command = []string{"grep", "ecc", "/proc/self/status"} - f.TestContainerOutput("seccomp runtime/default", pod, 0, []string{"2"}) // seccomp filtered + e2eoutput.TestContainerOutput(f, "seccomp runtime/default", pod, 0, []string{"2"}) // seccomp filtered }) ginkgo.It("should support seccomp default which is unconfined [LinuxOnly]", func() { pod := scTestPod(false, false) pod.Spec.Containers[0].Command = []string{"grep", "ecc", "/proc/self/status"} - f.TestContainerOutput("seccomp default unconfined", pod, 0, []string{"0"}) // seccomp disabled + e2eoutput.TestContainerOutput(f, "seccomp default unconfined", pod, 0, []string{"0"}) // seccomp disabled }) }) @@ -262,7 +263,7 @@ func testPodSELinuxLabeling(f *framework.Framework, hostIPC bool, hostPID bool) pod.Spec.SecurityContext.SELinuxOptions = &v1.SELinuxOptions{ Level: "s0:c0,c1", } - f.TestContainerOutput("Pod with same MCS label reading test file", pod, 0, []string{testContent}) + e2eoutput.TestContainerOutput(f, "Pod with same MCS label reading test file", pod, 0, []string{testContent}) // Confirm that the same pod with a different MCS // label cannot access the volume diff --git a/test/e2e/node/taints.go b/test/e2e/node/taints.go index e8d76318969..cbff5547515 100644 --- a/test/e2e/node/taints.go +++ b/test/e2e/node/taints.go @@ -191,7 +191,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() { ginkgo.By("Trying to apply a taint on the Node") testTaint := getTestTaint() e2enode.AddOrUpdateTaintOnNode(cs, nodeName, testTaint) - framework.ExpectNodeHasTaint(cs, nodeName, &testTaint) + e2enode.ExpectNodeHasTaint(cs, nodeName, &testTaint) defer e2enode.RemoveTaintOffNode(cs, nodeName, testTaint) // Wait a bit @@ -223,7 +223,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() { ginkgo.By("Trying to apply a taint on the Node") testTaint := getTestTaint() e2enode.AddOrUpdateTaintOnNode(cs, nodeName, testTaint) - framework.ExpectNodeHasTaint(cs, nodeName, &testTaint) + e2enode.ExpectNodeHasTaint(cs, nodeName, &testTaint) defer e2enode.RemoveTaintOffNode(cs, nodeName, testTaint) // Wait a bit @@ -256,7 +256,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() { ginkgo.By("Trying to apply a taint on the Node") testTaint := getTestTaint() e2enode.AddOrUpdateTaintOnNode(cs, nodeName, testTaint) - framework.ExpectNodeHasTaint(cs, nodeName, &testTaint) + e2enode.ExpectNodeHasTaint(cs, nodeName, &testTaint) defer e2enode.RemoveTaintOffNode(cs, nodeName, testTaint) // Wait a bit @@ -303,7 +303,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() { ginkgo.By("Trying to apply a taint on the Node") testTaint := getTestTaint() e2enode.AddOrUpdateTaintOnNode(cs, nodeName, testTaint) - framework.ExpectNodeHasTaint(cs, nodeName, &testTaint) + e2enode.ExpectNodeHasTaint(cs, nodeName, &testTaint) taintRemoved := false defer func() { if !taintRemoved { @@ -378,11 +378,11 @@ var _ = SIGDescribe("NoExecuteTaintManager Multiple Pods 
[Serial]", func() { ginkgo.By("Trying to apply a taint on the Nodes") testTaint := getTestTaint() e2enode.AddOrUpdateTaintOnNode(cs, nodeName1, testTaint) - framework.ExpectNodeHasTaint(cs, nodeName1, &testTaint) + e2enode.ExpectNodeHasTaint(cs, nodeName1, &testTaint) defer e2enode.RemoveTaintOffNode(cs, nodeName1, testTaint) if nodeName2 != nodeName1 { e2enode.AddOrUpdateTaintOnNode(cs, nodeName2, testTaint) - framework.ExpectNodeHasTaint(cs, nodeName2, &testTaint) + e2enode.ExpectNodeHasTaint(cs, nodeName2, &testTaint) defer e2enode.RemoveTaintOffNode(cs, nodeName2, testTaint) } @@ -451,7 +451,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Multiple Pods [Serial]", func() { ginkgo.By("Trying to apply a taint on the Node") testTaint := getTestTaint() e2enode.AddOrUpdateTaintOnNode(cs, nodeName, testTaint) - framework.ExpectNodeHasTaint(cs, nodeName, &testTaint) + e2enode.ExpectNodeHasTaint(cs, nodeName, &testTaint) defer e2enode.RemoveTaintOffNode(cs, nodeName, testTaint) // 3. Wait to see if both pods get evicted in between [5, 25] seconds diff --git a/test/e2e/scheduling/nvidia-gpus.go b/test/e2e/scheduling/nvidia-gpus.go index 8b7afcdb2f8..298dc41732c 100644 --- a/test/e2e/scheduling/nvidia-gpus.go +++ b/test/e2e/scheduling/nvidia-gpus.go @@ -29,6 +29,7 @@ import ( "k8s.io/apimachinery/pkg/util/uuid" extensionsinternal "k8s.io/kubernetes/pkg/apis/extensions" "k8s.io/kubernetes/test/e2e/framework" + e2edebug "k8s.io/kubernetes/test/e2e/framework/debug" e2egpu "k8s.io/kubernetes/test/e2e/framework/gpu" e2ejob "k8s.io/kubernetes/test/e2e/framework/job" e2emanifest "k8s.io/kubernetes/test/e2e/framework/manifest" @@ -127,7 +128,7 @@ func getGPUsAvailable(f *framework.Framework) int64 { } // SetupNVIDIAGPUNode install Nvidia Drivers and wait for Nvidia GPUs to be available on nodes -func SetupNVIDIAGPUNode(f *framework.Framework, setupResourceGatherer bool) *framework.ContainerResourceGatherer { +func SetupNVIDIAGPUNode(f *framework.Framework, setupResourceGatherer bool) *e2edebug.ContainerResourceGatherer { logOSImages(f) var err error @@ -161,10 +162,10 @@ func SetupNVIDIAGPUNode(f *framework.Framework, setupResourceGatherer bool) *fra pods.Items = append(pods.Items, devicepluginPods.Items...) 
} - var rsgather *framework.ContainerResourceGatherer + var rsgather *e2edebug.ContainerResourceGatherer if setupResourceGatherer { framework.Logf("Starting ResourceUsageGather for the created DaemonSet pods.") - rsgather, err = framework.NewResourceUsageGatherer(f.ClientSet, framework.ResourceGathererOptions{InKubemark: false, Nodes: framework.AllNodes, ResourceDataGatheringPeriod: 2 * time.Second, ProbeDuration: 2 * time.Second, PrintVerboseLogs: true}, pods) + rsgather, err = e2edebug.NewResourceUsageGatherer(f.ClientSet, e2edebug.ResourceGathererOptions{InKubemark: false, Nodes: e2edebug.AllNodes, ResourceDataGatheringPeriod: 2 * time.Second, ProbeDuration: 2 * time.Second, PrintVerboseLogs: true}, pods) framework.ExpectNoError(err, "creating ResourceUsageGather for the daemonset pods") go rsgather.StartGatheringData() } @@ -195,17 +196,17 @@ func testNvidiaGPUs(f *framework.Framework) { framework.Logf("Creating %d pods and have the pods run a CUDA app", gpuPodNum) podList := []*v1.Pod{} for i := int64(0); i < gpuPodNum; i++ { - podList = append(podList, f.PodClient().Create(makeCudaAdditionDevicePluginTestPod())) + podList = append(podList, e2epod.NewPodClient(f).Create(makeCudaAdditionDevicePluginTestPod())) } framework.Logf("Wait for all test pods to succeed") // Wait for all pods to succeed for _, pod := range podList { - f.PodClient().WaitForSuccess(pod.Name, 5*time.Minute) + e2epod.NewPodClient(f).WaitForSuccess(pod.Name, 5*time.Minute) logContainers(f, pod) } framework.Logf("Stopping ResourceUsageGather") - constraints := make(map[string]framework.ResourceConstraint) + constraints := make(map[string]e2edebug.ResourceConstraint) // For now, just gets summary. Can pass valid constraints in the future. summary, err := rsgather.StopAndSummarize([]int{50, 90, 100}, constraints) f.TestSummaries = append(f.TestSummaries, summary) @@ -299,7 +300,7 @@ func VerifyJobNCompletions(f *framework.Framework, completions int32) { successes := int32(0) regex := regexp.MustCompile("PASSED") for _, podName := range createdPodNames { - f.PodClient().WaitForFinish(podName, 5*time.Minute) + e2epod.NewPodClient(f).WaitForFinish(podName, 5*time.Minute) logs, err := e2epod.GetPodLogs(f.ClientSet, ns, podName, "vector-addition") framework.ExpectNoError(err, "Should be able to get logs for pod %v", podName) if regex.MatchString(logs) { diff --git a/test/e2e/scheduling/predicates.go b/test/e2e/scheduling/predicates.go index 451952c2417..93957429d3b 100644 --- a/test/e2e/scheduling/predicates.go +++ b/test/e2e/scheduling/predicates.go @@ -95,7 +95,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() { nodeList = &v1.NodeList{} var err error - framework.AllNodesReady(cs, time.Minute) + e2enode.AllNodesReady(cs, time.Minute) nodeList, err = e2enode.GetReadySchedulableNodes(cs) if err != nil { @@ -339,8 +339,8 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() { continue } // Apply node label to each node - framework.AddOrUpdateLabelOnNode(cs, node.Name, "node", node.Name) - framework.ExpectNodeHasLabel(cs, node.Name, "node", node.Name) + e2enode.AddOrUpdateLabelOnNode(cs, node.Name, "node", node.Name) + e2enode.ExpectNodeHasLabel(cs, node.Name, "node", node.Name) // Find allocatable amount of CPU. allocatable, found := node.Status.Allocatable[v1.ResourceCPU] if !found { @@ -354,7 +354,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() { // Clean up added labels after this test. 
defer func() { for nodeName := range nodeToAllocatableMap { - framework.RemoveLabelOffNode(cs, nodeName, "node") + e2enode.RemoveLabelOffNode(cs, nodeName, "node") } }() @@ -464,9 +464,9 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() { ginkgo.By("Trying to apply a random label on the found node.") k := fmt.Sprintf("kubernetes.io/e2e-%s", string(uuid.NewUUID())) v := "42" - framework.AddOrUpdateLabelOnNode(cs, nodeName, k, v) - framework.ExpectNodeHasLabel(cs, nodeName, k, v) - defer framework.RemoveLabelOffNode(cs, nodeName, k) + e2enode.AddOrUpdateLabelOnNode(cs, nodeName, k, v) + e2enode.ExpectNodeHasLabel(cs, nodeName, k, v) + defer e2enode.RemoveLabelOffNode(cs, nodeName, k) ginkgo.By("Trying to relaunch the pod, now with labels.") labelPodName := "with-labels" @@ -537,9 +537,9 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() { ginkgo.By("Trying to apply a random label on the found node.") k := fmt.Sprintf("kubernetes.io/e2e-%s", string(uuid.NewUUID())) v := "42" - framework.AddOrUpdateLabelOnNode(cs, nodeName, k, v) - framework.ExpectNodeHasLabel(cs, nodeName, k, v) - defer framework.RemoveLabelOffNode(cs, nodeName, k) + e2enode.AddOrUpdateLabelOnNode(cs, nodeName, k, v) + e2enode.ExpectNodeHasLabel(cs, nodeName, k, v) + defer e2enode.RemoveLabelOffNode(cs, nodeName, k) ginkgo.By("Trying to relaunch the pod, now with labels.") labelPodName := "with-labels" @@ -589,15 +589,15 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() { Effect: v1.TaintEffectNoSchedule, } e2enode.AddOrUpdateTaintOnNode(cs, nodeName, testTaint) - framework.ExpectNodeHasTaint(cs, nodeName, &testTaint) + e2enode.ExpectNodeHasTaint(cs, nodeName, &testTaint) defer e2enode.RemoveTaintOffNode(cs, nodeName, testTaint) ginkgo.By("Trying to apply a random label on the found node.") labelKey := fmt.Sprintf("kubernetes.io/e2e-label-key-%s", string(uuid.NewUUID())) labelValue := "testing-label-value" - framework.AddOrUpdateLabelOnNode(cs, nodeName, labelKey, labelValue) - framework.ExpectNodeHasLabel(cs, nodeName, labelKey, labelValue) - defer framework.RemoveLabelOffNode(cs, nodeName, labelKey) + e2enode.AddOrUpdateLabelOnNode(cs, nodeName, labelKey, labelValue) + e2enode.ExpectNodeHasLabel(cs, nodeName, labelKey, labelValue) + defer e2enode.RemoveLabelOffNode(cs, nodeName, labelKey) ginkgo.By("Trying to relaunch the pod, now with tolerations.") tolerationPodName := "with-tolerations" @@ -632,15 +632,15 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() { Effect: v1.TaintEffectNoSchedule, } e2enode.AddOrUpdateTaintOnNode(cs, nodeName, testTaint) - framework.ExpectNodeHasTaint(cs, nodeName, &testTaint) + e2enode.ExpectNodeHasTaint(cs, nodeName, &testTaint) defer e2enode.RemoveTaintOffNode(cs, nodeName, testTaint) ginkgo.By("Trying to apply a random label on the found node.") labelKey := fmt.Sprintf("kubernetes.io/e2e-label-key-%s", string(uuid.NewUUID())) labelValue := "testing-label-value" - framework.AddOrUpdateLabelOnNode(cs, nodeName, labelKey, labelValue) - framework.ExpectNodeHasLabel(cs, nodeName, labelKey, labelValue) - defer framework.RemoveLabelOffNode(cs, nodeName, labelKey) + e2enode.AddOrUpdateLabelOnNode(cs, nodeName, labelKey, labelValue) + e2enode.ExpectNodeHasLabel(cs, nodeName, labelKey, labelValue) + defer e2enode.RemoveLabelOffNode(cs, nodeName, labelKey) ginkgo.By("Trying to relaunch the pod, still no tolerations.") podNameNoTolerations := "still-no-tolerations" @@ -674,9 +674,9 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() { nodeSelector 
:= make(map[string]string) nodeSelector[k] = v - framework.AddOrUpdateLabelOnNode(cs, nodeName, k, v) - framework.ExpectNodeHasLabel(cs, nodeName, k, v) - defer framework.RemoveLabelOffNode(cs, nodeName, k) + e2enode.AddOrUpdateLabelOnNode(cs, nodeName, k, v) + e2enode.ExpectNodeHasLabel(cs, nodeName, k, v) + defer e2enode.RemoveLabelOffNode(cs, nodeName, k) port := int32(54321) ginkgo.By(fmt.Sprintf("Trying to create a pod(pod1) with hostport %v and hostIP %s and expect scheduled", port, localhost)) @@ -707,9 +707,9 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() { nodeSelector := make(map[string]string) nodeSelector[k] = v - framework.AddOrUpdateLabelOnNode(cs, nodeName, k, v) - framework.ExpectNodeHasLabel(cs, nodeName, k, v) - defer framework.RemoveLabelOffNode(cs, nodeName, k) + e2enode.AddOrUpdateLabelOnNode(cs, nodeName, k, v) + e2enode.ExpectNodeHasLabel(cs, nodeName, k, v) + defer e2enode.RemoveLabelOffNode(cs, nodeName, k) port := int32(54322) ginkgo.By(fmt.Sprintf("Trying to create a pod(pod4) with hostport %v and hostIP 0.0.0.0(empty string here) and expect scheduled", port)) @@ -731,12 +731,12 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() { nodeNames = Get2NodesThatCanRunPod(f) ginkgo.By(fmt.Sprintf("Apply dedicated topologyKey %v for this test on the 2 nodes.", topologyKey)) for _, nodeName := range nodeNames { - framework.AddOrUpdateLabelOnNode(cs, nodeName, topologyKey, nodeName) + e2enode.AddOrUpdateLabelOnNode(cs, nodeName, topologyKey, nodeName) } }) ginkgo.AfterEach(func() { for _, nodeName := range nodeNames { - framework.RemoveLabelOffNode(cs, nodeName, topologyKey) + e2enode.RemoveLabelOffNode(cs, nodeName, topologyKey) } }) diff --git a/test/e2e/scheduling/preemption.go b/test/e2e/scheduling/preemption.go index 3aa6616bc96..1c3e424fc5a 100644 --- a/test/e2e/scheduling/preemption.go +++ b/test/e2e/scheduling/preemption.go @@ -327,7 +327,7 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() { nodeNames = Get2NodesThatCanRunPod(f) ginkgo.By(fmt.Sprintf("Apply dedicated topologyKey %v for this test on the 2 nodes.", topologyKey)) for _, nodeName := range nodeNames { - framework.AddOrUpdateLabelOnNode(cs, nodeName, topologyKey, nodeName) + e2enode.AddOrUpdateLabelOnNode(cs, nodeName, topologyKey, nodeName) node, err := cs.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{}) framework.ExpectNoError(err) @@ -342,7 +342,7 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() { }) ginkgo.AfterEach(func() { for _, nodeName := range nodeNames { - framework.RemoveLabelOffNode(cs, nodeName, topologyKey) + e2enode.RemoveLabelOffNode(cs, nodeName, topologyKey) } for _, node := range nodes { nodeCopy := node.DeepCopy() diff --git a/test/e2e/scheduling/priorities.go b/test/e2e/scheduling/priorities.go index 8bdc3e568c9..95c4df6593d 100644 --- a/test/e2e/scheduling/priorities.go +++ b/test/e2e/scheduling/priorities.go @@ -142,17 +142,17 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() { ginkgo.By("Trying to apply a label on the found node.") k = "kubernetes.io/e2e-node-topologyKey" v := "topologyvalue1" - framework.AddOrUpdateLabelOnNode(cs, nodeName, k, v) - framework.ExpectNodeHasLabel(cs, nodeName, k, v) - defer framework.RemoveLabelOffNode(cs, nodeName, k) + e2enode.AddOrUpdateLabelOnNode(cs, nodeName, k, v) + e2enode.ExpectNodeHasLabel(cs, nodeName, k, v) + defer e2enode.RemoveLabelOffNode(cs, nodeName, k) ginkgo.By("Trying to apply a label on other nodes.") v = "topologyvalue2" for _, node := range 
nodeList.Items { if node.Name != nodeName { - framework.AddOrUpdateLabelOnNode(cs, node.Name, k, v) - framework.ExpectNodeHasLabel(cs, node.Name, k, v) - defer framework.RemoveLabelOffNode(cs, node.Name, k) + e2enode.AddOrUpdateLabelOnNode(cs, node.Name, k, v) + e2enode.ExpectNodeHasLabel(cs, node.Name, k, v) + defer e2enode.RemoveLabelOffNode(cs, node.Name, k) } } } @@ -276,12 +276,12 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() { nodeNames = Get2NodesThatCanRunPod(f) ginkgo.By(fmt.Sprintf("Apply dedicated topologyKey %v for this test on the 2 nodes.", topologyKey)) for _, nodeName := range nodeNames { - framework.AddOrUpdateLabelOnNode(cs, nodeName, topologyKey, nodeName) + e2enode.AddOrUpdateLabelOnNode(cs, nodeName, topologyKey, nodeName) } }) ginkgo.AfterEach(func() { for _, nodeName := range nodeNames { - framework.RemoveLabelOffNode(cs, nodeName, topologyKey) + e2enode.RemoveLabelOffNode(cs, nodeName, topologyKey) } }) @@ -561,5 +561,5 @@ func getRandomTaint() v1.Taint { func addTaintToNode(cs clientset.Interface, nodeName string, testTaint v1.Taint) { e2enode.AddOrUpdateTaintOnNode(cs, nodeName, testTaint) - framework.ExpectNodeHasTaint(cs, nodeName, &testTaint) + e2enode.ExpectNodeHasTaint(cs, nodeName, &testTaint) } diff --git a/test/e2e/storage/drivers/proxy/io.go b/test/e2e/storage/drivers/proxy/io.go index 8e58518384b..b44962a32d4 100644 --- a/test/e2e/storage/drivers/proxy/io.go +++ b/test/e2e/storage/drivers/proxy/io.go @@ -21,6 +21,7 @@ import ( "io" "k8s.io/kubernetes/test/e2e/framework" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" "k8s.io/kubernetes/test/e2e/storage/drivers/csi-test/mock/service" ) @@ -87,7 +88,7 @@ func (p PodDirIO) RemoveAll(path string) error { } func (p PodDirIO) execute(command []string, stdin io.Reader) (string, string, error) { - return p.F.ExecWithOptions(framework.ExecOptions{ + return e2epod.ExecWithOptions(p.F, e2epod.ExecOptions{ Command: command, Namespace: p.Namespace, PodName: p.PodName, diff --git a/test/e2e/storage/empty_dir_wrapper.go b/test/e2e/storage/empty_dir_wrapper.go index cfe701a8dc6..5daa9672131 100644 --- a/test/e2e/storage/empty_dir_wrapper.go +++ b/test/e2e/storage/empty_dir_wrapper.go @@ -146,7 +146,7 @@ var _ = utils.SIGDescribe("EmptyDir wrapper volumes", func() { }, }, } - pod = f.PodClient().CreateSync(pod) + pod = e2epod.NewPodClient(f).CreateSync(pod) defer func() { ginkgo.By("Cleaning up the secret") @@ -218,7 +218,7 @@ func createGitServer(f *framework.Framework) (gitURL string, gitRepo string, cle gitServerPod := e2epod.NewAgnhostPod(f.Namespace.Name, gitServerPodName, nil, nil, []v1.ContainerPort{{ContainerPort: int32(containerPort)}}, "fake-gitserver") gitServerPod.ObjectMeta.Labels = labels - f.PodClient().CreateSync(gitServerPod) + e2epod.NewPodClient(f).CreateSync(gitServerPod) // Portal IP and port httpPort := 2345 diff --git a/test/e2e/storage/flexvolume_mounted_volume_resize.go b/test/e2e/storage/flexvolume_mounted_volume_resize.go index 5b0bdbf9b21..1185a928563 100644 --- a/test/e2e/storage/flexvolume_mounted_volume_resize.go +++ b/test/e2e/storage/flexvolume_mounted_volume_resize.go @@ -69,7 +69,7 @@ var _ = utils.SIGDescribe("[Feature:Flexvolumes] Mounted flexvolume expand[Slow] e2eskipper.SkipUnlessSSHKeyPresent() c = f.ClientSet ns = f.Namespace.Name - framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout)) + framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout)) node, err 
= e2enode.GetRandomReadySchedulableNode(f.ClientSet) framework.ExpectNoError(err) @@ -78,8 +78,8 @@ var _ = utils.SIGDescribe("[Feature:Flexvolumes] Mounted flexvolume expand[Slow] nodeKey = "mounted_flexvolume_expand_" + ns nodeLabelValue = ns nodeKeyValueLabel = map[string]string{nodeKey: nodeLabelValue} - framework.AddOrUpdateLabelOnNode(c, nodeName, nodeKey, nodeLabelValue) - ginkgo.DeferCleanup(framework.RemoveLabelOffNode, c, nodeName, nodeKey) + e2enode.AddOrUpdateLabelOnNode(c, nodeName, nodeKey, nodeLabelValue) + ginkgo.DeferCleanup(e2enode.RemoveLabelOffNode, c, nodeName, nodeKey) test := testsuites.StorageClassTest{ Name: "flexvolume-resize", diff --git a/test/e2e/storage/flexvolume_online_resize.go b/test/e2e/storage/flexvolume_online_resize.go index ffea761df3f..f14443437f5 100644 --- a/test/e2e/storage/flexvolume_online_resize.go +++ b/test/e2e/storage/flexvolume_online_resize.go @@ -63,7 +63,7 @@ var _ = utils.SIGDescribe("[Feature:Flexvolumes] Mounted flexvolume volume expan e2eskipper.SkipUnlessSSHKeyPresent() c = f.ClientSet ns = f.Namespace.Name - framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout)) + framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout)) var err error node, err = e2enode.GetRandomReadySchedulableNode(f.ClientSet) @@ -73,8 +73,8 @@ var _ = utils.SIGDescribe("[Feature:Flexvolumes] Mounted flexvolume volume expan nodeKey = "mounted_flexvolume_expand_" + ns nodeLabelValue = ns nodeKeyValueLabel = map[string]string{nodeKey: nodeLabelValue} - framework.AddOrUpdateLabelOnNode(c, nodeName, nodeKey, nodeLabelValue) - ginkgo.DeferCleanup(framework.RemoveLabelOffNode, c, nodeName, nodeKey) + e2enode.AddOrUpdateLabelOnNode(c, nodeName, nodeKey, nodeLabelValue) + ginkgo.DeferCleanup(e2enode.RemoveLabelOffNode, c, nodeName, nodeKey) test := testsuites.StorageClassTest{ Name: "flexvolume-resize", diff --git a/test/e2e/storage/gke_local_ssd.go b/test/e2e/storage/gke_local_ssd.go index 40f5d3d89be..48ff3e4f784 100644 --- a/test/e2e/storage/gke_local_ssd.go +++ b/test/e2e/storage/gke_local_ssd.go @@ -20,10 +20,11 @@ import ( "fmt" "os/exec" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/kubernetes/test/e2e/framework" + e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" "k8s.io/kubernetes/test/e2e/storage/utils" admissionapi "k8s.io/pod-security-admission/api" @@ -65,7 +66,7 @@ func doTestWriteAndReadToLocalSsd(f *framework.Framework) { var msg string var out = []string{"hello world"} - f.TestContainerOutput(msg, pod, 0, out) + e2eoutput.TestContainerOutput(f, msg, pod, 0, out) } func testPodWithSsd(command string) *v1.Pod { diff --git a/test/e2e/storage/host_path_type.go b/test/e2e/storage/host_path_type.go index 58a3989b8b7..a14ea7804f5 100644 --- a/test/e2e/storage/host_path_type.go +++ b/test/e2e/storage/host_path_type.go @@ -62,7 +62,7 @@ var _ = utils.SIGDescribe("HostPathType Directory [Slow]", func() { ginkgo.By("Create a pod for further testing") hostBaseDir = path.Join("/tmp", ns) mountBaseDir = "/mnt/test" - basePod = f.PodClient().CreateSync(newHostPathTypeTestPod(map[string]string{}, hostBaseDir, mountBaseDir, &hostPathDirectoryOrCreate)) + basePod = e2epod.NewPodClient(f).CreateSync(newHostPathTypeTestPod(map[string]string{}, hostBaseDir, mountBaseDir, &hostPathDirectoryOrCreate)) 
ginkgo.By(fmt.Sprintf("running on node %s", basePod.Spec.NodeName)) targetDir = path.Join(hostBaseDir, "adir") ginkgo.By("Should automatically create a new directory 'adir' when HostPathType is HostPathDirectoryOrCreate") @@ -130,7 +130,7 @@ var _ = utils.SIGDescribe("HostPathType File [Slow]", func() { ginkgo.By("Create a pod for further testing") hostBaseDir = path.Join("/tmp", ns) mountBaseDir = "/mnt/test" - basePod = f.PodClient().CreateSync(newHostPathTypeTestPod(map[string]string{}, hostBaseDir, mountBaseDir, &hostPathDirectoryOrCreate)) + basePod = e2epod.NewPodClient(f).CreateSync(newHostPathTypeTestPod(map[string]string{}, hostBaseDir, mountBaseDir, &hostPathDirectoryOrCreate)) ginkgo.By(fmt.Sprintf("running on node %s", basePod.Spec.NodeName)) targetFile = path.Join(hostBaseDir, "afile") ginkgo.By("Should automatically create a new file 'afile' when HostPathType is HostPathFileOrCreate") @@ -198,7 +198,7 @@ var _ = utils.SIGDescribe("HostPathType Socket [Slow]", func() { ginkgo.By("Create a pod for further testing") hostBaseDir = path.Join("/tmp", ns) mountBaseDir = "/mnt/test" - basePod = f.PodClient().CreateSync(newHostPathTypeTestPodWithCommand(map[string]string{}, hostBaseDir, mountBaseDir, &hostPathDirectoryOrCreate, fmt.Sprintf("nc -lU %s", path.Join(mountBaseDir, "asocket")))) + basePod = e2epod.NewPodClient(f).CreateSync(newHostPathTypeTestPodWithCommand(map[string]string{}, hostBaseDir, mountBaseDir, &hostPathDirectoryOrCreate, fmt.Sprintf("nc -lU %s", path.Join(mountBaseDir, "asocket")))) ginkgo.By(fmt.Sprintf("running on node %s", basePod.Spec.NodeName)) targetSocket = path.Join(hostBaseDir, "asocket") }) @@ -264,7 +264,7 @@ var _ = utils.SIGDescribe("HostPathType Character Device [Slow]", func() { ginkgo.By("Create a pod for further testing") hostBaseDir = path.Join("/tmp", ns) mountBaseDir = "/mnt/test" - basePod = f.PodClient().CreateSync(newHostPathTypeTestPod(map[string]string{}, hostBaseDir, mountBaseDir, &hostPathDirectoryOrCreate)) + basePod = e2epod.NewPodClient(f).CreateSync(newHostPathTypeTestPod(map[string]string{}, hostBaseDir, mountBaseDir, &hostPathDirectoryOrCreate)) ginkgo.By(fmt.Sprintf("running on node %s", basePod.Spec.NodeName)) targetCharDev = path.Join(hostBaseDir, "achardev") ginkgo.By("Create a character device for further testing") @@ -334,7 +334,7 @@ var _ = utils.SIGDescribe("HostPathType Block Device [Slow]", func() { ginkgo.By("Create a pod for further testing") hostBaseDir = path.Join("/tmp", ns) mountBaseDir = "/mnt/test" - basePod = f.PodClient().CreateSync(newHostPathTypeTestPod(map[string]string{}, hostBaseDir, mountBaseDir, &hostPathDirectoryOrCreate)) + basePod = e2epod.NewPodClient(f).CreateSync(newHostPathTypeTestPod(map[string]string{}, hostBaseDir, mountBaseDir, &hostPathDirectoryOrCreate)) ginkgo.By(fmt.Sprintf("running on node %s", basePod.Spec.NodeName)) targetBlockDev = path.Join(hostBaseDir, "ablkdev") ginkgo.By("Create a block device for further testing") diff --git a/test/e2e/storage/mounted_volume_resize.go b/test/e2e/storage/mounted_volume_resize.go index d695e9a59c9..46b2913d06a 100644 --- a/test/e2e/storage/mounted_volume_resize.go +++ b/test/e2e/storage/mounted_volume_resize.go @@ -63,7 +63,7 @@ var _ = utils.SIGDescribe("Mounted volume expand [Feature:StorageProvider]", fun e2eskipper.SkipUnlessProviderIs("aws", "gce") c = f.ClientSet ns = f.Namespace.Name - framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout)) + 
framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout)) node, err := e2enode.GetRandomReadySchedulableNode(f.ClientSet) framework.ExpectNoError(err) @@ -72,8 +72,8 @@ var _ = utils.SIGDescribe("Mounted volume expand [Feature:StorageProvider]", fun nodeKey = "mounted_volume_expand_" + ns nodeLabelValue = ns nodeKeyValueLabel = map[string]string{nodeKey: nodeLabelValue} - framework.AddOrUpdateLabelOnNode(c, nodeName, nodeKey, nodeLabelValue) - ginkgo.DeferCleanup(framework.RemoveLabelOffNode, c, nodeName, nodeKey) + e2enode.AddOrUpdateLabelOnNode(c, nodeName, nodeKey, nodeLabelValue) + ginkgo.DeferCleanup(e2enode.RemoveLabelOffNode, c, nodeName, nodeKey) test := testsuites.StorageClassTest{ Name: "default", diff --git a/test/e2e/storage/pd.go b/test/e2e/storage/pd.go index 58f3cf0579e..58c4afdaf08 100644 --- a/test/e2e/storage/pd.go +++ b/test/e2e/storage/pd.go @@ -490,7 +490,7 @@ var _ = utils.SIGDescribe("Pod Disks [Feature:StorageProvider]", func() { func countReadyNodes(c clientset.Interface, hostName types.NodeName) int { e2enode.WaitForNodeToBeReady(c, string(hostName), nodeStatusTimeout) - framework.WaitForAllNodesSchedulable(c, nodeStatusTimeout) + e2enode.WaitForAllNodesSchedulable(c, nodeStatusTimeout) nodes, err := e2enode.GetReadySchedulableNodes(c) framework.ExpectNoError(err) return len(nodes.Items) diff --git a/test/e2e/storage/persistent_volumes-local.go b/test/e2e/storage/persistent_volumes-local.go index f6114b74f6e..33a232ba1d6 100644 --- a/test/e2e/storage/persistent_volumes-local.go +++ b/test/e2e/storage/persistent_volumes-local.go @@ -43,6 +43,7 @@ import ( "k8s.io/kubernetes/test/e2e/framework" e2enode "k8s.io/kubernetes/test/e2e/framework/node" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" + e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output" e2epv "k8s.io/kubernetes/test/e2e/framework/pv" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" e2estatefulset "k8s.io/kubernetes/test/e2e/framework/statefulset" @@ -802,7 +803,7 @@ func twoPodsReadWriteSerialTest(f *framework.Framework, config *localTestConfig, func createPodWithFsGroupTest(config *localTestConfig, testVol *localTestVolume, fsGroup int64, expectedFsGroup int64) *v1.Pod { pod, err := createLocalPod(config, testVol, &fsGroup) framework.ExpectNoError(err) - _, err = framework.LookForStringInPodExec(config.ns, pod.Name, []string{"stat", "-c", "%g", volumeDir}, strconv.FormatInt(expectedFsGroup, 10), time.Second*3) + _, err = e2eoutput.LookForStringInPodExec(config.ns, pod.Name, []string{"stat", "-c", "%g", volumeDir}, strconv.FormatInt(expectedFsGroup, 10), time.Second*3) framework.ExpectNoError(err, "failed to get expected fsGroup %d on directory %s in pod %s", fsGroup, volumeDir, pod.Name) return pod } diff --git a/test/e2e/storage/pv_protection.go b/test/e2e/storage/pv_protection.go index 892fb3138e6..9c63715c688 100644 --- a/test/e2e/storage/pv_protection.go +++ b/test/e2e/storage/pv_protection.go @@ -30,6 +30,7 @@ import ( "k8s.io/kubernetes/pkg/util/slice" volumeutil "k8s.io/kubernetes/pkg/volume/util" "k8s.io/kubernetes/test/e2e/framework" + e2enode "k8s.io/kubernetes/test/e2e/framework/node" e2epv "k8s.io/kubernetes/test/e2e/framework/pv" "k8s.io/kubernetes/test/e2e/storage/utils" admissionapi "k8s.io/pod-security-admission/api" @@ -53,7 +54,7 @@ var _ = utils.SIGDescribe("PV Protection", func() { ginkgo.BeforeEach(func() { client = f.ClientSet nameSpace = f.Namespace.Name - 
framework.ExpectNoError(framework.WaitForAllNodesSchedulable(client, framework.TestContext.NodeSchedulableTimeout)) + framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(client, framework.TestContext.NodeSchedulableTimeout)) // Enforce binding only within test space via selector labels volLabel = labels.Set{e2epv.VolumeSelectorKey: nameSpace} diff --git a/test/e2e/storage/pvc_protection.go b/test/e2e/storage/pvc_protection.go index f957f667802..bf69c49f8c2 100644 --- a/test/e2e/storage/pvc_protection.go +++ b/test/e2e/storage/pvc_protection.go @@ -31,6 +31,7 @@ import ( "k8s.io/kubernetes/pkg/util/slice" volumeutil "k8s.io/kubernetes/pkg/volume/util" "k8s.io/kubernetes/test/e2e/framework" + e2enode "k8s.io/kubernetes/test/e2e/framework/node" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2epv "k8s.io/kubernetes/test/e2e/framework/pv" "k8s.io/kubernetes/test/e2e/storage/testsuites" @@ -74,7 +75,7 @@ var _ = utils.SIGDescribe("PVC Protection", func() { ginkgo.BeforeEach(func() { client = f.ClientSet nameSpace = f.Namespace.Name - framework.ExpectNoError(framework.WaitForAllNodesSchedulable(client, framework.TestContext.NodeSchedulableTimeout)) + framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(client, framework.TestContext.NodeSchedulableTimeout)) ginkgo.By("Creating a PVC") prefix := "pvc-protection" diff --git a/test/e2e/storage/testsuites/multivolume.go b/test/e2e/storage/testsuites/multivolume.go index 358bb806066..e668e93cb75 100644 --- a/test/e2e/storage/testsuites/multivolume.go +++ b/test/e2e/storage/testsuites/multivolume.go @@ -29,6 +29,7 @@ import ( "k8s.io/kubernetes/test/e2e/framework" e2enode "k8s.io/kubernetes/test/e2e/framework/node" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" + e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" e2evolume "k8s.io/kubernetes/test/e2e/framework/volume" storageframework "k8s.io/kubernetes/test/e2e/storage/framework" @@ -713,7 +714,7 @@ func TestConcurrentAccessToRelatedVolumes(f *framework.Framework, cs clientset.I // Check that all pods have the same content index := i + 1 ginkgo.By(fmt.Sprintf("Checking if the volume in pod%d has expected initial content", index)) - _, err := framework.LookForStringInPodExec(pods[i].Namespace, pods[i].Name, commands, expectedContent, time.Minute) + _, err := e2eoutput.LookForStringInPodExec(pods[i].Namespace, pods[i].Name, commands, expectedContent, time.Minute) framework.ExpectNoError(err, "failed: finding the contents of the block volume %s.", fileName) } else { fileName := "/mnt/volume1/index.html" @@ -721,7 +722,7 @@ func TestConcurrentAccessToRelatedVolumes(f *framework.Framework, cs clientset.I // Check that all pods have the same content index := i + 1 ginkgo.By(fmt.Sprintf("Checking if the volume in pod%d has expected initial content", index)) - _, err := framework.LookForStringInPodExec(pods[i].Namespace, pods[i].Name, commands, expectedContent, time.Minute) + _, err := e2eoutput.LookForStringInPodExec(pods[i].Namespace, pods[i].Name, commands, expectedContent, time.Minute) framework.ExpectNoError(err, "failed: finding the contents of the mounted file %s.", fileName) } } diff --git a/test/e2e/storage/testsuites/snapshottable.go b/test/e2e/storage/testsuites/snapshottable.go index 54f22f7868b..0e3b247d31c 100644 --- a/test/e2e/storage/testsuites/snapshottable.go +++ b/test/e2e/storage/testsuites/snapshottable.go @@ -35,6 +35,7 @@ import ( "k8s.io/component-helpers/storage/ephemeral" 
"k8s.io/kubernetes/test/e2e/framework" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" + e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output" e2epv "k8s.io/kubernetes/test/e2e/framework/pv" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" e2evolume "k8s.io/kubernetes/test/e2e/framework/volume" @@ -259,7 +260,7 @@ func (s *snapshottableTestSuite) DefineTests(driver storageframework.TestDriver, framework.ExpectNoError(e2epod.WaitTimeoutForPodRunningInNamespace(cs, restoredPod.Name, restoredPod.Namespace, f.Timeouts.PodStartSlow)) if pattern.VolType != storageframework.GenericEphemeralVolume { commands := e2evolume.GenerateReadFileCmd(datapath) - _, err = framework.LookForStringInPodExec(restoredPod.Namespace, restoredPod.Name, commands, originalMntTestData, time.Minute) + _, err = e2eoutput.LookForStringInPodExec(restoredPod.Namespace, restoredPod.Name, commands, originalMntTestData, time.Minute) framework.ExpectNoError(err) } @@ -408,7 +409,7 @@ func (s *snapshottableTestSuite) DefineTests(driver storageframework.TestDriver, }) framework.ExpectNoError(e2epod.WaitTimeoutForPodRunningInNamespace(cs, restoredPod.Name, restoredPod.Namespace, f.Timeouts.PodStartSlow)) commands := e2evolume.GenerateReadFileCmd(datapath) - _, err = framework.LookForStringInPodExec(restoredPod.Namespace, restoredPod.Name, commands, originalMntTestData, time.Minute) + _, err = e2eoutput.LookForStringInPodExec(restoredPod.Namespace, restoredPod.Name, commands, originalMntTestData, time.Minute) framework.ExpectNoError(err) ginkgo.By("should delete the VolumeSnapshotContent according to its deletion policy") diff --git a/test/e2e/storage/testsuites/subpath.go b/test/e2e/storage/testsuites/subpath.go index db1bcb0ca37..7616f3ed03c 100644 --- a/test/e2e/storage/testsuites/subpath.go +++ b/test/e2e/storage/testsuites/subpath.go @@ -35,8 +35,10 @@ import ( "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/kubernetes/test/e2e/framework" + e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl" e2enode "k8s.io/kubernetes/test/e2e/framework/node" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" + e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" e2evolume "k8s.io/kubernetes/test/e2e/framework/volume" storageframework "k8s.io/kubernetes/test/e2e/storage/framework" @@ -487,7 +489,7 @@ func TestBasicSubpathFile(f *framework.Framework, contents string, pod *v1.Pod, ginkgo.By(fmt.Sprintf("Creating pod %s", pod.Name)) removeUnusedContainers(pod) - f.TestContainerOutput("atomic-volume-subpath", pod, 0, []string{contents}) + e2eoutput.TestContainerOutput(f, "atomic-volume-subpath", pod, 0, []string{contents}) ginkgo.By(fmt.Sprintf("Deleting pod %s", pod.Name)) err := e2epod.DeletePodWithWait(f.ClientSet, pod) @@ -670,7 +672,7 @@ func addMultipleWrites(container *v1.Container, file1 string, file2 string) { func testMultipleReads(f *framework.Framework, pod *v1.Pod, containerIndex int, file1 string, file2 string) { ginkgo.By(fmt.Sprintf("Creating pod %s", pod.Name)) removeUnusedContainers(pod) - f.TestContainerOutput("multi_subpath", pod, containerIndex, []string{ + e2eoutput.TestContainerOutput(f, "multi_subpath", pod, containerIndex, []string{ "content of file \"" + file1 + "\": mount-tester new file", "content of file \"" + file2 + "\": mount-tester new file", }) @@ -689,7 +691,7 @@ func testReadFile(f *framework.Framework, file string, pod *v1.Pod, containerInd ginkgo.By(fmt.Sprintf("Creating pod %s", pod.Name)) 
removeUnusedContainers(pod) - f.TestContainerOutput("subpath", pod, containerIndex, []string{ + e2eoutput.TestContainerOutput(f, "subpath", pod, containerIndex, []string{ "content of file \"" + file + "\": mount-tester new file", }) @@ -1040,5 +1042,5 @@ func podContainerExec(pod *v1.Pod, containerIndex int, command string) (string, shell = "/bin/sh" option = "-c" } - return framework.RunKubectl(pod.Namespace, "exec", pod.Name, "--container", pod.Spec.Containers[containerIndex].Name, "--", shell, option, command) + return e2ekubectl.RunKubectl(pod.Namespace, "exec", pod.Name, "--container", pod.Spec.Containers[containerIndex].Name, "--", shell, option, command) } diff --git a/test/e2e/storage/testsuites/volumes.go b/test/e2e/storage/testsuites/volumes.go index 59c4af32325..bc35bacbc15 100644 --- a/test/e2e/storage/testsuites/volumes.go +++ b/test/e2e/storage/testsuites/volumes.go @@ -32,6 +32,7 @@ import ( "k8s.io/apimachinery/pkg/util/errors" "k8s.io/kubernetes/test/e2e/framework" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" + e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" e2evolume "k8s.io/kubernetes/test/e2e/framework/volume" storageframework "k8s.io/kubernetes/test/e2e/storage/framework" @@ -254,7 +255,7 @@ func testScriptInPod( } e2epod.SetNodeSelection(&pod.Spec, config.ClientNodeSelection) ginkgo.By(fmt.Sprintf("Creating pod %s", pod.Name)) - f.TestContainerOutput("exec-volume-test", pod, 0, []string{fileName}) + e2eoutput.TestContainerOutput(f, "exec-volume-test", pod, 0, []string{fileName}) ginkgo.By(fmt.Sprintf("Deleting pod %s", pod.Name)) err := e2epod.DeletePodWithWait(f.ClientSet, pod) diff --git a/test/e2e/storage/utils/local.go b/test/e2e/storage/utils/local.go index 011ebda0c52..ef3e8188872 100644 --- a/test/e2e/storage/utils/local.go +++ b/test/e2e/storage/utils/local.go @@ -26,7 +26,7 @@ import ( "strings" "github.com/onsi/ginkgo/v2" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/kubernetes/test/e2e/framework" ) diff --git a/test/e2e/storage/volume_limits.go b/test/e2e/storage/volume_limits.go index 81620a4d538..d65a6d7aee5 100644 --- a/test/e2e/storage/volume_limits.go +++ b/test/e2e/storage/volume_limits.go @@ -18,7 +18,7 @@ package storage import ( "github.com/onsi/ginkgo/v2" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" clientset "k8s.io/client-go/kubernetes" v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" "k8s.io/kubernetes/test/e2e/framework" @@ -37,7 +37,7 @@ var _ = utils.SIGDescribe("Volume limits", func() { ginkgo.BeforeEach(func() { e2eskipper.SkipUnlessProviderIs("aws", "gce", "gke") c = f.ClientSet - framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout)) + framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout)) }) ginkgo.It("should verify that all nodes have volume limits", func() { diff --git a/test/e2e/storage/vsphere/bootstrap.go b/test/e2e/storage/vsphere/bootstrap.go index 04cbd425f98..5b193571d96 100644 --- a/test/e2e/storage/vsphere/bootstrap.go +++ b/test/e2e/storage/vsphere/bootstrap.go @@ -18,9 +18,10 @@ package vsphere import ( "context" + "sync" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/kubernetes/test/e2e/framework" - "sync" ) var once sync.Once diff --git a/test/e2e/storage/vsphere/pv_reclaimpolicy.go b/test/e2e/storage/vsphere/pv_reclaimpolicy.go index 8992342abcc..79bf7fe5d51 100644 --- 
a/test/e2e/storage/vsphere/pv_reclaimpolicy.go +++ b/test/e2e/storage/vsphere/pv_reclaimpolicy.go @@ -27,6 +27,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" + e2enode "k8s.io/kubernetes/test/e2e/framework/node" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2epv "k8s.io/kubernetes/test/e2e/framework/pv" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" @@ -49,7 +50,7 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:vsphere][Feature:ReclaimPo ginkgo.BeforeEach(func() { c = f.ClientSet ns = f.Namespace.Name - framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout)) + framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout)) }) ginkgo.Describe("persistentvolumereclaim:vsphere [Feature:vsphere]", func() { diff --git a/test/e2e/storage/vsphere/pvc_label_selector.go b/test/e2e/storage/vsphere/pvc_label_selector.go index 0ae2f2c8bc7..74c73072674 100644 --- a/test/e2e/storage/vsphere/pvc_label_selector.go +++ b/test/e2e/storage/vsphere/pvc_label_selector.go @@ -25,6 +25,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" + e2enode "k8s.io/kubernetes/test/e2e/framework/node" e2epv "k8s.io/kubernetes/test/e2e/framework/pv" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" "k8s.io/kubernetes/test/e2e/storage/utils" @@ -68,7 +69,7 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:vsphere][Feature:LabelSele ns = f.Namespace.Name Bootstrap(f) nodeInfo = GetReadySchedulableRandomNodeInfo() - framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout)) + framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout)) ssdlabels = make(map[string]string) ssdlabels["volume-type"] = "ssd" vvollabels = make(map[string]string) diff --git a/test/e2e/storage/vsphere/vsphere_scale.go b/test/e2e/storage/vsphere/vsphere_scale.go index aae8eb48d06..a8f140e3fc3 100644 --- a/test/e2e/storage/vsphere/vsphere_scale.go +++ b/test/e2e/storage/vsphere/vsphere_scale.go @@ -109,7 +109,7 @@ var _ = utils.SIGDescribe("vcp at scale [Feature:vsphere] ", func() { nodeSelectorList = createNodeLabels(client, namespace, nodes) ginkgo.DeferCleanup(func() { for _, node := range nodes.Items { - framework.RemoveLabelOffNode(client, node.Name, NodeLabelKey) + e2enode.RemoveLabelOffNode(client, node.Name, NodeLabelKey) } }) }) @@ -234,7 +234,7 @@ func createNodeLabels(client clientset.Interface, namespace string, nodes *v1.No labelValue: labelVal, } nodeSelectorList = append(nodeSelectorList, nodeSelector) - framework.AddOrUpdateLabelOnNode(client, node.Name, NodeLabelKey, labelVal) + e2enode.AddOrUpdateLabelOnNode(client, node.Name, NodeLabelKey, labelVal) } return nodeSelectorList } diff --git a/test/e2e/storage/vsphere/vsphere_utils.go b/test/e2e/storage/vsphere/vsphere_utils.go index 8c7c28230e7..3e2652290a5 100644 --- a/test/e2e/storage/vsphere/vsphere_utils.go +++ b/test/e2e/storage/vsphere/vsphere_utils.go @@ -41,7 +41,9 @@ import ( "k8s.io/apimachinery/pkg/util/wait" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" + e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl" e2enode "k8s.io/kubernetes/test/e2e/framework/node" + e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output" e2epv 
"k8s.io/kubernetes/test/e2e/framework/pv" e2essh "k8s.io/kubernetes/test/e2e/framework/ssh" "k8s.io/kubernetes/test/e2e/storage/utils" @@ -358,14 +360,14 @@ func getVSpherePodSpecWithVolumePaths(volumePaths []string, keyValuelabel map[st func verifyFilesExistOnVSphereVolume(namespace string, podName string, filePaths ...string) { for _, filePath := range filePaths { - _, err := framework.RunKubectl(namespace, "exec", podName, "--", "/bin/ls", filePath) + _, err := e2ekubectl.RunKubectl(namespace, "exec", podName, "--", "/bin/ls", filePath) framework.ExpectNoError(err, fmt.Sprintf("failed to verify file: %q on the pod: %q", filePath, podName)) } } func createEmptyFilesOnVSphereVolume(namespace string, podName string, filePaths []string) { for _, filePath := range filePaths { - err := framework.CreateEmptyFileOnPod(namespace, podName, filePath) + err := e2eoutput.CreateEmptyFileOnPod(namespace, podName, filePath) framework.ExpectNoError(err) } } @@ -383,7 +385,7 @@ func verifyVSphereVolumesAccessible(c clientset.Interface, pod *v1.Pod, persiste } // Verify Volumes are accessible filepath := filepath.Join("/mnt/", fmt.Sprintf("volume%v", index+1), "/emptyFile.txt") - _, err = framework.LookForStringInPodExec(namespace, pod.Name, []string{"/bin/touch", filepath}, "", time.Minute) + _, err = e2eoutput.LookForStringInPodExec(namespace, pod.Name, []string{"/bin/touch", filepath}, "", time.Minute) framework.ExpectNoError(err) } } @@ -819,7 +821,7 @@ func expectFilesToBeAccessible(namespace string, pods []*v1.Pod, filePaths []str // writeContentToPodFile writes the given content to the specified file. func writeContentToPodFile(namespace, podName, filePath, content string) error { - _, err := framework.RunKubectl(namespace, "exec", podName, + _, err := e2ekubectl.RunKubectl(namespace, "exec", podName, "--", "/bin/sh", "-c", fmt.Sprintf("echo '%s' > %s", content, filePath)) return err } @@ -827,7 +829,7 @@ func writeContentToPodFile(namespace, podName, filePath, content string) error { // expectFileContentToMatch checks if a given file contains the specified // content, else fails. 
func expectFileContentToMatch(namespace, podName, filePath, content string) { - _, err := framework.RunKubectl(namespace, "exec", podName, + _, err := e2ekubectl.RunKubectl(namespace, "exec", podName, "--", "/bin/sh", "-c", fmt.Sprintf("grep '%s' %s", content, filePath)) framework.ExpectNoError(err, fmt.Sprintf("failed to match content of file: %q on the pod: %q", filePath, podName)) } diff --git a/test/e2e/storage/vsphere/vsphere_volume_cluster_ds.go b/test/e2e/storage/vsphere/vsphere_volume_cluster_ds.go index 7393b047b0b..82610d17335 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_cluster_ds.go +++ b/test/e2e/storage/vsphere/vsphere_volume_cluster_ds.go @@ -18,6 +18,7 @@ package vsphere import ( "context" + "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" diff --git a/test/e2e/storage/vsphere/vsphere_volume_diskformat.go b/test/e2e/storage/vsphere/vsphere_volume_diskformat.go index 45718102b9b..14a2e1e01fb 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_diskformat.go +++ b/test/e2e/storage/vsphere/vsphere_volume_diskformat.go @@ -30,6 +30,7 @@ import ( "k8s.io/apimachinery/pkg/util/uuid" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" + e2enode "k8s.io/kubernetes/test/e2e/framework/node" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2epv "k8s.io/kubernetes/test/e2e/framework/pv" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" @@ -76,8 +77,8 @@ var _ = utils.SIGDescribe("Volume Disk Format [Feature:vsphere]", func() { nodeName = GetReadySchedulableRandomNodeInfo().Name nodeLabelValue = "vsphere_e2e_" + string(uuid.NewUUID()) nodeKeyValueLabel = map[string]string{NodeLabelKey: nodeLabelValue} - framework.AddOrUpdateLabelOnNode(client, nodeName, NodeLabelKey, nodeLabelValue) - ginkgo.DeferCleanup(framework.RemoveLabelOffNode, client, nodeName, NodeLabelKey) + e2enode.AddOrUpdateLabelOnNode(client, nodeName, NodeLabelKey, nodeLabelValue) + ginkgo.DeferCleanup(e2enode.RemoveLabelOffNode, client, nodeName, NodeLabelKey) }) ginkgo.It("verify disk format type - eagerzeroedthick is honored for dynamically provisioned pv using storageclass", func() { diff --git a/test/e2e/storage/vsphere/vsphere_volume_fstype.go b/test/e2e/storage/vsphere/vsphere_volume_fstype.go index 80837f0acbe..503119fb9ae 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_fstype.go +++ b/test/e2e/storage/vsphere/vsphere_volume_fstype.go @@ -28,6 +28,7 @@ import ( clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" + e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output" e2epv "k8s.io/kubernetes/test/e2e/framework/pv" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" "k8s.io/kubernetes/test/e2e/storage/utils" @@ -109,7 +110,7 @@ func invokeTestForFstype(f *framework.Framework, client clientset.Interface, nam // Create Pod and verify the persistent volume is accessible pod := createPodAndVerifyVolumeAccessible(client, namespace, pvclaim, persistentvolumes) - _, err := framework.LookForStringInPodExec(namespace, pod.Name, []string{"/bin/cat", "/mnt/volume1/fstype"}, expectedContent, time.Minute) + _, err := e2eoutput.LookForStringInPodExec(namespace, pod.Name, []string{"/bin/cat", "/mnt/volume1/fstype"}, expectedContent, time.Minute) framework.ExpectNoError(err) // Detach and delete volume diff --git a/test/e2e/storage/vsphere/vsphere_volume_master_restart.go b/test/e2e/storage/vsphere/vsphere_volume_master_restart.go index 
7243dd8c298..72f4eb06265 100644
--- a/test/e2e/storage/vsphere/vsphere_volume_master_restart.go
+++ b/test/e2e/storage/vsphere/vsphere_volume_master_restart.go
@@ -26,7 +26,7 @@ import (
 	"github.com/onsi/ginkgo/v2"
 	"github.com/onsi/gomega"
 
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/uuid"
 	clientset "k8s.io/client-go/kubernetes"
@@ -120,7 +120,7 @@ var _ = utils.SIGDescribe("Volume Attach Verify [Feature:vsphere][Serial][Disrup
 		Bootstrap(f)
 		client = f.ClientSet
 		namespace = f.Namespace.Name
-		framework.ExpectNoError(framework.WaitForAllNodesSchedulable(client, framework.TestContext.NodeSchedulableTimeout))
+		framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(client, framework.TestContext.NodeSchedulableTimeout))
 		nodes, err := e2enode.GetReadySchedulableNodes(client)
 		framework.ExpectNoError(err)
 
@@ -136,7 +136,7 @@ var _ = utils.SIGDescribe("Volume Attach Verify [Feature:vsphere][Serial][Disrup
 			nodeKeyValueLabel := make(map[string]string)
 			nodeKeyValueLabel[labelKey] = nodeLabelValue
 			nodeKeyValueLabelList = append(nodeKeyValueLabelList, nodeKeyValueLabel)
-			framework.AddOrUpdateLabelOnNode(client, nodeName, labelKey, nodeLabelValue)
+			e2enode.AddOrUpdateLabelOnNode(client, nodeName, labelKey, nodeLabelValue)
 		}
 	})
 
diff --git a/test/e2e/storage/vsphere/vsphere_volume_node_delete.go b/test/e2e/storage/vsphere/vsphere_volume_node_delete.go
index 2501af11c49..9785d01b80e 100644
--- a/test/e2e/storage/vsphere/vsphere_volume_node_delete.go
+++ b/test/e2e/storage/vsphere/vsphere_volume_node_delete.go
@@ -46,7 +46,7 @@ var _ = utils.SIGDescribe("Node Unregister [Feature:vsphere] [Slow] [Disruptive]
 		Bootstrap(f)
 		client = f.ClientSet
 		namespace = f.Namespace.Name
-		framework.ExpectNoError(framework.WaitForAllNodesSchedulable(client, framework.TestContext.NodeSchedulableTimeout))
+		framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(client, framework.TestContext.NodeSchedulableTimeout))
 		framework.ExpectNoError(err)
 		workingDir = GetAndExpectStringEnvVar("VSPHERE_WORKING_DIR")
 	})
diff --git a/test/e2e/storage/vsphere/vsphere_volume_node_poweroff.go b/test/e2e/storage/vsphere/vsphere_volume_node_poweroff.go
index 30131d4be92..08a6a9755d3 100644
--- a/test/e2e/storage/vsphere/vsphere_volume_node_poweroff.go
+++ b/test/e2e/storage/vsphere/vsphere_volume_node_poweroff.go
@@ -58,7 +58,7 @@ var _ = utils.SIGDescribe("Node Poweroff [Feature:vsphere] [Slow] [Disruptive]",
 		Bootstrap(f)
 		client = f.ClientSet
 		namespace = f.Namespace.Name
-		framework.ExpectNoError(framework.WaitForAllNodesSchedulable(client, framework.TestContext.NodeSchedulableTimeout))
+		framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(client, framework.TestContext.NodeSchedulableTimeout))
 		nodeList, err := e2enode.GetReadySchedulableNodes(f.ClientSet)
 		framework.ExpectNoError(err)
 		if len(nodeList.Items) < 2 {
diff --git a/test/e2e/storage/vsphere/vsphere_volume_placement.go b/test/e2e/storage/vsphere/vsphere_volume_placement.go
index 134e8bdff68..fb80df1c63b 100644
--- a/test/e2e/storage/vsphere/vsphere_volume_placement.go
+++ b/test/e2e/storage/vsphere/vsphere_volume_placement.go
@@ -31,6 +31,7 @@ import (
 	"k8s.io/kubernetes/test/e2e/framework"
 	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
+	e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
 	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
 	"k8s.io/kubernetes/test/e2e/storage/utils"
 	admissionapi "k8s.io/pod-security-admission/api"
@@ -58,14 +59,14 @@ var _ = utils.SIGDescribe("Volume Placement [Feature:vsphere]", func() {
 		Bootstrap(f)
 		c = f.ClientSet
 		ns = f.Namespace.Name
-		framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout))
+		framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout))
 		node1Name, node1KeyValueLabel, node2Name, node2KeyValueLabel = testSetupVolumePlacement(c, ns)
 		ginkgo.DeferCleanup(func() {
 			if len(node1KeyValueLabel) > 0 {
-				framework.RemoveLabelOffNode(c, node1Name, NodeLabelKey)
+				e2enode.RemoveLabelOffNode(c, node1Name, NodeLabelKey)
 			}
 			if len(node2KeyValueLabel) > 0 {
-				framework.RemoveLabelOffNode(c, node2Name, NodeLabelKey)
+				e2enode.RemoveLabelOffNode(c, node2Name, NodeLabelKey)
 			}
 		})
 		nodeInfo = TestContext.NodeMapper.GetNodeInfo(node1Name)
@@ -307,10 +308,10 @@ var _ = utils.SIGDescribe("Volume Placement [Feature:vsphere]", func() {
 
 		// Create empty files on the mounted volumes on the pod to verify volume is writable
 		ginkgo.By("Creating empty file on volume mounted on pod-A")
-		framework.CreateEmptyFileOnPod(ns, podA.Name, podAFileName)
+		e2eoutput.CreateEmptyFileOnPod(ns, podA.Name, podAFileName)
 
 		ginkgo.By("Creating empty file volume mounted on pod-B")
-		framework.CreateEmptyFileOnPod(ns, podB.Name, podBFileName)
+		e2eoutput.CreateEmptyFileOnPod(ns, podB.Name, podBFileName)
 
 		// Verify newly and previously created files present on the volume mounted on the pod
 		ginkgo.By("Verify newly Created file and previously created files present on volume mounted on pod-A")
@@ -337,12 +338,12 @@ func testSetupVolumePlacement(client clientset.Interface, namespace string) (nod
 	node1LabelValue := "vsphere_e2e_" + string(uuid.NewUUID())
 	node1KeyValueLabel = make(map[string]string)
 	node1KeyValueLabel[NodeLabelKey] = node1LabelValue
-	framework.AddOrUpdateLabelOnNode(client, node1Name, NodeLabelKey, node1LabelValue)
+	e2enode.AddOrUpdateLabelOnNode(client, node1Name, NodeLabelKey, node1LabelValue)
 
 	node2LabelValue := "vsphere_e2e_" + string(uuid.NewUUID())
 	node2KeyValueLabel = make(map[string]string)
 	node2KeyValueLabel[NodeLabelKey] = node2LabelValue
-	framework.AddOrUpdateLabelOnNode(client, node2Name, NodeLabelKey, node2LabelValue)
+	e2enode.AddOrUpdateLabelOnNode(client, node2Name, NodeLabelKey, node2LabelValue)
 
 	return node1Name, node1KeyValueLabel, node2Name, node2KeyValueLabel
 }
diff --git a/test/e2e/storage/vsphere/vsphere_volume_vpxd_restart.go b/test/e2e/storage/vsphere/vsphere_volume_vpxd_restart.go
index a77273706ce..42089c92447 100644
--- a/test/e2e/storage/vsphere/vsphere_volume_vpxd_restart.go
+++ b/test/e2e/storage/vsphere/vsphere_volume_vpxd_restart.go
@@ -80,7 +80,7 @@ var _ = utils.SIGDescribe("Verify Volume Attach Through vpxd Restart [Feature:vs
 		Bootstrap(f)
 		client = f.ClientSet
 		namespace = f.Namespace.Name
-		framework.ExpectNoError(framework.WaitForAllNodesSchedulable(client, framework.TestContext.NodeSchedulableTimeout))
+		framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(client, framework.TestContext.NodeSchedulableTimeout))
 		nodes, err := e2enode.GetReadySchedulableNodes(client)
 		framework.ExpectNoError(err)
 
@@ -91,7 +91,7 @@ var _ = utils.SIGDescribe("Verify Volume Attach Through vpxd Restart [Feature:vs
 			nodeInfo := TestContext.NodeMapper.GetNodeInfo(nodes.Items[i].Name)
 			nodeName := nodes.Items[i].Name
 			nodeLabel := "vsphere_e2e_" + string(uuid.NewUUID())
-			framework.AddOrUpdateLabelOnNode(client, nodeName, labelKey, nodeLabel)
+			e2enode.AddOrUpdateLabelOnNode(client, nodeName, labelKey, nodeLabel)
 
 			vcHost := nodeInfo.VSphere.Config.Hostname
 			vcNodesMap[vcHost] = append(vcNodesMap[vcHost], node{
diff --git a/test/e2e/upgrades/apps/cassandra.go b/test/e2e/upgrades/apps/cassandra.go
index f11211bd0ae..9c267696043 100644
--- a/test/e2e/upgrades/apps/cassandra.go
+++ b/test/e2e/upgrades/apps/cassandra.go
@@ -35,6 +35,7 @@ import (
 	"k8s.io/apimachinery/pkg/util/wait"
 
 	"k8s.io/kubernetes/test/e2e/framework"
+	e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
 	e2estatefulset "k8s.io/kubernetes/test/e2e/framework/statefulset"
 	e2etestfiles "k8s.io/kubernetes/test/e2e/framework/testfiles"
 	"k8s.io/kubernetes/test/e2e/upgrades"
@@ -69,7 +70,7 @@ func cassandraKubectlCreate(ns, file string) {
 		framework.Fail(err.Error())
 	}
 	input := string(data)
-	framework.RunKubectlOrDieInput(ns, input, "create", "-f", "-")
+	e2ekubectl.RunKubectlOrDieInput(ns, input, "create", "-f", "-")
 }
 
 // Setup creates a Cassandra StatefulSet and a PDB. It also brings up a tester
diff --git a/test/e2e/upgrades/apps/daemonsets.go b/test/e2e/upgrades/apps/daemonsets.go
index 36daec33038..984e0a0d094 100644
--- a/test/e2e/upgrades/apps/daemonsets.go
+++ b/test/e2e/upgrades/apps/daemonsets.go
@@ -18,6 +18,7 @@ package apps
 
 import (
 	"context"
+
 	"github.com/onsi/ginkgo/v2"
 
 	appsv1 "k8s.io/api/apps/v1"
diff --git a/test/e2e/upgrades/apps/etcd.go b/test/e2e/upgrades/apps/etcd.go
index c9559f4e9d3..aedb7d3291a 100644
--- a/test/e2e/upgrades/apps/etcd.go
+++ b/test/e2e/upgrades/apps/etcd.go
@@ -35,6 +35,7 @@ import (
 	"k8s.io/apimachinery/pkg/util/wait"
 
 	"k8s.io/kubernetes/test/e2e/framework"
+	e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
 	e2estatefulset "k8s.io/kubernetes/test/e2e/framework/statefulset"
 	e2etestfiles "k8s.io/kubernetes/test/e2e/framework/testfiles"
 	"k8s.io/kubernetes/test/e2e/upgrades"
@@ -68,7 +69,7 @@ func kubectlCreate(ns, file string) {
 		framework.Fail(err.Error())
 	}
 	input := string(data)
-	framework.RunKubectlOrDieInput(ns, input, "create", "-f", "-")
+	e2ekubectl.RunKubectlOrDieInput(ns, input, "create", "-f", "-")
 }
 
 // Setup creates etcd statefulset and then verifies that the etcd is writable.
diff --git a/test/e2e/upgrades/apps/mysql.go b/test/e2e/upgrades/apps/mysql.go
index 39a0e56e63e..6e3d934bc29 100644
--- a/test/e2e/upgrades/apps/mysql.go
+++ b/test/e2e/upgrades/apps/mysql.go
@@ -35,6 +35,7 @@ import (
 	"k8s.io/apimachinery/pkg/util/wait"
 
 	"k8s.io/kubernetes/test/e2e/framework"
+	e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
 	e2estatefulset "k8s.io/kubernetes/test/e2e/framework/statefulset"
 	e2etestfiles "k8s.io/kubernetes/test/e2e/framework/testfiles"
 	"k8s.io/kubernetes/test/e2e/upgrades"
@@ -70,7 +71,7 @@ func mysqlKubectlCreate(ns, file string) {
 		framework.Fail(err.Error())
 	}
 	input := string(data)
-	framework.RunKubectlOrDieInput(ns, input, "create", "-f", "-")
+	e2ekubectl.RunKubectlOrDieInput(ns, input, "create", "-f", "-")
 }
 
 func (t *MySQLUpgradeTest) getServiceIP(f *framework.Framework, ns, svcName string) string {
diff --git a/test/e2e/upgrades/apps/replicasets.go b/test/e2e/upgrades/apps/replicasets.go
index e1a2bbc80b9..c20e6089f7d 100644
--- a/test/e2e/upgrades/apps/replicasets.go
+++ b/test/e2e/upgrades/apps/replicasets.go
@@ -19,6 +19,7 @@ package apps
 import (
 	"context"
 	"fmt"
+
 	appsv1 "k8s.io/api/apps/v1"
 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
diff --git a/test/e2e/upgrades/apps/statefulset.go b/test/e2e/upgrades/apps/statefulset.go
index 72d5e539b94..ae0c0105763 100644
--- a/test/e2e/upgrades/apps/statefulset.go
+++ b/test/e2e/upgrades/apps/statefulset.go
@@ -18,10 +18,11 @@ package apps
 
 import (
 	"context"
+
 	"github.com/onsi/ginkgo/v2"
 
 	appsv1 "k8s.io/api/apps/v1"
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/version"
 
diff --git a/test/e2e/upgrades/node/apparmor.go b/test/e2e/upgrades/node/apparmor.go
index 80daa6f942f..e8ca5093e8f 100644
--- a/test/e2e/upgrades/node/apparmor.go
+++ b/test/e2e/upgrades/node/apparmor.go
@@ -18,10 +18,12 @@ package node
 
 import (
 	"context"
+
 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/kubernetes/test/e2e/framework"
 	e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
+	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	e2esecurity "k8s.io/kubernetes/test/e2e/framework/security"
 	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
 	"k8s.io/kubernetes/test/e2e/upgrades"
@@ -61,7 +63,7 @@ func (t *AppArmorUpgradeTest) Setup(f *framework.Framework) {
 
 	// Create the initial test pod.
 	ginkgo.By("Creating a long-running AppArmor enabled pod.")
-	t.pod = e2esecurity.CreateAppArmorTestPod(f.Namespace.Name, f.ClientSet, f.PodClient(), false, false)
+	t.pod = e2esecurity.CreateAppArmorTestPod(f.Namespace.Name, f.ClientSet, e2epod.NewPodClient(f), false, false)
 
 	// Verify initial state.
t.verifyNodesAppArmorEnabled(f) @@ -88,7 +90,7 @@ func (t *AppArmorUpgradeTest) Teardown(f *framework.Framework) { func (t *AppArmorUpgradeTest) verifyPodStillUp(f *framework.Framework) { ginkgo.By("Verifying an AppArmor profile is continuously enforced for a pod") - pod, err := f.PodClient().Get(context.TODO(), t.pod.Name, metav1.GetOptions{}) + pod, err := e2epod.NewPodClient(f).Get(context.TODO(), t.pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "Should be able to get pod") framework.ExpectEqual(pod.Status.Phase, v1.PodRunning, "Pod should stay running") gomega.Expect(pod.Status.ContainerStatuses[0].State.Running).NotTo(gomega.BeNil(), "Container should be running") @@ -97,7 +99,7 @@ func (t *AppArmorUpgradeTest) verifyPodStillUp(f *framework.Framework) { func (t *AppArmorUpgradeTest) verifyNewPodSucceeds(f *framework.Framework) { ginkgo.By("Verifying an AppArmor profile is enforced for a new pod") - e2esecurity.CreateAppArmorTestPod(f.Namespace.Name, f.ClientSet, f.PodClient(), false, true) + e2esecurity.CreateAppArmorTestPod(f.Namespace.Name, f.ClientSet, e2epod.NewPodClient(f), false, true) } func (t *AppArmorUpgradeTest) verifyNodesAppArmorEnabled(f *framework.Framework) { diff --git a/test/e2e/upgrades/node/configmaps.go b/test/e2e/upgrades/node/configmaps.go index 083f0cffd8e..02d7ff6d576 100644 --- a/test/e2e/upgrades/node/configmaps.go +++ b/test/e2e/upgrades/node/configmaps.go @@ -20,10 +20,11 @@ import ( "context" "fmt" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/kubernetes/test/e2e/framework" + e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output" "k8s.io/kubernetes/test/e2e/upgrades" imageutils "k8s.io/kubernetes/test/utils/image" @@ -147,8 +148,8 @@ func (t *ConfigMapUpgradeTest) testPod(f *framework.Framework) { "content of file \"/etc/configmap-volume/data\": some configmap data", "mode of file \"/etc/configmap-volume/data\": -rw-r--r--", } - f.TestContainerOutput("volume consume configmap", pod, 0, expectedOutput) + e2eoutput.TestContainerOutput(f, "volume consume configmap", pod, 0, expectedOutput) expectedOutput = []string{"CONFIGMAP_DATA=some configmap data"} - f.TestContainerOutput("env consume configmap", pod, 1, expectedOutput) + e2eoutput.TestContainerOutput(f, "env consume configmap", pod, 1, expectedOutput) } diff --git a/test/e2e/upgrades/node/secrets.go b/test/e2e/upgrades/node/secrets.go index bd917729931..5f3d185cb2c 100644 --- a/test/e2e/upgrades/node/secrets.go +++ b/test/e2e/upgrades/node/secrets.go @@ -20,10 +20,11 @@ import ( "context" "fmt" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/kubernetes/test/e2e/framework" + e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output" "k8s.io/kubernetes/test/e2e/upgrades" imageutils "k8s.io/kubernetes/test/utils/image" @@ -144,8 +145,8 @@ func (t *SecretUpgradeTest) testPod(f *framework.Framework) { "mode of file \"/etc/secret-volume/data\": -rw-r--r--", } - f.TestContainerOutput("volume consume secrets", pod, 0, expectedOutput) + e2eoutput.TestContainerOutput(f, "volume consume secrets", pod, 0, expectedOutput) expectedOutput = []string{"SECRET_DATA=keep it secret"} - f.TestContainerOutput("env consume secrets", pod, 1, expectedOutput) + e2eoutput.TestContainerOutput(f, "env consume secrets", pod, 1, expectedOutput) } diff --git a/test/e2e/upgrades/node/sysctl.go b/test/e2e/upgrades/node/sysctl.go index 
9c032eb173e..f58bbfdd169 100644 --- a/test/e2e/upgrades/node/sysctl.go +++ b/test/e2e/upgrades/node/sysctl.go @@ -22,13 +22,15 @@ import ( "github.com/onsi/ginkgo/v2" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/kubernetes/pkg/kubelet/sysctl" "k8s.io/kubernetes/test/e2e/framework" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" + e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output" "k8s.io/kubernetes/test/e2e/upgrades" imageutils "k8s.io/kubernetes/test/utils/image" ) @@ -83,12 +85,12 @@ func (t *SysctlUpgradeTest) verifySafeSysctlWork(f *framework.Framework) *v1.Pod safeSysctl := "net.ipv4.ip_local_port_range" safeSysctlValue := "1024 1042" sysctlTestPod("valid-sysctls", map[string]string{safeSysctl: safeSysctlValue}) - validPod := f.PodClient().Create(t.validPod) + validPod := e2epod.NewPodClient(f).Create(t.validPod) ginkgo.By("Making sure the valid pod launches") - _, err := f.PodClient().WaitForErrorEventOrSuccess(t.validPod) + _, err := e2epod.NewPodClient(f).WaitForErrorEventOrSuccess(t.validPod) framework.ExpectNoError(err) - f.TestContainerOutput("pod with safe sysctl launched", t.validPod, 0, []string{fmt.Sprintf("%s = %s", safeSysctl, safeSysctlValue)}) + e2eoutput.TestContainerOutput(f, "pod with safe sysctl launched", t.validPod, 0, []string{fmt.Sprintf("%s = %s", safeSysctl, safeSysctlValue)}) return validPod } @@ -98,10 +100,10 @@ func (t *SysctlUpgradeTest) verifyUnsafeSysctlsAreRejected(f *framework.Framewor invalidPod := sysctlTestPod("valid-sysctls-"+string(uuid.NewUUID()), map[string]string{ "fs.mount-max": "1000000", }) - invalidPod = f.PodClient().Create(invalidPod) + invalidPod = e2epod.NewPodClient(f).Create(invalidPod) ginkgo.By("Making sure the invalid pod failed") - ev, err := f.PodClient().WaitForErrorEventOrSuccess(invalidPod) + ev, err := e2epod.NewPodClient(f).WaitForErrorEventOrSuccess(invalidPod) framework.ExpectNoError(err) framework.ExpectEqual(ev.Reason, sysctl.ForbiddenReason) diff --git a/test/e2e/upgrades/storage/persistent_volumes.go b/test/e2e/upgrades/storage/persistent_volumes.go index 3fe87029bc4..e907726bad5 100644 --- a/test/e2e/upgrades/storage/persistent_volumes.go +++ b/test/e2e/upgrades/storage/persistent_volumes.go @@ -21,6 +21,7 @@ import ( utilerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/kubernetes/test/e2e/framework" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" + e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output" e2epv "k8s.io/kubernetes/test/e2e/framework/pv" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" "k8s.io/kubernetes/test/e2e/upgrades" @@ -83,5 +84,5 @@ func (t *PersistentVolumeUpgradeTest) Teardown(f *framework.Framework) { func (t *PersistentVolumeUpgradeTest) testPod(f *framework.Framework, cmd string) { pod := e2epod.MakePod(f.Namespace.Name, nil, []*v1.PersistentVolumeClaim{t.pvc}, false, cmd) expectedOutput := []string{pvTestData} - f.TestContainerOutput("pod consumes pv", pod, 0, expectedOutput) + e2eoutput.TestContainerOutput(f, "pod consumes pv", pod, 0, expectedOutput) } diff --git a/test/e2e/windows/cpu_limits.go b/test/e2e/windows/cpu_limits.go index e80b34da15b..33671ee698b 100644 --- a/test/e2e/windows/cpu_limits.go +++ b/test/e2e/windows/cpu_limits.go @@ -19,15 +19,17 @@ package windows import ( "context" + "time" + v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 
"k8s.io/apimachinery/pkg/util/uuid" "k8s.io/kubernetes/test/e2e/framework" e2ekubelet "k8s.io/kubernetes/test/e2e/framework/kubelet" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" imageutils "k8s.io/kubernetes/test/utils/image" admissionapi "k8s.io/pod-security-admission/api" - "time" "github.com/onsi/ginkgo/v2" ) @@ -43,10 +45,10 @@ var _ = SIGDescribe("[Feature:Windows] Cpu Resources [Serial]", func() { ginkgo.It("should not be exceeded after waiting 2 minutes", func() { ginkgo.By("Creating one pod with limit set to '0.5'") podsDecimal := newCPUBurnPods(1, powershellImage, "0.5", "1Gi") - f.PodClient().CreateBatch(podsDecimal) + e2epod.NewPodClient(f).CreateBatch(podsDecimal) ginkgo.By("Creating one pod with limit set to '500m'") podsMilli := newCPUBurnPods(1, powershellImage, "500m", "1Gi") - f.PodClient().CreateBatch(podsMilli) + e2epod.NewPodClient(f).CreateBatch(podsMilli) ginkgo.By("Waiting 2 minutes") time.Sleep(2 * time.Minute) ginkgo.By("Ensuring pods are still running") diff --git a/test/e2e/windows/density.go b/test/e2e/windows/density.go index 957a1f5701f..b808147eab8 100644 --- a/test/e2e/windows/density.go +++ b/test/e2e/windows/density.go @@ -165,7 +165,7 @@ func createBatchPodWithRateControl(f *framework.Framework, pods []*v1.Pod, inter createTimes := make(map[string]metav1.Time) for _, pod := range pods { createTimes[pod.ObjectMeta.Name] = metav1.Now() - go f.PodClient().Create(pod) + go e2epod.NewPodClient(f).Create(pod) time.Sleep(interval) } return createTimes @@ -273,7 +273,7 @@ func deletePodsSync(f *framework.Framework, pods []*v1.Pod) { defer ginkgo.GinkgoRecover() defer wg.Done() - err := f.PodClient().Delete(context.TODO(), pod.ObjectMeta.Name, *metav1.NewDeleteOptions(30)) + err := e2epod.NewPodClient(f).Delete(context.TODO(), pod.ObjectMeta.Name, *metav1.NewDeleteOptions(30)) framework.ExpectNoError(err) err = e2epod.WaitForPodToDisappear(f.ClientSet, f.Namespace.Name, pod.ObjectMeta.Name, labels.Everything(), diff --git a/test/e2e/windows/device_plugin.go b/test/e2e/windows/device_plugin.go index bc6805168e3..f6ce4ae46a5 100644 --- a/test/e2e/windows/device_plugin.go +++ b/test/e2e/windows/device_plugin.go @@ -27,6 +27,7 @@ import ( "k8s.io/kubernetes/test/e2e/framework" e2edaemonset "k8s.io/kubernetes/test/e2e/framework/daemonset" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" + e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" imageutils "k8s.io/kubernetes/test/utils/image" admissionapi "k8s.io/pod-security-admission/api" @@ -115,20 +116,20 @@ var _ = SIGDescribe("[Feature:GPUDevicePlugin] Device Plugin", func() { //based on the windows version running the test. 
dxdiagDirectxVersion := "DirectX Version: DirectX 12" defaultNs := f.Namespace.Name - _, dxdiagDirectxVersionErr := framework.LookForStringInPodExec(defaultNs, windowsPod.Name, dxdiagCommand, dxdiagDirectxVersion, time.Minute) + _, dxdiagDirectxVersionErr := e2eoutput.LookForStringInPodExec(defaultNs, windowsPod.Name, dxdiagCommand, dxdiagDirectxVersion, time.Minute) framework.ExpectNoError(dxdiagDirectxVersionErr, "failed: didn't find directX version dxdiag output.") dxdiagDdiVersion := "DDI Version: 12" - _, dxdiagDdiVersionErr := framework.LookForStringInPodExec(defaultNs, windowsPod.Name, dxdiagCommand, dxdiagDdiVersion, time.Minute) + _, dxdiagDdiVersionErr := e2eoutput.LookForStringInPodExec(defaultNs, windowsPod.Name, dxdiagCommand, dxdiagDdiVersion, time.Minute) framework.ExpectNoError(dxdiagDdiVersionErr, "failed: didn't find DDI version in dxdiag output.") dxdiagVendorID := "Vendor ID: 0x" - _, dxdiagVendorIDErr := framework.LookForStringInPodExec(defaultNs, windowsPod.Name, dxdiagCommand, dxdiagVendorID, time.Minute) + _, dxdiagVendorIDErr := e2eoutput.LookForStringInPodExec(defaultNs, windowsPod.Name, dxdiagCommand, dxdiagVendorID, time.Minute) framework.ExpectNoError(dxdiagVendorIDErr, "failed: didn't find vendorID in dxdiag output.") envVarCommand := []string{"cmd.exe", "/c", "set", "DIRECTX_GPU_Name"} envVarDirectxGpuName := "DIRECTX_GPU_Name=" - _, envVarDirectxGpuNameErr := framework.LookForStringInPodExec(defaultNs, windowsPod.Name, envVarCommand, envVarDirectxGpuName, time.Minute) + _, envVarDirectxGpuNameErr := e2eoutput.LookForStringInPodExec(defaultNs, windowsPod.Name, envVarCommand, envVarDirectxGpuName, time.Minute) framework.ExpectNoError(envVarDirectxGpuNameErr, "failed: didn't find expected environment variable.") }) }) diff --git a/test/e2e/windows/dns.go b/test/e2e/windows/dns.go index 7c4ae40ff97..e099e254fa0 100644 --- a/test/e2e/windows/dns.go +++ b/test/e2e/windows/dns.go @@ -77,7 +77,7 @@ var _ = SIGDescribe("[Feature:Windows] DNS", func() { // This isn't the best 'test' but it is a great diagnostic, see later test for the 'real' test. 
ginkgo.By("Calling ipconfig to get debugging info for this pod's DNS and confirm that a dns server 1.1.1.1 can be injected, along with ") cmd := []string{"ipconfig", "/all"} - stdout, _, err := f.ExecWithOptions(framework.ExecOptions{ + stdout, _, err := e2epod.ExecWithOptions(f, e2epod.ExecOptions{ Command: cmd, Namespace: f.Namespace.Name, PodName: testPod.Name, @@ -102,7 +102,7 @@ var _ = SIGDescribe("[Feature:Windows] DNS", func() { // TODO @jayunit100 add ResolveHost to agn images cmd = []string{"curl.exe", "-k", "https://kubernetezzzzzzzz:443"} - stdout, _, err = f.ExecWithOptions(framework.ExecOptions{ + stdout, _, err = e2epod.ExecWithOptions(f, e2epod.ExecOptions{ Command: cmd, Namespace: f.Namespace.Name, PodName: testPod.Name, @@ -117,7 +117,7 @@ var _ = SIGDescribe("[Feature:Windows] DNS", func() { ginkgo.By("Verifying that injected dns records for 'kubernetes' resolve to the valid ip address") cmd = []string{"curl.exe", "-k", "https://kubernetes:443"} - stdout, _, err = f.ExecWithOptions(framework.ExecOptions{ + stdout, _, err = e2epod.ExecWithOptions(f, e2epod.ExecOptions{ Command: cmd, Namespace: f.Namespace.Name, PodName: testPod.Name, diff --git a/test/e2e/windows/gmsa_full.go b/test/e2e/windows/gmsa_full.go index b08be85d904..41d686f862b 100644 --- a/test/e2e/windows/gmsa_full.go +++ b/test/e2e/windows/gmsa_full.go @@ -57,6 +57,7 @@ import ( "k8s.io/apimachinery/pkg/util/uuid" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" + e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" imageutils "k8s.io/kubernetes/test/utils/image" @@ -288,7 +289,7 @@ func retrieveCRDManifestFileContents(f *framework.Framework, node v1.Node) strin }, }, } - f.PodClient().CreateSync(pod) + e2epod.NewPodClient(f).CreateSync(pod) output, err := runKubectlExecInNamespace(f.Namespace.Name, podName, "cmd", "/S", "/C", fmt.Sprintf("type %s", gmsaCrdManifestPath)) if err != nil { @@ -311,10 +312,10 @@ func deployGmsaWebhook(f *framework.Framework) (func(), error) { // regardless of whether the deployment succeeded, let's do a best effort at cleanup cleanUpFunc := func() { framework.Logf("Best effort clean up of the webhook:\n") - stdout, err := framework.RunKubectl("", "delete", "CustomResourceDefinition", "gmsacredentialspecs.windows.k8s.io") + stdout, err := e2ekubectl.RunKubectl("", "delete", "CustomResourceDefinition", "gmsacredentialspecs.windows.k8s.io") framework.Logf("stdout:%s\nerror:%s", stdout, err) - stdout, err = framework.RunKubectl("", "delete", "CertificateSigningRequest", fmt.Sprintf("%s.%s", webHookName, webHookNamespace)) + stdout, err = e2ekubectl.RunKubectl("", "delete", "CertificateSigningRequest", fmt.Sprintf("%s.%s", webHookName, webHookNamespace)) framework.Logf("stdout:%s\nerror:%s", stdout, err) stdout, err = runKubectlExecInNamespace(deployerNamespace, deployerName, "--", "kubectl", "delete", "-f", "/manifests.yml") @@ -362,7 +363,7 @@ func deployGmsaWebhook(f *framework.Framework) (func(), error) { }, }, } - f.PodClient().CreateSync(pod) + e2epod.NewPodClient(f).CreateSync(pod) // Wait for the Webhook deployment to become ready. 
The deployer pod takes a few seconds to initialize and create resources err := waitForDeployment(func() (*appsv1.Deployment, error) { @@ -395,7 +396,7 @@ func createGmsaCustomResource(ns string, crdManifestContents string) (func(), er defer tempFile.Close() cleanUpFunc = func() { - framework.RunKubectl(ns, "delete", "--filename", tempFile.Name()) + e2ekubectl.RunKubectl(ns, "delete", "--filename", tempFile.Name()) os.Remove(tempFile.Name()) } @@ -405,7 +406,7 @@ func createGmsaCustomResource(ns string, crdManifestContents string) (func(), er return cleanUpFunc, err } - output, err := framework.RunKubectl(ns, "apply", "--filename", tempFile.Name()) + output, err := e2ekubectl.RunKubectl(ns, "apply", "--filename", tempFile.Name()) if err != nil { err = fmt.Errorf("unable to create custom resource, output:\n%s: %w", output, err) } @@ -535,14 +536,14 @@ func createPodWithGmsa(f *framework.Framework, serviceAccountName string) string }, }, } - f.PodClient().CreateSync(pod) + e2epod.NewPodClient(f).CreateSync(pod) return podName } func runKubectlExecInNamespace(namespace string, args ...string) (string, error) { namespaceOption := fmt.Sprintf("--namespace=%s", namespace) - return framework.RunKubectl(namespace, append([]string{"exec", namespaceOption}, args...)...) + return e2ekubectl.RunKubectl(namespace, append([]string{"exec", namespaceOption}, args...)...) } func getGmsaDomainIP(f *framework.Framework, podName string) string { diff --git a/test/e2e/windows/gmsa_kubelet.go b/test/e2e/windows/gmsa_kubelet.go index 946516be51d..57c3658612d 100644 --- a/test/e2e/windows/gmsa_kubelet.go +++ b/test/e2e/windows/gmsa_kubelet.go @@ -29,6 +29,8 @@ import ( v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/kubernetes/test/e2e/framework" + e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" imageutils "k8s.io/kubernetes/test/utils/image" admissionapi "k8s.io/pod-security-admission/api" @@ -92,7 +94,7 @@ var _ = SIGDescribe("[Feature:Windows] GMSA Kubelet [Slow]", func() { } ginkgo.By("creating a pod with correct GMSA specs") - f.PodClient().CreateSync(pod) + e2epod.NewPodClient(f).CreateSync(pod) ginkgo.By("checking the domain reported by nltest in the containers") namespaceOption := fmt.Sprintf("--namespace=%s", f.Namespace.Name) @@ -110,7 +112,7 @@ var _ = SIGDescribe("[Feature:Windows] GMSA Kubelet [Slow]", func() { // note that the "eventually" part seems to be needed to account for the fact that powershell containers // are a bit slow to become responsive, even when docker reports them as running. 
gomega.Eventually(func() bool { - output, err = framework.RunKubectl(f.Namespace.Name, "exec", namespaceOption, podName, containerOption, "--", "nltest", "/PARENTDOMAIN") + output, err = e2ekubectl.RunKubectl(f.Namespace.Name, "exec", namespaceOption, podName, containerOption, "--", "nltest", "/PARENTDOMAIN") return err == nil }, 1*time.Minute, 1*time.Second).Should(gomega.BeTrue()) diff --git a/test/e2e/windows/host_process.go b/test/e2e/windows/host_process.go index e1cfd156e65..c277baea55d 100644 --- a/test/e2e/windows/host_process.go +++ b/test/e2e/windows/host_process.go @@ -122,10 +122,10 @@ var _ = SIGDescribe("[Feature:WindowsHostProcessContainers] [MinimumKubeletVersi }, } - f.PodClient().Create(pod) + e2epod.NewPodClient(f).Create(pod) ginkgo.By("Waiting for pod to run") - f.PodClient().WaitForFinish(podName, 3*time.Minute) + e2epod.NewPodClient(f).WaitForFinish(podName, 3*time.Minute) ginkgo.By("Then ensuring pod finished running successfully") p, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get( @@ -174,10 +174,10 @@ var _ = SIGDescribe("[Feature:WindowsHostProcessContainers] [MinimumKubeletVersi }, } - f.PodClient().Create(pod) + e2epod.NewPodClient(f).Create(pod) ginkgo.By("Waiting for pod to run") - f.PodClient().WaitForFinish(podName, 3*time.Minute) + e2epod.NewPodClient(f).WaitForFinish(podName, 3*time.Minute) ginkgo.By("Then ensuring pod finished running successfully") p, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get( @@ -412,10 +412,10 @@ var _ = SIGDescribe("[Feature:WindowsHostProcessContainers] [MinimumKubeletVersi }, }, } - f.PodClient().Create(pod) + e2epod.NewPodClient(f).Create(pod) ginkgo.By(fmt.Sprintf("Waiting for pod '%s' to run", podName)) - f.PodClient().WaitForFinish(podName, 3*time.Minute) + e2epod.NewPodClient(f).WaitForFinish(podName, 3*time.Minute) ginkgo.By("Then ensuring pod finished running successfully") p, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get( @@ -487,10 +487,10 @@ var _ = SIGDescribe("[Feature:WindowsHostProcessContainers] [MinimumKubeletVersi podAndContainerName := "host-process-volume-mounts" pod := makeTestPodWithVolumeMounts(podAndContainerName) - f.PodClient().Create(pod) + e2epod.NewPodClient(f).Create(pod) ginkgo.By("Waiting for pod to run") - f.PodClient().WaitForFinish(podAndContainerName, 3*time.Minute) + e2epod.NewPodClient(f).WaitForFinish(podAndContainerName, 3*time.Minute) logs, err := e2epod.GetPodLogs(f.ClientSet, ns.Name, podAndContainerName, podAndContainerName) framework.ExpectNoError(err, "Error getting pod logs") @@ -552,8 +552,8 @@ var _ = SIGDescribe("[Feature:WindowsHostProcessContainers] [MinimumKubeletVersi }, } - f.PodClient().Create(pod) - f.PodClient().WaitForFinish(podName, 3*time.Minute) + e2epod.NewPodClient(f).Create(pod) + e2epod.NewPodClient(f).WaitForFinish(podName, 3*time.Minute) ginkgo.By("Scheduling a pod with a HostProcess container that will fail") podName = "host-process-metrics-pod-failing-container" @@ -581,8 +581,8 @@ var _ = SIGDescribe("[Feature:WindowsHostProcessContainers] [MinimumKubeletVersi }, } - f.PodClient().Create(pod) - f.PodClient().WaitForFinish(podName, 3*time.Minute) + e2epod.NewPodClient(f).Create(pod) + e2epod.NewPodClient(f).WaitForFinish(podName, 3*time.Minute) ginkgo.By("Getting subsequent kubelet metrics values") diff --git a/test/e2e/windows/hybrid_network.go b/test/e2e/windows/hybrid_network.go index 4700423e3b6..ba501a557dc 100644 --- a/test/e2e/windows/hybrid_network.go +++ b/test/e2e/windows/hybrid_network.go @@ -23,6 +23,7 @@ import ( metav1 
"k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/kubernetes/test/e2e/framework" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" admissionapi "k8s.io/pod-security-admission/api" @@ -56,13 +57,13 @@ var _ = SIGDescribe("Hybrid cluster network", func() { linuxPod := createTestPod(f, linuxBusyBoxImage, linuxOS) ginkgo.By("creating a linux pod and waiting for it to be running") - linuxPod = f.PodClient().CreateSync(linuxPod) + linuxPod = e2epod.NewPodClient(f).CreateSync(linuxPod) windowsPod := createTestPod(f, windowsBusyBoximage, windowsOS) windowsPod.Spec.Containers[0].Args = []string{"test-webserver"} ginkgo.By("creating a windows pod and waiting for it to be running") - windowsPod = f.PodClient().CreateSync(windowsPod) + windowsPod = e2epod.NewPodClient(f).CreateSync(windowsPod) ginkgo.By("verifying pod internal connectivity to the cluster dataplane") @@ -77,7 +78,7 @@ var _ = SIGDescribe("Hybrid cluster network", func() { ginkgo.It("should provide Internet connection for Linux containers using DNS [Feature:Networking-DNS]", func() { linuxPod := createTestPod(f, linuxBusyBoxImage, linuxOS) ginkgo.By("creating a linux pod and waiting for it to be running") - linuxPod = f.PodClient().CreateSync(linuxPod) + linuxPod = e2epod.NewPodClient(f).CreateSync(linuxPod) ginkgo.By("verifying pod external connectivity to the internet") @@ -88,7 +89,7 @@ var _ = SIGDescribe("Hybrid cluster network", func() { ginkgo.It("should provide Internet connection for Windows containers using DNS [Feature:Networking-DNS]", func() { windowsPod := createTestPod(f, windowsBusyBoximage, windowsOS) ginkgo.By("creating a windows pod and waiting for it to be running") - windowsPod = f.PodClient().CreateSync(windowsPod) + windowsPod = e2epod.NewPodClient(f).CreateSync(windowsPod) ginkgo.By("verifying pod external connectivity to the internet") @@ -109,7 +110,7 @@ func assertConsistentConnectivity(f *framework.Framework, podName string, os str connChecker := func() error { ginkgo.By(fmt.Sprintf("checking connectivity of %s-container in %s", os, podName)) // TODO, we should be retrying this similar to what is done in DialFromNode, in the test/e2e/networking/networking.go tests - stdout, stderr, err := f.ExecCommandInContainerWithFullOutput(podName, os+"-container", cmd...) + stdout, stderr, err := e2epod.ExecCommandInContainerWithFullOutput(f, podName, os+"-container", cmd...) 
if err != nil { framework.Logf("Encountered error while running command: %v.\nStdout: %s\nStderr: %s\nErr: %v", cmd, stdout, stderr, err) } diff --git a/test/e2e/windows/kubelet_stats.go b/test/e2e/windows/kubelet_stats.go index baa313c7b22..300fdc3ac7b 100644 --- a/test/e2e/windows/kubelet_stats.go +++ b/test/e2e/windows/kubelet_stats.go @@ -54,7 +54,7 @@ var _ = SIGDescribe("[Feature:Windows] Kubelet-Stats [Serial]", func() { ginkgo.By("Scheduling 10 pods") powershellImage := imageutils.GetConfig(imageutils.BusyBox) pods := newKubeletStatsTestPods(10, powershellImage, targetNode.Name) - f.PodClient().CreateBatch(pods) + e2epod.NewPodClient(f).CreateBatch(pods) ginkgo.By("Waiting up to 3 minutes for pods to be running") timeout := 3 * time.Minute @@ -144,7 +144,7 @@ var _ = SIGDescribe("[Feature:Windows] Kubelet-Stats", func() { ginkgo.By("Scheduling 3 pods") powershellImage := imageutils.GetConfig(imageutils.BusyBox) pods := newKubeletStatsTestPods(3, powershellImage, targetNode.Name) - f.PodClient().CreateBatch(pods) + e2epod.NewPodClient(f).CreateBatch(pods) ginkgo.By("Waiting up to 3 minutes for pods to be running") timeout := 3 * time.Minute diff --git a/test/e2e/windows/reboot_node.go b/test/e2e/windows/reboot_node.go index f536b8fc46f..e8ebe32f151 100644 --- a/test/e2e/windows/reboot_node.go +++ b/test/e2e/windows/reboot_node.go @@ -27,6 +27,7 @@ import ( "k8s.io/apimachinery/pkg/util/uuid" podutil "k8s.io/kubernetes/pkg/api/v1/pod" "k8s.io/kubernetes/test/e2e/framework" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" imageutils "k8s.io/kubernetes/test/utils/image" admissionapi "k8s.io/pod-security-admission/api" @@ -73,7 +74,7 @@ var _ = SIGDescribe("[Feature:Windows] [Excluded:WindowsDocker] [MinimumKubeletV } agnPod.Spec.Containers[0].Args = []string{"test-webserver"} ginkgo.By("creating a windows pod and waiting for it to be running") - agnPod = f.PodClient().CreateSync(agnPod) + agnPod = e2epod.NewPodClient(f).CreateSync(agnPod) // Create Linux pod to ping the windows pod linuxBusyBoxImage := imageutils.GetE2EImage(imageutils.Nginx) @@ -106,7 +107,7 @@ var _ = SIGDescribe("[Feature:Windows] [Excluded:WindowsDocker] [MinimumKubeletV }, } ginkgo.By("Waiting for the Linux pod to run") - nginxPod = f.PodClient().CreateSync(nginxPod) + nginxPod = e2epod.NewPodClient(f).CreateSync(nginxPod) ginkgo.By("checking connectivity to 8.8.8.8 53 (google.com) from Linux") assertConsistentConnectivity(f, nginxPod.ObjectMeta.Name, "linux", linuxCheck("8.8.8.8", 53)) @@ -155,10 +156,10 @@ var _ = SIGDescribe("[Feature:Windows] [Excluded:WindowsDocker] [MinimumKubeletV }, } - f.PodClient().Create(pod) + e2epod.NewPodClient(f).Create(pod) ginkgo.By("Waiting for pod to run") - f.PodClient().WaitForFinish(podName, 3*time.Minute) + e2epod.NewPodClient(f).WaitForFinish(podName, 3*time.Minute) ginkgo.By("Then ensuring pod finished running successfully") p, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get( @@ -238,10 +239,10 @@ var _ = SIGDescribe("[Feature:Windows] [Excluded:WindowsDocker] [MinimumKubeletV }, } - f.PodClient().Create(checkPod) + e2epod.NewPodClient(f).Create(checkPod) ginkgo.By("Waiting for pod to run") - f.PodClient().WaitForFinish("check-reboot-pod", 3*time.Minute) + e2epod.NewPodClient(f).WaitForFinish("check-reboot-pod", 3*time.Minute) ginkgo.By("Then ensuring pod finished running successfully") p, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get( diff --git a/test/e2e/windows/security_context.go 
b/test/e2e/windows/security_context.go index 8f9e36c616d..7c61e1324a1 100644 --- a/test/e2e/windows/security_context.go +++ b/test/e2e/windows/security_context.go @@ -32,6 +32,7 @@ import ( "k8s.io/kubernetes/pkg/kubelet/events" "k8s.io/kubernetes/test/e2e/framework" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" + e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output" testutils "k8s.io/kubernetes/test/utils" imageutils "k8s.io/kubernetes/test/utils/image" admissionapi "k8s.io/pod-security-admission/api" @@ -46,15 +47,15 @@ var _ = SIGDescribe("[Feature:Windows] SecurityContext", func() { ginkgo.It("should be able create pods and run containers with a given username", func() { ginkgo.By("Creating 2 pods: 1 with the default user, and one with a custom one.") podDefault := runAsUserNamePod(nil) - f.TestContainerOutput("check default user", podDefault, 0, []string{"ContainerUser"}) + e2eoutput.TestContainerOutput(f, "check default user", podDefault, 0, []string{"ContainerUser"}) podUserName := runAsUserNamePod(toPtr("ContainerAdministrator")) - f.TestContainerOutput("check set user", podUserName, 0, []string{"ContainerAdministrator"}) + e2eoutput.TestContainerOutput(f, "check set user", podUserName, 0, []string{"ContainerAdministrator"}) }) ginkgo.It("should not be able to create pods with unknown usernames at Pod level", func() { ginkgo.By("Creating a pod with an invalid username") - podInvalid := f.PodClient().Create(runAsUserNamePod(toPtr("FooLish"))) + podInvalid := e2epod.NewPodClient(f).Create(runAsUserNamePod(toPtr("FooLish"))) failedSandboxEventSelector := fields.Set{ "involvedObject.kind": "Pod", @@ -82,7 +83,7 @@ var _ = SIGDescribe("[Feature:Windows] SecurityContext", func() { } framework.Logf("No Sandbox error found. Looking for failure in workload pods") - pod, err := f.PodClient().Get(context.Background(), podInvalid.Name, metav1.GetOptions{}) + pod, err := e2epod.NewPodClient(f).Get(context.Background(), podInvalid.Name, metav1.GetOptions{}) if err != nil { framework.Logf("Error retrieving pod: %s", err) return false @@ -103,12 +104,12 @@ var _ = SIGDescribe("[Feature:Windows] SecurityContext", func() { ginkgo.By("Creating a pod with an invalid username at container level and pod running as ContainerUser") p := runAsUserNamePod(toPtr("FooLish")) p.Spec.SecurityContext.WindowsOptions.RunAsUserName = toPtr("ContainerUser") - podInvalid := f.PodClient().Create(p) + podInvalid := e2epod.NewPodClient(f).Create(p) framework.Logf("Waiting for pod %s to enter the error state.", podInvalid.Name) framework.ExpectNoError(e2epod.WaitForPodTerminatedInNamespace(f.ClientSet, podInvalid.Name, "", f.Namespace.Name)) - podInvalid, _ = f.PodClient().Get(context.TODO(), podInvalid.Name, metav1.GetOptions{}) + podInvalid, _ = e2epod.NewPodClient(f).Get(context.TODO(), podInvalid.Name, metav1.GetOptions{}) podTerminatedReason := testutils.TerminatedContainers(podInvalid)[runAsUserNameContainerName] if podTerminatedReason != "ContainerCannotRun" && podTerminatedReason != "StartError" { framework.Failf("The container terminated reason was supposed to be: 'ContainerCannotRun' or 'StartError', not: '%q'", podTerminatedReason) @@ -126,8 +127,8 @@ var _ = SIGDescribe("[Feature:Windows] SecurityContext", func() { Command: []string{"cmd", "/S", "/C", "echo %username%"}, }) - f.TestContainerOutput("check overridden username", pod, 0, []string{"ContainerUser"}) - f.TestContainerOutput("check pod SecurityContext username", pod, 1, []string{"ContainerAdministrator"}) + e2eoutput.TestContainerOutput(f, 
"check overridden username", pod, 0, []string{"ContainerUser"}) + e2eoutput.TestContainerOutput(f, "check pod SecurityContext username", pod, 1, []string{"ContainerAdministrator"}) }) ginkgo.It("should ignore Linux Specific SecurityContext if set", func() { @@ -164,7 +165,7 @@ var _ = SIGDescribe("[Feature:Windows] SecurityContext", func() { framework.ExpectNoError(err, "Error creating pod") ginkgo.By("Waiting for pod to finish") - event, err := f.PodClient().WaitForErrorEventOrSuccess(podInvalid) + event, err := e2epod.NewPodClient(f).WaitForErrorEventOrSuccess(podInvalid) framework.ExpectNoError(err) framework.ExpectNotEqual(event, nil, "event should not be empty") framework.Logf("Got event: %v", event) @@ -182,7 +183,7 @@ var _ = SIGDescribe("[Feature:Windows] SecurityContext", func() { framework.ExpectNoError(err, "Error creating pod") ginkgo.By("Waiting for pod to finish") - event, err := f.PodClient().WaitForErrorEventOrSuccess(podInvalid) + event, err := e2epod.NewPodClient(f).WaitForErrorEventOrSuccess(podInvalid) framework.ExpectNoError(err) framework.ExpectNotEqual(event, nil, "event should not be empty") framework.Logf("Got event: %v", event) diff --git a/test/e2e/windows/service.go b/test/e2e/windows/service.go index e511b5f632f..3b7de33834e 100644 --- a/test/e2e/windows/service.go +++ b/test/e2e/windows/service.go @@ -26,6 +26,7 @@ import ( "k8s.io/kubernetes/test/e2e/framework" e2enode "k8s.io/kubernetes/test/e2e/framework/node" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2eservice "k8s.io/kubernetes/test/e2e/framework/service" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" admissionapi "k8s.io/pod-security-admission/api" @@ -73,7 +74,7 @@ var _ = SIGDescribe("Services", func() { //using hybrid_network methods ginkgo.By("creating Windows testing Pod") testPod := createTestPod(f, windowsBusyBoximage, windowsOS) - testPod = f.PodClient().CreateSync(testPod) + testPod = e2epod.NewPodClient(f).CreateSync(testPod) ginkgo.By("verifying that pod has the correct nodeSelector") // Admission controllers may sometimes do the wrong thing diff --git a/test/e2e/windows/volumes.go b/test/e2e/windows/volumes.go index 6c08a50ed41..8891c2638b6 100644 --- a/test/e2e/windows/volumes.go +++ b/test/e2e/windows/volumes.go @@ -23,6 +23,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/kubernetes/test/e2e/framework" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" imageutils "k8s.io/kubernetes/test/utils/image" admissionapi "k8s.io/pod-security-admission/api" @@ -96,14 +97,14 @@ func doReadOnlyTest(f *framework.Framework, source v1.VolumeSource, volumePath s "kubernetes.io/os": "windows", } - pod = f.PodClient().CreateSync(pod) + pod = e2epod.NewPodClient(f).CreateSync(pod) ginkgo.By("verifying that pod has the correct nodeSelector") framework.ExpectEqual(pod.Spec.NodeSelector["kubernetes.io/os"], "windows") cmd := []string{"cmd", "/c", "echo windows-volume-test", ">", filePath} ginkgo.By("verifying that pod will get an error when writing to a volume that is readonly") - _, stderr, _ := f.ExecCommandInContainerWithFullOutput(podName, containerName, cmd...) + _, stderr, _ := e2epod.ExecCommandInContainerWithFullOutput(f, podName, containerName, cmd...) 
framework.ExpectEqual(stderr, "Access is denied.") } @@ -130,24 +131,24 @@ func doReadWriteReadOnlyTest(f *framework.Framework, source v1.VolumeSource, vol } pod.Spec.Containers = append(pod.Spec.Containers, rwcontainer) - pod = f.PodClient().CreateSync(pod) + pod = e2epod.NewPodClient(f).CreateSync(pod) ginkgo.By("verifying that pod has the correct nodeSelector") framework.ExpectEqual(pod.Spec.NodeSelector["kubernetes.io/os"], "windows") ginkgo.By("verifying that pod can write to a volume with read/write access") writecmd := []string{"cmd", "/c", "echo windows-volume-test", ">", filePath} - stdoutRW, stderrRW, errRW := f.ExecCommandInContainerWithFullOutput(podName, rwcontainerName, writecmd...) + stdoutRW, stderrRW, errRW := e2epod.ExecCommandInContainerWithFullOutput(f, podName, rwcontainerName, writecmd...) msg := fmt.Sprintf("cmd: %v, stdout: %q, stderr: %q", writecmd, stdoutRW, stderrRW) framework.ExpectNoError(errRW, msg) ginkgo.By("verifying that pod will get an error when writing to a volume that is readonly") - _, stderr, _ := f.ExecCommandInContainerWithFullOutput(podName, containerName, writecmd...) + _, stderr, _ := e2epod.ExecCommandInContainerWithFullOutput(f, podName, containerName, writecmd...) framework.ExpectEqual(stderr, "Access is denied.") ginkgo.By("verifying that pod can read from a volume that is readonly") readcmd := []string{"cmd", "/c", "type", filePath} - readout, readerr, err := f.ExecCommandInContainerWithFullOutput(podName, containerName, readcmd...) + readout, readerr, err := e2epod.ExecCommandInContainerWithFullOutput(f, podName, containerName, readcmd...) readmsg := fmt.Sprintf("cmd: %v, stdout: %q, stderr: %q", readcmd, readout, readerr) framework.ExpectEqual(readout, "windows-volume-test") framework.ExpectNoError(err, readmsg) diff --git a/test/e2e_kubeadm/bootstrap_token_test.go b/test/e2e_kubeadm/bootstrap_token_test.go index cf8da161d7a..525f597f14a 100644 --- a/test/e2e_kubeadm/bootstrap_token_test.go +++ b/test/e2e_kubeadm/bootstrap_token_test.go @@ -18,6 +18,7 @@ package kubeadm import ( "context" + corev1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" diff --git a/test/e2e_kubeadm/controlplane_nodes_test.go b/test/e2e_kubeadm/controlplane_nodes_test.go index 2f54c0a99b2..845ad615efa 100644 --- a/test/e2e_kubeadm/controlplane_nodes_test.go +++ b/test/e2e_kubeadm/controlplane_nodes_test.go @@ -24,6 +24,7 @@ import ( "k8s.io/apimachinery/pkg/labels" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" + e2enode "k8s.io/kubernetes/test/e2e/framework/node" admissionapi "k8s.io/pod-security-admission/api" "github.com/onsi/ginkgo/v2" @@ -57,7 +58,7 @@ var _ = Describe("control-plane node", func() { // checks that the control-plane nodes have the expected taints for _, cp := range controlPlanes.Items { - framework.ExpectNodeHasTaint(f.ClientSet, cp.GetName(), &corev1.Taint{Key: controlPlaneLabel, Effect: corev1.TaintEffectNoSchedule}) + e2enode.ExpectNodeHasTaint(f.ClientSet, cp.GetName(), &corev1.Taint{Key: controlPlaneLabel, Effect: corev1.TaintEffectNoSchedule}) } }) }) diff --git a/test/e2e_kubeadm/util.go b/test/e2e_kubeadm/util.go index 94bde52745b..7f8688a8fed 100644 --- a/test/e2e_kubeadm/util.go +++ b/test/e2e_kubeadm/util.go @@ -18,6 +18,7 @@ package kubeadm import ( "context" + appsv1 "k8s.io/api/apps/v1" authv1 "k8s.io/api/authorization/v1" corev1 "k8s.io/api/core/v1" diff --git a/test/e2e_node/apparmor_test.go b/test/e2e_node/apparmor_test.go index 
fea1653791b..72a4d811e02 100644 --- a/test/e2e_node/apparmor_test.go +++ b/test/e2e_node/apparmor_test.go @@ -161,11 +161,11 @@ func runAppArmorTest(f *framework.Framework, shouldRun bool, profile string) v1. w := &cache.ListWatch{ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { options.FieldSelector = fieldSelector - return f.PodClient().List(context.TODO(), options) + return e2epod.NewPodClient(f).List(context.TODO(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { options.FieldSelector = fieldSelector - return f.PodClient().Watch(context.TODO(), options) + return e2epod.NewPodClient(f).Watch(context.TODO(), options) }, } preconditionFunc := func(store cache.Store) (bool, error) { @@ -202,7 +202,7 @@ func runAppArmorTest(f *framework.Framework, shouldRun bool, profile string) v1. }) framework.ExpectNoError(err) } - p, err := f.PodClient().Get(context.TODO(), pod.Name, metav1.GetOptions{}) + p, err := e2epod.NewPodClient(f).Get(context.TODO(), pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err) return p.Status } @@ -224,7 +224,7 @@ func createPodWithAppArmor(f *framework.Framework, profile string) *v1.Pod { RestartPolicy: v1.RestartPolicyNever, }, } - return f.PodClient().Create(pod) + return e2epod.NewPodClient(f).Create(pod) } func expectSoftRejection(status v1.PodStatus) { diff --git a/test/e2e_node/benchmark_util.go b/test/e2e_node/benchmark_util.go index a9a298af702..fafb9eaf07d 100644 --- a/test/e2e_node/benchmark_util.go +++ b/test/e2e_node/benchmark_util.go @@ -28,7 +28,7 @@ import ( "strconv" "time" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/kubernetes/test/e2e/framework" e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics" diff --git a/test/e2e_node/checkpoint_container.go b/test/e2e_node/checkpoint_container.go index a89260ab55b..0adfac8d264 100644 --- a/test/e2e_node/checkpoint_container.go +++ b/test/e2e_node/checkpoint_container.go @@ -30,6 +30,7 @@ import ( clientset "k8s.io/client-go/kubernetes" restclient "k8s.io/client-go/rest" "k8s.io/kubernetes/test/e2e/framework" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" testutils "k8s.io/kubernetes/test/utils" imageutils "k8s.io/kubernetes/test/utils/image" admissionapi "k8s.io/pod-security-admission/api" @@ -68,7 +69,7 @@ var _ = SIGDescribe("Checkpoint Container [NodeFeature:CheckpointContainer]", fu f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline ginkgo.It("will checkpoint a container out of a pod", func() { ginkgo.By("creating a target pod") - podClient := f.PodClient() + podClient := e2epod.NewPodClient(f) pod := podClient.CreateSync(&v1.Pod{ ObjectMeta: metav1.ObjectMeta{Name: "checkpoint-container-pod"}, Spec: v1.PodSpec{ diff --git a/test/e2e_node/container_log_rotation_test.go b/test/e2e_node/container_log_rotation_test.go index 485aac08db3..951b63ada1f 100644 --- a/test/e2e_node/container_log_rotation_test.go +++ b/test/e2e_node/container_log_rotation_test.go @@ -25,6 +25,7 @@ import ( kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" kubelogs "k8s.io/kubernetes/pkg/kubelet/logs" "k8s.io/kubernetes/test/e2e/framework" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" admissionapi "k8s.io/pod-security-admission/api" "github.com/onsi/ginkgo/v2" @@ -70,7 +71,7 @@ var _ = SIGDescribe("ContainerLogRotation [Slow] [Serial] [Disruptive]", func() }, }, } - pod = f.PodClient().CreateSync(pod) + pod = e2epod.NewPodClient(f).CreateSync(pod) ginkgo.By("get container log 
path") framework.ExpectEqual(len(pod.Status.ContainerStatuses), 1) id := kubecontainer.ParseContainerID(pod.Status.ContainerStatuses[0].ContainerID).ID diff --git a/test/e2e_node/container_manager_test.go b/test/e2e_node/container_manager_test.go index cad50e9cffe..bfba5a66d20 100644 --- a/test/e2e_node/container_manager_test.go +++ b/test/e2e_node/container_manager_test.go @@ -34,6 +34,7 @@ import ( "k8s.io/apimachinery/pkg/util/uuid" runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1" "k8s.io/kubernetes/test/e2e/framework" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" imageutils "k8s.io/kubernetes/test/utils/image" admissionapi "k8s.io/pod-security-admission/api" @@ -106,7 +107,7 @@ var _ = SIGDescribe("Container Manager Misc [Serial]", func() { framework.ExpectNoError(err, "failed to list all pause processes on the node") existingPausePIDSet := sets.NewInt(existingPausePIDs...) - podClient := f.PodClient() + podClient := e2epod.NewPodClient(f) podName := "besteffort" + string(uuid.NewUUID()) podClient.Create(&v1.Pod{ ObjectMeta: metav1.ObjectMeta{ @@ -173,7 +174,7 @@ var _ = SIGDescribe("Container Manager Misc [Serial]", func() { }) }) ginkgo.It("guaranteed container's oom-score-adj should be -998", func() { - podClient := f.PodClient() + podClient := e2epod.NewPodClient(f) podName := "guaranteed" + string(uuid.NewUUID()) podClient.Create(&v1.Pod{ ObjectMeta: metav1.ObjectMeta{ @@ -214,7 +215,7 @@ var _ = SIGDescribe("Container Manager Misc [Serial]", func() { }) ginkgo.It("burstable container's oom-score-adj should be between [2, 1000)", func() { - podClient := f.PodClient() + podClient := e2epod.NewPodClient(f) podName := "burstable" + string(uuid.NewUUID()) podClient.Create(&v1.Pod{ ObjectMeta: metav1.ObjectMeta{ diff --git a/test/e2e_node/cpu_manager_test.go b/test/e2e_node/cpu_manager_test.go index 4f2e24c6373..12c95352883 100644 --- a/test/e2e_node/cpu_manager_test.go +++ b/test/e2e_node/cpu_manager_test.go @@ -89,7 +89,7 @@ func deletePodSyncByName(f *framework.Framework, podName string) { delOpts := metav1.DeleteOptions{ GracePeriodSeconds: &gp, } - f.PodClient().DeleteSync(podName, delOpts, framework.DefaultPodDeletionTimeout) + e2epod.NewPodClient(f).DeleteSync(podName, delOpts, e2epod.DefaultPodDeletionTimeout) } func deletePods(f *framework.Framework, podNames []string) { @@ -243,7 +243,7 @@ func runGuPodTest(f *framework.Framework, cpuCount int) { }, } pod = makeCPUManagerPod("gu-pod", ctnAttrs) - pod = f.PodClient().CreateSync(pod) + pod = e2epod.NewPodClient(f).CreateSync(pod) ginkgo.By("checking if the expected cpuset was assigned") // any full CPU is fine - we cannot nor we should predict which one, though @@ -279,7 +279,7 @@ func runNonGuPodTest(f *framework.Framework, cpuCap int64) { }, } pod = makeCPUManagerPod("non-gu-pod", ctnAttrs) - pod = f.PodClient().CreateSync(pod) + pod = e2epod.NewPodClient(f).CreateSync(pod) ginkgo.By("checking if the expected cpuset was assigned") expAllowedCPUsListRegex = fmt.Sprintf("^0-%d\n$", cpuCap-1) @@ -287,7 +287,7 @@ func runNonGuPodTest(f *framework.Framework, cpuCap int64) { if cpuCap == 1 { expAllowedCPUsListRegex = "^0\n$" } - err = f.PodClient().MatchContainerOutput(pod.Name, pod.Spec.Containers[0].Name, expAllowedCPUsListRegex) + err = e2epod.NewPodClient(f).MatchContainerOutput(pod.Name, pod.Spec.Containers[0].Name, expAllowedCPUsListRegex) framework.ExpectNoError(err, "expected log not found in container [%s] of pod [%s]", pod.Spec.Containers[0].Name, pod.Name) @@ -313,7 +313,7 @@ func runMultipleGuNonGuPods(f 
*framework.Framework, cpuCap int64, cpuAlloc int64 }, } pod1 = makeCPUManagerPod("gu-pod", ctnAttrs) - pod1 = f.PodClient().CreateSync(pod1) + pod1 = e2epod.NewPodClient(f).CreateSync(pod1) ctnAttrs = []ctnAttribute{ { @@ -323,7 +323,7 @@ func runMultipleGuNonGuPods(f *framework.Framework, cpuCap int64, cpuAlloc int64 }, } pod2 = makeCPUManagerPod("non-gu-pod", ctnAttrs) - pod2 = f.PodClient().CreateSync(pod2) + pod2 = e2epod.NewPodClient(f).CreateSync(pod2) ginkgo.By("checking if the expected cpuset was assigned") cpu1 = 1 @@ -337,7 +337,7 @@ func runMultipleGuNonGuPods(f *framework.Framework, cpuCap int64, cpuAlloc int64 } } expAllowedCPUsListRegex = fmt.Sprintf("^%d\n$", cpu1) - err = f.PodClient().MatchContainerOutput(pod1.Name, pod1.Spec.Containers[0].Name, expAllowedCPUsListRegex) + err = e2epod.NewPodClient(f).MatchContainerOutput(pod1.Name, pod1.Spec.Containers[0].Name, expAllowedCPUsListRegex) framework.ExpectNoError(err, "expected log not found in container [%s] of pod [%s]", pod1.Spec.Containers[0].Name, pod1.Name) @@ -347,7 +347,7 @@ func runMultipleGuNonGuPods(f *framework.Framework, cpuCap int64, cpuAlloc int64 cpuListString = fmt.Sprintf("%s", cset.Difference(cpuset.NewCPUSet(cpu1))) } expAllowedCPUsListRegex = fmt.Sprintf("^%s\n$", cpuListString) - err = f.PodClient().MatchContainerOutput(pod2.Name, pod2.Spec.Containers[0].Name, expAllowedCPUsListRegex) + err = e2epod.NewPodClient(f).MatchContainerOutput(pod2.Name, pod2.Spec.Containers[0].Name, expAllowedCPUsListRegex) framework.ExpectNoError(err, "expected log not found in container [%s] of pod [%s]", pod2.Spec.Containers[0].Name, pod2.Name) ginkgo.By("by deleting the pods and waiting for container removal") @@ -372,7 +372,7 @@ func runMultipleCPUGuPod(f *framework.Framework) { }, } pod = makeCPUManagerPod("gu-pod", ctnAttrs) - pod = f.PodClient().CreateSync(pod) + pod = e2epod.NewPodClient(f).CreateSync(pod) ginkgo.By("checking if the expected cpuset was assigned") cpuListString = "1-2" @@ -394,7 +394,7 @@ func runMultipleCPUGuPod(f *framework.Framework) { } } expAllowedCPUsListRegex = fmt.Sprintf("^%s\n$", cpuListString) - err = f.PodClient().MatchContainerOutput(pod.Name, pod.Spec.Containers[0].Name, expAllowedCPUsListRegex) + err = e2epod.NewPodClient(f).MatchContainerOutput(pod.Name, pod.Spec.Containers[0].Name, expAllowedCPUsListRegex) framework.ExpectNoError(err, "expected log not found in container [%s] of pod [%s]", pod.Spec.Containers[0].Name, pod.Name) @@ -423,7 +423,7 @@ func runMultipleCPUContainersGuPod(f *framework.Framework) { }, } pod = makeCPUManagerPod("gu-pod", ctnAttrs) - pod = f.PodClient().CreateSync(pod) + pod = e2epod.NewPodClient(f).CreateSync(pod) ginkgo.By("checking if the expected cpuset was assigned") cpu1, cpu2 = 1, 2 @@ -445,11 +445,11 @@ func runMultipleCPUContainersGuPod(f *framework.Framework) { } } expAllowedCPUsListRegex = fmt.Sprintf("^%d|%d\n$", cpu1, cpu2) - err = f.PodClient().MatchContainerOutput(pod.Name, pod.Spec.Containers[0].Name, expAllowedCPUsListRegex) + err = e2epod.NewPodClient(f).MatchContainerOutput(pod.Name, pod.Spec.Containers[0].Name, expAllowedCPUsListRegex) framework.ExpectNoError(err, "expected log not found in container [%s] of pod [%s]", pod.Spec.Containers[0].Name, pod.Name) - err = f.PodClient().MatchContainerOutput(pod.Name, pod.Spec.Containers[1].Name, expAllowedCPUsListRegex) + err = e2epod.NewPodClient(f).MatchContainerOutput(pod.Name, pod.Spec.Containers[1].Name, expAllowedCPUsListRegex) framework.ExpectNoError(err, "expected log not found in container 
[%s] of pod [%s]", pod.Spec.Containers[1].Name, pod.Name) @@ -475,7 +475,7 @@ func runMultipleGuPods(f *framework.Framework) { }, } pod1 = makeCPUManagerPod("gu-pod1", ctnAttrs) - pod1 = f.PodClient().CreateSync(pod1) + pod1 = e2epod.NewPodClient(f).CreateSync(pod1) ctnAttrs = []ctnAttribute{ { @@ -485,7 +485,7 @@ func runMultipleGuPods(f *framework.Framework) { }, } pod2 = makeCPUManagerPod("gu-pod2", ctnAttrs) - pod2 = f.PodClient().CreateSync(pod2) + pod2 = e2epod.NewPodClient(f).CreateSync(pod2) ginkgo.By("checking if the expected cpuset was assigned") cpu1, cpu2 = 1, 2 @@ -507,12 +507,12 @@ func runMultipleGuPods(f *framework.Framework) { } } expAllowedCPUsListRegex = fmt.Sprintf("^%d\n$", cpu1) - err = f.PodClient().MatchContainerOutput(pod1.Name, pod1.Spec.Containers[0].Name, expAllowedCPUsListRegex) + err = e2epod.NewPodClient(f).MatchContainerOutput(pod1.Name, pod1.Spec.Containers[0].Name, expAllowedCPUsListRegex) framework.ExpectNoError(err, "expected log not found in container [%s] of pod [%s]", pod1.Spec.Containers[0].Name, pod1.Name) expAllowedCPUsListRegex = fmt.Sprintf("^%d\n$", cpu2) - err = f.PodClient().MatchContainerOutput(pod2.Name, pod2.Spec.Containers[0].Name, expAllowedCPUsListRegex) + err = e2epod.NewPodClient(f).MatchContainerOutput(pod2.Name, pod2.Spec.Containers[0].Name, expAllowedCPUsListRegex) framework.ExpectNoError(err, "expected log not found in container [%s] of pod [%s]", pod2.Spec.Containers[0].Name, pod2.Name) ginkgo.By("by deleting the pods and waiting for container removal") @@ -594,7 +594,7 @@ func runCPUManagerTests(f *framework.Framework) { }, } pod = makeCPUManagerPod("gu-pod-testremove", ctnAttrs) - pod = f.PodClient().CreateSync(pod) + pod = e2epod.NewPodClient(f).CreateSync(pod) ginkgo.By("checking if the expected cpuset was assigned") cpu1 = 1 @@ -608,7 +608,7 @@ func runCPUManagerTests(f *framework.Framework) { } } expAllowedCPUsListRegex = fmt.Sprintf("^%d\n$", cpu1) - err = f.PodClient().MatchContainerOutput(pod.Name, pod.Spec.Containers[0].Name, expAllowedCPUsListRegex) + err = e2epod.NewPodClient(f).MatchContainerOutput(pod.Name, pod.Spec.Containers[0].Name, expAllowedCPUsListRegex) framework.ExpectNoError(err, "expected log not found in container [%s] of pod [%s]", pod.Spec.Containers[0].Name, pod.Name) @@ -691,7 +691,7 @@ func runSMTAlignmentNegativeTests(f *framework.Framework) { } pod := makeCPUManagerPod("gu-pod", ctnAttrs) // CreateSync would wait for pod to become Ready - which will never happen if production code works as intended! 
- pod = f.PodClient().Create(pod) + pod = e2epod.NewPodClient(f).Create(pod) err := e2epod.WaitForPodCondition(f.ClientSet, f.Namespace.Name, pod.Name, "Failed", 30*time.Second, func(pod *v1.Pod) (bool, error) { if pod.Status.Phase != v1.PodPending { @@ -700,7 +700,7 @@ func runSMTAlignmentNegativeTests(f *framework.Framework) { return false, nil }) framework.ExpectNoError(err) - pod, err = f.PodClient().Get(context.TODO(), pod.Name, metav1.GetOptions{}) + pod, err = e2epod.NewPodClient(f).Get(context.TODO(), pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err) if pod.Status.Phase != v1.PodFailed { @@ -731,7 +731,7 @@ func runSMTAlignmentPositiveTests(f *framework.Framework, smtLevel int) { }, } pod := makeCPUManagerPod("gu-pod", ctnAttrs) - pod = f.PodClient().CreateSync(pod) + pod = e2epod.NewPodClient(f).CreateSync(pod) for _, cnt := range pod.Spec.Containers { ginkgo.By(fmt.Sprintf("validating the container %s on Gu pod %s", cnt.Name, pod.Name)) diff --git a/test/e2e_node/critical_pod_test.go b/test/e2e_node/critical_pod_test.go index e8692de8b7c..c54a437e56b 100644 --- a/test/e2e_node/critical_pod_test.go +++ b/test/e2e_node/critical_pod_test.go @@ -27,6 +27,7 @@ import ( "k8s.io/kubernetes/pkg/apis/scheduling" kubelettypes "k8s.io/kubernetes/pkg/kubelet/types" "k8s.io/kubernetes/test/e2e/framework" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" imageutils "k8s.io/kubernetes/test/utils/image" admissionapi "k8s.io/pod-security-admission/api" @@ -72,8 +73,8 @@ var _ = SIGDescribe("CriticalPod [Serial] [Disruptive] [NodeFeature:CriticalPod] }, node) // Create pods, starting with non-critical so that the critical preempts the other pods. - f.PodClient().CreateBatch([]*v1.Pod{nonCriticalBestEffort, nonCriticalBurstable, nonCriticalGuaranteed}) - f.PodClientNS(kubeapi.NamespaceSystem).CreateSync(criticalPod) + e2epod.NewPodClient(f).CreateBatch([]*v1.Pod{nonCriticalBestEffort, nonCriticalBurstable, nonCriticalGuaranteed}) + e2epod.PodClientNS(f, kubeapi.NamespaceSystem).CreateSync(criticalPod) // Check that non-critical pods other than the besteffort have been evicted updatedPodList, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{}) @@ -88,10 +89,10 @@ var _ = SIGDescribe("CriticalPod [Serial] [Disruptive] [NodeFeature:CriticalPod] }) ginkgo.AfterEach(func() { // Delete Pods - f.PodClient().DeleteSync(guaranteedPodName, metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout) - f.PodClient().DeleteSync(burstablePodName, metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout) - f.PodClient().DeleteSync(bestEffortPodName, metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout) - f.PodClientNS(kubeapi.NamespaceSystem).DeleteSync(criticalPodName, metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout) + e2epod.NewPodClient(f).DeleteSync(guaranteedPodName, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout) + e2epod.NewPodClient(f).DeleteSync(burstablePodName, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout) + e2epod.NewPodClient(f).DeleteSync(bestEffortPodName, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout) + e2epod.PodClientNS(f, kubeapi.NamespaceSystem).DeleteSync(criticalPodName, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout) // Log Events logPodEvents(f) logNodeEvents(f) diff --git a/test/e2e_node/density_test.go b/test/e2e_node/density_test.go index 7d515ae17e3..f4e02a31c0d 100644 --- a/test/e2e_node/density_test.go +++ b/test/e2e_node/density_test.go @@ -44,6 +44,7 @@ import ( 
"k8s.io/kubernetes/test/e2e/framework" e2ekubelet "k8s.io/kubernetes/test/e2e/framework/kubelet" e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" imageutils "k8s.io/kubernetes/test/utils/image" admissionapi "k8s.io/pod-security-admission/api" ) @@ -69,7 +70,7 @@ var _ = SIGDescribe("Density [Serial] [Slow]", func() { ginkgo.BeforeEach(func() { // Start a standalone cadvisor pod using 'createSync', the pod is running when it returns - f.PodClient().CreateSync(getCadvisorPod()) + e2epod.NewPodClient(f).CreateSync(getCadvisorPod()) // Resource collector monitors fine-grain CPU/memory usage by a standalone Cadvisor with // 1s housingkeeping interval rc = NewResourceCollector(containerStatsPollingPeriod) @@ -426,7 +427,7 @@ func runDensitySeqTest(f *framework.Framework, rc *ResourceCollector, testArg de ginkgo.By("Creating a batch of background pods") // CreatBatch is synchronized, all pods are running when it returns - f.PodClient().CreateBatch(bgPods) + e2epod.NewPodClient(f).CreateBatch(bgPods) time.Sleep(sleepBeforeCreatePods) @@ -453,7 +454,7 @@ func createBatchPodWithRateControl(f *framework.Framework, pods []*v1.Pod, inter for i := range pods { pod := pods[i] createTimes[pod.ObjectMeta.Name] = metav1.Now() - go f.PodClient().Create(pod) + go e2epod.NewPodClient(f).Create(pod) time.Sleep(interval) } return createTimes @@ -546,7 +547,7 @@ func createBatchPodSequential(f *framework.Framework, pods []*v1.Pod, podType st for _, pod := range pods { create := metav1.Now() createTimes[pod.Name] = create - p := f.PodClient().Create(pod) + p := e2epod.NewPodClient(f).Create(pod) framework.ExpectNoError(wait.PollImmediate(2*time.Second, framework.PodStartTimeout, podWatchedRunning(watchTimes, p.Name))) e2eLags = append(e2eLags, e2emetrics.PodLatencyData{Name: pod.Name, Latency: watchTimes[pod.Name].Time.Sub(create.Time)}) diff --git a/test/e2e_node/device_manager_test.go b/test/e2e_node/device_manager_test.go index 4624c8712f3..1860f927b20 100644 --- a/test/e2e_node/device_manager_test.go +++ b/test/e2e_node/device_manager_test.go @@ -85,7 +85,7 @@ var _ = SIGDescribe("Device Manager [Serial] [Feature:DeviceManager][NodeFeatur podName := "gu-pod-rec-pre-1" framework.Logf("creating pod %s attrs %v", podName, ctnAttrs) pod := makeTopologyManagerTestPod(podName, ctnAttrs, initCtnAttrs) - pod = f.PodClient().CreateSync(pod) + pod = e2epod.NewPodClient(f).CreateSync(pod) // now we need to simulate a node drain, so we remove all the pods, including the sriov device plugin. 
@@ -131,7 +131,7 @@ var _ = SIGDescribe("Device Manager [Serial] [Feature:DeviceManager][NodeFeatur framework.Logf("creating pod %s attrs %v", podName, ctnAttrs) pod = makeTopologyManagerTestPod(podName, ctnAttrs, initCtnAttrs) - pod = f.PodClient().Create(pod) + pod = e2epod.NewPodClient(f).Create(pod) err = e2epod.WaitForPodCondition(f.ClientSet, f.Namespace.Name, pod.Name, "Failed", 30*time.Second, func(pod *v1.Pod) (bool, error) { if pod.Status.Phase != v1.PodPending { return true, nil @@ -139,7 +139,7 @@ var _ = SIGDescribe("Device Manager [Serial] [Feature:DeviceManager][NodeFeatur return false, nil }) framework.ExpectNoError(err) - pod, err = f.PodClient().Get(context.TODO(), pod.Name, metav1.GetOptions{}) + pod, err = e2epod.NewPodClient(f).Get(context.TODO(), pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err) if pod.Status.Phase != v1.PodFailed { @@ -205,7 +205,7 @@ var _ = SIGDescribe("Device Manager [Serial] [Feature:DeviceManager][NodeFeatur podName := "gu-pod-rec-pre-1" framework.Logf("creating pod %s attrs %v", podName, ctnAttrs) pod := makeTopologyManagerTestPod(podName, ctnAttrs, initCtnAttrs) - pod = f.PodClient().CreateSync(pod) + pod = e2epod.NewPodClient(f).CreateSync(pod) // now we need to simulate a node drain, so we remove all the pods, including the sriov device plugin. diff --git a/test/e2e_node/device_plugin_test.go b/test/e2e_node/device_plugin_test.go index 1a291e7b9d0..63bfc01e803 100644 --- a/test/e2e_node/device_plugin_test.go +++ b/test/e2e_node/device_plugin_test.go @@ -123,7 +123,7 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) { } } dptemplate = dp.DeepCopy() - devicePluginPod = f.PodClient().CreateSync(dp) + devicePluginPod = e2epod.NewPodClient(f).CreateSync(dp) ginkgo.By("Waiting for devices to become available on the local node") gomega.Eventually(func() bool { @@ -143,10 +143,10 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) { ginkgo.AfterEach(func() { ginkgo.By("Deleting the device plugin pod") - f.PodClient().DeleteSync(devicePluginPod.Name, metav1.DeleteOptions{}, time.Minute) + e2epod.NewPodClient(f).DeleteSync(devicePluginPod.Name, metav1.DeleteOptions{}, time.Minute) ginkgo.By("Deleting any Pods created by the test") - l, err := f.PodClient().List(context.TODO(), metav1.ListOptions{}) + l, err := e2epod.NewPodClient(f).List(context.TODO(), metav1.ListOptions{}) framework.ExpectNoError(err) for _, p := range l.Items { if p.Namespace != f.Namespace.Name { @@ -154,7 +154,7 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) { } framework.Logf("Deleting pod: %s", p.Name) - f.PodClient().DeleteSync(p.Name, metav1.DeleteOptions{}, 2*time.Minute) + e2epod.NewPodClient(f).DeleteSync(p.Name, metav1.DeleteOptions{}, 2*time.Minute) } restartKubelet(true) @@ -170,7 +170,7 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) { ginkgo.It("Can schedule a pod that requires a device", func() { podRECMD := "devs=$(ls /tmp/ | egrep '^Dev-[0-9]+$') && echo stub devices: $devs && sleep 60" - pod1 := f.PodClient().CreateSync(makeBusyboxPod(resourceName, podRECMD)) + pod1 := e2epod.NewPodClient(f).CreateSync(makeBusyboxPod(resourceName, podRECMD)) deviceIDRE := "stub devices: (Dev-[0-9]+)" devID1 := parseLog(f, pod1.Name, pod1.Name, deviceIDRE) gomega.Expect(devID1).To(gomega.Not(gomega.Equal(""))) @@ -225,12 +225,12 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) { ginkgo.It("Keeps device plugin assignments across pod and kubelet restarts", func() { 
podRECMD := "devs=$(ls /tmp/ | egrep '^Dev-[0-9]+$') && echo stub devices: $devs && sleep 60" - pod1 := f.PodClient().CreateSync(makeBusyboxPod(resourceName, podRECMD)) + pod1 := e2epod.NewPodClient(f).CreateSync(makeBusyboxPod(resourceName, podRECMD)) deviceIDRE := "stub devices: (Dev-[0-9]+)" devID1 := parseLog(f, pod1.Name, pod1.Name, deviceIDRE) gomega.Expect(devID1).To(gomega.Not(gomega.Equal(""))) - pod1, err := f.PodClient().Get(context.TODO(), pod1.Name, metav1.GetOptions{}) + pod1, err := e2epod.NewPodClient(f).Get(context.TODO(), pod1.Name, metav1.GetOptions{}) framework.ExpectNoError(err) ensurePodContainerRestart(f, pod1.Name, pod1.Name) @@ -243,7 +243,7 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) { restartKubelet(true) ginkgo.By("Wait for node to be ready again") - framework.WaitForAllNodesSchedulable(f.ClientSet, 5*time.Minute) + e2enode.WaitForAllNodesSchedulable(f.ClientSet, 5*time.Minute) ginkgo.By("Validating that assignment is kept") ensurePodContainerRestart(f, pod1.Name, pod1.Name) @@ -254,30 +254,30 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) { ginkgo.It("Keeps device plugin assignments after the device plugin has been re-registered", func() { podRECMD := "devs=$(ls /tmp/ | egrep '^Dev-[0-9]+$') && echo stub devices: $devs && sleep 60" - pod1 := f.PodClient().CreateSync(makeBusyboxPod(resourceName, podRECMD)) + pod1 := e2epod.NewPodClient(f).CreateSync(makeBusyboxPod(resourceName, podRECMD)) deviceIDRE := "stub devices: (Dev-[0-9]+)" devID1 := parseLog(f, pod1.Name, pod1.Name, deviceIDRE) gomega.Expect(devID1).To(gomega.Not(gomega.Equal(""))) - pod1, err := f.PodClient().Get(context.TODO(), pod1.Name, metav1.GetOptions{}) + pod1, err := e2epod.NewPodClient(f).Get(context.TODO(), pod1.Name, metav1.GetOptions{}) framework.ExpectNoError(err) ginkgo.By("Restarting Kubelet") restartKubelet(true) ginkgo.By("Wait for node to be ready again") - framework.WaitForAllNodesSchedulable(f.ClientSet, 5*time.Minute) + e2enode.WaitForAllNodesSchedulable(f.ClientSet, 5*time.Minute) ginkgo.By("Re-Register resources and delete the plugin pod") gp := int64(0) deleteOptions := metav1.DeleteOptions{ GracePeriodSeconds: &gp, } - f.PodClient().DeleteSync(devicePluginPod.Name, deleteOptions, time.Minute) + e2epod.NewPodClient(f).DeleteSync(devicePluginPod.Name, deleteOptions, time.Minute) waitForContainerRemoval(devicePluginPod.Spec.Containers[0].Name, devicePluginPod.Name, devicePluginPod.Namespace) ginkgo.By("Recreating the plugin pod") - devicePluginPod = f.PodClient().CreateSync(dptemplate) + devicePluginPod = e2epod.NewPodClient(f).CreateSync(dptemplate) ginkgo.By("Confirming that after a kubelet and pod restart, fake-device assignment is kept") ensurePodContainerRestart(f, pod1.Name, pod1.Name) @@ -293,7 +293,7 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) { }, 30*time.Second, framework.Poll).Should(gomega.BeTrue()) ginkgo.By("Creating another pod") - pod2 := f.PodClient().CreateSync(makeBusyboxPod(resourceName, podRECMD)) + pod2 := e2epod.NewPodClient(f).CreateSync(makeBusyboxPod(resourceName, podRECMD)) ginkgo.By("Checking that pod got a different fake device") devID2 := parseLog(f, pod2.Name, pod2.Name, deviceIDRE) @@ -331,13 +331,13 @@ func makeBusyboxPod(resourceName, cmd string) *v1.Pod { func ensurePodContainerRestart(f *framework.Framework, podName string, contName string) { var initialCount int32 var currentCount int32 - p, err := f.PodClient().Get(context.TODO(), podName, metav1.GetOptions{}) + p, 
err := e2epod.NewPodClient(f).Get(context.TODO(), podName, metav1.GetOptions{}) if err != nil || len(p.Status.ContainerStatuses) < 1 { framework.Failf("ensurePodContainerRestart failed for pod %q: %v", podName, err) } initialCount = p.Status.ContainerStatuses[0].RestartCount gomega.Eventually(func() bool { - p, err = f.PodClient().Get(context.TODO(), podName, metav1.GetOptions{}) + p, err = e2epod.NewPodClient(f).Get(context.TODO(), podName, metav1.GetOptions{}) if err != nil || len(p.Status.ContainerStatuses) < 1 { return false } diff --git a/test/e2e_node/eviction_test.go b/test/e2e_node/eviction_test.go index e26ba1062ab..0d3ff72f946 100644 --- a/test/e2e_node/eviction_test.go +++ b/test/e2e_node/eviction_test.go @@ -37,6 +37,7 @@ import ( kubeletmetrics "k8s.io/kubernetes/pkg/kubelet/metrics" kubetypes "k8s.io/kubernetes/pkg/kubelet/types" "k8s.io/kubernetes/test/e2e/framework" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" testutils "k8s.io/kubernetes/test/utils" imageutils "k8s.io/kubernetes/test/utils/image" @@ -532,7 +533,7 @@ func runEvictionTest(f *framework.Framework, pressureTimeout time.Duration, expe for _, spec := range testSpecs { pods = append(pods, spec.pod) } - f.PodClient().CreateBatch(pods) + e2epod.NewPodClient(f).CreateBatch(pods) }) ginkgo.It("should eventually evict all of the correct pods", func() { @@ -603,7 +604,7 @@ func runEvictionTest(f *framework.Framework, pressureTimeout time.Duration, expe ginkgo.By("deleting pods") for _, spec := range testSpecs { ginkgo.By(fmt.Sprintf("deleting pod: %s", spec.pod.Name)) - f.PodClient().DeleteSync(spec.pod.Name, metav1.DeleteOptions{}, 10*time.Minute) + e2epod.NewPodClient(f).DeleteSync(spec.pod.Name, metav1.DeleteOptions{}, 10*time.Minute) } // In case a test fails before verifying that NodeCondition no longer exist on the node, @@ -631,7 +632,7 @@ func runEvictionTest(f *framework.Framework, pressureTimeout time.Duration, expe ginkgo.By("making sure we can start a new pod after the test") podName := "test-admit-pod" - f.PodClient().CreateSync(&v1.Pod{ + e2epod.NewPodClient(f).CreateSync(&v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: podName, }, diff --git a/test/e2e_node/garbage_collector_test.go b/test/e2e_node/garbage_collector_test.go index 6fb340164ea..47689c8e0b0 100644 --- a/test/e2e_node/garbage_collector_test.go +++ b/test/e2e_node/garbage_collector_test.go @@ -28,6 +28,7 @@ import ( runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1" "k8s.io/kubernetes/pkg/kubelet/types" "k8s.io/kubernetes/test/e2e/framework" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" admissionapi "k8s.io/pod-security-admission/api" "github.com/onsi/ginkgo/v2" @@ -173,7 +174,7 @@ func containerGCTest(f *framework.Framework, test testRun) { ginkgo.Context(fmt.Sprintf("Garbage Collection Test: %s", test.testName), func() { ginkgo.BeforeEach(func() { realPods := getPods(test.testPods) - f.PodClient().CreateBatch(realPods) + e2epod.NewPodClient(f).CreateBatch(realPods) ginkgo.By("Making sure all containers restart the specified number of times") gomega.Eventually(func() error { for _, podSpec := range test.testPods { @@ -248,7 +249,7 @@ func containerGCTest(f *framework.Framework, test testRun) { ginkgo.AfterEach(func() { for _, pod := range test.testPods { ginkgo.By(fmt.Sprintf("Deleting Pod %v", pod.podName)) - f.PodClient().DeleteSync(pod.podName, metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout) + e2epod.NewPodClient(f).DeleteSync(pod.podName, metav1.DeleteOptions{}, 
e2epod.DefaultPodDeletionTimeout) } ginkgo.By("Making sure all containers get cleaned up") diff --git a/test/e2e_node/hugepages_test.go b/test/e2e_node/hugepages_test.go index 698ca508ae8..1a47554961b 100644 --- a/test/e2e_node/hugepages_test.go +++ b/test/e2e_node/hugepages_test.go @@ -323,7 +323,7 @@ var _ = SIGDescribe("HugePages [Serial] [Feature:HugePages][NodeSpecialFeature:H ginkgo.It("should set correct hugetlb mount and limit under the container cgroup", func() { ginkgo.By("getting mounts for the test pod") command := []string{"mount"} - out := f.ExecCommandInContainer(testpod.Name, testpod.Spec.Containers[0].Name, command...) + out := e2epod.ExecCommandInContainer(f, testpod.Name, testpod.Spec.Containers[0].Name, command...) for _, mount := range mounts { ginkgo.By(fmt.Sprintf("checking that the hugetlb mount %s exists under the container", mount.MountPath)) @@ -337,7 +337,7 @@ var _ = SIGDescribe("HugePages [Serial] [Feature:HugePages][NodeSpecialFeature:H resourceToCgroup[resourceName], ) ginkgo.By("checking if the expected hugetlb settings were applied") - f.PodClient().Create(verifyPod) + e2epod.NewPodClient(f).Create(verifyPod) err := e2epod.WaitForPodSuccessInNamespace(f.ClientSet, verifyPod.Name, f.Namespace.Name) framework.ExpectNoError(err) } @@ -356,13 +356,13 @@ var _ = SIGDescribe("HugePages [Serial] [Feature:HugePages][NodeSpecialFeature:H pod := getHugepagesTestPod(f, limits, mounts, volumes) ginkgo.By("by running a test pod that requests hugepages") - testpod = f.PodClient().CreateSync(pod) + testpod = e2epod.NewPodClient(f).CreateSync(pod) }) // we should use JustAfterEach because framework will teardown the client under the AfterEach method ginkgo.JustAfterEach(func() { ginkgo.By(fmt.Sprintf("deleting test pod %s", testpod.Name)) - f.PodClient().DeleteSync(testpod.Name, metav1.DeleteOptions{}, 2*time.Minute) + e2epod.NewPodClient(f).DeleteSync(testpod.Name, metav1.DeleteOptions{}, 2*time.Minute) releaseHugepages() diff --git a/test/e2e_node/image_credential_provider.go b/test/e2e_node/image_credential_provider.go index 9ee6e4f3827..317c4ee3067 100644 --- a/test/e2e_node/image_credential_provider.go +++ b/test/e2e_node/image_credential_provider.go @@ -23,6 +23,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/kubernetes/test/e2e/framework" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" imageutils "k8s.io/kubernetes/test/utils/image" admissionapi "k8s.io/pod-security-admission/api" ) @@ -30,10 +31,10 @@ import ( var _ = SIGDescribe("ImageCredentialProvider [Feature:KubeletCredentialProviders]", func() { f := framework.NewDefaultFramework("image-credential-provider") f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged - var podClient *framework.PodClient + var podClient *e2epod.PodClient ginkgo.BeforeEach(func() { - podClient = f.PodClient() + podClient = e2epod.NewPodClient(f) }) /* diff --git a/test/e2e_node/image_id_test.go b/test/e2e_node/image_id_test.go index d3fb980bff2..51f8a8b80c5 100644 --- a/test/e2e_node/image_id_test.go +++ b/test/e2e_node/image_id_test.go @@ -18,7 +18,8 @@ package e2enode import ( "context" - "k8s.io/api/core/v1" + + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/kubernetes/test/e2e/framework" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" @@ -51,11 +52,11 @@ var _ = SIGDescribe("ImageID [NodeFeature: ImageID]", func() { }, } - pod := f.PodClient().Create(podDesc) + pod := e2epod.NewPodClient(f).Create(podDesc) 
framework.ExpectNoError(e2epod.WaitTimeoutForPodNoLongerRunningInNamespace( f.ClientSet, pod.Name, f.Namespace.Name, framework.PodStartTimeout)) - runningPod, err := f.PodClient().Get(context.TODO(), pod.Name, metav1.GetOptions{}) + runningPod, err := e2epod.NewPodClient(f).Get(context.TODO(), pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err) status := runningPod.Status diff --git a/test/e2e_node/image_list.go b/test/e2e_node/image_list.go index e7b17dcde5e..a4b31a9dac5 100644 --- a/test/e2e_node/image_list.go +++ b/test/e2e_node/image_list.go @@ -31,9 +31,9 @@ import ( internalapi "k8s.io/cri-api/pkg/apis" runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1" commontest "k8s.io/kubernetes/test/e2e/common" - "k8s.io/kubernetes/test/e2e/framework" e2egpu "k8s.io/kubernetes/test/e2e/framework/gpu" e2emanifest "k8s.io/kubernetes/test/e2e/framework/manifest" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2etestfiles "k8s.io/kubernetes/test/e2e/framework/testfiles" imageutils "k8s.io/kubernetes/test/utils/image" ) @@ -68,34 +68,34 @@ var NodePrePullImageList = sets.NewString( imageutils.GetE2EImage(imageutils.Etcd), ) -// updateImageAllowList updates the framework.ImagePrePullList with +// updateImageAllowList updates the e2epod.ImagePrePullList with // 1. the hard coded lists // 2. the ones passed in from framework.TestContext.ExtraEnvs // So this function needs to be called after the extra envs are applied. func updateImageAllowList() { // Union NodePrePullImageList and PrePulledImages into the framework image pre-pull list. - framework.ImagePrePullList = NodePrePullImageList.Union(commontest.PrePulledImages) + e2epod.ImagePrePullList = NodePrePullImageList.Union(commontest.PrePulledImages) // Images from extra envs - framework.ImagePrePullList.Insert(getNodeProblemDetectorImage()) + e2epod.ImagePrePullList.Insert(getNodeProblemDetectorImage()) if sriovDevicePluginImage, err := getSRIOVDevicePluginImage(); err != nil { klog.Errorln(err) } else { - framework.ImagePrePullList.Insert(sriovDevicePluginImage) + e2epod.ImagePrePullList.Insert(sriovDevicePluginImage) } if gpuDevicePluginImage, err := getGPUDevicePluginImage(); err != nil { klog.Errorln(err) } else { - framework.ImagePrePullList.Insert(gpuDevicePluginImage) + e2epod.ImagePrePullList.Insert(gpuDevicePluginImage) } if kubeVirtPluginImage, err := getKubeVirtDevicePluginImage(); err != nil { klog.Errorln(err) } else { - framework.ImagePrePullList.Insert(kubeVirtPluginImage) + e2epod.ImagePrePullList.Insert(kubeVirtPluginImage) } if samplePluginImage, err := getSampleDevicePluginImage(); err != nil { klog.Errorln(err) } else { - framework.ImagePrePullList.Insert(samplePluginImage) + e2epod.ImagePrePullList.Insert(samplePluginImage) } } @@ -153,7 +153,7 @@ func PrePullAllImages() error { if err != nil { return err } - images := framework.ImagePrePullList.List() + images := e2epod.ImagePrePullList.List() klog.V(4).Infof("Pre-pulling images with %s %+v", puller.Name(), images) imageCh := make(chan int, len(images)) diff --git a/test/e2e_node/log_path_test.go b/test/e2e_node/log_path_test.go index e39f1bbe79b..3fa4f023456 100644 --- a/test/e2e_node/log_path_test.go +++ b/test/e2e_node/log_path_test.go @@ -39,7 +39,7 @@ const ( var _ = SIGDescribe("ContainerLogPath [NodeConformance]", func() { f := framework.NewDefaultFramework("kubelet-container-log-path") f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged - var podClient *framework.PodClient + var podClient *e2epod.PodClient ginkgo.Describe("Pod with a container", 
func() { ginkgo.Context("printed log to stdout", func() { @@ -117,7 +117,7 @@ var _ = SIGDescribe("ContainerLogPath [NodeConformance]", func() { var logPodName string ginkgo.BeforeEach(func() { - podClient = f.PodClient() + podClient = e2epod.NewPodClient(f) logPodName = "log-pod-" + string(uuid.NewUUID()) err := createAndWaitPod(makeLogPod(logPodName, logString)) framework.ExpectNoError(err, "Failed waiting for pod: %s to enter success state", logPodName) diff --git a/test/e2e_node/memory_manager_test.go b/test/e2e_node/memory_manager_test.go index 5d62365421e..ea4b9d3955b 100644 --- a/test/e2e_node/memory_manager_test.go +++ b/test/e2e_node/memory_manager_test.go @@ -351,7 +351,7 @@ var _ = SIGDescribe("Memory Manager [Disruptive] [Serial] [Feature:MemoryManager ginkgo.JustAfterEach(func() { // delete the test pod if testPod != nil && testPod.Name != "" { - f.PodClient().DeleteSync(testPod.Name, metav1.DeleteOptions{}, 2*time.Minute) + e2epod.NewPodClient(f).DeleteSync(testPod.Name, metav1.DeleteOptions{}, 2*time.Minute) } // release hugepages @@ -441,7 +441,7 @@ var _ = SIGDescribe("Memory Manager [Disruptive] [Serial] [Feature:MemoryManager ginkgo.It("should succeed to start the pod", func() { ginkgo.By("Running the test pod") - testPod = f.PodClient().CreateSync(testPod) + testPod = e2epod.NewPodClient(f).CreateSync(testPod) // it no taste to verify NUMA pinning when the node has only one NUMA node if !*isMultiNUMASupported { @@ -466,7 +466,7 @@ var _ = SIGDescribe("Memory Manager [Disruptive] [Serial] [Feature:MemoryManager ginkgo.It("should succeed to start the pod", func() { ginkgo.By("Running the test pod") - testPod = f.PodClient().CreateSync(testPod) + testPod = e2epod.NewPodClient(f).CreateSync(testPod) // it no taste to verify NUMA pinning when the node has only one NUMA node if !*isMultiNUMASupported { @@ -497,10 +497,10 @@ var _ = SIGDescribe("Memory Manager [Disruptive] [Serial] [Feature:MemoryManager ginkgo.It("should succeed to start all pods", func() { ginkgo.By("Running the test pod and the test pod 2") - testPod = f.PodClient().CreateSync(testPod) + testPod = e2epod.NewPodClient(f).CreateSync(testPod) ginkgo.By("Running the test pod 2") - testPod2 = f.PodClient().CreateSync(testPod2) + testPod2 = e2epod.NewPodClient(f).CreateSync(testPod2) // it no taste to verify NUMA pinning when the node has only one NUMA node if !*isMultiNUMASupported { @@ -514,10 +514,10 @@ var _ = SIGDescribe("Memory Manager [Disruptive] [Serial] [Feature:MemoryManager // TODO: move the test to pod resource API test suite, see - https://github.com/kubernetes/kubernetes/issues/101945 ginkgo.It("should report memory data for each guaranteed pod and container during request to pod resources List", func() { ginkgo.By("Running the test pod and the test pod 2") - testPod = f.PodClient().CreateSync(testPod) + testPod = e2epod.NewPodClient(f).CreateSync(testPod) ginkgo.By("Running the test pod 2") - testPod2 = f.PodClient().CreateSync(testPod2) + testPod2 = e2epod.NewPodClient(f).CreateSync(testPod2) endpoint, err := util.LocalEndpoint(defaultPodResourcesPath, podresources.Socket) framework.ExpectNoError(err) @@ -556,7 +556,7 @@ var _ = SIGDescribe("Memory Manager [Disruptive] [Serial] [Feature:MemoryManager ginkgo.JustAfterEach(func() { // delete the test pod 2 if testPod2.Name != "" { - f.PodClient().DeleteSync(testPod2.Name, metav1.DeleteOptions{}, 2*time.Minute) + e2epod.NewPodClient(f).DeleteSync(testPod2.Name, metav1.DeleteOptions{}, 2*time.Minute) } }) }) @@ -599,18 +599,18 @@ var _ = 
SIGDescribe("Memory Manager [Disruptive] [Serial] [Feature:MemoryManager } workloadPod := makeMemoryManagerPod(workloadCtnAttrs[0].ctnName, initCtnParams, workloadCtnAttrs) - workloadPod = f.PodClient().CreateSync(workloadPod) + workloadPod = e2epod.NewPodClient(f).CreateSync(workloadPod) workloadPods = append(workloadPods, workloadPod) } }) ginkgo.It("should be rejected", func() { ginkgo.By("Creating the pod") - testPod = f.PodClient().Create(testPod) + testPod = e2epod.NewPodClient(f).Create(testPod) ginkgo.By("Checking that pod failed to start because of admission error") gomega.Eventually(func() bool { - tmpPod, err := f.PodClient().Get(context.TODO(), testPod.Name, metav1.GetOptions{}) + tmpPod, err := e2epod.NewPodClient(f).Get(context.TODO(), testPod.Name, metav1.GetOptions{}) framework.ExpectNoError(err) if tmpPod.Status.Phase != v1.PodFailed { @@ -635,7 +635,7 @@ var _ = SIGDescribe("Memory Manager [Disruptive] [Serial] [Feature:MemoryManager ginkgo.JustAfterEach(func() { for _, workloadPod := range workloadPods { if workloadPod.Name != "" { - f.PodClient().DeleteSync(workloadPod.Name, metav1.DeleteOptions{}, 2*time.Minute) + e2epod.NewPodClient(f).DeleteSync(workloadPod.Name, metav1.DeleteOptions{}, 2*time.Minute) } } }) @@ -679,7 +679,7 @@ var _ = SIGDescribe("Memory Manager [Disruptive] [Serial] [Feature:MemoryManager // TODO: move the test to pod resource API test suite, see - https://github.com/kubernetes/kubernetes/issues/101945 ginkgo.It("should not report any memory data during request to pod resources List", func() { - testPod = f.PodClient().CreateSync(testPod) + testPod = e2epod.NewPodClient(f).CreateSync(testPod) endpoint, err := util.LocalEndpoint(defaultPodResourcesPath, podresources.Socket) framework.ExpectNoError(err) @@ -703,7 +703,7 @@ var _ = SIGDescribe("Memory Manager [Disruptive] [Serial] [Feature:MemoryManager }) ginkgo.It("should succeed to start the pod", func() { - testPod = f.PodClient().CreateSync(testPod) + testPod = e2epod.NewPodClient(f).CreateSync(testPod) // it no taste to verify NUMA pinning when the node has only one NUMA node if !*isMultiNUMASupported { diff --git a/test/e2e_node/node_perf_test.go b/test/e2e_node/node_perf_test.go index a6b13833572..d10bfd9113f 100644 --- a/test/e2e_node/node_perf_test.go +++ b/test/e2e_node/node_perf_test.go @@ -103,7 +103,7 @@ var _ = SIGDescribe("Node Performance Testing [Serial] [Slow]", func() { delOpts := metav1.DeleteOptions{ GracePeriodSeconds: &gp, } - f.PodClient().DeleteSync(pod.Name, delOpts, framework.DefaultPodDeletionTimeout) + e2epod.NewPodClient(f).DeleteSync(pod.Name, delOpts, e2epod.DefaultPodDeletionTimeout) // We are going to give some more time for the CPU manager to do any clean // up it needs to do now that the pod has been deleted. Otherwise we may @@ -124,7 +124,7 @@ var _ = SIGDescribe("Node Performance Testing [Serial] [Slow]", func() { // Make the pod for the workload. pod = makeNodePerfPod(wl) // Create the pod. - pod = f.PodClient().CreateSync(pod) + pod = e2epod.NewPodClient(f).CreateSync(pod) // Wait for pod success. 
// but avoid using WaitForSuccess because we want the container logs upon failure #109295 podErr := e2epod.WaitForPodCondition(f.ClientSet, f.Namespace.Name, pod.Name, fmt.Sprintf("%s or %s", v1.PodSucceeded, v1.PodFailed), wl.Timeout(), diff --git a/test/e2e_node/node_problem_detector_linux.go b/test/e2e_node/node_problem_detector_linux.go index ef9a2310d79..7589c5a1ae2 100644 --- a/test/e2e_node/node_problem_detector_linux.go +++ b/test/e2e_node/node_problem_detector_linux.go @@ -201,7 +201,7 @@ current-context: local-context ginkgo.By("Create the node problem detector") hostPathType := new(v1.HostPathType) *hostPathType = v1.HostPathFileOrCreate - pod := f.PodClient().CreateSync(&v1.Pod{ + pod := e2epod.NewPodClient(f).CreateSync(&v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: name, }, @@ -432,7 +432,7 @@ current-context: local-context framework.Logf("Node Problem Detector logs:\n %s", log) } ginkgo.By("Delete the node problem detector") - f.PodClient().Delete(context.TODO(), name, *metav1.NewDeleteOptions(0)) + e2epod.NewPodClient(f).Delete(context.TODO(), name, *metav1.NewDeleteOptions(0)) ginkgo.By("Wait for the node problem detector to disappear") gomega.Expect(e2epod.WaitForPodToDisappear(c, ns, name, labels.Everything(), pollInterval, pollTimeout)).To(gomega.Succeed()) ginkgo.By("Delete the config map") diff --git a/test/e2e_node/node_shutdown_linux_test.go b/test/e2e_node/node_shutdown_linux_test.go index d8f0695d3ef..98e3ba8516f 100644 --- a/test/e2e_node/node_shutdown_linux_test.go +++ b/test/e2e_node/node_shutdown_linux_test.go @@ -39,6 +39,7 @@ import ( "github.com/onsi/gomega" "k8s.io/kubernetes/pkg/apis/scheduling" "k8s.io/kubernetes/test/e2e/framework" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" "github.com/godbus/dbus/v5" v1 "k8s.io/api/core/v1" @@ -99,9 +100,9 @@ var _ = SIGDescribe("GracefulNodeShutdown [Serial] [NodeFeature:GracefulNodeShut } ginkgo.By("Creating batch pods") - f.PodClient().CreateBatch(pods) + e2epod.NewPodClient(f).CreateBatch(pods) - list, err := f.PodClient().List(context.TODO(), metav1.ListOptions{ + list, err := e2epod.NewPodClient(f).List(context.TODO(), metav1.ListOptions{ FieldSelector: nodeSelector, }) framework.ExpectNoError(err) @@ -149,7 +150,7 @@ var _ = SIGDescribe("GracefulNodeShutdown [Serial] [NodeFeature:GracefulNodeShut ginkgo.By("Verifying that non-critical pods are shutdown") // Not critical pod should be shutdown gomega.Eventually(func() error { - list, err = f.PodClient().List(context.TODO(), metav1.ListOptions{ + list, err = e2epod.NewPodClient(f).List(context.TODO(), metav1.ListOptions{ FieldSelector: nodeSelector, }) if err != nil { @@ -176,7 +177,7 @@ var _ = SIGDescribe("GracefulNodeShutdown [Serial] [NodeFeature:GracefulNodeShut ginkgo.By("Verifying that all pods are shutdown") // All pod should be shutdown gomega.Eventually(func() error { - list, err = f.PodClient().List(context.TODO(), metav1.ListOptions{ + list, err = e2epod.NewPodClient(f).List(context.TODO(), metav1.ListOptions{ FieldSelector: nodeSelector, }) if err != nil { @@ -368,9 +369,9 @@ var _ = SIGDescribe("GracefulNodeShutdown [Serial] [NodeFeature:GracefulNodeShut } ginkgo.By("Creating batch pods") - f.PodClient().CreateBatch(pods) + e2epod.NewPodClient(f).CreateBatch(pods) - list, err := f.PodClient().List(context.TODO(), metav1.ListOptions{ + list, err := e2epod.NewPodClient(f).List(context.TODO(), metav1.ListOptions{ FieldSelector: nodeSelector, }) framework.ExpectNoError(err) @@ -391,7 +392,7 @@ var _ = SIGDescribe("GracefulNodeShutdown [Serial] 
[NodeFeature:GracefulNodeShut for _, step := range downSteps { gomega.Eventually(func() error { - list, err = f.PodClient().List(context.TODO(), metav1.ListOptions{ + list, err = e2epod.NewPodClient(f).List(context.TODO(), metav1.ListOptions{ FieldSelector: nodeSelector, }) if err != nil { diff --git a/test/e2e_node/numa_alignment.go b/test/e2e_node/numa_alignment.go index e82c4d41c15..01226482a4a 100644 --- a/test/e2e_node/numa_alignment.go +++ b/test/e2e_node/numa_alignment.go @@ -28,6 +28,7 @@ import ( "k8s.io/kubernetes/pkg/kubelet/cm/cpuset" "k8s.io/kubernetes/test/e2e/framework" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" ) type numaPodResources struct { @@ -107,7 +108,7 @@ func getCPUToNUMANodeMapFromEnv(f *framework.Framework, pod *v1.Pod, cnt *v1.Con cpusPerNUMA := make(map[int][]int) for numaNode := 0; numaNode < numaNodes; numaNode++ { - nodeCPUList := f.ExecCommandInContainer(pod.Name, cnt.Name, + nodeCPUList := e2epod.ExecCommandInContainer(f, pod.Name, cnt.Name, "/bin/cat", fmt.Sprintf("/sys/devices/system/node/node%d/cpulist", numaNode)) cpus, err := cpuset.Parse(nodeCPUList) @@ -152,7 +153,7 @@ func getPCIDeviceToNumaNodeMapFromEnv(f *framework.Framework, pod *v1.Pod, cnt * // a single plugin can allocate more than a single device pciDevs := strings.Split(value, ",") for _, pciDev := range pciDevs { - pciDevNUMANode := f.ExecCommandInContainer(pod.Name, cnt.Name, + pciDevNUMANode := e2epod.ExecCommandInContainer(f, pod.Name, cnt.Name, "/bin/cat", fmt.Sprintf("/sys/bus/pci/devices/%s/numa_node", pciDev)) NUMAPerDev[pciDev] = numaNodeFromSysFsEntry(pciDevNUMANode) } diff --git a/test/e2e_node/os_label_rename_test.go b/test/e2e_node/os_label_rename_test.go index 257080d9434..e63a75895e6 100644 --- a/test/e2e_node/os_label_rename_test.go +++ b/test/e2e_node/os_label_rename_test.go @@ -34,6 +34,7 @@ import ( v1core "k8s.io/client-go/kubernetes/typed/core/v1" nodeutil "k8s.io/component-helpers/node/util" "k8s.io/kubernetes/test/e2e/framework" + e2enode "k8s.io/kubernetes/test/e2e/framework/node" admissionapi "k8s.io/pod-security-admission/api" ) @@ -43,8 +44,8 @@ var _ = SIGDescribe("OSArchLabelReconciliation [Serial] [Slow] [Disruptive]", fu ginkgo.Context("Kubelet", func() { ginkgo.It("should reconcile the OS and Arch labels when restarted", func() { node := getLocalNode(f) - framework.ExpectNodeHasLabel(f.ClientSet, node.Name, v1.LabelOSStable, runtime.GOOS) - framework.ExpectNodeHasLabel(f.ClientSet, node.Name, v1.LabelArchStable, runtime.GOARCH) + e2enode.ExpectNodeHasLabel(f.ClientSet, node.Name, v1.LabelOSStable, runtime.GOOS) + e2enode.ExpectNodeHasLabel(f.ClientSet, node.Name, v1.LabelArchStable, runtime.GOARCH) ginkgo.By("killing and restarting kubelet") // Let's kill the kubelet @@ -57,7 +58,7 @@ var _ = SIGDescribe("OSArchLabelReconciliation [Serial] [Slow] [Disruptive]", fu framework.ExpectNoError(err) // Restart kubelet startKubelet() - framework.ExpectNoError(framework.WaitForAllNodesSchedulable(f.ClientSet, framework.RestartNodeReadyAgainTimeout)) + framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(f.ClientSet, framework.RestartNodeReadyAgainTimeout)) // If this happens right, node should have all the labels reset properly err = waitForNodeLabels(f.ClientSet.CoreV1(), node.Name, 5*time.Minute) framework.ExpectNoError(err) @@ -65,8 +66,8 @@ var _ = SIGDescribe("OSArchLabelReconciliation [Serial] [Slow] [Disruptive]", fu ginkgo.It("should reconcile the OS and Arch labels when running", func() { node := getLocalNode(f) - 
framework.ExpectNodeHasLabel(f.ClientSet, node.Name, v1.LabelOSStable, runtime.GOOS) - framework.ExpectNodeHasLabel(f.ClientSet, node.Name, v1.LabelArchStable, runtime.GOARCH) + e2enode.ExpectNodeHasLabel(f.ClientSet, node.Name, v1.LabelOSStable, runtime.GOOS) + e2enode.ExpectNodeHasLabel(f.ClientSet, node.Name, v1.LabelArchStable, runtime.GOARCH) // Update labels newNode := node.DeepCopy() diff --git a/test/e2e_node/pids_test.go b/test/e2e_node/pids_test.go index 39af2b1bbcf..b8dd2a05097 100644 --- a/test/e2e_node/pids_test.go +++ b/test/e2e_node/pids_test.go @@ -89,7 +89,7 @@ func makePodToVerifyPids(baseName string, pidsLimit resource.Quantity) *v1.Pod { func runPodPidsLimitTests(f *framework.Framework) { ginkgo.It("should set pids.max for Pod", func() { ginkgo.By("by creating a G pod") - pod := f.PodClient().Create(&v1.Pod{ + pod := e2epod.NewPodClient(f).Create(&v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "pod" + string(uuid.NewUUID()), Namespace: f.Namespace.Name, @@ -112,7 +112,7 @@ func runPodPidsLimitTests(f *framework.Framework) { podUID := string(pod.UID) ginkgo.By("checking if the expected pids settings were applied") verifyPod := makePodToVerifyPids("pod"+podUID, resource.MustParse("1024")) - f.PodClient().Create(verifyPod) + e2epod.NewPodClient(f).Create(verifyPod) err := e2epod.WaitForPodSuccessInNamespace(f.ClientSet, verifyPod.Name, f.Namespace.Name) framework.ExpectNoError(err) }) diff --git a/test/e2e_node/pod_conditions_test.go b/test/e2e_node/pod_conditions_test.go index e0629398d44..3fd4d64725a 100644 --- a/test/e2e_node/pod_conditions_test.go +++ b/test/e2e_node/pod_conditions_test.go @@ -89,7 +89,7 @@ func runPodFailingConditionsTest(f *framework.Framework, hasInitContainers, chec }, } - p = f.PodClient().Create(p) + p = e2epod.NewPodClient(f).Create(p) ginkgo.By("waiting until kubelet has started trying to set up the pod and started to fail") @@ -101,7 +101,7 @@ func runPodFailingConditionsTest(f *framework.Framework, hasInitContainers, chec }.AsSelector().String() e2eevents.WaitTimeoutForEvent(f.ClientSet, f.Namespace.Name, eventSelector, "MountVolume.SetUp failed for volume", framework.PodEventTimeout) - p, err := f.PodClient().Get(context.TODO(), p.Name, metav1.GetOptions{}) + p, err := e2epod.NewPodClient(f).Get(context.TODO(), p.Name, metav1.GetOptions{}) framework.ExpectNoError(err) ginkgo.By("checking pod condition for a pod whose sandbox creation is blocked") @@ -139,10 +139,10 @@ func runPodReadyConditionsTest(f *framework.Framework, hasInitContainers, checkP return func() { ginkgo.By("creating a pod that successfully comes up in a ready/running state") - p := f.PodClient().Create(webserverPodSpec("pod-"+string(uuid.NewUUID()), "web1", "init1", hasInitContainers)) + p := e2epod.NewPodClient(f).Create(webserverPodSpec("pod-"+string(uuid.NewUUID()), "web1", "init1", hasInitContainers)) e2epod.WaitTimeoutForPodReadyInNamespace(f.ClientSet, p.Name, f.Namespace.Name, framework.PodStartTimeout) - p, err := f.PodClient().Get(context.TODO(), p.Name, metav1.GetOptions{}) + p, err := e2epod.NewPodClient(f).Get(context.TODO(), p.Name, metav1.GetOptions{}) framework.ExpectNoError(err) isReady, err := testutils.PodRunningReady(p) framework.ExpectNoError(err) diff --git a/test/e2e_node/pod_hostnamefqdn_test.go b/test/e2e_node/pod_hostnamefqdn_test.go index 76e4b57ceec..d80a96641ca 100644 --- a/test/e2e_node/pod_hostnamefqdn_test.go +++ b/test/e2e_node/pod_hostnamefqdn_test.go @@ -35,6 +35,7 @@ import ( "k8s.io/kubernetes/test/e2e/framework" e2eevents 
"k8s.io/kubernetes/test/e2e/framework/events" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" + e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output" imageutils "k8s.io/kubernetes/test/utils/image" "github.com/onsi/ginkgo/v2" @@ -83,7 +84,7 @@ var _ = SIGDescribe("Hostname of Pod [NodeConformance]", func() { pod.Spec.Containers[0].Command = []string{"sh", "-c", "echo $(hostname)';'$(hostname -f)';'"} output := []string{fmt.Sprintf("%s;%s;", pod.ObjectMeta.Name, pod.ObjectMeta.Name)} // Create Pod - f.TestContainerOutput("shortname only", pod, 0, output) + e2eoutput.TestContainerOutput(f, "shortname only", pod, 0, output) }) /* @@ -100,7 +101,7 @@ var _ = SIGDescribe("Hostname of Pod [NodeConformance]", func() { pod.Spec.Containers[0].Command = []string{"sh", "-c", "echo $(hostname)';'$(hostname -f)';'"} output := []string{fmt.Sprintf("%s;%s;", pod.ObjectMeta.Name, pod.ObjectMeta.Name)} // Create Pod - f.TestContainerOutput("shortname only", pod, 0, output) + e2eoutput.TestContainerOutput(f, "shortname only", pod, 0, output) }) /* @@ -119,7 +120,7 @@ var _ = SIGDescribe("Hostname of Pod [NodeConformance]", func() { hostFQDN := fmt.Sprintf("%s.%s.%s.svc.%s", pod.ObjectMeta.Name, subdomain, f.Namespace.Name, framework.TestContext.ClusterDNSDomain) output := []string{fmt.Sprintf("%s;%s;", pod.ObjectMeta.Name, hostFQDN)} // Create Pod - f.TestContainerOutput("shortname and fqdn", pod, 0, output) + e2eoutput.TestContainerOutput(f, "shortname and fqdn", pod, 0, output) }) /* @@ -144,7 +145,7 @@ var _ = SIGDescribe("Hostname of Pod [NodeConformance]", func() { framework.ExpectEqual(len(hostFQDN) < 65, true, fmt.Sprintf("The FQDN of the Pod cannot be longer than 64 characters, requested %s which is %d characters long.", hostFQDN, len(hostFQDN))) output := []string{fmt.Sprintf("%s;%s;", hostFQDN, hostFQDN)} // Create Pod - f.TestContainerOutput("fqdn and fqdn", pod, 0, output) + e2eoutput.TestContainerOutput(f, "fqdn and fqdn", pod, 0, output) }) /* @@ -170,9 +171,9 @@ var _ = SIGDescribe("Hostname of Pod [NodeConformance]", func() { setHostnameAsFQDN := true pod.Spec.SetHostnameAsFQDN = &setHostnameAsFQDN // Create Pod - launchedPod := f.PodClient().Create(pod) + launchedPod := e2epod.NewPodClient(f).Create(pod) // Ensure we delete pod - defer f.PodClient().DeleteSync(launchedPod.Name, metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout) + defer e2epod.NewPodClient(f).DeleteSync(launchedPod.Name, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout) // Pod should remain in the pending state generating events with reason FailedCreatePodSandBox // Expected Message Error Event diff --git a/test/e2e_node/podresources_test.go b/test/e2e_node/podresources_test.go index 8f50548663f..5bca95375ca 100644 --- a/test/e2e_node/podresources_test.go +++ b/test/e2e_node/podresources_test.go @@ -164,7 +164,7 @@ func newTestPodData() *testPodData { func (tpd *testPodData) createPodsForTest(f *framework.Framework, podReqs []podDesc) { for _, podReq := range podReqs { pod := makePodResourcesTestPod(podReq) - pod = f.PodClient().CreateSync(pod) + pod = e2epod.NewPodClient(f).CreateSync(pod) framework.Logf("created pod %s", podReq.podName) tpd.PodMap[podReq.podName] = pod diff --git a/test/e2e_node/pods_container_manager_test.go b/test/e2e_node/pods_container_manager_test.go index 00f4802e4bc..9e98117c8d7 100644 --- a/test/e2e_node/pods_container_manager_test.go +++ b/test/e2e_node/pods_container_manager_test.go @@ -20,7 +20,7 @@ import ( "context" "strings" - "k8s.io/api/core/v1" + v1 
"k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/uuid" @@ -176,7 +176,7 @@ var _ = SIGDescribe("Kubelet Cgroup Manager", func() { } cgroupsToVerify := []string{burstableCgroup, bestEffortCgroup} pod := makePodToVerifyCgroups(cgroupsToVerify) - f.PodClient().Create(pod) + e2epod.NewPodClient(f).Create(pod) err := e2epod.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name) framework.ExpectNoError(err) }) @@ -194,7 +194,7 @@ var _ = SIGDescribe("Kubelet Cgroup Manager", func() { podUID string ) ginkgo.By("Creating a Guaranteed pod in Namespace", func() { - guaranteedPod = f.PodClient().Create(&v1.Pod{ + guaranteedPod = e2epod.NewPodClient(f).Create(&v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "pod" + string(uuid.NewUUID()), Namespace: f.Namespace.Name, @@ -214,16 +214,16 @@ var _ = SIGDescribe("Kubelet Cgroup Manager", func() { ginkgo.By("Checking if the pod cgroup was created", func() { cgroupsToVerify := []string{"pod" + podUID} pod := makePodToVerifyCgroups(cgroupsToVerify) - f.PodClient().Create(pod) + e2epod.NewPodClient(f).Create(pod) err := e2epod.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name) framework.ExpectNoError(err) }) ginkgo.By("Checking if the pod cgroup was deleted", func() { gp := int64(1) - err := f.PodClient().Delete(context.TODO(), guaranteedPod.Name, metav1.DeleteOptions{GracePeriodSeconds: &gp}) + err := e2epod.NewPodClient(f).Delete(context.TODO(), guaranteedPod.Name, metav1.DeleteOptions{GracePeriodSeconds: &gp}) framework.ExpectNoError(err) pod := makePodToVerifyCgroupRemoved("pod" + podUID) - f.PodClient().Create(pod) + e2epod.NewPodClient(f).Create(pod) err = e2epod.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name) framework.ExpectNoError(err) }) @@ -239,7 +239,7 @@ var _ = SIGDescribe("Kubelet Cgroup Manager", func() { bestEffortPod *v1.Pod ) ginkgo.By("Creating a BestEffort pod in Namespace", func() { - bestEffortPod = f.PodClient().Create(&v1.Pod{ + bestEffortPod = e2epod.NewPodClient(f).Create(&v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "pod" + string(uuid.NewUUID()), Namespace: f.Namespace.Name, @@ -259,16 +259,16 @@ var _ = SIGDescribe("Kubelet Cgroup Manager", func() { ginkgo.By("Checking if the pod cgroup was created", func() { cgroupsToVerify := []string{"besteffort/pod" + podUID} pod := makePodToVerifyCgroups(cgroupsToVerify) - f.PodClient().Create(pod) + e2epod.NewPodClient(f).Create(pod) err := e2epod.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name) framework.ExpectNoError(err) }) ginkgo.By("Checking if the pod cgroup was deleted", func() { gp := int64(1) - err := f.PodClient().Delete(context.TODO(), bestEffortPod.Name, metav1.DeleteOptions{GracePeriodSeconds: &gp}) + err := e2epod.NewPodClient(f).Delete(context.TODO(), bestEffortPod.Name, metav1.DeleteOptions{GracePeriodSeconds: &gp}) framework.ExpectNoError(err) pod := makePodToVerifyCgroupRemoved("besteffort/pod" + podUID) - f.PodClient().Create(pod) + e2epod.NewPodClient(f).Create(pod) err = e2epod.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name) framework.ExpectNoError(err) }) @@ -284,7 +284,7 @@ var _ = SIGDescribe("Kubelet Cgroup Manager", func() { burstablePod *v1.Pod ) ginkgo.By("Creating a Burstable pod in Namespace", func() { - burstablePod = f.PodClient().Create(&v1.Pod{ + burstablePod = e2epod.NewPodClient(f).Create(&v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "pod" + string(uuid.NewUUID()), Namespace: 
f.Namespace.Name, @@ -304,16 +304,16 @@ var _ = SIGDescribe("Kubelet Cgroup Manager", func() { ginkgo.By("Checking if the pod cgroup was created", func() { cgroupsToVerify := []string{"burstable/pod" + podUID} pod := makePodToVerifyCgroups(cgroupsToVerify) - f.PodClient().Create(pod) + e2epod.NewPodClient(f).Create(pod) err := e2epod.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name) framework.ExpectNoError(err) }) ginkgo.By("Checking if the pod cgroup was deleted", func() { gp := int64(1) - err := f.PodClient().Delete(context.TODO(), burstablePod.Name, metav1.DeleteOptions{GracePeriodSeconds: &gp}) + err := e2epod.NewPodClient(f).Delete(context.TODO(), burstablePod.Name, metav1.DeleteOptions{GracePeriodSeconds: &gp}) framework.ExpectNoError(err) pod := makePodToVerifyCgroupRemoved("burstable/pod" + podUID) - f.PodClient().Create(pod) + e2epod.NewPodClient(f).Create(pod) err = e2epod.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name) framework.ExpectNoError(err) }) diff --git a/test/e2e_node/resource_collector.go b/test/e2e_node/resource_collector.go index 9b54ae79d79..b5af64af376 100644 --- a/test/e2e_node/resource_collector.go +++ b/test/e2e_node/resource_collector.go @@ -379,7 +379,7 @@ func deletePodsSync(f *framework.Framework, pods []*v1.Pod) { defer ginkgo.GinkgoRecover() defer wg.Done() - err := f.PodClient().Delete(context.TODO(), pod.ObjectMeta.Name, *metav1.NewDeleteOptions(30)) + err := e2epod.NewPodClient(f).Delete(context.TODO(), pod.ObjectMeta.Name, *metav1.NewDeleteOptions(30)) if apierrors.IsNotFound(err) { framework.Failf("Unexpected error trying to delete pod %s: %v", pod.Name, err) } diff --git a/test/e2e_node/resource_metrics_test.go b/test/e2e_node/resource_metrics_test.go index fd3feb8db4f..bac859738f3 100644 --- a/test/e2e_node/resource_metrics_test.go +++ b/test/e2e_node/resource_metrics_test.go @@ -24,6 +24,7 @@ import ( "k8s.io/kubernetes/test/e2e/framework" e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl" e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2evolume "k8s.io/kubernetes/test/e2e/framework/volume" admissionapi "k8s.io/pod-security-admission/api" @@ -49,7 +50,7 @@ var _ = SIGDescribe("ResourceMetricsAPI [NodeFeature:ResourceMetrics]", func() { ginkgo.By("Creating test pods to measure their resource usage") numRestarts := int32(1) pods := getSummaryTestPods(f, numRestarts, pod0, pod1) - f.PodClient().CreateBatch(pods) + e2epod.NewPodClient(f).CreateBatch(pods) ginkgo.By("restarting the containers to ensure container metrics are still being gathered after a container is restarted") gomega.Eventually(func() error { @@ -113,8 +114,8 @@ var _ = SIGDescribe("ResourceMetricsAPI [NodeFeature:ResourceMetrics]", func() { ginkgo.AfterEach(func() { ginkgo.By("Deleting test pods") var zero int64 = 0 - f.PodClient().DeleteSync(pod0, metav1.DeleteOptions{GracePeriodSeconds: &zero}, 10*time.Minute) - f.PodClient().DeleteSync(pod1, metav1.DeleteOptions{GracePeriodSeconds: &zero}, 10*time.Minute) + e2epod.NewPodClient(f).DeleteSync(pod0, metav1.DeleteOptions{GracePeriodSeconds: &zero}, 10*time.Minute) + e2epod.NewPodClient(f).DeleteSync(pod1, metav1.DeleteOptions{GracePeriodSeconds: &zero}, 10*time.Minute) if !ginkgo.CurrentSpecReport().Failed() { return } diff --git a/test/e2e_node/resource_usage_test.go b/test/e2e_node/resource_usage_test.go index 0efac93d060..5eac30e5b11 100644 --- a/test/e2e_node/resource_usage_test.go +++ 
b/test/e2e_node/resource_usage_test.go @@ -29,6 +29,7 @@ import ( "k8s.io/kubernetes/test/e2e/framework" e2ekubelet "k8s.io/kubernetes/test/e2e/framework/kubelet" e2eperf "k8s.io/kubernetes/test/e2e/framework/perf" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" imageutils "k8s.io/kubernetes/test/utils/image" admissionapi "k8s.io/pod-security-admission/api" @@ -55,7 +56,7 @@ var _ = SIGDescribe("Resource-usage [Serial] [Slow]", func() { // The Cadvsior of Kubelet has a housekeeping interval of 10s, which is too long to // show the resource usage spikes. But changing its interval increases the overhead // of kubelet. Hence we use a Cadvisor pod. - f.PodClient().CreateSync(getCadvisorPod()) + e2epod.NewPodClient(f).CreateSync(getCadvisorPod()) rc = NewResourceCollector(containerStatsPollingPeriod) }) @@ -155,7 +156,7 @@ func runResourceUsageTest(f *framework.Framework, rc *ResourceCollector, testArg defer rc.Stop() ginkgo.By("Creating a batch of Pods") - f.PodClient().CreateBatch(pods) + e2epod.NewPodClient(f).CreateBatch(pods) // wait for a while to let the node be steady time.Sleep(sleepAfterCreatePods) diff --git a/test/e2e_node/restart_test.go b/test/e2e_node/restart_test.go index 36d3493e898..ba4778dfeaf 100644 --- a/test/e2e_node/restart_test.go +++ b/test/e2e_node/restart_test.go @@ -29,6 +29,7 @@ import ( "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/kubernetes/test/e2e/framework" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" testutils "k8s.io/kubernetes/test/utils" imageutils "k8s.io/kubernetes/test/utils/image" @@ -42,7 +43,7 @@ import ( // If the timeout is hit, it returns the list of currently running pods. func waitForPods(f *framework.Framework, podCount int, timeout time.Duration) (runningPods []*v1.Pod) { for start := time.Now(); time.Since(start) < timeout; time.Sleep(10 * time.Second) { - podList, err := f.PodClient().List(context.TODO(), metav1.ListOptions{}) + podList, err := e2epod.NewPodClient(f).List(context.TODO(), metav1.ListOptions{}) if err != nil { framework.Logf("Failed to list pods on node: %v", err) continue diff --git a/test/e2e_node/runtime_conformance_test.go b/test/e2e_node/runtime_conformance_test.go index 924631fa78e..2c7778ccf13 100644 --- a/test/e2e_node/runtime_conformance_test.go +++ b/test/e2e_node/runtime_conformance_test.go @@ -22,10 +22,11 @@ import ( "path/filepath" "time" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" "k8s.io/kubernetes/pkg/kubelet/images" "k8s.io/kubernetes/test/e2e/common/node" "k8s.io/kubernetes/test/e2e/framework" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" "k8s.io/kubernetes/test/e2e_node/services" admissionapi "k8s.io/pod-security-admission/api" @@ -70,7 +71,7 @@ var _ = SIGDescribe("Container Runtime Conformance Test", func() { name := "image-pull-test" command := []string{"/bin/sh", "-c", "while true; do sleep 1; done"} container := node.ConformanceContainer{ - PodClient: f.PodClient(), + PodClient: e2epod.NewPodClient(f), Container: v1.Container{ Name: name, Image: testCase.image, diff --git a/test/e2e_node/runtimeclass_test.go b/test/e2e_node/runtimeclass_test.go index b4529b78390..a64260d53df 100644 --- a/test/e2e_node/runtimeclass_test.go +++ b/test/e2e_node/runtimeclass_test.go @@ -118,7 +118,7 @@ var _ = SIGDescribe("Kubelet PodOverhead handling [LinuxOnly]", func() { framework.ExpectNoError(err, "failed to create RuntimeClass resource") }) ginkgo.By("Creating a Guaranteed pod with which has 
Overhead defined", func() { - guaranteedPod = f.PodClient().CreateSync(&v1.Pod{ + guaranteedPod = e2epod.NewPodClient(f).CreateSync(&v1.Pod{ ObjectMeta: metav1.ObjectMeta{ GenerateName: "pod-with-overhead-", Namespace: f.Namespace.Name, @@ -140,7 +140,7 @@ var _ = SIGDescribe("Kubelet PodOverhead handling [LinuxOnly]", func() { ginkgo.By("Checking if the pod cgroup was created appropriately", func() { cgroupsToVerify := []string{"pod" + podUID} pod := makePodToVerifyCgroupSize(cgroupsToVerify, "30000", "251658240") - pod = f.PodClient().Create(pod) + pod = e2epod.NewPodClient(f).Create(pod) err := e2epod.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name) framework.ExpectNoError(err) }) diff --git a/test/e2e_node/seccompdefault_test.go b/test/e2e_node/seccompdefault_test.go index 49dc05d733f..7d61ff41e71 100644 --- a/test/e2e_node/seccompdefault_test.go +++ b/test/e2e_node/seccompdefault_test.go @@ -27,6 +27,7 @@ import ( "k8s.io/apimachinery/pkg/util/uuid" kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config" "k8s.io/kubernetes/test/e2e/framework" + e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output" admissionapi "k8s.io/pod-security-admission/api" ) @@ -60,12 +61,12 @@ var _ = SIGDescribe("SeccompDefault [Serial] [Feature:SeccompDefault] [LinuxOnly ginkgo.It("should use the default seccomp profile when unspecified", func() { pod := newPod(nil) - f.TestContainerOutput("SeccompDefault", pod, 0, []string{"2"}) + e2eoutput.TestContainerOutput(f, "SeccompDefault", pod, 0, []string{"2"}) }) ginkgo.It("should use unconfined when specified", func() { pod := newPod(&v1.SecurityContext{SeccompProfile: &v1.SeccompProfile{Type: v1.SeccompProfileTypeUnconfined}}) - f.TestContainerOutput("SeccompDefault-unconfined", pod, 0, []string{"0"}) + e2eoutput.TestContainerOutput(f, "SeccompDefault-unconfined", pod, 0, []string{"0"}) }) }) }) diff --git a/test/e2e_node/security_context_test.go b/test/e2e_node/security_context_test.go index 95a782e67a4..4d691f49254 100644 --- a/test/e2e_node/security_context_test.go +++ b/test/e2e_node/security_context_test.go @@ -37,15 +37,15 @@ import ( var _ = SIGDescribe("Security Context", func() { f := framework.NewDefaultFramework("security-context-test") f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged - var podClient *framework.PodClient + var podClient *e2epod.PodClient ginkgo.BeforeEach(func() { - podClient = f.PodClient() + podClient = e2epod.NewPodClient(f) }) ginkgo.Context("[NodeConformance][LinuxOnly] Container PID namespace sharing", func() { ginkgo.It("containers in pods using isolated PID namespaces should all receive PID 1", func() { ginkgo.By("Create a pod with isolated PID namespaces.") - f.PodClient().CreateSync(&v1.Pod{ + e2epod.NewPodClient(f).CreateSync(&v1.Pod{ ObjectMeta: metav1.ObjectMeta{Name: "isolated-pid-ns-test-pod"}, Spec: v1.PodSpec{ Containers: []v1.Container{ @@ -65,8 +65,8 @@ var _ = SIGDescribe("Security Context", func() { }) ginkgo.By("Check if both containers receive PID 1.") - pid1 := f.ExecCommandInContainer("isolated-pid-ns-test-pod", "test-container-1", "/bin/pidof", "top") - pid2 := f.ExecCommandInContainer("isolated-pid-ns-test-pod", "test-container-2", "/bin/pidof", "sleep") + pid1 := e2epod.ExecCommandInContainer(f, "isolated-pid-ns-test-pod", "test-container-1", "/bin/pidof", "top") + pid2 := e2epod.ExecCommandInContainer(f, "isolated-pid-ns-test-pod", "test-container-2", "/bin/pidof", "sleep") if pid1 != "1" || pid2 != "1" { framework.Failf("PIDs of different containers are not all 
1: test-container-1=%v, test-container-2=%v", pid1, pid2) } @@ -74,7 +74,7 @@ var _ = SIGDescribe("Security Context", func() { ginkgo.It("processes in containers sharing a pod namespace should be able to see each other", func() { ginkgo.By("Create a pod with shared PID namespace.") - f.PodClient().CreateSync(&v1.Pod{ + e2epod.NewPodClient(f).CreateSync(&v1.Pod{ ObjectMeta: metav1.ObjectMeta{Name: "shared-pid-ns-test-pod"}, Spec: v1.PodSpec{ ShareProcessNamespace: &[]bool{true}[0], @@ -95,8 +95,8 @@ var _ = SIGDescribe("Security Context", func() { }) ginkgo.By("Check if the process in one container is visible to the process in the other.") - pid1 := f.ExecCommandInContainer("shared-pid-ns-test-pod", "test-container-1", "/bin/pidof", "top") - pid2 := f.ExecCommandInContainer("shared-pid-ns-test-pod", "test-container-2", "/bin/pidof", "top") + pid1 := e2epod.ExecCommandInContainer(f, "shared-pid-ns-test-pod", "test-container-1", "/bin/pidof", "top") + pid2 := e2epod.ExecCommandInContainer(f, "shared-pid-ns-test-pod", "test-container-2", "/bin/pidof", "top") if pid1 != pid2 { framework.Failf("PIDs are not the same in different containers: test-container-1=%v, test-container-2=%v", pid1, pid2) } @@ -141,7 +141,7 @@ var _ = SIGDescribe("Security Context", func() { true, )) - output := f.ExecShellInContainer(nginxPodName, nginxPodName, + output := e2epod.ExecShellInContainer(f, nginxPodName, nginxPodName, "cat /var/run/nginx.pid") nginxPid = strings.TrimSpace(output) }) diff --git a/test/e2e_node/summary_test.go b/test/e2e_node/summary_test.go index 35c1a38965e..411bce3d982 100644 --- a/test/e2e_node/summary_test.go +++ b/test/e2e_node/summary_test.go @@ -28,6 +28,7 @@ import ( kubeletstatsv1alpha1 "k8s.io/kubelet/pkg/apis/stats/v1alpha1" "k8s.io/kubernetes/test/e2e/framework" e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2evolume "k8s.io/kubernetes/test/e2e/framework/volume" admissionapi "k8s.io/pod-security-admission/api" @@ -59,7 +60,7 @@ var _ = SIGDescribe("Summary API [NodeConformance]", func() { ginkgo.By("Creating test pods") numRestarts := int32(1) pods := getSummaryTestPods(f, numRestarts, pod0, pod1) - f.PodClient().CreateBatch(pods) + e2epod.NewPodClient(f).CreateBatch(pods) ginkgo.By("restarting the containers to ensure container metrics are still being gathered after a container is restarted") gomega.Eventually(func() error { diff --git a/test/e2e_node/system_node_critical_test.go b/test/e2e_node/system_node_critical_test.go index a00bfa8297a..f388e10ed99 100644 --- a/test/e2e_node/system_node_critical_test.go +++ b/test/e2e_node/system_node_critical_test.go @@ -21,7 +21,7 @@ import ( "os" "time" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/util/uuid" kubeapi "k8s.io/kubernetes/pkg/apis/core" diff --git a/test/e2e_node/topology_manager_test.go b/test/e2e_node/topology_manager_test.go index 4b4e174c7b6..51beb171fd4 100644 --- a/test/e2e_node/topology_manager_test.go +++ b/test/e2e_node/topology_manager_test.go @@ -402,7 +402,7 @@ func runTopologyManagerPositiveTest(f *framework.Framework, numPods int, ctnAttr podName := fmt.Sprintf("gu-pod-%d", podID) framework.Logf("creating pod %s attrs %v", podName, ctnAttrs) pod := makeTopologyManagerTestPod(podName, ctnAttrs, initCtnAttrs) - pod = f.PodClient().CreateSync(pod) + pod = e2epod.NewPodClient(f).CreateSync(pod) framework.Logf("created pod %s", podName) podMap[podName] = pod } @@ -444,7 +444,7 @@ func 
runTopologyManagerNegativeTest(f *framework.Framework, ctnAttrs, initCtnAtt framework.Logf("creating pod %s attrs %v", podName, ctnAttrs) pod := makeTopologyManagerTestPod(podName, ctnAttrs, initCtnAttrs) - pod = f.PodClient().Create(pod) + pod = e2epod.NewPodClient(f).Create(pod) err := e2epod.WaitForPodCondition(f.ClientSet, f.Namespace.Name, pod.Name, "Failed", 30*time.Second, func(pod *v1.Pod) (bool, error) { if pod.Status.Phase != v1.PodPending { return true, nil @@ -452,7 +452,7 @@ func runTopologyManagerNegativeTest(f *framework.Framework, ctnAttrs, initCtnAtt return false, nil }) framework.ExpectNoError(err) - pod, err = f.PodClient().Get(context.TODO(), pod.Name, metav1.GetOptions{}) + pod, err = e2epod.NewPodClient(f).Get(context.TODO(), pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err) if pod.Status.Phase != v1.PodFailed { diff --git a/test/e2e_node/volume_manager_test.go b/test/e2e_node/volume_manager_test.go index 896f6c81aa5..21afa80118b 100644 --- a/test/e2e_node/volume_manager_test.go +++ b/test/e2e_node/volume_manager_test.go @@ -20,7 +20,7 @@ import ( "context" "time" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/kubernetes/test/e2e/framework" @@ -44,7 +44,7 @@ var _ = SIGDescribe("Kubelet Volume Manager", func() { ) ginkgo.By("Creating a pod with a memory backed volume that exits success without restart", func() { volumeName = "memory-volume" - memoryBackedPod = f.PodClient().Create(&v1.Pod{ + memoryBackedPod = e2epod.NewPodClient(f).Create(&v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "pod" + string(uuid.NewUUID()), Namespace: f.Namespace.Name, @@ -83,7 +83,7 @@ var _ = SIGDescribe("Kubelet Volume Manager", func() { for i := 0; i < 10; i++ { // need to create a new verification pod on each pass since updates //to the HostPath volume aren't propogated to the pod - pod := f.PodClient().Create(&v1.Pod{ + pod := e2epod.NewPodClient(f).Create(&v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "pod" + string(uuid.NewUUID()), Namespace: f.Namespace.Name, @@ -117,7 +117,7 @@ var _ = SIGDescribe("Kubelet Volume Manager", func() { }) err = e2epod.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name) gp := int64(1) - f.PodClient().Delete(context.TODO(), pod.Name, metav1.DeleteOptions{GracePeriodSeconds: &gp}) + e2epod.NewPodClient(f).Delete(context.TODO(), pod.Name, metav1.DeleteOptions{GracePeriodSeconds: &gp}) if err == nil { break }