e2e: adapt to moved code

This is the result of automatically editing source files like this:

    go install golang.org/x/tools/cmd/goimports@latest
    find ./test/e2e* -name "*.go" | xargs env PATH=$GOPATH/bin:$PATH ./e2e-framework-sed.sh

with e2e-framework-sed.sh containing this:

sed -i \
    -e "s/\(f\|fr\|\w\w*\.[fF]\w*\)\.ExecCommandInContainer(/e2epod.ExecCommandInContainer(\1, /" \
    -e "s/\(f\|fr\|\w\w*\.[fF]\w*\)\.ExecCommandInContainerWithFullOutput(/e2epod.ExecCommandInContainerWithFullOutput(\1, /" \
    -e "s/\(f\|fr\|\w\w*\.[fF]\w*\)\.ExecShellInContainer(/e2epod.ExecShellInContainer(\1, /" \
    -e "s/\(f\|fr\|\w\w*\.[fF]\w*\)\.ExecShellInPod(/e2epod.ExecShellInPod(\1, /" \
    -e "s/\(f\|fr\|\w\w*\.[fF]\w*\)\.ExecShellInPodWithFullOutput(/e2epod.ExecShellInPodWithFullOutput(\1, /" \
    -e "s/\(f\|fr\|\w\w*\.[fF]\w*\)\.ExecWithOptions(/e2epod.ExecWithOptions(\1, /" \
    -e "s/\(f\|fr\|\w\w*\.[fF]\w*\)\.MatchContainerOutput(/e2eoutput.MatchContainerOutput(\1, /" \
    -e "s/\(f\|fr\|\w\w*\.[fF]\w*\)\.PodClient(/e2epod.NewPodClient(\1, /" \
    -e "s/\(f\|fr\|\w\w*\.[fF]\w*\)\.PodClientNS(/e2epod.PodClientNS(\1, /" \
    -e "s/\(f\|fr\|\w\w*\.[fF]\w*\)\.TestContainerOutput(/e2eoutput.TestContainerOutput(\1, /" \
    -e "s/\(f\|fr\|\w\w*\.[fF]\w*\)\.TestContainerOutputRegexp(/e2eoutput.TestContainerOutputRegexp(\1, /" \
    -e "s/framework.AddOrUpdateLabelOnNode\b/e2enode.AddOrUpdateLabelOnNode/" \
    -e "s/framework.AllNodes\b/e2edebug.AllNodes/" \
    -e "s/framework.AllNodesReady\b/e2enode.AllNodesReady/" \
    -e "s/framework.ContainerResourceGatherer\b/e2edebug.ContainerResourceGatherer/" \
    -e "s/framework.ContainerResourceUsage\b/e2edebug.ContainerResourceUsage/" \
    -e "s/framework.CreateEmptyFileOnPod\b/e2eoutput.CreateEmptyFileOnPod/" \
    -e "s/framework.DefaultPodDeletionTimeout\b/e2epod.DefaultPodDeletionTimeout/" \
    -e "s/framework.DumpAllNamespaceInfo\b/e2edebug.DumpAllNamespaceInfo/" \
    -e "s/framework.DumpDebugInfo\b/e2eoutput.DumpDebugInfo/" \
    -e "s/framework.DumpNodeDebugInfo\b/e2edebug.DumpNodeDebugInfo/" \
    -e "s/framework.EtcdUpgrade\b/e2eproviders.EtcdUpgrade/" \
    -e "s/framework.EventsLister\b/e2edebug.EventsLister/" \
    -e "s/framework.ExecOptions\b/e2epod.ExecOptions/" \
    -e "s/framework.ExpectNodeHasLabel\b/e2enode.ExpectNodeHasLabel/" \
    -e "s/framework.ExpectNodeHasTaint\b/e2enode.ExpectNodeHasTaint/" \
    -e "s/framework.GCEUpgradeScript\b/e2eproviders.GCEUpgradeScript/" \
    -e "s/framework.ImagePrePullList\b/e2epod.ImagePrePullList/" \
    -e "s/framework.KubectlBuilder\b/e2ekubectl.KubectlBuilder/" \
    -e "s/framework.LocationParamGKE\b/e2eproviders.LocationParamGKE/" \
    -e "s/framework.LogSizeDataTimeseries\b/e2edebug.LogSizeDataTimeseries/" \
    -e "s/framework.LogSizeGatherer\b/e2edebug.LogSizeGatherer/" \
    -e "s/framework.LogsSizeData\b/e2edebug.LogsSizeData/" \
    -e "s/framework.LogsSizeDataSummary\b/e2edebug.LogsSizeDataSummary/" \
    -e "s/framework.LogsSizeVerifier\b/e2edebug.LogsSizeVerifier/" \
    -e "s/framework.LookForStringInLog\b/e2eoutput.LookForStringInLog/" \
    -e "s/framework.LookForStringInPodExec\b/e2eoutput.LookForStringInPodExec/" \
    -e "s/framework.LookForStringInPodExecToContainer\b/e2eoutput.LookForStringInPodExecToContainer/" \
    -e "s/framework.MasterAndDNSNodes\b/e2edebug.MasterAndDNSNodes/" \
    -e "s/framework.MasterNodes\b/e2edebug.MasterNodes/" \
    -e "s/framework.MasterUpgradeGKE\b/e2eproviders.MasterUpgradeGKE/" \
    -e "s/framework.NewKubectlCommand\b/e2ekubectl.NewKubectlCommand/" \
    -e "s/framework.NewLogsVerifier\b/e2edebug.NewLogsVerifier/" \
    -e "s/framework.NewNodeKiller\b/e2enode.NewNodeKiller/" \
    -e "s/framework.NewResourceUsageGatherer\b/e2edebug.NewResourceUsageGatherer/" \
    -e "s/framework.NodeHasTaint\b/e2enode.NodeHasTaint/" \
    -e "s/framework.NodeKiller\b/e2enode.NodeKiller/" \
    -e "s/framework.NodesSet\b/e2edebug.NodesSet/" \
    -e "s/framework.PodClient\b/e2epod.PodClient/" \
    -e "s/framework.RemoveLabelOffNode\b/e2enode.RemoveLabelOffNode/" \
    -e "s/framework.ResourceConstraint\b/e2edebug.ResourceConstraint/" \
    -e "s/framework.ResourceGathererOptions\b/e2edebug.ResourceGathererOptions/" \
    -e "s/framework.ResourceUsagePerContainer\b/e2edebug.ResourceUsagePerContainer/" \
    -e "s/framework.ResourceUsageSummary\b/e2edebug.ResourceUsageSummary/" \
    -e "s/framework.RunHostCmd\b/e2eoutput.RunHostCmd/" \
    -e "s/framework.RunHostCmdOrDie\b/e2eoutput.RunHostCmdOrDie/" \
    -e "s/framework.RunHostCmdWithFullOutput\b/e2eoutput.RunHostCmdWithFullOutput/" \
    -e "s/framework.RunHostCmdWithRetries\b/e2eoutput.RunHostCmdWithRetries/" \
    -e "s/framework.RunKubectl\b/e2ekubectl.RunKubectl/" \
    -e "s/framework.RunKubectlInput\b/e2ekubectl.RunKubectlInput/" \
    -e "s/framework.RunKubectlOrDie\b/e2ekubectl.RunKubectlOrDie/" \
    -e "s/framework.RunKubectlOrDieInput\b/e2ekubectl.RunKubectlOrDieInput/" \
    -e "s/framework.RunKubectlWithFullOutput\b/e2ekubectl.RunKubectlWithFullOutput/" \
    -e "s/framework.RunKubemciCmd\b/e2ekubectl.RunKubemciCmd/" \
    -e "s/framework.RunKubemciWithKubeconfig\b/e2ekubectl.RunKubemciWithKubeconfig/" \
    -e "s/framework.SingleContainerSummary\b/e2edebug.SingleContainerSummary/" \
    -e "s/framework.SingleLogSummary\b/e2edebug.SingleLogSummary/" \
    -e "s/framework.TimestampedSize\b/e2edebug.TimestampedSize/" \
    -e "s/framework.WaitForAllNodesSchedulable\b/e2enode.WaitForAllNodesSchedulable/" \
    -e "s/framework.WaitForSSHTunnels\b/e2enode.WaitForSSHTunnels/" \
    -e "s/framework.WorkItem\b/e2edebug.WorkItem/" \
    "$@"

for i in "$@"; do
    # Import all sub packages and let goimports figure out which of those
    # are redundant (= already imported) or not needed.
    sed -i -e '/"k8s.io.kubernetes.test.e2e.framework"/a e2edebug "k8s.io/kubernetes/test/e2e/framework/debug"' "$i"
    sed -i -e '/"k8s.io.kubernetes.test.e2e.framework"/a e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"' "$i"
    sed -i -e '/"k8s.io.kubernetes.test.e2e.framework"/a e2enode "k8s.io/kubernetes/test/e2e/framework/node"' "$i"
    sed -i -e '/"k8s.io.kubernetes.test.e2e.framework"/a e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"' "$i"
    sed -i -e '/"k8s.io.kubernetes.test.e2e.framework"/a e2epod "k8s.io/kubernetes/test/e2e/framework/pod"' "$i"
    sed -i -e '/"k8s.io.kubernetes.test.e2e.framework"/a e2eproviders "k8s.io/kubernetes/test/e2e/framework/providers"' "$i"
    goimports -w "$i"
done
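
As an illustration (not part of the commit), this is what one of the rules does to a single line: the receiver captured by the first group is re-inserted as the new first argument, and goimports (which also applies gofmt formatting) then drops the empty trailing argument slot and removes whichever of the blindly added imports a file does not actually use:

    # Illustrative only: run one rule from e2e-framework-sed.sh over a sample line.
    echo 'p := f.PodClient().CreateSync(pod)' |
        sed -e "s/\(f\|fr\|\w\w*\.[fF]\w*\)\.PodClient(/e2epod.NewPodClient(\1, /"
    # prints: p := e2epod.NewPodClient(f, ).CreateSync(pod)
    # after goimports/gofmt (see the hunks below): p := e2epod.NewPodClient(f).CreateSync(pod)
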
Author: Patrick Ohly
Date:   2022-09-08 16:04:17 +02:00
parent 92047da152
commit dfdf88d4fa
160 changed files with 822 additions and 715 deletions
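
Because the rewrite is purely mechanical, it is easy to spot-check. For example (again illustrative, not part of the commit), grepping for a few of the renamed helpers should come back empty once the script and goimports have run, or point at call sites the regular expressions missed:

    # Illustrative only: look for leftover references to the moved helpers.
    git grep -nE 'framework\.(RunKubectl|RunKubectlOrDie|DumpAllNamespaceInfo|WaitForAllNodesSchedulable)\(' -- 'test/e2e*'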

@@ -42,6 +42,7 @@ import (
 "k8s.io/client-go/rest"
 "k8s.io/kube-openapi/pkg/validation/spec"
 "k8s.io/kubernetes/test/e2e/framework"
+e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
 "k8s.io/kubernetes/test/utils/crd"
 admissionapi "k8s.io/pod-security-admission/api"
 )
@@ -76,22 +77,22 @@ var _ = SIGDescribe("CustomResourcePublishOpenAPI [Privileged:ClusterAdmin]", fu
 ginkgo.By("kubectl validation (kubectl create and apply) allows request with known and required properties")
 validCR := fmt.Sprintf(`{%s,"spec":{"bars":[{"name":"test-bar"}]}}`, meta)
-if _, err := framework.RunKubectlInput(f.Namespace.Name, validCR, ns, "create", "-f", "-"); err != nil {
+if _, err := e2ekubectl.RunKubectlInput(f.Namespace.Name, validCR, ns, "create", "-f", "-"); err != nil {
 framework.Failf("failed to create valid CR %s: %v", validCR, err)
 }
-if _, err := framework.RunKubectl(f.Namespace.Name, ns, "delete", crd.Crd.Spec.Names.Plural, "test-foo"); err != nil {
+if _, err := e2ekubectl.RunKubectl(f.Namespace.Name, ns, "delete", crd.Crd.Spec.Names.Plural, "test-foo"); err != nil {
 framework.Failf("failed to delete valid CR: %v", err)
 }
-if _, err := framework.RunKubectlInput(f.Namespace.Name, validCR, ns, "apply", "-f", "-"); err != nil {
+if _, err := e2ekubectl.RunKubectlInput(f.Namespace.Name, validCR, ns, "apply", "-f", "-"); err != nil {
 framework.Failf("failed to apply valid CR %s: %v", validCR, err)
 }
-if _, err := framework.RunKubectl(f.Namespace.Name, ns, "delete", crd.Crd.Spec.Names.Plural, "test-foo"); err != nil {
+if _, err := e2ekubectl.RunKubectl(f.Namespace.Name, ns, "delete", crd.Crd.Spec.Names.Plural, "test-foo"); err != nil {
 framework.Failf("failed to delete valid CR: %v", err)
 }
 ginkgo.By("kubectl validation (kubectl create and apply) rejects request with value outside defined enum values")
 badEnumValueCR := fmt.Sprintf(`{%s,"spec":{"bars":[{"name":"test-bar", "feeling":"NonExistentValue"}]}}`, meta)
-if _, err := framework.RunKubectlInput(f.Namespace.Name, badEnumValueCR, ns, "create", "-f", "-"); err == nil || !strings.Contains(err.Error(), `Unsupported value: "NonExistentValue"`) {
+if _, err := e2ekubectl.RunKubectlInput(f.Namespace.Name, badEnumValueCR, ns, "create", "-f", "-"); err == nil || !strings.Contains(err.Error(), `Unsupported value: "NonExistentValue"`) {
 framework.Failf("unexpected no error when creating CR with unknown enum value: %v", err)
 }
@@ -99,20 +100,20 @@ var _ = SIGDescribe("CustomResourcePublishOpenAPI [Privileged:ClusterAdmin]", fu
 // Because server-side is default in beta but not GA yet, we will produce different behaviors in the default vs GA only conformance tests. We have made the error generic enough to pass both, but should go back and make the error more specific once server-side validation goes GA.
 ginkgo.By("kubectl validation (kubectl create and apply) rejects request with unknown properties when disallowed by the schema")
 unknownCR := fmt.Sprintf(`{%s,"spec":{"foo":true}}`, meta)
-if _, err := framework.RunKubectlInput(f.Namespace.Name, unknownCR, ns, "create", "-f", "-"); err == nil || (!strings.Contains(err.Error(), `unknown field "foo"`) && !strings.Contains(err.Error(), `unknown field "spec.foo"`)) {
+if _, err := e2ekubectl.RunKubectlInput(f.Namespace.Name, unknownCR, ns, "create", "-f", "-"); err == nil || (!strings.Contains(err.Error(), `unknown field "foo"`) && !strings.Contains(err.Error(), `unknown field "spec.foo"`)) {
 framework.Failf("unexpected no error when creating CR with unknown field: %v", err)
 }
-if _, err := framework.RunKubectlInput(f.Namespace.Name, unknownCR, ns, "apply", "-f", "-"); err == nil || (!strings.Contains(err.Error(), `unknown field "foo"`) && !strings.Contains(err.Error(), `unknown field "spec.foo"`)) {
+if _, err := e2ekubectl.RunKubectlInput(f.Namespace.Name, unknownCR, ns, "apply", "-f", "-"); err == nil || (!strings.Contains(err.Error(), `unknown field "foo"`) && !strings.Contains(err.Error(), `unknown field "spec.foo"`)) {
 framework.Failf("unexpected no error when applying CR with unknown field: %v", err)
 }
 // TODO: see above note, we should check the value of the error once server-side validation is GA.
 ginkgo.By("kubectl validation (kubectl create and apply) rejects request without required properties")
 noRequireCR := fmt.Sprintf(`{%s,"spec":{"bars":[{"age":"10"}]}}`, meta)
-if _, err := framework.RunKubectlInput(f.Namespace.Name, noRequireCR, ns, "create", "-f", "-"); err == nil || (!strings.Contains(err.Error(), `missing required field "name"`) && !strings.Contains(err.Error(), `spec.bars[0].name: Required value`)) {
+if _, err := e2ekubectl.RunKubectlInput(f.Namespace.Name, noRequireCR, ns, "create", "-f", "-"); err == nil || (!strings.Contains(err.Error(), `missing required field "name"`) && !strings.Contains(err.Error(), `spec.bars[0].name: Required value`)) {
 framework.Failf("unexpected no error when creating CR without required field: %v", err)
 }
-if _, err := framework.RunKubectlInput(f.Namespace.Name, noRequireCR, ns, "apply", "-f", "-"); err == nil || (!strings.Contains(err.Error(), `missing required field "name"`) && !strings.Contains(err.Error(), `spec.bars[0].name: Required value`)) {
+if _, err := e2ekubectl.RunKubectlInput(f.Namespace.Name, noRequireCR, ns, "apply", "-f", "-"); err == nil || (!strings.Contains(err.Error(), `missing required field "name"`) && !strings.Contains(err.Error(), `spec.bars[0].name: Required value`)) {
 framework.Failf("unexpected no error when applying CR without required field: %v", err)
 }
@@ -133,7 +134,7 @@ var _ = SIGDescribe("CustomResourcePublishOpenAPI [Privileged:ClusterAdmin]", fu
 }
 ginkgo.By("kubectl explain works to return error when explain is called on property that doesn't exist")
-if _, err := framework.RunKubectl(f.Namespace.Name, "explain", crd.Crd.Spec.Names.Plural+".spec.bars2"); err == nil || !strings.Contains(err.Error(), `field "bars2" does not exist`) {
+if _, err := e2ekubectl.RunKubectl(f.Namespace.Name, "explain", crd.Crd.Spec.Names.Plural+".spec.bars2"); err == nil || !strings.Contains(err.Error(), `field "bars2" does not exist`) {
 framework.Failf("unexpected no error when explaining property that doesn't exist: %v", err)
 }
@@ -160,16 +161,16 @@ var _ = SIGDescribe("CustomResourcePublishOpenAPI [Privileged:ClusterAdmin]", fu
 ginkgo.By("kubectl validation (kubectl create and apply) allows request with any unknown properties")
 randomCR := fmt.Sprintf(`{%s,"a":{"b":[{"c":"d"}]}}`, meta)
-if _, err := framework.RunKubectlInput(f.Namespace.Name, randomCR, ns, "create", "-f", "-"); err != nil {
+if _, err := e2ekubectl.RunKubectlInput(f.Namespace.Name, randomCR, ns, "create", "-f", "-"); err != nil {
 framework.Failf("failed to create random CR %s for CRD without schema: %v", randomCR, err)
 }
-if _, err := framework.RunKubectl(f.Namespace.Name, ns, "delete", crd.Crd.Spec.Names.Plural, "test-cr"); err != nil {
+if _, err := e2ekubectl.RunKubectl(f.Namespace.Name, ns, "delete", crd.Crd.Spec.Names.Plural, "test-cr"); err != nil {
 framework.Failf("failed to delete random CR: %v", err)
 }
-if _, err := framework.RunKubectlInput(f.Namespace.Name, randomCR, ns, "apply", "-f", "-"); err != nil {
+if _, err := e2ekubectl.RunKubectlInput(f.Namespace.Name, randomCR, ns, "apply", "-f", "-"); err != nil {
 framework.Failf("failed to apply random CR %s for CRD without schema: %v", randomCR, err)
 }
-if _, err := framework.RunKubectl(f.Namespace.Name, ns, "delete", crd.Crd.Spec.Names.Plural, "test-cr"); err != nil {
+if _, err := e2ekubectl.RunKubectl(f.Namespace.Name, ns, "delete", crd.Crd.Spec.Names.Plural, "test-cr"); err != nil {
 framework.Failf("failed to delete random CR: %v", err)
 }
@@ -201,16 +202,16 @@ var _ = SIGDescribe("CustomResourcePublishOpenAPI [Privileged:ClusterAdmin]", fu
 ginkgo.By("kubectl validation (kubectl create and apply) allows request with any unknown properties")
 randomCR := fmt.Sprintf(`{%s,"a":{"b":[{"c":"d"}]}}`, meta)
-if _, err := framework.RunKubectlInput(f.Namespace.Name, randomCR, ns, "create", "-f", "-"); err != nil {
+if _, err := e2ekubectl.RunKubectlInput(f.Namespace.Name, randomCR, ns, "create", "-f", "-"); err != nil {
 framework.Failf("failed to create random CR %s for CRD that allows unknown properties at the root: %v", randomCR, err)
 }
-if _, err := framework.RunKubectl(f.Namespace.Name, ns, "delete", crd.Crd.Spec.Names.Plural, "test-cr"); err != nil {
+if _, err := e2ekubectl.RunKubectl(f.Namespace.Name, ns, "delete", crd.Crd.Spec.Names.Plural, "test-cr"); err != nil {
 framework.Failf("failed to delete random CR: %v", err)
 }
-if _, err := framework.RunKubectlInput(f.Namespace.Name, randomCR, ns, "apply", "-f", "-"); err != nil {
+if _, err := e2ekubectl.RunKubectlInput(f.Namespace.Name, randomCR, ns, "apply", "-f", "-"); err != nil {
 framework.Failf("failed to apply random CR %s for CRD without schema: %v", randomCR, err)
 }
-if _, err := framework.RunKubectl(f.Namespace.Name, ns, "delete", crd.Crd.Spec.Names.Plural, "test-cr"); err != nil {
+if _, err := e2ekubectl.RunKubectl(f.Namespace.Name, ns, "delete", crd.Crd.Spec.Names.Plural, "test-cr"); err != nil {
 framework.Failf("failed to delete random CR: %v", err)
 }
@@ -243,16 +244,16 @@ var _ = SIGDescribe("CustomResourcePublishOpenAPI [Privileged:ClusterAdmin]", fu
 ginkgo.By("kubectl validation (kubectl create and apply) allows request with any unknown properties")
 randomCR := fmt.Sprintf(`{%s,"spec":{"a":null,"b":[{"c":"d"}]}}`, meta)
-if _, err := framework.RunKubectlInput(f.Namespace.Name, randomCR, ns, "create", "-f", "-"); err != nil {
+if _, err := e2ekubectl.RunKubectlInput(f.Namespace.Name, randomCR, ns, "create", "-f", "-"); err != nil {
 framework.Failf("failed to create random CR %s for CRD that allows unknown properties in a nested object: %v", randomCR, err)
 }
-if _, err := framework.RunKubectl(f.Namespace.Name, ns, "delete", crd.Crd.Spec.Names.Plural, "test-cr"); err != nil {
+if _, err := e2ekubectl.RunKubectl(f.Namespace.Name, ns, "delete", crd.Crd.Spec.Names.Plural, "test-cr"); err != nil {
 framework.Failf("failed to delete random CR: %v", err)
 }
-if _, err := framework.RunKubectlInput(f.Namespace.Name, randomCR, ns, "apply", "-f", "-"); err != nil {
+if _, err := e2ekubectl.RunKubectlInput(f.Namespace.Name, randomCR, ns, "apply", "-f", "-"); err != nil {
 framework.Failf("failed to apply random CR %s for CRD without schema: %v", randomCR, err)
 }
-if _, err := framework.RunKubectl(f.Namespace.Name, ns, "delete", crd.Crd.Spec.Names.Plural, "test-cr"); err != nil {
+if _, err := e2ekubectl.RunKubectl(f.Namespace.Name, ns, "delete", crd.Crd.Spec.Names.Plural, "test-cr"); err != nil {
 framework.Failf("failed to delete random CR: %v", err)
 }
@@ -715,7 +716,7 @@ func dropDefaults(s *spec.Schema) {
 }
 func verifyKubectlExplain(ns, name, pattern string) error {
-result, err := framework.RunKubectl(ns, "explain", name)
+result, err := e2ekubectl.RunKubectl(ns, "explain", name)
 if err != nil {
 return fmt.Errorf("failed to explain %s: %v", name, err)
 }

@@ -44,6 +44,7 @@ import (
 "k8s.io/client-go/util/retry"
 "k8s.io/kubernetes/test/e2e/framework"
 e2edeployment "k8s.io/kubernetes/test/e2e/framework/deployment"
+e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
 e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 "k8s.io/kubernetes/test/utils/crd"
 imageutils "k8s.io/kubernetes/test/utils/image"
@@ -1183,7 +1184,7 @@ func testAttachingPodWebhook(f *framework.Framework) {
 ginkgo.By("'kubectl attach' the pod, should be denied by the webhook")
 timer := time.NewTimer(30 * time.Second)
 defer timer.Stop()
-_, err = framework.NewKubectlCommand(f.Namespace.Name, "attach", fmt.Sprintf("--namespace=%v", f.Namespace.Name), pod.Name, "-i", "-c=container1").WithTimeout(timer.C).Exec()
+_, err = e2ekubectl.NewKubectlCommand(f.Namespace.Name, "attach", fmt.Sprintf("--namespace=%v", f.Namespace.Name), pod.Name, "-i", "-c=container1").WithTimeout(timer.C).Exec()
 framework.ExpectError(err, "'kubectl attach' the pod, should be denied by the webhook")
 if e, a := "attaching to pod 'to-be-attached-pod' is not allowed", err.Error(); !strings.Contains(a, e) {
 framework.Failf("unexpected 'kubectl attach' error message. expected to contain %q, got %q", e, a)

@@ -35,6 +35,7 @@ import (
 "k8s.io/kubernetes/pkg/cluster/ports"
 kubeschedulerconfig "k8s.io/kubernetes/pkg/scheduler/apis/config"
 "k8s.io/kubernetes/test/e2e/framework"
+e2edebug "k8s.io/kubernetes/test/e2e/framework/debug"
 e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 e2erc "k8s.io/kubernetes/test/e2e/framework/rc"
 e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
@@ -335,7 +336,7 @@ var _ = SIGDescribe("DaemonRestart [Disruptive]", func() {
 }
 postRestarts, badNodes := getContainerRestarts(f.ClientSet, ns, labelSelector)
 if postRestarts != preRestarts {
-framework.DumpNodeDebugInfo(f.ClientSet, badNodes, framework.Logf)
+e2edebug.DumpNodeDebugInfo(f.ClientSet, badNodes, framework.Logf)
 framework.Failf("Net container restart count went from %v -> %v after kubelet restart on nodes %v \n\n %+v", preRestarts, postRestarts, badNodes, tracker)
 }
 })

@@ -53,6 +53,7 @@ import (
 "k8s.io/kubernetes/pkg/controller/daemon"
 "k8s.io/kubernetes/test/e2e/framework"
 e2edaemonset "k8s.io/kubernetes/test/e2e/framework/daemonset"
+e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
 e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 e2eresource "k8s.io/kubernetes/test/e2e/framework/resource"
 admissionapi "k8s.io/pod-security-admission/api"
@@ -770,7 +771,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
 return pod.DeletionTimestamp == nil && oldVersion == pod.Spec.Containers[0].Env[0].Value
 }); pod != nil {
 // make the /tmp/ready file read only, which will cause readiness to fail
-if _, err := framework.RunKubectl(pod.Namespace, "exec", "-c", pod.Spec.Containers[0].Name, pod.Name, "--", "/bin/sh", "-ec", "echo 0 > /var/tmp/ready"); err != nil {
+if _, err := e2ekubectl.RunKubectl(pod.Namespace, "exec", "-c", pod.Spec.Containers[0].Name, pod.Name, "--", "/bin/sh", "-ec", "echo 0 > /var/tmp/ready"); err != nil {
 framework.Logf("Failed to mark pod %s as unready via exec: %v", pod.Name, err)
 } else {
 framework.Logf("Marked old pod %s as unready", pod.Name)

@@ -19,10 +19,11 @@ package apps
 import (
 "context"
 "fmt"
-"github.com/onsi/gomega"
 "strings"
 "time"
+"github.com/onsi/gomega"
 jsonpatch "github.com/evanphx/json-patch"
 "github.com/onsi/ginkgo/v2"

@@ -351,7 +351,7 @@ var _ = SIGDescribe("Job", func() {
 framework.ExpectNoError(err, "failed to get PodList for job %s in namespace: %s", job.Name, f.Namespace.Name)
 gomega.Expect(pods.Items).To(gomega.HaveLen(int(parallelism)))
 pod := pods.Items[0]
-f.PodClient().Update(pod.Name, func(pod *v1.Pod) {
+e2epod.NewPodClient(f).Update(pod.Name, func(pod *v1.Pod) {
 pod.OwnerReferences = nil
 })
@@ -370,7 +370,7 @@
 )).To(gomega.Succeed(), "wait for pod %q to be readopted", pod.Name)
 ginkgo.By("Removing the labels from the Job's Pod")
-f.PodClient().Update(pod.Name, func(pod *v1.Pod) {
+e2epod.NewPodClient(f).Update(pod.Name, func(pod *v1.Pod) {
 pod.Labels = nil
 })

@@ -596,7 +596,7 @@ func testReplicationControllerConditionCheck(f *framework.Framework) {
 func testRCAdoptMatchingOrphans(f *framework.Framework) {
 name := "pod-adoption"
 ginkgo.By(fmt.Sprintf("Given a Pod with a 'name' label %s is created", name))
-p := f.PodClient().CreateSync(&v1.Pod{
+p := e2epod.NewPodClient(f).CreateSync(&v1.Pod{
 ObjectMeta: metav1.ObjectMeta{
 Name: name,
 Labels: map[string]string{

@@ -323,7 +323,7 @@ func testReplicaSetConditionCheck(f *framework.Framework) {
 func testRSAdoptMatchingAndReleaseNotMatching(f *framework.Framework) {
 name := "pod-adoption-release"
 ginkgo.By(fmt.Sprintf("Given a Pod with a 'name' label %s is created", name))
-p := f.PodClient().CreateSync(&v1.Pod{
+p := e2epod.NewPodClient(f).CreateSync(&v1.Pod{
 ObjectMeta: metav1.ObjectMeta{
 Name: name,
 Labels: map[string]string{

@@ -45,8 +45,10 @@ import (
 watchtools "k8s.io/client-go/tools/watch"
 "k8s.io/client-go/util/retry"
 "k8s.io/kubernetes/test/e2e/framework"
+e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
 e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
+e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
 e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
 e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
 e2estatefulset "k8s.io/kubernetes/test/e2e/framework/statefulset"
@@ -121,7 +123,7 @@ var _ = SIGDescribe("StatefulSet", func() {
 ginkgo.AfterEach(func() {
 if ginkgo.CurrentSpecReport().Failed() {
-framework.DumpDebugInfo(c, ns)
+e2eoutput.DumpDebugInfo(c, ns)
 }
 framework.Logf("Deleting all statefulset in ns %v", ns)
 e2estatefulset.DeleteAllStatefulSets(c, ns)
@@ -195,7 +197,7 @@
 framework.ExpectEqual(controllerRef.UID, ss.UID)
 ginkgo.By("Orphaning one of the stateful set's pods")
-f.PodClient().Update(pod.Name, func(pod *v1.Pod) {
+e2epod.NewPodClient(f).Update(pod.Name, func(pod *v1.Pod) {
 pod.OwnerReferences = nil
 })
@@ -215,7 +217,7 @@
 ginkgo.By("Removing the labels from one of the stateful set's pods")
 prevLabels := pod.Labels
-f.PodClient().Update(pod.Name, func(pod *v1.Pod) {
+e2epod.NewPodClient(f).Update(pod.Name, func(pod *v1.Pod) {
 pod.Labels = nil
 })
@@ -232,7 +234,7 @@
 // If we don't do this, the test leaks the Pod and PVC.
 ginkgo.By("Readding labels to the stateful set's pod")
-f.PodClient().Update(pod.Name, func(pod *v1.Pod) {
+e2epod.NewPodClient(f).Update(pod.Name, func(pod *v1.Pod) {
 pod.Labels = prevLabels
 })
@@ -1108,7 +1110,7 @@
 ginkgo.AfterEach(func() {
 if ginkgo.CurrentSpecReport().Failed() {
-framework.DumpDebugInfo(c, ns)
+e2eoutput.DumpDebugInfo(c, ns)
 }
 framework.Logf("Deleting all statefulset in ns %v", ns)
 e2estatefulset.DeleteAllStatefulSets(c, ns)
@@ -1201,7 +1203,7 @@
 e2estatefulset.WaitForStatusAvailableReplicas(c, ss, 2)
 ginkgo.By("check availableReplicas are shown in status")
-out, err := framework.RunKubectl(ns, "get", "statefulset", ss.Name, "-o=yaml")
+out, err := e2ekubectl.RunKubectl(ns, "get", "statefulset", ss.Name, "-o=yaml")
 framework.ExpectNoError(err)
 if !strings.Contains(out, "availableReplicas: 2") {
 framework.Failf("invalid number of availableReplicas: expected=%v received=%v", 2, out)
@@ -1231,7 +1233,7 @@
 ginkgo.AfterEach(func() {
 if ginkgo.CurrentSpecReport().Failed() {
-framework.DumpDebugInfo(c, ns)
+e2eoutput.DumpDebugInfo(c, ns)
 }
 framework.Logf("Deleting all statefulset in ns %v", ns)
 e2estatefulset.DeleteAllStatefulSets(c, ns)
@@ -1350,7 +1352,7 @@
 func kubectlExecWithRetries(ns string, args ...string) (out string) {
 var err error
 for i := 0; i < 3; i++ {
-if out, err = framework.RunKubectl(ns, args...); err == nil {
+if out, err = e2ekubectl.RunKubectl(ns, args...); err == nil {
 return
 }
 framework.Logf("Retrying %v:\nerror %v\nstdout %v", args, err, out)
@@ -1414,14 +1416,14 @@ func (z *zookeeperTester) write(statefulPodIndex int, kv map[string]string) {
 name := fmt.Sprintf("%v-%d", z.ss.Name, statefulPodIndex)
 for k, v := range kv {
 cmd := fmt.Sprintf("/opt/zookeeper/bin/zkCli.sh create /%v %v", k, v)
-framework.Logf(framework.RunKubectlOrDie(z.ss.Namespace, "exec", name, "--", "/bin/sh", "-c", cmd))
+framework.Logf(e2ekubectl.RunKubectlOrDie(z.ss.Namespace, "exec", name, "--", "/bin/sh", "-c", cmd))
 }
 }
 func (z *zookeeperTester) read(statefulPodIndex int, key string) string {
 name := fmt.Sprintf("%v-%d", z.ss.Name, statefulPodIndex)
 cmd := fmt.Sprintf("/opt/zookeeper/bin/zkCli.sh get /%v", key)
-return lastLine(framework.RunKubectlOrDie(z.ss.Namespace, "exec", name, "--", "/bin/sh", "-c", cmd))
+return lastLine(e2ekubectl.RunKubectlOrDie(z.ss.Namespace, "exec", name, "--", "/bin/sh", "-c", cmd))
 }
 type mysqlGaleraTester struct {
@@ -1478,7 +1480,7 @@ func (m *redisTester) name() string {
 func (m *redisTester) redisExec(cmd, ns, podName string) string {
 cmd = fmt.Sprintf("/opt/redis/redis-cli -h %v %v", podName, cmd)
-return framework.RunKubectlOrDie(ns, "exec", podName, "--", "/bin/sh", "-c", cmd)
+return e2ekubectl.RunKubectlOrDie(ns, "exec", podName, "--", "/bin/sh", "-c", cmd)
 }
 func (m *redisTester) deploy(ns string) *appsv1.StatefulSet {
@@ -1509,7 +1511,7 @@ func (c *cockroachDBTester) name() string {
 func (c *cockroachDBTester) cockroachDBExec(cmd, ns, podName string) string {
 cmd = fmt.Sprintf("/cockroach/cockroach sql --insecure --host %s.cockroachdb -e \"%v\"", podName, cmd)
-return framework.RunKubectlOrDie(ns, "exec", podName, "--", "/bin/sh", "-c", cmd)
+return e2ekubectl.RunKubectlOrDie(ns, "exec", podName, "--", "/bin/sh", "-c", cmd)
 }
 func (c *cockroachDBTester) deploy(ns string) *appsv1.StatefulSet {
@@ -1710,7 +1712,7 @@ func breakPodHTTPProbe(ss *appsv1.StatefulSet, pod *v1.Pod) error {
 }
 // Ignore 'mv' errors to make this idempotent.
 cmd := fmt.Sprintf("mv -v /usr/local/apache2/htdocs%v /tmp/ || true", path)
-stdout, err := framework.RunHostCmdWithRetries(pod.Namespace, pod.Name, cmd, statefulSetPoll, statefulPodTimeout)
+stdout, err := e2eoutput.RunHostCmdWithRetries(pod.Namespace, pod.Name, cmd, statefulSetPoll, statefulPodTimeout)
 framework.Logf("stdout of %v on %v: %v", cmd, pod.Name, stdout)
 return err
 }
@@ -1734,7 +1736,7 @@ func restorePodHTTPProbe(ss *appsv1.StatefulSet, pod *v1.Pod) error {
 }
 // Ignore 'mv' errors to make this idempotent.
 cmd := fmt.Sprintf("mv -v /tmp%v /usr/local/apache2/htdocs/ || true", path)
-stdout, err := framework.RunHostCmdWithRetries(pod.Namespace, pod.Name, cmd, statefulSetPoll, statefulPodTimeout)
+stdout, err := e2eoutput.RunHostCmdWithRetries(pod.Namespace, pod.Name, cmd, statefulSetPoll, statefulPodTimeout)
 framework.Logf("stdout of %v on %v: %v", cmd, pod.Name, stdout)
 return err
 }

@@ -37,7 +37,7 @@ var _ = SIGDescribe("Conformance Tests", func() {
 */
 framework.ConformanceIt("should have at least two untainted nodes", func() {
 ginkgo.By("Getting node addresses")
-framework.ExpectNoError(framework.WaitForAllNodesSchedulable(f.ClientSet, 10*time.Minute))
+framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(f.ClientSet, 10*time.Minute))
 nodeList, err := e2enode.GetReadySchedulableNodes(f.ClientSet)
 framework.ExpectNoError(err)
 if len(nodeList.Items) < 2 {

@@ -28,6 +28,7 @@ import (
 "k8s.io/kubernetes/test/e2e/framework"
 e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
+e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
 admissionapi "k8s.io/pod-security-admission/api"
 "github.com/onsi/ginkgo/v2"
@@ -60,7 +61,7 @@ var _ = SIGDescribe("[Feature:NodeAuthenticator]", func() {
 for _, nodeIP := range nodeIPs {
 // Anonymous authentication is disabled by default
 host := net.JoinHostPort(nodeIP, strconv.Itoa(ports.KubeletPort))
-result := framework.RunHostCmdOrDie(ns, pod.Name, fmt.Sprintf("curl -sIk -o /dev/null -w '%s' https://%s/metrics", "%{http_code}", host))
+result := e2eoutput.RunHostCmdOrDie(ns, pod.Name, fmt.Sprintf("curl -sIk -o /dev/null -w '%s' https://%s/metrics", "%{http_code}", host))
 gomega.Expect(result).To(gomega.Or(gomega.Equal("401"), gomega.Equal("403")), "the kubelet's main port 10250 should reject requests with no credentials")
 }
 })
@@ -82,7 +83,7 @@ var _ = SIGDescribe("[Feature:NodeAuthenticator]", func() {
 for _, nodeIP := range nodeIPs {
 host := net.JoinHostPort(nodeIP, strconv.Itoa(ports.KubeletPort))
-result := framework.RunHostCmdOrDie(ns,
+result := e2eoutput.RunHostCmdOrDie(ns,
 pod.Name,
 fmt.Sprintf("curl -sIk -o /dev/null -w '%s' --header \"Authorization: Bearer `%s`\" https://%s/metrics",
 "%{http_code}",
@@ -96,5 +97,5 @@ var _ = SIGDescribe("[Feature:NodeAuthenticator]", func() {
 func createNodeAuthTestPod(f *framework.Framework) *v1.Pod {
 pod := e2epod.NewAgnhostPod(f.Namespace.Name, "agnhost-pod", nil, nil, nil)
 pod.ObjectMeta.GenerateName = "test-node-authn-"
-return f.PodClient().CreateSync(pod)
+return e2epod.NewPodClient(f).CreateSync(pod)
 }

@@ -19,9 +19,10 @@ package auth
 import (
 "context"
 "fmt"
-apierrors "k8s.io/apimachinery/pkg/api/errors"
 "time"
+apierrors "k8s.io/apimachinery/pkg/api/errors"
 v1 "k8s.io/api/core/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/util/wait"

@@ -41,6 +41,7 @@ import (
 "k8s.io/kubernetes/test/e2e/framework"
 e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
 e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
+e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
 e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
 imageutils "k8s.io/kubernetes/test/utils/image"
 admissionapi "k8s.io/pod-security-admission/api"
@@ -316,7 +317,7 @@
 fmt.Sprintf("content of file \"%v\": %s", tokenVolumePath, `[A-Za-z0-9-_=]+\.[A-Za-z0-9-_=]+\.?[A-Za-z0-9-_.+/=]*`),
 }
-f.TestContainerOutputRegexp("service account token: ", pod, 0, output)
+e2eoutput.TestContainerOutputRegexp(f, "service account token: ", pod, 0, output)
 })
 /*
@@ -424,7 +425,7 @@
 fmt.Sprintf("owner UID of \"%v\": %d", tokenVolumePath, tc.wantUID),
 fmt.Sprintf("owner GID of \"%v\": %d", tokenVolumePath, tc.wantGID),
 }
-f.TestContainerOutputRegexp("service account token: ", pod, 0, output)
+e2eoutput.TestContainerOutputRegexp(f, "service account token: ", pod, 0, output)
 }
 })

@@ -24,7 +24,7 @@ import (
 "strings"
 "time"
-"k8s.io/api/core/v1"
+v1 "k8s.io/api/core/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/fields"
 "k8s.io/apimachinery/pkg/types"

@@ -44,6 +44,7 @@ import (
 clientset "k8s.io/client-go/kubernetes"
 "k8s.io/klog/v2"
 "k8s.io/kubernetes/test/e2e/framework"
+e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
 e2emanifest "k8s.io/kubernetes/test/e2e/framework/manifest"
 e2enetwork "k8s.io/kubernetes/test/e2e/framework/network"
 e2enode "k8s.io/kubernetes/test/e2e/framework/node"
@@ -381,7 +382,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 framework.ExpectNoError(e2enode.WaitForReadyNodes(c, nodeCount+extraNodes, resizeTimeout))
 // We wait for nodes to become schedulable to make sure the new nodes
 // will be returned by getPoolNodes below.
-framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, resizeTimeout))
+framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(c, resizeTimeout))
 klog.Infof("Not enabling cluster autoscaler for the node pool (on purpose).")
 ginkgo.By("Getting memory available on new nodes, so we can account for it when creating RC")
@@ -564,7 +565,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 removeLabels := func(nodesToClean sets.String) {
 ginkgo.By("Removing labels from nodes")
 for node := range nodesToClean {
-framework.RemoveLabelOffNode(c, node, labelKey)
+e2enode.RemoveLabelOffNode(c, node, labelKey)
 }
 }
@@ -575,7 +576,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 ginkgo.By(fmt.Sprintf("Annotating nodes of the smallest MIG(%s): %v", minMig, nodes))
 for node := range nodesSet {
-framework.AddOrUpdateLabelOnNode(c, node, labelKey, labelValue)
+e2enode.AddOrUpdateLabelOnNode(c, node, labelKey, labelValue)
 }
 err = scheduling.CreateNodeSelectorPods(f, "node-selector", minSize+1, map[string]string{labelKey: labelValue}, false)
@@ -593,7 +594,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 if len(newNodesSet) > 1 {
 ginkgo.By(fmt.Sprintf("Spotted following new nodes in %s: %v", minMig, newNodesSet))
 klog.Infof("Usually only 1 new node is expected, investigating")
-klog.Infof("Kubectl:%s\n", framework.RunKubectlOrDie(f.Namespace.Name, "get", "nodes", "-o", "json"))
+klog.Infof("Kubectl:%s\n", e2ekubectl.RunKubectlOrDie(f.Namespace.Name, "get", "nodes", "-o", "json"))
 if output, err := exec.Command("gcloud", "compute", "instances", "list",
 "--project="+framework.TestContext.CloudConfig.ProjectID,
 "--zone="+framework.TestContext.CloudConfig.Zone).Output(); err == nil {
@@ -629,7 +630,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 }
 ginkgo.By(fmt.Sprintf("Setting labels for registered new nodes: %v", registeredNodes.List()))
 for node := range registeredNodes {
-framework.AddOrUpdateLabelOnNode(c, node, labelKey, labelValue)
+e2enode.AddOrUpdateLabelOnNode(c, node, labelKey, labelValue)
 }
 defer removeLabels(registeredNodes)
@@ -1416,8 +1417,8 @@ func waitForCaPodsReadyInNamespace(f *framework.Framework, c clientset.Interface
 klog.Infof("Too many pods are not ready yet: %v", notready)
 }
 klog.Info("Timeout on waiting for pods being ready")
-klog.Info(framework.RunKubectlOrDie(f.Namespace.Name, "get", "pods", "-o", "json", "--all-namespaces"))
-klog.Info(framework.RunKubectlOrDie(f.Namespace.Name, "get", "nodes", "-o", "json"))
+klog.Info(e2ekubectl.RunKubectlOrDie(f.Namespace.Name, "get", "pods", "-o", "json", "--all-namespaces"))
+klog.Info(e2ekubectl.RunKubectlOrDie(f.Namespace.Name, "get", "nodes", "-o", "json"))
 // Some pods are still not running.
 return fmt.Errorf("Too many pods are still not running: %v", notready)

@@ -28,6 +28,7 @@ import (
 clientset "k8s.io/client-go/kubernetes"
 "k8s.io/kubernetes/test/e2e/framework"
 e2enode "k8s.io/kubernetes/test/e2e/framework/node"
+e2eproviders "k8s.io/kubernetes/test/e2e/framework/providers"
 "k8s.io/kubernetes/test/e2e/upgrades"
 "k8s.io/kubernetes/test/utils/junit"
 )
@@ -80,7 +81,7 @@ func controlPlaneUpgrade(f *framework.Framework, v string, extraEnvs []string) e
 case "gce":
 return controlPlaneUpgradeGCE(v, extraEnvs)
 case "gke":
-return framework.MasterUpgradeGKE(f.Namespace.Name, v)
+return e2eproviders.MasterUpgradeGKE(f.Namespace.Name, v)
 default:
 return fmt.Errorf("controlPlaneUpgrade() is not implemented for provider %s", framework.TestContext.Provider)
 }
@@ -101,7 +102,7 @@ func controlPlaneUpgradeGCE(rawV string, extraEnvs []string) error {
 }
 v := "v" + rawV
-_, _, err := framework.RunCmdEnv(env, framework.GCEUpgradeScript(), "-M", v)
+_, _, err := framework.RunCmdEnv(env, e2eproviders.GCEUpgradeScript(), "-M", v)
 return err
 }
@@ -172,10 +173,10 @@ func nodeUpgradeGCE(rawV, img string, extraEnvs []string) error {
 env := append(os.Environ(), extraEnvs...)
 if img != "" {
 env = append(env, "KUBE_NODE_OS_DISTRIBUTION="+img)
-_, _, err := framework.RunCmdEnv(env, framework.GCEUpgradeScript(), "-N", "-o", v)
+_, _, err := framework.RunCmdEnv(env, e2eproviders.GCEUpgradeScript(), "-N", "-o", v)
 return err
 }
-_, _, err := framework.RunCmdEnv(env, framework.GCEUpgradeScript(), "-N", v)
+_, _, err := framework.RunCmdEnv(env, e2eproviders.GCEUpgradeScript(), "-N", v)
 return err
 }
@@ -191,7 +192,7 @@ func nodeUpgradeGKE(namespace string, v string, img string) error {
 "container",
 "clusters",
 fmt.Sprintf("--project=%s", framework.TestContext.CloudConfig.ProjectID),
-framework.LocationParamGKE(),
+e2eproviders.LocationParamGKE(),
 "upgrade",
 framework.TestContext.CloudConfig.Cluster,
 fmt.Sprintf("--node-pool=%s", np),
@@ -207,7 +208,7 @@
 return err
 }
-framework.WaitForSSHTunnels(namespace)
+e2enode.WaitForSSHTunnels(namespace)
 }
 return nil
 }
@@ -217,7 +218,7 @@ func nodePoolsGKE() ([]string, error) {
 "container",
 "node-pools",
 fmt.Sprintf("--project=%s", framework.TestContext.CloudConfig.ProjectID),
-framework.LocationParamGKE(),
+e2eproviders.LocationParamGKE(),
 "list",
 fmt.Sprintf("--cluster=%s", framework.TestContext.CloudConfig.Cluster),
 "--format=get(name)",

@@ -183,7 +183,7 @@ var _ = SIGDescribe("HA-master [Feature:HAMaster]", func() {
 for _, zone := range additionalNodesZones {
 removeWorkerNodes(zone)
 }
-framework.ExpectNoError(framework.AllNodesReady(c, 5*time.Minute))
+framework.ExpectNoError(e2enode.AllNodesReady(c, 5*time.Minute))
 // Clean-up additional master replicas if the test execution was broken.
 for _, zone := range additionalReplicaZones {
@@ -218,7 +218,7 @@
 additionalNodesZones = removeZoneFromZones(additionalNodesZones, zone)
 }
 framework.ExpectNoError(waitForMasters(framework.TestContext.CloudConfig.MasterName, c, len(additionalReplicaZones)+1, 10*time.Minute))
-framework.ExpectNoError(framework.AllNodesReady(c, 5*time.Minute))
+framework.ExpectNoError(e2enode.AllNodesReady(c, 5*time.Minute))
 // Verify that API server works correctly with HA master.
 rcName := "ha-master-" + strconv.Itoa(len(existingRCs))

@@ -41,6 +41,7 @@ import (
 commontest "k8s.io/kubernetes/test/e2e/common"
 "k8s.io/kubernetes/test/e2e/framework"
 "k8s.io/kubernetes/test/e2e/framework/daemonset"
+e2edebug "k8s.io/kubernetes/test/e2e/framework/debug"
 e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
 e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
@@ -223,7 +224,7 @@ func setupSuite() {
 // In large clusters we may get to this point but still have a bunch
 // of nodes without Routes created. Since this would make a node
 // unschedulable, we need to wait until all of them are schedulable.
-framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout))
+framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout))
 // If NumNodes is not specified then auto-detect how many are scheduleable and not tainted
 if framework.TestContext.CloudConfig.NumNodes == framework.DefaultNumNodes {
@@ -242,7 +243,7 @@
 // wasting the whole run), we allow for some not-ready pods (with the
 // number equal to the number of allowed not-ready nodes).
 if err := e2epod.WaitForPodsRunningReady(c, metav1.NamespaceSystem, int32(framework.TestContext.MinStartupPods), int32(framework.TestContext.AllowedNotReadyNodes), podStartupTimeout, map[string]string{}); err != nil {
-framework.DumpAllNamespaceInfo(c, metav1.NamespaceSystem)
+e2edebug.DumpAllNamespaceInfo(c, metav1.NamespaceSystem)
 e2ekubectl.LogFailedContainers(c, metav1.NamespaceSystem, framework.Logf)
 framework.Failf("Error waiting for all pods to be running and ready: %v", err)
 }
@@ -270,7 +271,7 @@
 }
 if framework.TestContext.NodeKiller.Enabled {
-nodeKiller := framework.NewNodeKiller(framework.TestContext.NodeKiller, c, framework.TestContext.Provider)
+nodeKiller := e2enode.NewNodeKiller(framework.TestContext.NodeKiller, c, framework.TestContext.Provider)
 go nodeKiller.Run(framework.TestContext.NodeKiller.NodeKillerStopCh)
 }
 }


@@ -18,7 +18,7 @@ package job
 import (
 batchv1 "k8s.io/api/batch/v1"
-"k8s.io/api/core/v1"
+v1 "k8s.io/api/core/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/util/rand"
 "k8s.io/kubernetes/test/e2e/framework"


@@ -18,6 +18,7 @@ package job
 import (
 "context"
 batchv1 "k8s.io/api/batch/v1"
 v1 "k8s.io/api/core/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"


@@ -21,7 +21,7 @@ import (
 "time"
 batchv1 "k8s.io/api/batch/v1"
-"k8s.io/api/core/v1"
+v1 "k8s.io/api/core/v1"
 apierrors "k8s.io/apimachinery/pkg/api/errors"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/util/wait"


@@ -23,7 +23,7 @@ import (
 "time"
 appsv1 "k8s.io/api/apps/v1"
-"k8s.io/api/core/v1"
+v1 "k8s.io/api/core/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/runtime"
 utilyaml "k8s.io/apimachinery/pkg/util/yaml"


@@ -22,9 +22,10 @@ import (
 "bytes"
 "fmt"
+"sync"
 "k8s.io/kubernetes/test/e2e/framework"
 "k8s.io/kubernetes/test/e2e/perftype"
-"sync"
 )
 var now = time.Now


@@ -23,10 +23,11 @@ import (
 "sync"
 "time"
-"k8s.io/api/core/v1"
+v1 "k8s.io/api/core/v1"
 "k8s.io/kubernetes/test/e2e/framework"
 e2econfig "k8s.io/kubernetes/test/e2e/framework/config"
 e2enode "k8s.io/kubernetes/test/e2e/framework/node"
+e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
 instrumentation "k8s.io/kubernetes/test/e2e/instrumentation/common"
 imageutils "k8s.io/kubernetes/test/utils/image"
 admissionapi "k8s.io/pod-security-admission/api"
@@ -118,7 +119,7 @@ func RunLogPodsWithSleepOf(f *framework.Framework, sleep time.Duration, podname
 // we don't validate total log data, since there is no guarantee all logs will be stored forever.
 // instead, we just validate that some logs are being created in std out.
 Verify: func(p v1.Pod) (bool, error) {
-s, err := framework.LookForStringInLog(f.Namespace.Name, p.Name, "logging-soak", "logs-123", 1*time.Second)
+s, err := e2eoutput.LookForStringInLog(f.Namespace.Name, p.Name, "logging-soak", "logs-123", 1*time.Second)
 return s != "", err
 },
 },


@@ -95,7 +95,7 @@ func (p *loadLoggingPod) Name() string {
 func (p *loadLoggingPod) Start(f *framework.Framework) error {
 framework.Logf("Starting load logging pod %s", p.name)
-f.PodClient().Create(&v1.Pod{
+e2epod.NewPodClient(f).Create(&v1.Pod{
 ObjectMeta: meta_v1.ObjectMeta{
 Name: p.name,
 },
@@ -161,7 +161,7 @@ func (p *execLoggingPod) Name() string {
 func (p *execLoggingPod) Start(f *framework.Framework) error {
 framework.Logf("Starting repeating logging pod %s", p.name)
-f.PodClient().Create(&v1.Pod{
+e2epod.NewPodClient(f).Create(&v1.Pod{
 ObjectMeta: meta_v1.ObjectMeta{
 Name: p.name,
 },


@@ -27,6 +27,7 @@ import (
 "k8s.io/apimachinery/pkg/util/wait"
 "k8s.io/kubernetes/test/e2e/framework"
 e2egpu "k8s.io/kubernetes/test/e2e/framework/gpu"
+e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
 instrumentation "k8s.io/kubernetes/test/e2e/instrumentation/common"
 "k8s.io/kubernetes/test/e2e/scheduling"
@@ -81,7 +82,7 @@ func testStackdriverAcceleratorMonitoring(f *framework.Framework) {
 scheduling.SetupNVIDIAGPUNode(f, false)
-f.PodClient().Create(&v1.Pod{
+e2epod.NewPodClient(f).Create(&v1.Pod{
 ObjectMeta: metav1.ObjectMeta{
 Name: rcName,
 },


@@ -27,6 +27,7 @@ import (
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/kubernetes/test/e2e/framework"
 e2edeployment "k8s.io/kubernetes/test/e2e/framework/deployment"
+e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
 imageutils "k8s.io/kubernetes/test/utils/image"
 gcm "google.golang.org/api/monitoring/v3"
@@ -234,7 +235,7 @@ func CreateAdapter(adapterDeploymentFile string) error {
 if err != nil {
 return err
 }
-stat, err := framework.RunKubectl("", "apply", "-f", adapterURL)
+stat, err := e2ekubectl.RunKubectl("", "apply", "-f", adapterURL)
 framework.Logf(stat)
 return err
 }
@@ -247,7 +248,7 @@ func createClusterAdminBinding() error {
 }
 serviceAccount := strings.TrimSpace(stdout)
 framework.Logf("current service account: %q", serviceAccount)
-stat, err := framework.RunKubectl("", "create", "clusterrolebinding", ClusterAdminBinding, "--clusterrole=cluster-admin", "--user="+serviceAccount)
+stat, err := e2ekubectl.RunKubectl("", "create", "clusterrolebinding", ClusterAdminBinding, "--clusterrole=cluster-admin", "--user="+serviceAccount)
 framework.Logf(stat)
 return err
 }
@@ -287,7 +288,7 @@ func CleanupDescriptors(service *gcm.Service, projectID string) {
 // CleanupAdapter deletes Custom Metrics - Stackdriver adapter deployments.
 func CleanupAdapter(adapterDeploymentFile string) {
-stat, err := framework.RunKubectl("", "delete", "-f", adapterDeploymentFile)
+stat, err := e2ekubectl.RunKubectl("", "delete", "-f", adapterDeploymentFile)
 framework.Logf(stat)
 if err != nil {
 framework.Logf("Failed to delete adapter deployments: %s", err)
@@ -300,7 +301,7 @@ func CleanupAdapter(adapterDeploymentFile string) {
 }
 func cleanupClusterAdminBinding() {
-stat, err := framework.RunKubectl("", "delete", "clusterrolebinding", ClusterAdminBinding)
+stat, err := e2ekubectl.RunKubectl("", "delete", "clusterrolebinding", ClusterAdminBinding)
 framework.Logf(stat)
 if err != nil {
 framework.Logf("Failed to delete cluster admin binding: %s", err)


@@ -21,7 +21,7 @@ import (
 "time"
 gcm "google.golang.org/api/monitoring/v3"
-"k8s.io/api/core/v1"
+v1 "k8s.io/api/core/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/labels"
 "k8s.io/apimachinery/pkg/runtime/schema"


@@ -24,7 +24,7 @@ import (
 "reflect"
 "time"
-"k8s.io/api/core/v1"
+v1 "k8s.io/api/core/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 clientset "k8s.io/client-go/kubernetes"
 "k8s.io/kubernetes/test/e2e/framework"


@@ -62,11 +62,13 @@ import (
 commonutils "k8s.io/kubernetes/test/e2e/common"
 "k8s.io/kubernetes/test/e2e/framework"
 e2eauth "k8s.io/kubernetes/test/e2e/framework/auth"
+e2edebug "k8s.io/kubernetes/test/e2e/framework/debug"
 e2eendpoints "k8s.io/kubernetes/test/e2e/framework/endpoints"
 e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
 e2enetwork "k8s.io/kubernetes/test/e2e/framework/network"
 e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
+e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
 e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
 e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
 e2etestfiles "k8s.io/kubernetes/test/e2e/framework/testfiles"
@@ -196,7 +198,7 @@ func cleanupKubectlInputs(fileContents string, ns string, selectors ...string) {
 ginkgo.By("using delete to clean up resources")
 // support backward compatibility : file paths or raw json - since we are removing file path
 // dependencies from this test.
-framework.RunKubectlOrDieInput(ns, fileContents, "delete", "--grace-period=0", "--force", "-f", "-")
+e2ekubectl.RunKubectlOrDieInput(ns, fileContents, "delete", "--grace-period=0", "--force", "-f", "-")
 assertCleanup(ns, selectors...)
 }
@@ -206,12 +208,12 @@ func assertCleanup(ns string, selectors ...string) {
 verifyCleanupFunc := func() (bool, error) {
 e = nil
 for _, selector := range selectors {
-resources := framework.RunKubectlOrDie(ns, "get", "rc,svc", "-l", selector, "--no-headers")
+resources := e2ekubectl.RunKubectlOrDie(ns, "get", "rc,svc", "-l", selector, "--no-headers")
 if resources != "" {
 e = fmt.Errorf("Resources left running after stop:\n%s", resources)
 return false, nil
 }
-pods := framework.RunKubectlOrDie(ns, "get", "pods", "-l", selector, "-o", "go-template={{ range .items }}{{ if not .metadata.deletionTimestamp }}{{ .metadata.name }}{{ \"\\n\" }}{{ end }}{{ end }}")
+pods := e2ekubectl.RunKubectlOrDie(ns, "get", "pods", "-l", selector, "-o", "go-template={{ range .items }}{{ if not .metadata.deletionTimestamp }}{{ .metadata.name }}{{ \"\\n\" }}{{ end }}{{ end }}")
 if pods != "" {
 e = fmt.Errorf("Pods left unterminated after stop:\n%s", pods)
 return false, nil
@@ -237,7 +239,7 @@ func runKubectlRetryOrDie(ns string, args ...string) string {
 var err error
 var output string
 for i := 0; i < 5; i++ {
-output, err = framework.RunKubectl(ns, args...)
+output, err = e2ekubectl.RunKubectl(ns, args...)
 if err == nil || (!strings.Contains(err.Error(), genericregistry.OptimisticLockErrorMsg) && !strings.Contains(err.Error(), "Operation cannot be fulfilled")) {
 break
 }
@@ -282,7 +284,7 @@ var _ = SIGDescribe("Kubectl client", func() {
 pods, err := clusterState().WaitFor(atLeast, framework.PodStartTimeout)
 if err != nil || len(pods) < atLeast {
 // TODO: Generalize integrating debug info into these tests so we always get debug info when we need it
-framework.DumpAllNamespaceInfo(f.ClientSet, ns)
+e2edebug.DumpAllNamespaceInfo(f.ClientSet, ns)
 framework.Failf("Verified %d of %d pods , error: %v", len(pods), atLeast, err)
 }
 }
@@ -338,7 +340,7 @@ var _ = SIGDescribe("Kubectl client", func() {
 defer cleanupKubectlInputs(nautilus, ns, updateDemoSelector)
 ginkgo.By("creating a replication controller")
-framework.RunKubectlOrDieInput(ns, nautilus, "create", "-f", "-")
+e2ekubectl.RunKubectlOrDieInput(ns, nautilus, "create", "-f", "-")
 validateController(c, nautilusImage, 2, "update-demo", updateDemoSelector, getUDData("nautilus.jpg", ns), ns)
 })
@@ -351,15 +353,15 @@ var _ = SIGDescribe("Kubectl client", func() {
 defer cleanupKubectlInputs(nautilus, ns, updateDemoSelector)
 ginkgo.By("creating a replication controller")
-framework.RunKubectlOrDieInput(ns, nautilus, "create", "-f", "-")
+e2ekubectl.RunKubectlOrDieInput(ns, nautilus, "create", "-f", "-")
 validateController(c, nautilusImage, 2, "update-demo", updateDemoSelector, getUDData("nautilus.jpg", ns), ns)
 ginkgo.By("scaling down the replication controller")
 debugDiscovery()
-framework.RunKubectlOrDie(ns, "scale", "rc", "update-demo-nautilus", "--replicas=1", "--timeout=5m")
+e2ekubectl.RunKubectlOrDie(ns, "scale", "rc", "update-demo-nautilus", "--replicas=1", "--timeout=5m")
 validateController(c, nautilusImage, 1, "update-demo", updateDemoSelector, getUDData("nautilus.jpg", ns), ns)
 ginkgo.By("scaling up the replication controller")
 debugDiscovery()
-framework.RunKubectlOrDie(ns, "scale", "rc", "update-demo-nautilus", "--replicas=2", "--timeout=5m")
+e2ekubectl.RunKubectlOrDie(ns, "scale", "rc", "update-demo-nautilus", "--replicas=2", "--timeout=5m")
 validateController(c, nautilusImage, 2, "update-demo", updateDemoSelector, getUDData("nautilus.jpg", ns), ns)
 })
 })
@@ -396,7 +398,7 @@ var _ = SIGDescribe("Kubectl client", func() {
 ginkgo.By("creating all guestbook components")
 forEachGBFile(func(contents string) {
 framework.Logf(contents)
-framework.RunKubectlOrDieInput(ns, contents, "create", "-f", "-")
+e2ekubectl.RunKubectlOrDieInput(ns, contents, "create", "-f", "-")
 })
 ginkgo.By("validating guestbook app")
@@ -409,7 +411,7 @@ var _ = SIGDescribe("Kubectl client", func() {
 ginkgo.BeforeEach(func() {
 ginkgo.By(fmt.Sprintf("creating the pod from %v", podYaml))
 podYaml = commonutils.SubstituteImageName(string(readTestFileOrDie("pod-with-readiness-probe.yaml.in")))
-framework.RunKubectlOrDieInput(ns, podYaml, "create", "-f", "-")
+e2ekubectl.RunKubectlOrDieInput(ns, podYaml, "create", "-f", "-")
 framework.ExpectEqual(e2epod.CheckPodsRunningReady(c, ns, []string{simplePodName}, framework.PodStartTimeout), true)
 })
 ginkgo.AfterEach(func() {
@@ -418,7 +420,7 @@ var _ = SIGDescribe("Kubectl client", func() {
 ginkgo.It("should support exec", func() {
 ginkgo.By("executing a command in the container")
-execOutput := framework.RunKubectlOrDie(ns, "exec", podRunningTimeoutArg, simplePodName, "--", "echo", "running", "in", "container")
+execOutput := e2ekubectl.RunKubectlOrDie(ns, "exec", podRunningTimeoutArg, simplePodName, "--", "echo", "running", "in", "container")
 if e, a := "running in container", strings.TrimSpace(execOutput); e != a {
 framework.Failf("Unexpected kubectl exec output. Wanted %q, got %q", e, a)
 }
@@ -428,11 +430,11 @@ var _ = SIGDescribe("Kubectl client", func() {
 for i := 0; i < len(veryLongData); i++ {
 veryLongData[i] = 'a'
 }
-execOutput = framework.RunKubectlOrDie(ns, "exec", podRunningTimeoutArg, simplePodName, "--", "echo", string(veryLongData))
+execOutput = e2ekubectl.RunKubectlOrDie(ns, "exec", podRunningTimeoutArg, simplePodName, "--", "echo", string(veryLongData))
 framework.ExpectEqual(string(veryLongData), strings.TrimSpace(execOutput), "Unexpected kubectl exec output")
 ginkgo.By("executing a command in the container with noninteractive stdin")
-execOutput = framework.NewKubectlCommand(ns, "exec", "-i", podRunningTimeoutArg, simplePodName, "--", "cat").
+execOutput = e2ekubectl.NewKubectlCommand(ns, "exec", "-i", podRunningTimeoutArg, simplePodName, "--", "cat").
 WithStdinData("abcd1234").
 ExecOrDie(ns)
 if e, a := "abcd1234", execOutput; e != a {
@@ -448,7 +450,7 @@ var _ = SIGDescribe("Kubectl client", func() {
 defer closer.Close()
 ginkgo.By("executing a command in the container with pseudo-interactive stdin")
-execOutput = framework.NewKubectlCommand(ns, "exec", "-i", podRunningTimeoutArg, simplePodName, "--", "sh").
+execOutput = e2ekubectl.NewKubectlCommand(ns, "exec", "-i", podRunningTimeoutArg, simplePodName, "--", "sh").
 WithStdinReader(r).
 ExecOrDie(ns)
 if e, a := "hi", strings.TrimSpace(execOutput); e != a {
@@ -458,7 +460,7 @@ var _ = SIGDescribe("Kubectl client", func() {
 ginkgo.It("should support exec using resource/name", func() {
 ginkgo.By("executing a command in the container")
-execOutput := framework.RunKubectlOrDie(ns, "exec", podRunningTimeoutArg, simplePodResourceName, "--", "echo", "running", "in", "container")
+execOutput := e2ekubectl.RunKubectlOrDie(ns, "exec", podRunningTimeoutArg, simplePodResourceName, "--", "echo", "running", "in", "container")
 if e, a := "running in container", strings.TrimSpace(execOutput); e != a {
 framework.Failf("Unexpected kubectl exec output. Wanted %q, got %q", e, a)
 }
@@ -478,7 +480,7 @@ var _ = SIGDescribe("Kubectl client", func() {
 for _, proxyVar := range []string{"https_proxy", "HTTPS_PROXY"} {
 proxyLogs.Reset()
 ginkgo.By("Running kubectl via an HTTP proxy using " + proxyVar)
-output := framework.NewKubectlCommand(ns, "exec", podRunningTimeoutArg, "httpd", "--", "echo", "running", "in", "container").
+output := e2ekubectl.NewKubectlCommand(ns, "exec", podRunningTimeoutArg, "httpd", "--", "echo", "running", "in", "container").
 WithEnv(append(os.Environ(), fmt.Sprintf("%s=%s", proxyVar, proxyAddr))).
 ExecOrDie(ns)
@@ -512,7 +514,7 @@ var _ = SIGDescribe("Kubectl client", func() {
 //proxyLogs.Reset()
 host := fmt.Sprintf("--server=http://127.0.0.1:%d", port)
 ginkgo.By("Running kubectl via kubectl proxy using " + host)
-output := framework.NewKubectlCommand(
+output := e2ekubectl.NewKubectlCommand(
 ns, host,
 "exec", podRunningTimeoutArg, "httpd", "--", "echo", "running", "in", "container",
 ).ExecOrDie(ns)
@@ -526,12 +528,12 @@ var _ = SIGDescribe("Kubectl client", func() {
 ginkgo.Context("should return command exit codes", func() {
 ginkgo.It("execing into a container with a successful command", func() {
-_, err := framework.NewKubectlCommand(ns, "exec", "httpd", podRunningTimeoutArg, "--", "/bin/sh", "-c", "exit 0").Exec()
+_, err := e2ekubectl.NewKubectlCommand(ns, "exec", "httpd", podRunningTimeoutArg, "--", "/bin/sh", "-c", "exit 0").Exec()
 framework.ExpectNoError(err)
 })
 ginkgo.It("execing into a container with a failing command", func() {
-_, err := framework.NewKubectlCommand(ns, "exec", "httpd", podRunningTimeoutArg, "--", "/bin/sh", "-c", "exit 42").Exec()
+_, err := e2ekubectl.NewKubectlCommand(ns, "exec", "httpd", podRunningTimeoutArg, "--", "/bin/sh", "-c", "exit 42").Exec()
 ee, ok := err.(uexec.ExitError)
 if !ok {
 framework.Failf("Got unexpected error type, expected uexec.ExitError, got %T: %v", err, err)
@@ -540,12 +542,12 @@ var _ = SIGDescribe("Kubectl client", func() {
 })
 ginkgo.It("running a successful command", func() {
-_, err := framework.NewKubectlCommand(ns, "run", "-i", "--image="+busyboxImage, "--restart=Never", podRunningTimeoutArg, "success", "--", "/bin/sh", "-c", "exit 0").Exec()
+_, err := e2ekubectl.NewKubectlCommand(ns, "run", "-i", "--image="+busyboxImage, "--restart=Never", podRunningTimeoutArg, "success", "--", "/bin/sh", "-c", "exit 0").Exec()
 framework.ExpectNoError(err)
 })
 ginkgo.It("running a failing command", func() {
-_, err := framework.NewKubectlCommand(ns, "run", "-i", "--image="+busyboxImage, "--restart=Never", podRunningTimeoutArg, "failure-1", "--", "/bin/sh", "-c", "exit 42").Exec()
+_, err := e2ekubectl.NewKubectlCommand(ns, "run", "-i", "--image="+busyboxImage, "--restart=Never", podRunningTimeoutArg, "failure-1", "--", "/bin/sh", "-c", "exit 42").Exec()
 ee, ok := err.(uexec.ExitError)
 if !ok {
 framework.Failf("Got unexpected error type, expected uexec.ExitError, got %T: %v", err, err)
@@ -554,7 +556,7 @@ var _ = SIGDescribe("Kubectl client", func() {
 })
 ginkgo.It("[Slow] running a failing command without --restart=Never", func() {
-_, err := framework.NewKubectlCommand(ns, "run", "-i", "--image="+busyboxImage, "--restart=OnFailure", podRunningTimeoutArg, "failure-2", "--", "/bin/sh", "-c", "cat && exit 42").
+_, err := e2ekubectl.NewKubectlCommand(ns, "run", "-i", "--image="+busyboxImage, "--restart=OnFailure", podRunningTimeoutArg, "failure-2", "--", "/bin/sh", "-c", "cat && exit 42").
 WithStdinData("abcd1234").
 Exec()
 ee, ok := err.(uexec.ExitError)
@@ -567,7 +569,7 @@ var _ = SIGDescribe("Kubectl client", func() {
 })
 ginkgo.It("[Slow] running a failing command without --restart=Never, but with --rm", func() {
-_, err := framework.NewKubectlCommand(ns, "run", "-i", "--image="+busyboxImage, "--restart=OnFailure", "--rm", podRunningTimeoutArg, "failure-3", "--", "/bin/sh", "-c", "cat && exit 42").
+_, err := e2ekubectl.NewKubectlCommand(ns, "run", "-i", "--image="+busyboxImage, "--restart=OnFailure", "--rm", podRunningTimeoutArg, "failure-3", "--", "/bin/sh", "-c", "cat && exit 42").
 WithStdinData("abcd1234").
 Exec()
 ee, ok := err.(uexec.ExitError)
@@ -581,7 +583,7 @@ var _ = SIGDescribe("Kubectl client", func() {
 })
 ginkgo.It("[Slow] running a failing command with --leave-stdin-open", func() {
-_, err := framework.NewKubectlCommand(ns, "run", "-i", "--image="+busyboxImage, "--restart=Never", podRunningTimeoutArg, "failure-4", "--leave-stdin-open", "--", "/bin/sh", "-c", "exit 42").
+_, err := e2ekubectl.NewKubectlCommand(ns, "run", "-i", "--image="+busyboxImage, "--restart=Never", podRunningTimeoutArg, "failure-4", "--leave-stdin-open", "--", "/bin/sh", "-c", "exit 42").
 WithStdinData("abcd1234").
 Exec()
 framework.ExpectNoError(err)
@@ -592,7 +594,7 @@ var _ = SIGDescribe("Kubectl client", func() {
 waitForStdinContent := func(pod, content string) string {
 var logOutput string
 err := wait.Poll(10*time.Second, 5*time.Minute, func() (bool, error) {
-logOutput = framework.RunKubectlOrDie(ns, "logs", pod)
+logOutput = e2ekubectl.RunKubectlOrDie(ns, "logs", pod)
 return strings.Contains(logOutput, content), nil
 })
@@ -602,7 +604,7 @@ var _ = SIGDescribe("Kubectl client", func() {
 ginkgo.By("executing a command with run and attach with stdin")
 // We wait for a non-empty line so we know kubectl has attached
-framework.NewKubectlCommand(ns, "run", "run-test", "--image="+busyboxImage, "--restart=OnFailure", podRunningTimeoutArg, "--attach=true", "--stdin", "--", "sh", "-c", "echo -n read: && cat && echo 'stdin closed'").
+e2ekubectl.NewKubectlCommand(ns, "run", "run-test", "--image="+busyboxImage, "--restart=OnFailure", podRunningTimeoutArg, "--attach=true", "--stdin", "--", "sh", "-c", "echo -n read: && cat && echo 'stdin closed'").
 WithStdinData("value\nabcd1234").
 ExecOrDie(ns)
@@ -619,7 +621,7 @@ var _ = SIGDescribe("Kubectl client", func() {
 // "stdin closed", but hasn't exited yet.
 // We wait 10 seconds before printing to give time to kubectl to attach
 // to the container, this does not solve the race though.
-framework.NewKubectlCommand(ns, "run", "run-test-2", "--image="+busyboxImage, "--restart=OnFailure", podRunningTimeoutArg, "--attach=true", "--leave-stdin-open=true", "--", "sh", "-c", "cat && echo 'stdin closed'").
+e2ekubectl.NewKubectlCommand(ns, "run", "run-test-2", "--image="+busyboxImage, "--restart=OnFailure", podRunningTimeoutArg, "--attach=true", "--leave-stdin-open=true", "--", "sh", "-c", "cat && echo 'stdin closed'").
 WithStdinData("abcd1234").
 ExecOrDie(ns)
@@ -630,7 +632,7 @@ var _ = SIGDescribe("Kubectl client", func() {
 gomega.Expect(c.CoreV1().Pods(ns).Delete(context.TODO(), "run-test-2", metav1.DeleteOptions{})).To(gomega.BeNil())
 ginkgo.By("executing a command with run and attach with stdin with open stdin should remain running")
-framework.NewKubectlCommand(ns, "run", "run-test-3", "--image="+busyboxImage, "--restart=OnFailure", podRunningTimeoutArg, "--attach=true", "--leave-stdin-open=true", "--stdin", "--", "sh", "-c", "cat && echo 'stdin closed'").
+e2ekubectl.NewKubectlCommand(ns, "run", "run-test-3", "--image="+busyboxImage, "--restart=OnFailure", podRunningTimeoutArg, "--attach=true", "--leave-stdin-open=true", "--stdin", "--", "sh", "-c", "cat && echo 'stdin closed'").
 WithStdinData("abcd1234\n").
 ExecOrDie(ns)
@@ -652,13 +654,13 @@ var _ = SIGDescribe("Kubectl client", func() {
 podName := "run-log-test"
 ginkgo.By("executing a command with run")
-framework.RunKubectlOrDie(ns, "run", podName, "--image="+busyboxImage, "--restart=OnFailure", podRunningTimeoutArg, "--", "sh", "-c", "sleep 10; seq 100 | while read i; do echo $i; sleep 0.01; done; echo EOF")
+e2ekubectl.RunKubectlOrDie(ns, "run", podName, "--image="+busyboxImage, "--restart=OnFailure", podRunningTimeoutArg, "--", "sh", "-c", "sleep 10; seq 100 | while read i; do echo $i; sleep 0.01; done; echo EOF")
 if !e2epod.CheckPodsRunningReadyOrSucceeded(c, ns, []string{podName}, framework.PodStartTimeout) {
 framework.Failf("Pod for run-log-test was not ready")
 }
-logOutput := framework.RunKubectlOrDie(ns, "logs", "-f", "run-log-test")
+logOutput := e2ekubectl.RunKubectlOrDie(ns, "logs", "-f", "run-log-test")
 gomega.Expect(logOutput).To(gomega.ContainSubstring("EOF"))
 })
@@ -711,11 +713,11 @@ var _ = SIGDescribe("Kubectl client", func() {
 framework.ExpectNoError(err)
 kubectlPath = strings.TrimSpace(string(kubectlPathNormalized))
-inClusterHost := strings.TrimSpace(framework.RunHostCmdOrDie(ns, simplePodName, "printenv KUBERNETES_SERVICE_HOST"))
-inClusterPort := strings.TrimSpace(framework.RunHostCmdOrDie(ns, simplePodName, "printenv KUBERNETES_SERVICE_PORT"))
+inClusterHost := strings.TrimSpace(e2eoutput.RunHostCmdOrDie(ns, simplePodName, "printenv KUBERNETES_SERVICE_HOST"))
+inClusterPort := strings.TrimSpace(e2eoutput.RunHostCmdOrDie(ns, simplePodName, "printenv KUBERNETES_SERVICE_PORT"))
 inClusterURL := net.JoinHostPort(inClusterHost, inClusterPort)
 framework.Logf("copying %s to the %s pod", kubectlPath, simplePodName)
-framework.RunKubectlOrDie(ns, "cp", kubectlPath, ns+"/"+simplePodName+":/tmp/")
+e2ekubectl.RunKubectlOrDie(ns, "cp", kubectlPath, ns+"/"+simplePodName+":/tmp/")
 // Build a kubeconfig file that will make use of the injected ca and token,
 // but point at the DNS host and the default namespace
@@ -745,7 +747,7 @@ users:
 tokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
 `), os.FileMode(0755)))
 framework.Logf("copying override kubeconfig to the %s pod", simplePodName)
-framework.RunKubectlOrDie(ns, "cp", filepath.Join(tmpDir, overrideKubeconfigName), ns+"/"+simplePodName+":/tmp/")
+e2ekubectl.RunKubectlOrDie(ns, "cp", filepath.Join(tmpDir, overrideKubeconfigName), ns+"/"+simplePodName+":/tmp/")
 framework.ExpectNoError(os.WriteFile(filepath.Join(tmpDir, "invalid-configmap-with-namespace.yaml"), []byte(`
 kind: ConfigMap
@@ -761,30 +763,30 @@ metadata:
 name: "configmap without namespace and invalid name"
 `), os.FileMode(0755)))
 framework.Logf("copying configmap manifests to the %s pod", simplePodName)
-framework.RunKubectlOrDie(ns, "cp", filepath.Join(tmpDir, "invalid-configmap-with-namespace.yaml"), ns+"/"+simplePodName+":/tmp/")
-framework.RunKubectlOrDie(ns, "cp", filepath.Join(tmpDir, "invalid-configmap-without-namespace.yaml"), ns+"/"+simplePodName+":/tmp/")
+e2ekubectl.RunKubectlOrDie(ns, "cp", filepath.Join(tmpDir, "invalid-configmap-with-namespace.yaml"), ns+"/"+simplePodName+":/tmp/")
+e2ekubectl.RunKubectlOrDie(ns, "cp", filepath.Join(tmpDir, "invalid-configmap-without-namespace.yaml"), ns+"/"+simplePodName+":/tmp/")
 ginkgo.By("getting pods with in-cluster configs")
-execOutput := framework.RunHostCmdOrDie(ns, simplePodName, "/tmp/kubectl get pods --v=6 2>&1")
+execOutput := e2eoutput.RunHostCmdOrDie(ns, simplePodName, "/tmp/kubectl get pods --v=6 2>&1")
 gomega.Expect(execOutput).To(gomega.MatchRegexp("httpd +1/1 +Running"))
 gomega.Expect(execOutput).To(gomega.ContainSubstring("Using in-cluster namespace"))
 gomega.Expect(execOutput).To(gomega.ContainSubstring("Using in-cluster configuration"))
 ginkgo.By("creating an object containing a namespace with in-cluster config")
-_, err = framework.RunHostCmd(ns, simplePodName, "/tmp/kubectl create -f /tmp/invalid-configmap-with-namespace.yaml --v=6 2>&1")
+_, err = e2eoutput.RunHostCmd(ns, simplePodName, "/tmp/kubectl create -f /tmp/invalid-configmap-with-namespace.yaml --v=6 2>&1")
 gomega.Expect(err).To(gomega.ContainSubstring("Using in-cluster namespace"))
 gomega.Expect(err).To(gomega.ContainSubstring("Using in-cluster configuration"))
 gomega.Expect(err).To(gomega.ContainSubstring(fmt.Sprintf("POST https://%s/api/v1/namespaces/configmap-namespace/configmaps", inClusterURL)))
 ginkgo.By("creating an object not containing a namespace with in-cluster config")
-_, err = framework.RunHostCmd(ns, simplePodName, "/tmp/kubectl create -f /tmp/invalid-configmap-without-namespace.yaml --v=6 2>&1")
+_, err = e2eoutput.RunHostCmd(ns, simplePodName, "/tmp/kubectl create -f /tmp/invalid-configmap-without-namespace.yaml --v=6 2>&1")
 gomega.Expect(err).To(gomega.ContainSubstring("Using in-cluster namespace"))
 gomega.Expect(err).To(gomega.ContainSubstring("Using in-cluster configuration"))
 gomega.Expect(err).To(gomega.ContainSubstring(fmt.Sprintf("POST https://%s/api/v1/namespaces/%s/configmaps", inClusterURL, f.Namespace.Name)))
 ginkgo.By("trying to use kubectl with invalid token")
-_, err = framework.RunHostCmd(ns, simplePodName, "/tmp/kubectl get pods --token=invalid --v=7 2>&1")
+_, err = e2eoutput.RunHostCmd(ns, simplePodName, "/tmp/kubectl get pods --token=invalid --v=7 2>&1")
 framework.Logf("got err %v", err)
 framework.ExpectError(err)
 gomega.Expect(err).To(gomega.ContainSubstring("Using in-cluster namespace"))
@@ -792,21 +794,21 @@ metadata:
 gomega.Expect(err).To(gomega.ContainSubstring("Response Status: 401 Unauthorized"))
 ginkgo.By("trying to use kubectl with invalid server")
-_, err = framework.RunHostCmd(ns, simplePodName, "/tmp/kubectl get pods --server=invalid --v=6 2>&1")
+_, err = e2eoutput.RunHostCmd(ns, simplePodName, "/tmp/kubectl get pods --server=invalid --v=6 2>&1")
 framework.Logf("got err %v", err)
 framework.ExpectError(err)
 gomega.Expect(err).To(gomega.ContainSubstring("Unable to connect to the server"))
 gomega.Expect(err).To(gomega.ContainSubstring("GET http://invalid/api"))
 ginkgo.By("trying to use kubectl with invalid namespace")
-execOutput = framework.RunHostCmdOrDie(ns, simplePodName, "/tmp/kubectl get pods --namespace=invalid --v=6 2>&1")
+execOutput = e2eoutput.RunHostCmdOrDie(ns, simplePodName, "/tmp/kubectl get pods --namespace=invalid --v=6 2>&1")
 gomega.Expect(execOutput).To(gomega.ContainSubstring("No resources found"))
 gomega.Expect(execOutput).ToNot(gomega.ContainSubstring("Using in-cluster namespace"))
 gomega.Expect(execOutput).To(gomega.ContainSubstring("Using in-cluster configuration"))
 gomega.Expect(execOutput).To(gomega.MatchRegexp(fmt.Sprintf("GET http[s]?://[\\[]?%s[\\]]?:%s/api/v1/namespaces/invalid/pods", inClusterHost, inClusterPort)))
 ginkgo.By("trying to use kubectl with kubeconfig")
-execOutput = framework.RunHostCmdOrDie(ns, simplePodName, "/tmp/kubectl get pods --kubeconfig=/tmp/"+overrideKubeconfigName+" --v=6 2>&1")
+execOutput = e2eoutput.RunHostCmdOrDie(ns, simplePodName, "/tmp/kubectl get pods --kubeconfig=/tmp/"+overrideKubeconfigName+" --v=6 2>&1")
 gomega.Expect(execOutput).ToNot(gomega.ContainSubstring("Using in-cluster namespace"))
 gomega.Expect(execOutput).ToNot(gomega.ContainSubstring("Using in-cluster configuration"))
 gomega.Expect(execOutput).To(gomega.ContainSubstring("GET https://kubernetes.default.svc:443/api/v1/namespaces/default/pods"))
@@ -821,7 +823,7 @@ metadata:
 */
 framework.ConformanceIt("should check if v1 is in available api versions ", func() {
 ginkgo.By("validating api versions")
-output := framework.RunKubectlOrDie(ns, "api-versions")
+output := e2ekubectl.RunKubectlOrDie(ns, "api-versions")
 if !strings.Contains(output, "v1") {
 framework.Failf("No v1 in kubectl api-versions")
 }
@@ -831,12 +833,12 @@ metadata:
 ginkgo.Describe("Kubectl get componentstatuses", func() {
 ginkgo.It("should get componentstatuses", func() {
 ginkgo.By("getting list of componentstatuses")
-output := framework.RunKubectlOrDie(ns, "get", "componentstatuses", "-o", "jsonpath={.items[*].metadata.name}")
+output := e2ekubectl.RunKubectlOrDie(ns, "get", "componentstatuses", "-o", "jsonpath={.items[*].metadata.name}")
 components := strings.Split(output, " ")
 ginkgo.By("getting details of componentstatuses")
 for _, component := range components {
 ginkgo.By("getting status of " + component)
-framework.RunKubectlOrDie(ns, "get", "componentstatuses", component)
+e2ekubectl.RunKubectlOrDie(ns, "get", "componentstatuses", component)
 }
 })
 })
@@ -846,10 +848,10 @@ metadata:
 controllerJSON := commonutils.SubstituteImageName(string(readTestFileOrDie(agnhostControllerFilename)))
 ginkgo.By("creating Agnhost RC")
-framework.RunKubectlOrDieInput(ns, controllerJSON, "create", "-f", "-")
+e2ekubectl.RunKubectlOrDieInput(ns, controllerJSON, "create", "-f", "-")
 ginkgo.By("applying a modified configuration")
 stdin := modifyReplicationControllerConfiguration(controllerJSON)
-framework.NewKubectlCommand(ns, "apply", "-f", "-").
+e2ekubectl.NewKubectlCommand(ns, "apply", "-f", "-").
 WithStdinReader(stdin).
 ExecOrDie(ns)
 ginkgo.By("checking the result")
@@ -859,16 +861,16 @@ metadata:
 serviceJSON := readTestFileOrDie(agnhostServiceFilename)
 ginkgo.By("creating Agnhost SVC")
-framework.RunKubectlOrDieInput(ns, string(serviceJSON[:]), "create", "-f", "-")
+e2ekubectl.RunKubectlOrDieInput(ns, string(serviceJSON[:]), "create", "-f", "-")
 ginkgo.By("getting the original port")
-originalNodePort := framework.RunKubectlOrDie(ns, "get", "service", "agnhost-primary", "-o", "jsonpath={.spec.ports[0].port}")
+originalNodePort := e2ekubectl.RunKubectlOrDie(ns, "get", "service", "agnhost-primary", "-o", "jsonpath={.spec.ports[0].port}")
 ginkgo.By("applying the same configuration")
-framework.RunKubectlOrDieInput(ns, string(serviceJSON[:]), "apply", "-f", "-")
+e2ekubectl.RunKubectlOrDieInput(ns, string(serviceJSON[:]), "apply", "-f", "-")
 ginkgo.By("getting the port after applying configuration")
-currentNodePort := framework.RunKubectlOrDie(ns, "get", "service", "agnhost-primary", "-o", "jsonpath={.spec.ports[0].port}")
+currentNodePort := e2ekubectl.RunKubectlOrDie(ns, "get", "service", "agnhost-primary", "-o", "jsonpath={.spec.ports[0].port}")
 ginkgo.By("checking the result")
 if originalNodePort != currentNodePort {
@@ -882,20 +884,20 @@ metadata:
 deployment3Yaml := commonutils.SubstituteImageName(string(readTestFileOrDie(httpdDeployment3Filename)))
 ginkgo.By("deployment replicas number is 2")
-framework.RunKubectlOrDieInput(ns, deployment1Yaml, "apply", "-f", "-")
+e2ekubectl.RunKubectlOrDieInput(ns, deployment1Yaml, "apply", "-f", "-")
 ginkgo.By("check the last-applied matches expectations annotations")
-output := framework.RunKubectlOrDieInput(ns, deployment1Yaml, "apply", "view-last-applied", "-f", "-", "-o", "json")
+output := e2ekubectl.RunKubectlOrDieInput(ns, deployment1Yaml, "apply", "view-last-applied", "-f", "-", "-o", "json")
 requiredString := "\"replicas\": 2"
 if !strings.Contains(output, requiredString) {
 framework.Failf("Missing %s in kubectl view-last-applied", requiredString)
 }
 ginkgo.By("apply file doesn't have replicas")
-framework.RunKubectlOrDieInput(ns, deployment2Yaml, "apply", "set-last-applied", "-f", "-")
+e2ekubectl.RunKubectlOrDieInput(ns, deployment2Yaml, "apply", "set-last-applied", "-f", "-")
 ginkgo.By("check last-applied has been updated, annotations doesn't have replicas")
-output = framework.RunKubectlOrDieInput(ns, deployment1Yaml, "apply", "view-last-applied", "-f", "-", "-o", "json")
+output = e2ekubectl.RunKubectlOrDieInput(ns, deployment1Yaml, "apply", "view-last-applied", "-f", "-", "-o", "json")
 requiredString = "\"replicas\": 2"
 if strings.Contains(output, requiredString) {
 framework.Failf("Presenting %s in kubectl view-last-applied", requiredString)
@@ -904,13 +906,13 @@ metadata:
 ginkgo.By("scale set replicas to 3")
 httpdDeploy := "httpd-deployment"
 debugDiscovery()
-framework.RunKubectlOrDie(ns, "scale", "deployment", httpdDeploy, "--replicas=3")
+e2ekubectl.RunKubectlOrDie(ns, "scale", "deployment", httpdDeploy, "--replicas=3")
 ginkgo.By("apply file doesn't have replicas but image changed")
-framework.RunKubectlOrDieInput(ns, deployment3Yaml, "apply", "-f", "-")
+e2ekubectl.RunKubectlOrDieInput(ns, deployment3Yaml, "apply", "-f", "-")
 ginkgo.By("verify replicas still is 3 and image has been updated")
-output = framework.RunKubectlOrDieInput(ns, deployment3Yaml, "get", "-f", "-", "-o", "json")
+output = e2ekubectl.RunKubectlOrDieInput(ns, deployment3Yaml, "get", "-f", "-", "-o", "json")
 requiredItems := []string{"\"replicas\": 3", imageutils.GetE2EImage(imageutils.Httpd)}
 for _, item := range requiredItems {
 if !strings.Contains(output, item) {
@@ -929,14 +931,14 @@ metadata:
 framework.ConformanceIt("should check if kubectl diff finds a difference for Deployments", func() {
 ginkgo.By("create deployment with httpd image")
 deployment := commonutils.SubstituteImageName(string(readTestFileOrDie(httpdDeployment3Filename)))
-framework.RunKubectlOrDieInput(ns, deployment, "create", "-f", "-")
+e2ekubectl.RunKubectlOrDieInput(ns, deployment, "create", "-f", "-")
 ginkgo.By("verify diff finds difference between live and declared image")
 deployment = strings.Replace(deployment, httpdImage, busyboxImage, 1)
 if !strings.Contains(deployment, busyboxImage) {
 framework.Failf("Failed replacing image from %s to %s in:\n%s\n", httpdImage, busyboxImage, deployment)
 }
-output, err := framework.RunKubectlInput(ns, deployment, "diff", "-f", "-")
+output, err := e2ekubectl.RunKubectlInput(ns, deployment, "diff", "-f", "-")
 if err, ok := err.(*exec.ExitError); ok && err.ExitCode() == 1 {
 framework.Failf("Expected kubectl diff exit code of 1, but got %d: %v\n", err.ExitCode(), err)
 }
@@ -947,7 +949,7 @@ metadata:
 }
 }
-framework.RunKubectlOrDieInput(ns, deployment, "delete", "-f", "-")
+e2ekubectl.RunKubectlOrDieInput(ns, deployment, "delete", "-f", "-")
 })
 })
@@ -960,11 +962,11 @@ metadata:
 framework.ConformanceIt("should check if kubectl can dry-run update Pods", func() {
 ginkgo.By("running the image " + httpdImage)
 podName := "e2e-test-httpd-pod"
-framework.RunKubectlOrDie(ns, "run", podName, "--image="+httpdImage, podRunningTimeoutArg, "--labels=run="+podName)
+e2ekubectl.RunKubectlOrDie(ns, "run", podName, "--image="+httpdImage, podRunningTimeoutArg, "--labels=run="+podName)
 ginkgo.By("replace the image in the pod with server-side dry-run")
 specImage := fmt.Sprintf(`{"spec":{"containers":[{"name": "%s","image": "%s"}]}}`, podName, busyboxImage)
-framework.RunKubectlOrDie(ns, "patch", "pod", podName, "-p", specImage, "--dry-run=server")
+e2ekubectl.RunKubectlOrDie(ns, "patch", "pod", podName, "-p", specImage, "--dry-run=server")
 ginkgo.By("verifying the pod " + podName + " has the right image " + httpdImage)
 pod, err := c.CoreV1().Pods(ns).Get(context.TODO(), podName, metav1.GetOptions{})
@@ -976,7 +978,7 @@ metadata:
 framework.Failf("Failed creating pod with expected image %s", httpdImage)
 }
-framework.RunKubectlOrDie(ns, "delete", "pods", podName)
+e2ekubectl.RunKubectlOrDie(ns, "delete", "pods", podName)
 })
 })
@@ -1166,7 +1168,7 @@ metadata:
 }`
 meta := unknownFieldMetadataJSON(gvk, "test-cr")
 unknownRootMetaCR := fmt.Sprintf(embeddedCRPattern, meta, "", ns)
-_, err = framework.RunKubectlInput(ns, unknownRootMetaCR, "create", "--validate=true", "-f", "-")
+_, err = e2ekubectl.RunKubectlInput(ns, unknownRootMetaCR, "create", "--validate=true", "-f", "-")
 if err == nil {
 framework.Failf("unexpected nil error when creating CR with unknown root metadata field")
 }
@@ -1180,7 +1182,7 @@ metadata:
 ginkgo.By("attempting to create a CR with unknown metadata fields in the embedded object")
 metaEmbedded := fmt.Sprintf(metaPattern, testCRD.Crd.Spec.Names.Kind, testCRD.Crd.Spec.Group, testCRD.Crd.Spec.Versions[0].Name, "test-cr-embedded")
 unknownEmbeddedMetaCR := fmt.Sprintf(embeddedCRPattern, metaEmbedded, `"unknownMetaEmbedded": "bar",`, ns)
-_, err = framework.RunKubectlInput(ns, unknownEmbeddedMetaCR, "create", "--validate=true", "-f", "-")
+_, err = e2ekubectl.RunKubectlInput(ns, unknownEmbeddedMetaCR, "create", "--validate=true", "-f", "-")
 if err == nil {
 framework.Failf("unexpected nil error when creating CR with unknown embedded metadata field")
 }
@@ -1225,7 +1227,7 @@ metadata:
 }
 }
 `
-_, err := framework.RunKubectlInput(ns, invalidMetaDeployment, "create", "-f", "-")
+_, err := e2ekubectl.RunKubectlInput(ns, invalidMetaDeployment, "create", "-f", "-")
 if err == nil {
 framework.Failf("unexpected nil error when creating deployment with unknown metadata field")
 }
@@ -1247,7 +1249,7 @@ metadata:
 */
 framework.ConformanceIt("should check if Kubernetes control plane services is included in cluster-info ", func() {
 ginkgo.By("validating cluster-info")
-output := framework.RunKubectlOrDie(ns, "cluster-info")
+output := e2ekubectl.RunKubectlOrDie(ns, "cluster-info")
 // Can't check exact strings due to terminal control commands (colors)
 requiredItems := []string{"Kubernetes control plane", "is running at"}
 for _, item := range requiredItems {
@ -1261,7 +1263,7 @@ metadata:
ginkgo.Describe("Kubectl cluster-info dump", func() { ginkgo.Describe("Kubectl cluster-info dump", func() {
ginkgo.It("should check if cluster-info dump succeeds", func() { ginkgo.It("should check if cluster-info dump succeeds", func() {
ginkgo.By("running cluster-info dump") ginkgo.By("running cluster-info dump")
framework.RunKubectlOrDie(ns, "cluster-info", "dump") e2ekubectl.RunKubectlOrDie(ns, "cluster-info", "dump")
}) })
}) })
@ -1275,15 +1277,15 @@ metadata:
controllerJSON := commonutils.SubstituteImageName(string(readTestFileOrDie(agnhostControllerFilename))) controllerJSON := commonutils.SubstituteImageName(string(readTestFileOrDie(agnhostControllerFilename)))
serviceJSON := readTestFileOrDie(agnhostServiceFilename) serviceJSON := readTestFileOrDie(agnhostServiceFilename)
framework.RunKubectlOrDieInput(ns, controllerJSON, "create", "-f", "-") e2ekubectl.RunKubectlOrDieInput(ns, controllerJSON, "create", "-f", "-")
framework.RunKubectlOrDieInput(ns, string(serviceJSON[:]), "create", "-f", "-") e2ekubectl.RunKubectlOrDieInput(ns, string(serviceJSON[:]), "create", "-f", "-")
ginkgo.By("Waiting for Agnhost primary to start.") ginkgo.By("Waiting for Agnhost primary to start.")
waitForOrFailWithDebug(1) waitForOrFailWithDebug(1)
// Pod // Pod
forEachPod(func(pod v1.Pod) { forEachPod(func(pod v1.Pod) {
output := framework.RunKubectlOrDie(ns, "describe", "pod", pod.Name) output := e2ekubectl.RunKubectlOrDie(ns, "describe", "pod", pod.Name)
requiredStrings := [][]string{ requiredStrings := [][]string{
{"Name:", "agnhost-primary-"}, {"Name:", "agnhost-primary-"},
{"Namespace:", ns}, {"Namespace:", ns},
@ -1317,7 +1319,7 @@ metadata:
checkKubectlOutputWithRetry(ns, requiredStrings, "describe", "rc", "agnhost-primary") checkKubectlOutputWithRetry(ns, requiredStrings, "describe", "rc", "agnhost-primary")
// Service // Service
output := framework.RunKubectlOrDie(ns, "describe", "service", "agnhost-primary") output := e2ekubectl.RunKubectlOrDie(ns, "describe", "service", "agnhost-primary")
requiredStrings = [][]string{ requiredStrings = [][]string{
{"Name:", "agnhost-primary"}, {"Name:", "agnhost-primary"},
{"Namespace:", ns}, {"Namespace:", ns},
@ -1337,7 +1339,7 @@ metadata:
nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{}) nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
node := nodes.Items[0] node := nodes.Items[0]
output = framework.RunKubectlOrDie(ns, "describe", "node", node.Name) output = e2ekubectl.RunKubectlOrDie(ns, "describe", "node", node.Name)
requiredStrings = [][]string{ requiredStrings = [][]string{
{"Name:", node.Name}, {"Name:", node.Name},
{"Labels:"}, {"Labels:"},
@ -1357,7 +1359,7 @@ metadata:
checkOutput(output, requiredStrings) checkOutput(output, requiredStrings)
// Namespace // Namespace
output = framework.RunKubectlOrDie(ns, "describe", "namespace", ns) output = e2ekubectl.RunKubectlOrDie(ns, "describe", "namespace", ns)
requiredStrings = [][]string{ requiredStrings = [][]string{
{"Name:", ns}, {"Name:", ns},
{"Labels:"}, {"Labels:"},
@ -1371,7 +1373,7 @@ metadata:
ginkgo.It("should check if kubectl describe prints relevant information for cronjob", func() { ginkgo.It("should check if kubectl describe prints relevant information for cronjob", func() {
ginkgo.By("creating a cronjob") ginkgo.By("creating a cronjob")
cronjobYaml := commonutils.SubstituteImageName(string(readTestFileOrDie("busybox-cronjob.yaml.in"))) cronjobYaml := commonutils.SubstituteImageName(string(readTestFileOrDie("busybox-cronjob.yaml.in")))
framework.RunKubectlOrDieInput(ns, cronjobYaml, "create", "-f", "-") e2ekubectl.RunKubectlOrDieInput(ns, cronjobYaml, "create", "-f", "-")
ginkgo.By("waiting for cronjob to start.") ginkgo.By("waiting for cronjob to start.")
err := wait.PollImmediate(time.Second, time.Minute, func() (bool, error) { err := wait.PollImmediate(time.Second, time.Minute, func() (bool, error) {
@ -1384,7 +1386,7 @@ metadata:
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By("verifying kubectl describe prints") ginkgo.By("verifying kubectl describe prints")
output := framework.RunKubectlOrDie(ns, "describe", "cronjob", "cronjob-test") output := e2ekubectl.RunKubectlOrDie(ns, "describe", "cronjob", "cronjob-test")
requiredStrings := [][]string{ requiredStrings := [][]string{
{"Name:", "cronjob-test"}, {"Name:", "cronjob-test"},
{"Namespace:", ns}, {"Namespace:", ns},
@ -1418,14 +1420,14 @@ metadata:
ginkgo.By("creating Agnhost RC") ginkgo.By("creating Agnhost RC")
framework.Logf("namespace %v", ns) framework.Logf("namespace %v", ns)
framework.RunKubectlOrDieInput(ns, controllerJSON, "create", "-f", "-") e2ekubectl.RunKubectlOrDieInput(ns, controllerJSON, "create", "-f", "-")
// It may take a while for the pods to get registered in some cases, wait to be sure. // It may take a while for the pods to get registered in some cases, wait to be sure.
ginkgo.By("Waiting for Agnhost primary to start.") ginkgo.By("Waiting for Agnhost primary to start.")
waitForOrFailWithDebug(1) waitForOrFailWithDebug(1)
forEachPod(func(pod v1.Pod) { forEachPod(func(pod v1.Pod) {
framework.Logf("wait on agnhost-primary startup in %v ", ns) framework.Logf("wait on agnhost-primary startup in %v ", ns)
framework.LookForStringInLog(ns, pod.Name, "agnhost-primary", "Paused", framework.PodStartTimeout) e2eoutput.LookForStringInLog(ns, pod.Name, "agnhost-primary", "Paused", framework.PodStartTimeout)
}) })
validateService := func(name string, servicePort int, timeout time.Duration) { validateService := func(name string, servicePort int, timeout time.Duration) {
err := wait.Poll(framework.Poll, timeout, func() (bool, error) { err := wait.Poll(framework.Poll, timeout, func() (bool, error) {
@ -1476,12 +1478,12 @@ metadata:
} }
ginkgo.By("exposing RC") ginkgo.By("exposing RC")
framework.RunKubectlOrDie(ns, "expose", "rc", "agnhost-primary", "--name=rm2", "--port=1234", fmt.Sprintf("--target-port=%d", agnhostPort)) e2ekubectl.RunKubectlOrDie(ns, "expose", "rc", "agnhost-primary", "--name=rm2", "--port=1234", fmt.Sprintf("--target-port=%d", agnhostPort))
e2enetwork.WaitForService(c, ns, "rm2", true, framework.Poll, framework.ServiceStartTimeout) e2enetwork.WaitForService(c, ns, "rm2", true, framework.Poll, framework.ServiceStartTimeout)
validateService("rm2", 1234, framework.ServiceStartTimeout) validateService("rm2", 1234, framework.ServiceStartTimeout)
ginkgo.By("exposing service") ginkgo.By("exposing service")
framework.RunKubectlOrDie(ns, "expose", "service", "rm2", "--name=rm3", "--port=2345", fmt.Sprintf("--target-port=%d", agnhostPort)) e2ekubectl.RunKubectlOrDie(ns, "expose", "service", "rm2", "--name=rm3", "--port=2345", fmt.Sprintf("--target-port=%d", agnhostPort))
e2enetwork.WaitForService(c, ns, "rm3", true, framework.Poll, framework.ServiceStartTimeout) e2enetwork.WaitForService(c, ns, "rm3", true, framework.Poll, framework.ServiceStartTimeout)
validateService("rm3", 2345, framework.ServiceStartTimeout) validateService("rm3", 2345, framework.ServiceStartTimeout)
}) })
@ -1492,7 +1494,7 @@ metadata:
ginkgo.BeforeEach(func() { ginkgo.BeforeEach(func() {
ginkgo.By("creating the pod") ginkgo.By("creating the pod")
podYaml = commonutils.SubstituteImageName(string(readTestFileOrDie("pause-pod.yaml.in"))) podYaml = commonutils.SubstituteImageName(string(readTestFileOrDie("pause-pod.yaml.in")))
framework.RunKubectlOrDieInput(ns, podYaml, "create", "-f", "-") e2ekubectl.RunKubectlOrDieInput(ns, podYaml, "create", "-f", "-")
framework.ExpectEqual(e2epod.CheckPodsRunningReady(c, ns, []string{pausePodName}, framework.PodStartTimeout), true) framework.ExpectEqual(e2epod.CheckPodsRunningReady(c, ns, []string{pausePodName}, framework.PodStartTimeout), true)
}) })
ginkgo.AfterEach(func() { ginkgo.AfterEach(func() {
@ -1509,17 +1511,17 @@ metadata:
labelValue := "testing-label-value" labelValue := "testing-label-value"
ginkgo.By("adding the label " + labelName + " with value " + labelValue + " to a pod") ginkgo.By("adding the label " + labelName + " with value " + labelValue + " to a pod")
framework.RunKubectlOrDie(ns, "label", "pods", pausePodName, labelName+"="+labelValue) e2ekubectl.RunKubectlOrDie(ns, "label", "pods", pausePodName, labelName+"="+labelValue)
ginkgo.By("verifying the pod has the label " + labelName + " with the value " + labelValue) ginkgo.By("verifying the pod has the label " + labelName + " with the value " + labelValue)
output := framework.RunKubectlOrDie(ns, "get", "pod", pausePodName, "-L", labelName) output := e2ekubectl.RunKubectlOrDie(ns, "get", "pod", pausePodName, "-L", labelName)
if !strings.Contains(output, labelValue) { if !strings.Contains(output, labelValue) {
framework.Failf("Failed updating label " + labelName + " to the pod " + pausePodName) framework.Failf("Failed updating label " + labelName + " to the pod " + pausePodName)
} }
ginkgo.By("removing the label " + labelName + " of a pod") ginkgo.By("removing the label " + labelName + " of a pod")
framework.RunKubectlOrDie(ns, "label", "pods", pausePodName, labelName+"-") e2ekubectl.RunKubectlOrDie(ns, "label", "pods", pausePodName, labelName+"-")
ginkgo.By("verifying the pod doesn't have the label " + labelName) ginkgo.By("verifying the pod doesn't have the label " + labelName)
output = framework.RunKubectlOrDie(ns, "get", "pod", pausePodName, "-L", labelName) output = e2ekubectl.RunKubectlOrDie(ns, "get", "pod", pausePodName, "-L", labelName)
if strings.Contains(output, labelValue) { if strings.Contains(output, labelValue) {
framework.Failf("Failed removing label " + labelName + " of the pod " + pausePodName) framework.Failf("Failed removing label " + labelName + " of the pod " + pausePodName)
} }
@ -1531,7 +1533,7 @@ metadata:
ginkgo.BeforeEach(func() { ginkgo.BeforeEach(func() {
ginkgo.By("creating the pod") ginkgo.By("creating the pod")
podYaml = commonutils.SubstituteImageName(string(readTestFileOrDie("busybox-pod.yaml.in"))) podYaml = commonutils.SubstituteImageName(string(readTestFileOrDie("busybox-pod.yaml.in")))
framework.RunKubectlOrDieInput(ns, podYaml, "create", "-f", "-") e2ekubectl.RunKubectlOrDieInput(ns, podYaml, "create", "-f", "-")
framework.ExpectEqual(e2epod.CheckPodsRunningReady(c, ns, []string{busyboxPodName}, framework.PodStartTimeout), true) framework.ExpectEqual(e2epod.CheckPodsRunningReady(c, ns, []string{busyboxPodName}, framework.PodStartTimeout), true)
}) })
ginkgo.AfterEach(func() { ginkgo.AfterEach(func() {
@ -1552,7 +1554,7 @@ metadata:
} }
ginkgo.By("specifying a remote filepath " + podSource + " on the pod") ginkgo.By("specifying a remote filepath " + podSource + " on the pod")
framework.RunKubectlOrDie(ns, "cp", podSource, tempDestination.Name()) e2ekubectl.RunKubectlOrDie(ns, "cp", podSource, tempDestination.Name())
ginkgo.By("verifying that the contents of the remote file " + podSource + " have been copied to a local file " + tempDestination.Name()) ginkgo.By("verifying that the contents of the remote file " + podSource + " have been copied to a local file " + tempDestination.Name())
localData, err := io.ReadAll(tempDestination) localData, err := io.ReadAll(tempDestination)
if err != nil { if err != nil {
@ -1570,10 +1572,10 @@ metadata:
ginkgo.BeforeEach(func() { ginkgo.BeforeEach(func() {
ginkgo.By("creating an pod") ginkgo.By("creating an pod")
// Agnhost image generates logs for a total of 100 lines over 20s. // Agnhost image generates logs for a total of 100 lines over 20s.
framework.RunKubectlOrDie(ns, "run", podName, "--image="+agnhostImage, "--restart=Never", podRunningTimeoutArg, "--", "logs-generator", "--log-lines-total", "100", "--run-duration", "20s") e2ekubectl.RunKubectlOrDie(ns, "run", podName, "--image="+agnhostImage, "--restart=Never", podRunningTimeoutArg, "--", "logs-generator", "--log-lines-total", "100", "--run-duration", "20s")
}) })
ginkgo.AfterEach(func() { ginkgo.AfterEach(func() {
framework.RunKubectlOrDie(ns, "delete", "pod", podName) e2ekubectl.RunKubectlOrDie(ns, "delete", "pod", podName)
}) })
/* /*
@@ -1600,23 +1602,23 @@ metadata:
}
ginkgo.By("checking for a matching strings")
-_, err := framework.LookForStringInLog(ns, podName, containerName, "/api/v1/namespaces/kube-system", framework.PodStartTimeout)
+_, err := e2eoutput.LookForStringInLog(ns, podName, containerName, "/api/v1/namespaces/kube-system", framework.PodStartTimeout)
framework.ExpectNoError(err)
ginkgo.By("limiting log lines")
-out := framework.RunKubectlOrDie(ns, "logs", podName, containerName, "--tail=1")
+out := e2ekubectl.RunKubectlOrDie(ns, "logs", podName, containerName, "--tail=1")
framework.Logf("got output %q", out)
gomega.Expect(len(out)).NotTo(gomega.BeZero())
framework.ExpectEqual(len(lines(out)), 1)
ginkgo.By("limiting log bytes")
-out = framework.RunKubectlOrDie(ns, "logs", podName, containerName, "--limit-bytes=1")
+out = e2ekubectl.RunKubectlOrDie(ns, "logs", podName, containerName, "--limit-bytes=1")
framework.Logf("got output %q", out)
framework.ExpectEqual(len(lines(out)), 1)
framework.ExpectEqual(len(out), 1)
ginkgo.By("exposing timestamps")
-out = framework.RunKubectlOrDie(ns, "logs", podName, containerName, "--tail=1", "--timestamps")
+out = e2ekubectl.RunKubectlOrDie(ns, "logs", podName, containerName, "--tail=1", "--timestamps")
framework.Logf("got output %q", out)
l := lines(out)
framework.ExpectEqual(len(l), 1)
@@ -1633,9 +1635,9 @@ metadata:
// because the granularity is only 1 second and
// it could end up rounding the wrong way.
time.Sleep(2500 * time.Millisecond) // ensure that startup logs on the node are seen as older than 1s
-recentOut := framework.RunKubectlOrDie(ns, "logs", podName, containerName, "--since=1s")
+recentOut := e2ekubectl.RunKubectlOrDie(ns, "logs", podName, containerName, "--since=1s")
recent := len(strings.Split(recentOut, "\n"))
-olderOut := framework.RunKubectlOrDie(ns, "logs", podName, containerName, "--since=24h")
+olderOut := e2ekubectl.RunKubectlOrDie(ns, "logs", podName, containerName, "--since=24h")
older := len(strings.Split(olderOut, "\n"))
gomega.Expect(recent).To(gomega.BeNumerically("<", older), "expected recent(%v) to be less than older(%v)\nrecent lines:\n%v\nolder lines:\n%v\n", recent, older, recentOut, olderOut)
})
@@ -1650,12 +1652,12 @@ metadata:
framework.ConformanceIt("should add annotations for pods in rc ", func() {
controllerJSON := commonutils.SubstituteImageName(string(readTestFileOrDie(agnhostControllerFilename)))
ginkgo.By("creating Agnhost RC")
-framework.RunKubectlOrDieInput(ns, controllerJSON, "create", "-f", "-")
+e2ekubectl.RunKubectlOrDieInput(ns, controllerJSON, "create", "-f", "-")
ginkgo.By("Waiting for Agnhost primary to start.")
waitForOrFailWithDebug(1)
ginkgo.By("patching all pods")
forEachPod(func(pod v1.Pod) {
-framework.RunKubectlOrDie(ns, "patch", "pod", pod.Name, "-p", "{\"metadata\":{\"annotations\":{\"x\":\"y\"}}}")
+e2ekubectl.RunKubectlOrDie(ns, "patch", "pod", pod.Name, "-p", "{\"metadata\":{\"annotations\":{\"x\":\"y\"}}}")
})
ginkgo.By("checking annotations")
@@ -1681,7 +1683,7 @@ metadata:
Description: The command 'kubectl version' MUST return the major, minor versions, GitCommit, etc of the Client and the Server that the kubectl is configured to connect to.
*/
framework.ConformanceIt("should check is all data is printed ", func() {
-versionString := framework.RunKubectlOrDie(ns, "version")
+versionString := e2ekubectl.RunKubectlOrDie(ns, "version")
// we expect following values for: Major -> digit, Minor -> numeric followed by an optional '+', GitCommit -> alphanumeric
requiredItems := []string{"Client Version: ", "Server Version: "}
for _, item := range requiredItems {
@@ -1700,7 +1702,7 @@ metadata:
})
ginkgo.AfterEach(func() {
-framework.RunKubectlOrDie(ns, "delete", "pods", podName)
+e2ekubectl.RunKubectlOrDie(ns, "delete", "pods", podName)
})
/*
@@ -1710,7 +1712,7 @@ metadata:
*/
framework.ConformanceIt("should create a pod from an image when restart is Never ", func() {
ginkgo.By("running the image " + httpdImage)
-framework.RunKubectlOrDie(ns, "run", podName, "--restart=Never", podRunningTimeoutArg, "--image="+httpdImage)
+e2ekubectl.RunKubectlOrDie(ns, "run", podName, "--restart=Never", podRunningTimeoutArg, "--image="+httpdImage)
ginkgo.By("verifying the pod " + podName + " was created")
pod, err := c.CoreV1().Pods(ns).Get(context.TODO(), podName, metav1.GetOptions{})
if err != nil {
@@ -1734,7 +1736,7 @@ metadata:
})
ginkgo.AfterEach(func() {
-framework.RunKubectlOrDie(ns, "delete", "pods", podName)
+e2ekubectl.RunKubectlOrDie(ns, "delete", "pods", podName)
})
/*
@@ -1744,7 +1746,7 @@ metadata:
*/
framework.ConformanceIt("should update a single-container pod's image ", func() {
ginkgo.By("running the image " + httpdImage)
-framework.RunKubectlOrDie(ns, "run", podName, "--image="+httpdImage, podRunningTimeoutArg, "--labels=run="+podName)
+e2ekubectl.RunKubectlOrDie(ns, "run", podName, "--image="+httpdImage, podRunningTimeoutArg, "--labels=run="+podName)
ginkgo.By("verifying the pod " + podName + " is running")
label := labels.SelectorFromSet(labels.Set(map[string]string{"run": podName}))
@@ -1754,14 +1756,14 @@ metadata:
}
ginkgo.By("verifying the pod " + podName + " was created")
-podJSON := framework.RunKubectlOrDie(ns, "get", "pod", podName, "-o", "json")
+podJSON := e2ekubectl.RunKubectlOrDie(ns, "get", "pod", podName, "-o", "json")
if !strings.Contains(podJSON, podName) {
framework.Failf("Failed to find pod %s in [%s]", podName, podJSON)
}
ginkgo.By("replace the image in the pod")
podJSON = strings.Replace(podJSON, httpdImage, busyboxImage, 1)
-framework.RunKubectlOrDieInput(ns, podJSON, "replace", "-f", "-")
+e2ekubectl.RunKubectlOrDieInput(ns, podJSON, "replace", "-f", "-")
ginkgo.By("verifying the pod " + podName + " has the right image " + busyboxImage)
pod, err := c.CoreV1().Pods(ns).Get(context.TODO(), podName, metav1.GetOptions{})
@@ -1943,7 +1945,7 @@ metadata:
ginkgo.It("should show event when pod is created ", func() { ginkgo.It("should show event when pod is created ", func() {
podName := "e2e-test-httpd-pod" podName := "e2e-test-httpd-pod"
ginkgo.By("running the image " + httpdImage) ginkgo.By("running the image " + httpdImage)
framework.RunKubectlOrDie(ns, "run", podName, "--image="+httpdImage, podRunningTimeoutArg, "--labels=run="+podName) e2ekubectl.RunKubectlOrDie(ns, "run", podName, "--image="+httpdImage, podRunningTimeoutArg, "--labels=run="+podName)
ginkgo.By("verifying the pod " + podName + " is running") ginkgo.By("verifying the pod " + podName + " is running")
label := labels.SelectorFromSet(map[string]string{"run": podName}) label := labels.SelectorFromSet(map[string]string{"run": podName})
@ -1953,14 +1955,14 @@ metadata:
} }
ginkgo.By("show started event for this pod") ginkgo.By("show started event for this pod")
events := framework.RunKubectlOrDie(ns, "alpha", "events", "--for=pod/"+podName) events := e2ekubectl.RunKubectlOrDie(ns, "alpha", "events", "--for=pod/"+podName)
if !strings.Contains(events, fmt.Sprintf("Normal Scheduled Pod/%s", podName)) { if !strings.Contains(events, fmt.Sprintf("Normal Scheduled Pod/%s", podName)) {
framework.Failf("failed to list expected event") framework.Failf("failed to list expected event")
} }
ginkgo.By("expect not showing any WARNING message") ginkgo.By("expect not showing any WARNING message")
events = framework.RunKubectlOrDie(ns, "alpha", "events", "--types=WARNING", "--for=pod/"+podName) events = e2ekubectl.RunKubectlOrDie(ns, "alpha", "events", "--types=WARNING", "--for=pod/"+podName)
if events != "" { if events != "" {
framework.Failf("unexpected WARNING event fired") framework.Failf("unexpected WARNING event fired")
} }
@ -1972,7 +1974,7 @@ metadata:
quotaName := "million" quotaName := "million"
ginkgo.By("calling kubectl quota") ginkgo.By("calling kubectl quota")
framework.RunKubectlOrDie(ns, "create", "quota", quotaName, "--hard=pods=1000000,services=1000000") e2ekubectl.RunKubectlOrDie(ns, "create", "quota", quotaName, "--hard=pods=1000000,services=1000000")
ginkgo.By("verifying that the quota was created") ginkgo.By("verifying that the quota was created")
quota, err := c.CoreV1().ResourceQuotas(ns).Get(context.TODO(), quotaName, metav1.GetOptions{}) quota, err := c.CoreV1().ResourceQuotas(ns).Get(context.TODO(), quotaName, metav1.GetOptions{})
@ -2000,7 +2002,7 @@ metadata:
quotaName := "scopes" quotaName := "scopes"
ginkgo.By("calling kubectl quota") ginkgo.By("calling kubectl quota")
framework.RunKubectlOrDie(ns, "create", "quota", quotaName, "--hard=pods=1000000", "--scopes=BestEffort,NotTerminating") e2ekubectl.RunKubectlOrDie(ns, "create", "quota", quotaName, "--hard=pods=1000000", "--scopes=BestEffort,NotTerminating")
ginkgo.By("verifying that the quota was created") ginkgo.By("verifying that the quota was created")
quota, err := c.CoreV1().ResourceQuotas(ns).Get(context.TODO(), quotaName, metav1.GetOptions{}) quota, err := c.CoreV1().ResourceQuotas(ns).Get(context.TODO(), quotaName, metav1.GetOptions{})
@ -2027,7 +2029,7 @@ metadata:
quotaName := "scopes" quotaName := "scopes"
ginkgo.By("calling kubectl quota") ginkgo.By("calling kubectl quota")
out, err := framework.RunKubectl(ns, "create", "quota", quotaName, "--hard=hard=pods=1000000", "--scopes=Foo") out, err := e2ekubectl.RunKubectl(ns, "create", "quota", quotaName, "--hard=hard=pods=1000000", "--scopes=Foo")
if err == nil { if err == nil {
framework.Failf("Expected kubectl to fail, but it succeeded: %s", out) framework.Failf("Expected kubectl to fail, but it succeeded: %s", out)
} }
@ -2037,8 +2039,8 @@ metadata:
ginkgo.Describe("kubectl wait", func() { ginkgo.Describe("kubectl wait", func() {
ginkgo.It("should ignore not found error with --for=delete", func() { ginkgo.It("should ignore not found error with --for=delete", func() {
ginkgo.By("calling kubectl wait --for=delete") ginkgo.By("calling kubectl wait --for=delete")
framework.RunKubectlOrDie(ns, "wait", "--for=delete", "pod/doesnotexist") e2ekubectl.RunKubectlOrDie(ns, "wait", "--for=delete", "pod/doesnotexist")
framework.RunKubectlOrDie(ns, "wait", "--for=delete", "pod", "--selector=app.kubernetes.io/name=noexist") e2ekubectl.RunKubectlOrDie(ns, "wait", "--for=delete", "pod", "--selector=app.kubernetes.io/name=noexist")
}) })
}) })
}) })
@ -2073,7 +2075,7 @@ func checkOutput(output string, required [][]string) {
func checkKubectlOutputWithRetry(namespace string, required [][]string, args ...string) { func checkKubectlOutputWithRetry(namespace string, required [][]string, args ...string) {
var pollErr error var pollErr error
wait.PollImmediate(time.Second, time.Minute, func() (bool, error) { wait.PollImmediate(time.Second, time.Minute, func() (bool, error) {
output := framework.RunKubectlOrDie(namespace, args...) output := e2ekubectl.RunKubectlOrDie(namespace, args...)
err := checkOutputReturnError(output, required) err := checkOutputReturnError(output, required)
if err != nil { if err != nil {
pollErr = err pollErr = err
@ -2342,17 +2344,17 @@ func startLocalProxy() (srv *httptest.Server, logs *bytes.Buffer) {
// without being rejected by kubectl validation // without being rejected by kubectl validation
func createApplyCustomResource(resource, namespace, name string, crd *crd.TestCrd) error { func createApplyCustomResource(resource, namespace, name string, crd *crd.TestCrd) error {
ginkgo.By("successfully create CR") ginkgo.By("successfully create CR")
if _, err := framework.RunKubectlInput(namespace, resource, "create", "--validate=true", "-f", "-"); err != nil { if _, err := e2ekubectl.RunKubectlInput(namespace, resource, "create", "--validate=true", "-f", "-"); err != nil {
return fmt.Errorf("failed to create CR %s in namespace %s: %v", resource, namespace, err) return fmt.Errorf("failed to create CR %s in namespace %s: %v", resource, namespace, err)
} }
if _, err := framework.RunKubectl(namespace, "delete", crd.Crd.Spec.Names.Plural, name); err != nil { if _, err := e2ekubectl.RunKubectl(namespace, "delete", crd.Crd.Spec.Names.Plural, name); err != nil {
return fmt.Errorf("failed to delete CR %s: %v", name, err) return fmt.Errorf("failed to delete CR %s: %v", name, err)
} }
ginkgo.By("successfully apply CR") ginkgo.By("successfully apply CR")
if _, err := framework.RunKubectlInput(namespace, resource, "apply", "--validate=true", "-f", "-"); err != nil { if _, err := e2ekubectl.RunKubectlInput(namespace, resource, "apply", "--validate=true", "-f", "-"); err != nil {
return fmt.Errorf("failed to apply CR %s in namespace %s: %v", resource, namespace, err) return fmt.Errorf("failed to apply CR %s in namespace %s: %v", resource, namespace, err)
} }
if _, err := framework.RunKubectl(namespace, "delete", crd.Crd.Spec.Names.Plural, name); err != nil { if _, err := e2ekubectl.RunKubectl(namespace, "delete", crd.Crd.Spec.Names.Plural, name); err != nil {
return fmt.Errorf("failed to delete CR %s: %v", name, err) return fmt.Errorf("failed to delete CR %s: %v", name, err)
} }
return nil return nil
@ -2387,7 +2389,7 @@ func validateController(c clientset.Interface, containerImage string, replicas i
ginkgo.By(fmt.Sprintf("waiting for all containers in %s pods to come up.", testname)) //testname should be selector ginkgo.By(fmt.Sprintf("waiting for all containers in %s pods to come up.", testname)) //testname should be selector
waitLoop: waitLoop:
for start := time.Now(); time.Since(start) < framework.PodStartTimeout; time.Sleep(5 * time.Second) { for start := time.Now(); time.Since(start) < framework.PodStartTimeout; time.Sleep(5 * time.Second) {
getPodsOutput := framework.RunKubectlOrDie(ns, "get", "pods", "-o", "template", getPodsTemplate, "-l", testname) getPodsOutput := e2ekubectl.RunKubectlOrDie(ns, "get", "pods", "-o", "template", getPodsTemplate, "-l", testname)
pods := strings.Fields(getPodsOutput) pods := strings.Fields(getPodsOutput)
if numPods := len(pods); numPods != replicas { if numPods := len(pods); numPods != replicas {
ginkgo.By(fmt.Sprintf("Replicas for %s: expected=%d actual=%d", testname, replicas, numPods)) ginkgo.By(fmt.Sprintf("Replicas for %s: expected=%d actual=%d", testname, replicas, numPods))
@ -2395,13 +2397,13 @@ waitLoop:
} }
var runningPods []string var runningPods []string
for _, podID := range pods { for _, podID := range pods {
running := framework.RunKubectlOrDie(ns, "get", "pods", podID, "-o", "template", getContainerStateTemplate) running := e2ekubectl.RunKubectlOrDie(ns, "get", "pods", podID, "-o", "template", getContainerStateTemplate)
if running != "true" { if running != "true" {
framework.Logf("%s is created but not running", podID) framework.Logf("%s is created but not running", podID)
continue waitLoop continue waitLoop
} }
currentImage := framework.RunKubectlOrDie(ns, "get", "pods", podID, "-o", "template", getImageTemplate) currentImage := e2ekubectl.RunKubectlOrDie(ns, "get", "pods", podID, "-o", "template", getImageTemplate)
currentImage = trimDockerRegistry(currentImage) currentImage = trimDockerRegistry(currentImage)
if currentImage != containerImage { if currentImage != containerImage {
framework.Logf("%s is created but running wrong image; expected: %s, actual: %s", podID, containerImage, currentImage) framework.Logf("%s is created but running wrong image; expected: %s, actual: %s", podID, containerImage, currentImage)

@@ -150,7 +150,7 @@ var _ = common.SIGDescribe("Conntrack", func() {
cmd := fmt.Sprintf(`date; for i in $(seq 1 3000); do echo "$(date) Try: ${i}"; echo hostname | nc -u -w 5 -p %d %s %d; echo; done`, srcPort, serverNodeInfo.nodeIP, udpService.Spec.Ports[0].NodePort)
clientPod.Spec.Containers[0].Command = []string{"/bin/sh", "-c", cmd}
clientPod.Spec.Containers[0].Name = podClient
-fr.PodClient().CreateSync(clientPod)
+e2epod.NewPodClient(fr).CreateSync(clientPod)
// Read the client pod logs
logs, err := e2epod.GetPodLogs(cs, ns, podClient, podClient)
@@ -163,7 +163,7 @@ var _ = common.SIGDescribe("Conntrack", func() {
serverPod1.Labels = udpJig.Labels
nodeSelection = e2epod.NodeSelection{Name: serverNodeInfo.name}
e2epod.SetNodeSelection(&serverPod1.Spec, nodeSelection)
-fr.PodClient().CreateSync(serverPod1)
+e2epod.NewPodClient(fr).CreateSync(serverPod1)
validateEndpointsPortsOrFail(cs, ns, serviceName, portsByPodName{podBackend1: {80}})
@@ -186,11 +186,11 @@ var _ = common.SIGDescribe("Conntrack", func() {
serverPod2.Labels = udpJig.Labels
nodeSelection = e2epod.NodeSelection{Name: serverNodeInfo.name}
e2epod.SetNodeSelection(&serverPod2.Spec, nodeSelection)
-fr.PodClient().CreateSync(serverPod2)
+e2epod.NewPodClient(fr).CreateSync(serverPod2)
// and delete the first pod
framework.Logf("Cleaning up %s pod", podBackend1)
-fr.PodClient().DeleteSync(podBackend1, metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
+e2epod.NewPodClient(fr).DeleteSync(podBackend1, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout)
validateEndpointsPortsOrFail(cs, ns, serviceName, portsByPodName{podBackend2: {80}})
@@ -226,7 +226,7 @@ var _ = common.SIGDescribe("Conntrack", func() {
cmd := fmt.Sprintf(`date; for i in $(seq 1 3000); do echo "$(date) Try: ${i}"; echo hostname | nc -u -w 5 -p %d %s %d; echo; done`, srcPort, udpService.Spec.ClusterIP, udpService.Spec.Ports[0].Port)
clientPod.Spec.Containers[0].Command = []string{"/bin/sh", "-c", cmd}
clientPod.Spec.Containers[0].Name = podClient
-fr.PodClient().CreateSync(clientPod)
+e2epod.NewPodClient(fr).CreateSync(clientPod)
// Read the client pod logs
logs, err := e2epod.GetPodLogs(cs, ns, podClient, podClient)
@@ -239,7 +239,7 @@ var _ = common.SIGDescribe("Conntrack", func() {
serverPod1.Labels = udpJig.Labels
nodeSelection = e2epod.NodeSelection{Name: serverNodeInfo.name}
e2epod.SetNodeSelection(&serverPod1.Spec, nodeSelection)
-fr.PodClient().CreateSync(serverPod1)
+e2epod.NewPodClient(fr).CreateSync(serverPod1)
validateEndpointsPortsOrFail(cs, ns, serviceName, portsByPodName{podBackend1: {80}})
@@ -262,11 +262,11 @@ var _ = common.SIGDescribe("Conntrack", func() {
serverPod2.Labels = udpJig.Labels
nodeSelection = e2epod.NodeSelection{Name: serverNodeInfo.name}
e2epod.SetNodeSelection(&serverPod2.Spec, nodeSelection)
-fr.PodClient().CreateSync(serverPod2)
+e2epod.NewPodClient(fr).CreateSync(serverPod2)
// and delete the first pod
framework.Logf("Cleaning up %s pod", podBackend1)
-fr.PodClient().DeleteSync(podBackend1, metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
+e2epod.NewPodClient(fr).DeleteSync(podBackend1, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout)
validateEndpointsPortsOrFail(cs, ns, serviceName, portsByPodName{podBackend2: {80}})
@@ -313,7 +313,7 @@ var _ = common.SIGDescribe("Conntrack", func() {
cmd := fmt.Sprintf(`date; for i in $(seq 1 3000); do echo "$(date) Try: ${i}"; echo hostname | nc -u -w 5 -p %d %s %d; echo; done`, srcPort, udpService.Spec.ClusterIP, udpService.Spec.Ports[0].Port)
clientPod.Spec.Containers[0].Command = []string{"/bin/sh", "-c", cmd}
clientPod.Spec.Containers[0].Name = podClient
-fr.PodClient().CreateSync(clientPod)
+e2epod.NewPodClient(fr).CreateSync(clientPod)
// Read the client pod logs
logs, err := e2epod.GetPodLogs(cs, ns, podClient, podClient)
@@ -334,7 +334,7 @@ var _ = common.SIGDescribe("Conntrack", func() {
},
}
e2epod.SetNodeSelection(&serverPod1.Spec, nodeSelection)
-fr.PodClient().CreateSync(serverPod1)
+e2epod.NewPodClient(fr).CreateSync(serverPod1)
// wait until the endpoints are ready
validateEndpointsPortsOrFail(cs, ns, serviceName, portsByPodName{podBackend1: {80}})
@@ -411,7 +411,7 @@ var _ = common.SIGDescribe("Conntrack", func() {
}
nodeSelection := e2epod.NodeSelection{Name: serverNodeInfo.name}
e2epod.SetNodeSelection(&serverPod.Spec, nodeSelection)
-fr.PodClient().CreateSync(serverPod)
+e2epod.NewPodClient(fr).CreateSync(serverPod)
ginkgo.By("Server pod created on node " + serverNodeInfo.name)
svc := &v1.Service{
@@ -453,7 +453,7 @@ var _ = common.SIGDescribe("Conntrack", func() {
nodeSelection = e2epod.NodeSelection{Name: clientNodeInfo.name}
e2epod.SetNodeSelection(&pod.Spec, nodeSelection)
-fr.PodClient().CreateSync(pod)
+e2epod.NewPodClient(fr).CreateSync(pod)
ginkgo.By("Client pod created")
// The client will open connections against the server

@@ -432,7 +432,7 @@ var _ = common.SIGDescribe("DNS", func() {
runCommand := func(arg string) string {
cmd := []string{"/agnhost", arg}
-stdout, stderr, err := f.ExecWithOptions(framework.ExecOptions{
+stdout, stderr, err := e2epod.ExecWithOptions(f, e2epod.ExecOptions{
Command: cmd,
Namespace: f.Namespace.Name,
PodName: testAgnhostPod.Name,
@@ -524,7 +524,7 @@ var _ = common.SIGDescribe("DNS", func() {
ginkgo.By("Verifying customized DNS option is configured on pod...")
// TODO: Figure out a better way other than checking the actual resolv,conf file.
cmd := []string{"cat", "/etc/resolv.conf"}
-stdout, stderr, err := f.ExecWithOptions(framework.ExecOptions{
+stdout, stderr, err := e2epod.ExecWithOptions(f, e2epod.ExecOptions{
Command: cmd,
Namespace: f.Namespace.Name,
PodName: testUtilsPod.Name,
@@ -544,7 +544,7 @@ var _ = common.SIGDescribe("DNS", func() {
// - DNS query is sent to the specified server.
cmd = []string{"dig", "+short", "+search", testDNSNameShort}
digFunc := func() (bool, error) {
-stdout, stderr, err := f.ExecWithOptions(framework.ExecOptions{
+stdout, stderr, err := e2epod.ExecWithOptions(f, e2epod.ExecOptions{
Command: cmd,
Namespace: f.Namespace.Name,
PodName: testUtilsPod.Name,

@@ -126,7 +126,7 @@ func (t *dnsTestCommon) runDig(dnsName, target string) []string {
}
cmd = append(cmd, dnsName)
-stdout, stderr, err := t.f.ExecWithOptions(framework.ExecOptions{
+stdout, stderr, err := e2epod.ExecWithOptions(t.f, e2epod.ExecOptions{
Command: cmd,
Namespace: t.f.Namespace.Name,
PodName: t.utilPod.Name,

@@ -47,7 +47,7 @@ var _ = common.SIGDescribe("[Feature:PerformanceDNS][Serial]", func() {
f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged
ginkgo.BeforeEach(func() {
-framework.ExpectNoError(framework.WaitForAllNodesSchedulable(f.ClientSet, framework.TestContext.NodeSchedulableTimeout))
+framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(f.ClientSet, framework.TestContext.NodeSchedulableTimeout))
e2enode.WaitForTotalHealthy(f.ClientSet, time.Minute)
err := framework.CheckTestingNSDeletedExcept(f.ClientSet, f.Namespace.Name)

@@ -35,6 +35,7 @@ import (
e2edeployment "k8s.io/kubernetes/test/e2e/framework/deployment"
e2enetwork "k8s.io/kubernetes/test/e2e/framework/network"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
+e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
"k8s.io/kubernetes/test/e2e/network/common"
imageutils "k8s.io/kubernetes/test/utils/image"
@@ -48,11 +49,11 @@ var _ = common.SIGDescribe("[Feature:IPv6DualStack]", func() {
f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged
var cs clientset.Interface
-var podClient *framework.PodClient
+var podClient *e2epod.PodClient
ginkgo.BeforeEach(func() {
cs = f.ClientSet
-podClient = f.PodClient()
+podClient = e2epod.NewPodClient(f)
})
ginkgo.It("should have ipv4 and ipv6 internal node ip", func() {
@@ -763,7 +764,7 @@ func assertNetworkConnectivity(f *framework.Framework, serverPods v1.PodList, cl
gomega.Consistently(func() error {
ginkgo.By(fmt.Sprintf("checking connectivity from pod %s to serverIP: %s, port: %s", clientPod.Name, ip, port))
cmd := checkNetworkConnectivity(ip, port, timeout)
-_, _, err := f.ExecCommandInContainerWithFullOutput(clientPod.Name, containerName, cmd...)
+_, _, err := e2epod.ExecCommandInContainerWithFullOutput(f, clientPod.Name, containerName, cmd...)
return err
}, duration, pollInterval).ShouldNot(gomega.HaveOccurred())
}

@@ -34,6 +34,7 @@ import (
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/util/retry"
"k8s.io/kubernetes/test/e2e/framework"
+e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"k8s.io/kubernetes/test/e2e/network/common"
imageutils "k8s.io/kubernetes/test/utils/image"
admissionapi "k8s.io/pod-security-admission/api"
@@ -46,11 +47,11 @@ var _ = common.SIGDescribe("EndpointSlice", func() {
f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline
var cs clientset.Interface
-var podClient *framework.PodClient
+var podClient *e2epod.PodClient
ginkgo.BeforeEach(func() {
cs = f.ClientSet
-podClient = f.PodClient()
+podClient = e2epod.NewPodClient(f)
})
/*

@@ -32,8 +32,10 @@ import (
clientset "k8s.io/client-go/kubernetes"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/test/e2e/framework"
+e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
e2enetwork "k8s.io/kubernetes/test/e2e/framework/network"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
+e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
e2eresource "k8s.io/kubernetes/test/e2e/framework/resource"
e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
e2etestfiles "k8s.io/kubernetes/test/e2e/framework/testfiles"
@@ -95,11 +97,11 @@ var _ = common.SIGDescribe("ClusterDns [Feature:Example]", func() {
}
for _, ns := range namespaces {
-framework.RunKubectlOrDieInput(ns.Name, read(filepath.Join(clusterDnsPath, "dns-backend-rc.yaml")), "create", "-f", "-")
+e2ekubectl.RunKubectlOrDieInput(ns.Name, read(filepath.Join(clusterDnsPath, "dns-backend-rc.yaml")), "create", "-f", "-")
}
for _, ns := range namespaces {
-framework.RunKubectlOrDieInput(ns.Name, read(filepath.Join(clusterDnsPath, "dns-backend-service.yaml")), "create", "-f", "-")
+e2ekubectl.RunKubectlOrDieInput(ns.Name, read(filepath.Join(clusterDnsPath, "dns-backend-service.yaml")), "create", "-f", "-")
}
// wait for objects
@@ -140,14 +142,14 @@ var _ = common.SIGDescribe("ClusterDns [Feature:Example]", func() {
podName := pods.Items[0].Name
queryDNS := fmt.Sprintf(queryDNSPythonTemplate, backendName+"."+namespaces[0].Name)
-_, err = framework.LookForStringInPodExec(namespaces[0].Name, podName, []string{"python", "-c", queryDNS}, "ok", dnsReadyTimeout)
+_, err = e2eoutput.LookForStringInPodExec(namespaces[0].Name, podName, []string{"python", "-c", queryDNS}, "ok", dnsReadyTimeout)
framework.ExpectNoError(err, "waiting for output from pod exec")
updatedPodYaml := strings.Replace(read(filepath.Join(clusterDnsPath, "dns-frontend-pod.yaml")), fmt.Sprintf("dns-backend.development.svc.%s", framework.TestContext.ClusterDNSDomain), fmt.Sprintf("dns-backend.%s.svc.%s", namespaces[0].Name, framework.TestContext.ClusterDNSDomain), 1)
// create a pod in each namespace
for _, ns := range namespaces {
-framework.RunKubectlOrDieInput(ns.Name, updatedPodYaml, "create", "-f", "-")
+e2ekubectl.RunKubectlOrDieInput(ns.Name, updatedPodYaml, "create", "-f", "-")
}
// wait until the pods have been scheduler, i.e. are not Pending anymore. Remember
@@ -159,7 +161,7 @@ var _ = common.SIGDescribe("ClusterDns [Feature:Example]", func() {
// wait for pods to print their result
for _, ns := range namespaces {
-_, err := framework.LookForStringInLog(ns.Name, frontendName, frontendName, podOutput, framework.PodStartTimeout)
+_, err := e2eoutput.LookForStringInLog(ns.Name, frontendName, frontendName, podOutput, framework.PodStartTimeout)
framework.ExpectNoError(err, "pod %s failed to print result in logs", frontendName)
}
})

@@ -18,6 +18,7 @@ package network
import (
"context"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

@@ -27,6 +27,7 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
+e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
"k8s.io/kubernetes/test/e2e/network/common"
@@ -117,7 +118,7 @@ var _ = common.SIGDescribe("CVE-2021-29923", func() {
ip := netutils.ParseIPSloppy(clusterIPZero)
cmd := fmt.Sprintf("echo hostName | nc -v -t -w 2 %s %v", ip.String(), servicePort)
err = wait.PollImmediate(1*time.Second, e2eservice.ServiceReachabilityShortPollTimeout, func() (bool, error) {
-stdout, err := framework.RunHostCmd(execPod.Namespace, execPod.Name, cmd)
+stdout, err := e2eoutput.RunHostCmd(execPod.Namespace, execPod.Name, cmd)
if err != nil {
framework.Logf("Service reachability failing with error: %v\nRetrying...", err)
return false, nil
@@ -136,7 +137,7 @@ var _ = common.SIGDescribe("CVE-2021-29923", func() {
// We have to check that the Service is not reachable in the address interpreted as decimal.
cmd = fmt.Sprintf("echo hostName | nc -v -t -w 2 %s %v", clusterIPOctal, servicePort)
err = wait.PollImmediate(1*time.Second, e2eservice.ServiceReachabilityShortPollTimeout, func() (bool, error) {
-stdout, err := framework.RunHostCmd(execPod.Namespace, execPod.Name, cmd)
+stdout, err := e2eoutput.RunHostCmd(execPod.Namespace, execPod.Name, cmd)
if err != nil {
framework.Logf("Service reachability failing with error: %v\nRetrying...", err)
return false, nil

@@ -112,7 +112,7 @@ var _ = common.SIGDescribe("HostPort", func() {
},
},
}
-f.PodClient().CreateSync(hostExecPod)
+e2epod.NewPodClient(f).CreateSync(hostExecPod)
// use a 5 seconds timeout per connection
timeout := 5
@@ -124,14 +124,14 @@ var _ = common.SIGDescribe("HostPort", func() {
for i := 0; i < 5; i++ {
// check pod1
ginkgo.By(fmt.Sprintf("checking connectivity from pod %s to serverIP: %s, port: %d", hostExecPod.Name, localhost, port))
-hostname1, _, err := f.ExecCommandInContainerWithFullOutput(hostExecPod.Name, "e2e-host-exec", cmdPod1...)
+hostname1, _, err := e2epod.ExecCommandInContainerWithFullOutput(f, hostExecPod.Name, "e2e-host-exec", cmdPod1...)
if err != nil {
framework.Logf("Can not connect from %s to pod(pod1) to serverIP: %s, port: %d", hostExecPod.Name, localhost, port)
continue
}
// check pod2
ginkgo.By(fmt.Sprintf("checking connectivity from pod %s to serverIP: %s, port: %d", hostExecPod.Name, hostIP, port))
-hostname2, _, err := f.ExecCommandInContainerWithFullOutput(hostExecPod.Name, "e2e-host-exec", cmdPod2...)
+hostname2, _, err := e2epod.ExecCommandInContainerWithFullOutput(f, hostExecPod.Name, "e2e-host-exec", cmdPod2...)
if err != nil {
framework.Logf("Can not connect from %s to pod(pod2) to serverIP: %s, port: %d", hostExecPod.Name, hostIP, port)
continue
@@ -143,7 +143,7 @@ var _ = common.SIGDescribe("HostPort", func() {
}
// check pod3
ginkgo.By(fmt.Sprintf("checking connectivity from pod %s to serverIP: %s, port: %d UDP", hostExecPod.Name, hostIP, port))
-hostname3, _, err := f.ExecCommandInContainerWithFullOutput(hostExecPod.Name, "e2e-host-exec", cmdPod3...)
+hostname3, _, err := e2epod.ExecCommandInContainerWithFullOutput(f, hostExecPod.Name, "e2e-host-exec", cmdPod3...)
if err != nil {
framework.Logf("Can not connect from %s to pod(pod2) to serverIP: %s, port: %d", hostExecPod.Name, hostIP, port)
continue

@@ -31,6 +31,7 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
+e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
"k8s.io/kubernetes/test/e2e/network/common"
imageutils "k8s.io/kubernetes/test/utils/image"
@@ -116,7 +117,7 @@ var _ = common.SIGDescribe("KubeProxy", func() {
},
},
}
-fr.PodClient().CreateSync(hostExecPod)
+e2epod.NewPodClient(fr).CreateSync(hostExecPod)
// Create the client and server pods
clientPodSpec := &v1.Pod{
@@ -184,7 +185,7 @@ var _ = common.SIGDescribe("KubeProxy", func() {
serverNodeInfo.name,
serverNodeInfo.nodeIP,
kubeProxyE2eImage))
-fr.PodClient().CreateSync(serverPodSpec)
+e2epod.NewPodClient(fr).CreateSync(serverPodSpec)
// The server should be listening before spawning the client pod
if readyErr := e2epod.WaitTimeoutForPodReadyInNamespace(fr.ClientSet, serverPodSpec.Name, fr.Namespace.Name, framework.PodStartTimeout); readyErr != nil {
@@ -196,7 +197,7 @@ var _ = common.SIGDescribe("KubeProxy", func() {
clientNodeInfo.name,
clientNodeInfo.nodeIP,
kubeProxyE2eImage))
-fr.PodClient().CreateSync(clientPodSpec)
+e2epod.NewPodClient(fr).CreateSync(clientPodSpec)
ginkgo.By("Checking conntrack entries for the timeout")
// These must be synchronized from the default values set in
@@ -217,7 +218,7 @@ var _ = common.SIGDescribe("KubeProxy", func() {
"| grep -m 1 'CLOSE_WAIT.*dport=%v' ",
ipFamily, ip, testDaemonTCPPort)
if err := wait.PollImmediate(2*time.Second, epsilonSeconds*time.Second, func() (bool, error) {
-result, err := framework.RunHostCmd(fr.Namespace.Name, "e2e-net-exec", cmd)
+result, err := e2eoutput.RunHostCmd(fr.Namespace.Name, "e2e-net-exec", cmd)
// retry if we can't obtain the conntrack entry
if err != nil {
framework.Logf("failed to obtain conntrack entry: %v %v", result, err)
@@ -239,7 +240,7 @@ var _ = common.SIGDescribe("KubeProxy", func() {
return false, fmt.Errorf("wrong TCP CLOSE_WAIT timeout: %v expected: %v", timeoutSeconds, expectedTimeoutSeconds)
}); err != nil {
// Dump all conntrack entries for debugging
-result, err2 := framework.RunHostCmd(fr.Namespace.Name, "e2e-net-exec", "conntrack -L")
+result, err2 := e2eoutput.RunHostCmd(fr.Namespace.Name, "e2e-net-exec", "conntrack -L")
if err2 != nil {
framework.Logf("failed to obtain conntrack entry: %v %v", result, err2)
}


@@ -38,6 +38,7 @@ import (
 e2enetwork "k8s.io/kubernetes/test/e2e/framework/network"
 e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
+e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
 "k8s.io/kubernetes/test/e2e/framework/providers/gce"
 e2erc "k8s.io/kubernetes/test/e2e/framework/rc"
 e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
@@ -628,7 +629,7 @@ var _ = common.SIGDescribe("LoadBalancers", func() {
 tcpIngressIP := e2eservice.GetIngressPoint(lbIngress)
 if pollErr := wait.PollImmediate(pollInterval, createTimeout, func() (bool, error) {
 cmd := fmt.Sprintf(`curl -m 5 'http://%v:%v/echo?msg=hello'`, tcpIngressIP, svcPort)
-stdout, err := framework.RunHostCmd(hostExec.Namespace, hostExec.Name, cmd)
+stdout, err := e2eoutput.RunHostCmd(hostExec.Namespace, hostExec.Name, cmd)
 if err != nil {
 framework.Logf("error curling; stdout: %v. err: %v", stdout, err)
 return false, nil
@@ -1219,7 +1220,7 @@ var _ = common.SIGDescribe("LoadBalancers ESIPP [Slow]", func() {
 loadBalancerPropagationTimeout := e2eservice.GetServiceLoadBalancerPropagationTimeout(cs)
 ginkgo.By(fmt.Sprintf("Hitting external lb %v from pod %v on node %v", ingressIP, pausePod.Name, pausePod.Spec.NodeName))
 if pollErr := wait.PollImmediate(framework.Poll, loadBalancerPropagationTimeout, func() (bool, error) {
-stdout, err := framework.RunHostCmd(pausePod.Namespace, pausePod.Name, cmd)
+stdout, err := e2eoutput.RunHostCmd(pausePod.Namespace, pausePod.Name, cmd)
 if err != nil {
 framework.Logf("got err: %v, retry until timeout", err)
 return false, nil


@@ -191,7 +191,7 @@ func (k *kubeManager) probeConnectivity(args *probeConnectivityArgs) (bool, stri
 // executeRemoteCommand executes a remote shell command on the given pod.
 func (k *kubeManager) executeRemoteCommand(namespace string, pod string, containerName string, command []string) (string, string, error) {
-return k.framework.ExecWithOptions(framework.ExecOptions{
+return e2epod.ExecWithOptions(k.framework, e2epod.ExecOptions{
 Command: command,
 Namespace: namespace,
 PodName: pod,


@@ -40,6 +40,7 @@ import (
 "k8s.io/kubernetes/test/e2e/framework"
 e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
+e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
 e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
 "k8s.io/kubernetes/test/e2e/network/common"
 imageutils "k8s.io/kubernetes/test/utils/image"
@@ -1934,7 +1935,7 @@ func checkConnectivity(f *framework.Framework, ns *v1.Namespace, podClient *v1.P
 err = e2epod.WaitForPodSuccessInNamespace(f.ClientSet, podClient.Name, ns.Name)
 if err != nil {
 // Dump debug information for the test namespace.
-framework.DumpDebugInfo(f.ClientSet, f.Namespace.Name)
+e2eoutput.DumpDebugInfo(f.ClientSet, f.Namespace.Name)
 pods, policies, logs := collectPodsAndNetworkPolicies(f, podClient)
 framework.Failf("Pod %s should be able to connect to service %s, but was not able to connect.\nPod logs:\n%s\n\n Current NetworkPolicies:\n\t%v\n\n Pods:\n\t%v\n\n", podClient.Name, service.Name, logs, policies.Items, pods)
@@ -1950,7 +1951,7 @@ func checkNoConnectivity(f *framework.Framework, ns *v1.Namespace, podClient *v1
 // Dump debug information if the error was nil.
 if err == nil {
 // Dump debug information for the test namespace.
-framework.DumpDebugInfo(f.ClientSet, f.Namespace.Name)
+e2eoutput.DumpDebugInfo(f.ClientSet, f.Namespace.Name)
 pods, policies, logs := collectPodsAndNetworkPolicies(f, podClient)
 framework.Failf("Pod %s should not be able to connect to service %s, but was able to connect.\nPod logs:\n%s\n\n Current NetworkPolicies:\n\t%v\n\n Pods:\n\t %v\n\n", podClient.Name, service.Name, logs, policies.Items, pods)
@@ -1976,7 +1977,7 @@ func checkNoConnectivityByExitCode(f *framework.Framework, ns *v1.Namespace, pod
 framework.Failf("Pod %s should not be able to connect to service %s, but was able to connect.\nPod logs:\n%s\n\n Current NetworkPolicies:\n\t%v\n\n Pods:\n\t%v\n\n", podClient.Name, service.Name, logs, policies.Items, pods)
 // Dump debug information for the test namespace.
-framework.DumpDebugInfo(f.ClientSet, f.Namespace.Name)
+e2eoutput.DumpDebugInfo(f.ClientSet, f.Namespace.Name)
 }
 }


@@ -18,9 +18,10 @@ package netpol
 import (
 "fmt"
+"strings"
 v1 "k8s.io/api/core/v1"
 "k8s.io/kubernetes/test/e2e/framework"
-"strings"
 )
 // TestCase describes the data for a netpol test


@@ -33,6 +33,7 @@ import (
 e2edaemonset "k8s.io/kubernetes/test/e2e/framework/daemonset"
 e2edeployment "k8s.io/kubernetes/test/e2e/framework/deployment"
 e2enode "k8s.io/kubernetes/test/e2e/framework/node"
+e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 "k8s.io/kubernetes/test/e2e/network/common"
 imageutils "k8s.io/kubernetes/test/utils/image"
 admissionapi "k8s.io/pod-security-admission/api"
@@ -234,7 +235,7 @@ var _ = common.SIGDescribe("Networking IPerf2 [Feature:Networking-Performance]",
 podName := pod.Name
 nodeName := pod.Spec.NodeName
-iperfVersion := f.ExecShellInPod(podName, "iperf -v || true")
+iperfVersion := e2epod.ExecShellInPod(f, podName, "iperf -v || true")
 framework.Logf("iperf version: %s", iperfVersion)
 for try := 0; ; try++ {
@@ -247,7 +248,7 @@ var _ = common.SIGDescribe("Networking IPerf2 [Feature:Networking-Performance]",
 */
 command := fmt.Sprintf(`iperf %s -e -p %d --reportstyle C -i 1 -c %s && sleep 5`, familyStr, iperf2Port, serverServiceName)
 framework.Logf("attempting to run command '%s' in client pod %s (node %s)", command, podName, nodeName)
-output := f.ExecShellInPod(podName, command)
+output := e2epod.ExecShellInPod(f, podName, command)
 framework.Logf("output from exec on client pod %s (node %s): \n%s\n", podName, nodeName, output)
 results, err := ParseIPerf2EnhancedResultsFromCSV(output)


@@ -27,7 +27,7 @@ import (
 "k8s.io/klog/v2"
-"k8s.io/api/core/v1"
+v1 "k8s.io/api/core/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 clientset "k8s.io/client-go/kubernetes"
 "k8s.io/client-go/tools/clientcmd"


@@ -64,6 +64,8 @@ import (
 e2enetwork "k8s.io/kubernetes/test/e2e/framework/network"
 e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
+e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
+e2eproviders "k8s.io/kubernetes/test/e2e/framework/providers"
 e2erc "k8s.io/kubernetes/test/e2e/framework/rc"
 e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
 e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
@@ -134,7 +136,7 @@ func affinityCheckFromPod(execPod *v1.Pod, serviceIP string, servicePort int) (t
 curl := fmt.Sprintf(`curl -q -s --connect-timeout 2 http://%s/`, serviceIPPort)
 cmd := fmt.Sprintf("for i in $(seq 0 %d); do echo; %s ; done", AffinityConfirmCount, curl)
 getHosts := func() []string {
-stdout, err := framework.RunHostCmd(execPod.Namespace, execPod.Name, cmd)
+stdout, err := e2eoutput.RunHostCmd(execPod.Namespace, execPod.Name, cmd)
 if err != nil {
 framework.Logf("Failed to get response from %s. Retry until timeout", serviceIPPort)
 return nil
@@ -343,7 +345,7 @@ func verifyServeHostnameServiceUp(c clientset.Interface, ns string, expectedPods
 50*len(expectedPods), wgetCmd, serviceIPPort)
 framework.Logf("Executing cmd %q in pod %v/%v", cmd, ns, podName)
 // TODO: Use exec-over-http via the netexec pod instead of kubectl exec.
-output, err := framework.RunHostCmd(ns, podName, cmd)
+output, err := e2eoutput.RunHostCmd(ns, podName, cmd)
 if err != nil {
 framework.Logf("error while kubectl execing %q in pod %v/%v: %v\nOutput: %v", cmd, ns, podName, err, output)
 }
@@ -406,7 +408,7 @@ func verifyServeHostnameServiceDown(c clientset.Interface, ns string, serviceIP
 "curl -g -s --connect-timeout 2 http://%s && echo service-down-failed", ipPort)
 for start := time.Now(); time.Since(start) < e2eservice.KubeProxyLagTimeout; time.Sleep(5 * time.Second) {
-output, err := framework.RunHostCmd(ns, hostExecPod.Name, command)
+output, err := e2eoutput.RunHostCmd(ns, hostExecPod.Name, command)
 if err != nil {
 framework.Logf("error while kubectl execing %q in pod %v/%v: %v\nOutput: %v", command, ns, hostExecPod.Name, err, output)
 }
@@ -1762,7 +1764,7 @@ var _ = common.SIGDescribe("Services", func() {
 var stdout string
 if pollErr := wait.PollImmediate(framework.Poll, e2eservice.KubeProxyLagTimeout, func() (bool, error) {
 var err error
-stdout, err = framework.RunHostCmd(hostExec.Namespace, hostExec.Name, cmd)
+stdout, err = e2eoutput.RunHostCmd(hostExec.Namespace, hostExec.Name, cmd)
 if err != nil {
 framework.Logf("expected node port (%d) to not be in use, stdout: %v", nodePort, stdout)
 return false, nil
@@ -1855,7 +1857,7 @@ var _ = common.SIGDescribe("Services", func() {
 var stdout string
 if pollErr := wait.PollImmediate(framework.Poll, e2eservice.KubeProxyLagTimeout, func() (bool, error) {
 var err error
-stdout, err = framework.RunHostCmd(f.Namespace.Name, execPodName, cmd)
+stdout, err = e2eoutput.RunHostCmd(f.Namespace.Name, execPodName, cmd)
 if err != nil {
 framework.Logf("expected un-ready endpoint for Service %v, stdout: %v, err %v", t.Name, stdout, err)
 return false, nil
@@ -1878,7 +1880,7 @@ var _ = common.SIGDescribe("Services", func() {
 cmd = fmt.Sprintf("curl -q -s --connect-timeout 2 http://%s:%d/; test \"$?\" -ne \"0\"", svcName, port)
 if pollErr := wait.PollImmediate(framework.Poll, e2eservice.KubeProxyLagTimeout, func() (bool, error) {
 var err error
-stdout, err = framework.RunHostCmd(f.Namespace.Name, execPodName, cmd)
+stdout, err = e2eoutput.RunHostCmd(f.Namespace.Name, execPodName, cmd)
 if err != nil {
 framework.Logf("expected un-ready endpoint for Service %v, stdout: %v, err %v", t.Name, stdout, err)
 return false, nil
@@ -1898,7 +1900,7 @@ var _ = common.SIGDescribe("Services", func() {
 cmd = fmt.Sprintf("curl -q -s --connect-timeout 2 http://%s:%d/", svcName, port)
 if pollErr := wait.PollImmediate(framework.Poll, e2eservice.KubeProxyLagTimeout, func() (bool, error) {
 var err error
-stdout, err = framework.RunHostCmd(f.Namespace.Name, execPodName, cmd)
+stdout, err = e2eoutput.RunHostCmd(f.Namespace.Name, execPodName, cmd)
 if err != nil {
 framework.Logf("expected un-ready endpoint for Service %v, stdout: %v, err %v", t.Name, stdout, err)
 return false, nil
@@ -2153,7 +2155,7 @@ var _ = common.SIGDescribe("Services", func() {
 clusterIPAddress := net.JoinHostPort(svc.Spec.ClusterIP, strconv.Itoa(servicePort))
 cmd := fmt.Sprintf(`curl -q -s --connect-timeout 5 %s/hostname`, clusterIPAddress)
 if pollErr := wait.PollImmediate(framework.Poll, e2eservice.KubeProxyEndpointLagTimeout, func() (bool, error) {
-_, err := framework.RunHostCmd(pausePod1.Namespace, pausePod1.Name, cmd)
+_, err := e2eoutput.RunHostCmd(pausePod1.Namespace, pausePod1.Name, cmd)
 if err != nil {
 return true, nil
 }
@@ -2169,15 +2171,15 @@ var _ = common.SIGDescribe("Services", func() {
 // connect 3 times every 5 seconds to the Service and expect a failure
 for i := 0; i < 5; i++ {
 cmd = fmt.Sprintf(`curl -q -s --connect-timeout 5 %s/hostname`, clusterIPAddress)
-_, err := framework.RunHostCmd(pausePod1.Namespace, pausePod1.Name, cmd)
+_, err := e2eoutput.RunHostCmd(pausePod1.Namespace, pausePod1.Name, cmd)
 framework.ExpectError(err, "expected error when trying to connect to cluster IP")
 cmd = fmt.Sprintf(`curl -q -s --connect-timeout 5 %s/hostname`, nodePortAddress0)
-_, err = framework.RunHostCmd(pausePod1.Namespace, pausePod1.Name, cmd)
+_, err = e2eoutput.RunHostCmd(pausePod1.Namespace, pausePod1.Name, cmd)
 framework.ExpectError(err, "expected error when trying to connect to NodePort address")
 cmd = fmt.Sprintf(`curl -q -s --connect-timeout 5 %s/hostname`, nodePortAddress1)
-_, err = framework.RunHostCmd(pausePod1.Namespace, pausePod1.Name, cmd)
+_, err = e2eoutput.RunHostCmd(pausePod1.Namespace, pausePod1.Name, cmd)
 framework.ExpectError(err, "expected error when trying to connect to NodePort address")
 time.Sleep(5 * time.Second)
@@ -2410,7 +2412,7 @@ var _ = common.SIGDescribe("Services", func() {
 ginkgo.By(fmt.Sprintf("hitting service %v from pod %v on node %v", serviceAddress, podName, nodeName))
 expectedErr := "REFUSED"
 if pollErr := wait.PollImmediate(framework.Poll, e2eservice.KubeProxyEndpointLagTimeout, func() (bool, error) {
-_, err := framework.RunHostCmd(execPod.Namespace, execPod.Name, cmd)
+_, err := e2eoutput.RunHostCmd(execPod.Namespace, execPod.Name, cmd)
 if err != nil {
 if strings.Contains(err.Error(), expectedErr) {
@@ -2452,7 +2454,7 @@ var _ = common.SIGDescribe("Services", func() {
 evictedPod.Spec.Containers[0].Resources = v1.ResourceRequirements{
 Limits: v1.ResourceList{"ephemeral-storage": resource.MustParse("5Mi")},
 }
-f.PodClient().Create(evictedPod)
+e2epod.NewPodClient(f).Create(evictedPod)
 err = e2epod.WaitForPodTerminatedInNamespace(f.ClientSet, evictedPod.Name, "Evicted", f.Namespace.Name)
 if err != nil {
 framework.Failf("error waiting for pod to be evicted: %v", err)
@@ -2501,7 +2503,7 @@ var _ = common.SIGDescribe("Services", func() {
 ginkgo.By(fmt.Sprintf("hitting service %v from pod %v on node %v expected to be refused", serviceAddress, podName, nodeName))
 expectedErr := "REFUSED"
 if pollErr := wait.PollImmediate(framework.Poll, e2eservice.KubeProxyEndpointLagTimeout, func() (bool, error) {
-_, err := framework.RunHostCmd(execPod.Namespace, execPod.Name, cmd)
+_, err := e2eoutput.RunHostCmd(execPod.Namespace, execPod.Name, cmd)
 if err != nil {
 if strings.Contains(err.Error(), expectedErr) {
@@ -2590,7 +2592,7 @@ var _ = common.SIGDescribe("Services", func() {
 // the second pause pod is on a different node, so it should see a connection error every time
 cmd := fmt.Sprintf(`curl -q -s --connect-timeout 5 %s/hostname`, serviceAddress)
-_, err := framework.RunHostCmd(pausePod1.Namespace, pausePod1.Name, cmd)
+_, err := e2eoutput.RunHostCmd(pausePod1.Namespace, pausePod1.Name, cmd)
 framework.ExpectError(err, "expected error when trying to connect to cluster IP")
 }
 })
@@ -2670,7 +2672,7 @@ var _ = common.SIGDescribe("Services", func() {
 // the second pause pod is on a different node, so it should see a connection error every time
 cmd := fmt.Sprintf(`curl -q -s --connect-timeout 5 %s/hostname`, serviceAddress)
-_, err := framework.RunHostCmd(pausePod1.Namespace, pausePod1.Name, cmd)
+_, err := e2eoutput.RunHostCmd(pausePod1.Namespace, pausePod1.Name, cmd)
 framework.ExpectError(err, "expected error when trying to connect to cluster IP")
 }
 })
@@ -2753,7 +2755,7 @@ var _ = common.SIGDescribe("Services", func() {
 // the second pause pod is on a different node, so it should see a connection error every time
 cmd := fmt.Sprintf(`curl -q -s --connect-timeout 5 %s/hostname`, serviceAddress)
-_, err := framework.RunHostCmd(pausePod1.Namespace, pausePod1.Name, cmd)
+_, err := e2eoutput.RunHostCmd(pausePod1.Namespace, pausePod1.Name, cmd)
 framework.ExpectError(err, "expected error when trying to connect to cluster IP")
 }
@@ -2782,7 +2784,7 @@ var _ = common.SIGDescribe("Services", func() {
 // the second pause pod is on a different node, so it should see a connection error every time
 cmd := fmt.Sprintf(`curl -q -s --connect-timeout 5 %s/hostname`, serviceAddress)
-_, err := framework.RunHostCmd(pausePod3.Namespace, pausePod3.Name, cmd)
+_, err := e2eoutput.RunHostCmd(pausePod3.Namespace, pausePod3.Name, cmd)
 framework.ExpectError(err, "expected error when trying to connect to cluster IP")
 }
 })
@@ -2847,7 +2849,7 @@ var _ = common.SIGDescribe("Services", func() {
 // validate that the health check node port from kube-proxy returns 200 when there are ready endpoints
 err = wait.PollImmediate(time.Second, time.Minute, func() (bool, error) {
 cmd := fmt.Sprintf(`curl -s -o /dev/null -w "%%{http_code}" --connect-timeout 5 http://%s/healthz`, healthCheckNodePortAddr)
-out, err := framework.RunHostCmd(pausePod0.Namespace, pausePod0.Name, cmd)
+out, err := e2eoutput.RunHostCmd(pausePod0.Namespace, pausePod0.Name, cmd)
 if err != nil {
 return false, err
 }
@@ -2868,7 +2870,7 @@ var _ = common.SIGDescribe("Services", func() {
 // validate that the health check node port from kube-proxy returns 503 when there are no ready endpoints
 err = wait.PollImmediate(time.Second, time.Minute, func() (bool, error) {
 cmd := fmt.Sprintf(`curl -s -o /dev/null -w "%%{http_code}" --connect-timeout 5 http://%s/healthz`, healthCheckNodePortAddr)
-out, err := framework.RunHostCmd(pausePod0.Namespace, pausePod0.Name, cmd)
+out, err := e2eoutput.RunHostCmd(pausePod0.Namespace, pausePod0.Name, cmd)
 if err != nil {
 return false, err
 }
@@ -3050,7 +3052,7 @@ var _ = common.SIGDescribe("Services", func() {
 execHostnameTest(*pausePod0, serviceAddress, webserverPod0.Name)
 cmd := fmt.Sprintf(`curl -q -s --connect-timeout 5 %s/hostname`, serviceAddress)
-_, err := framework.RunHostCmd(pausePod1.Namespace, pausePod1.Name, cmd)
+_, err := e2eoutput.RunHostCmd(pausePod1.Namespace, pausePod1.Name, cmd)
 framework.ExpectError(err, "expected error when trying to connect to cluster IP")
 time.Sleep(5 * time.Second)
@@ -3226,7 +3228,7 @@ var _ = common.SIGDescribe("Services", func() {
 // pausePod0 -> node0 and pausePod1 -> node1 both succeed because pod-to-same-node-NodePort
 // connections are neither internal nor external and always get Cluster traffic policy.
 cmd := fmt.Sprintf(`curl -q -s --connect-timeout 5 %s/hostname`, nodePortAddress1)
-_, err := framework.RunHostCmd(pausePod0.Namespace, pausePod0.Name, cmd)
+_, err := e2eoutput.RunHostCmd(pausePod0.Namespace, pausePod0.Name, cmd)
 framework.ExpectError(err, "expected error when trying to connect to node port for pausePod0")
 execHostnameTest(*pausePod0, nodePortAddress0, webserverPod0.Name)
@@ -3820,7 +3822,7 @@ func execAffinityTestForSessionAffinityTimeout(f *framework.Framework, cs client
 hosts := sets.NewString()
 cmd := fmt.Sprintf(`curl -q -s --connect-timeout 2 http://%s/`, net.JoinHostPort(svcIP, strconv.Itoa(servicePort)))
 for i := 0; i < 10; i++ {
-hostname, err := framework.RunHostCmd(execPod.Namespace, execPod.Name, cmd)
+hostname, err := e2eoutput.RunHostCmd(execPod.Namespace, execPod.Name, cmd)
 if err == nil {
 hosts.Insert(hostname)
 if hosts.Len() > 1 {
@@ -3999,7 +4001,7 @@ func createPodOrFail(f *framework.Framework, ns, name string, labels map[string]
 // Add a dummy environment variable to work around a docker issue.
 // https://github.com/docker/docker/issues/14203
 pod.Spec.Containers[0].Env = []v1.EnvVar{{Name: "FOO", Value: " "}}
-f.PodClient().CreateSync(pod)
+e2epod.NewPodClient(f).CreateSync(pod)
 }
 // launchHostExecPod launches a hostexec pod in the given namespace and waits
@@ -4018,7 +4020,7 @@ func launchHostExecPod(client clientset.Interface, ns, name string) *v1.Pod {
 func checkReachabilityFromPod(expectToBeReachable bool, timeout time.Duration, namespace, pod, target string) {
 cmd := fmt.Sprintf("wget -T 5 -qO- %q", target)
 err := wait.PollImmediate(framework.Poll, timeout, func() (bool, error) {
-_, err := framework.RunHostCmd(namespace, pod, cmd)
+_, err := e2eoutput.RunHostCmd(namespace, pod, cmd)
 if expectToBeReachable && err != nil {
 framework.Logf("Expect target to be reachable. But got err: %v. Retry until timeout", err)
 return false, nil
@@ -4037,11 +4039,11 @@ func checkReachabilityFromPod(expectToBeReachable bool, timeout time.Duration, n
 func proxyMode(f *framework.Framework) (string, error) {
 pod := e2epod.NewAgnhostPod(f.Namespace.Name, "kube-proxy-mode-detector", nil, nil, nil)
 pod.Spec.HostNetwork = true
-f.PodClient().CreateSync(pod)
+e2epod.NewPodClient(f).CreateSync(pod)
-defer f.PodClient().DeleteSync(pod.Name, metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
+defer e2epod.NewPodClient(f).DeleteSync(pod.Name, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout)
 cmd := "curl -q -s --connect-timeout 1 http://localhost:10249/proxyMode"
-stdout, err := framework.RunHostCmd(pod.Namespace, pod.Name, cmd)
+stdout, err := e2eoutput.RunHostCmd(pod.Namespace, pod.Name, cmd)
 if err != nil {
 return "", err
 }
@@ -4159,7 +4161,7 @@ func restartApiserver(namespace string, cs clientset.Interface) error {
 if err != nil {
 return err
 }
-return framework.MasterUpgradeGKE(namespace, v.GitVersion[1:]) // strip leading 'v'
+return e2eproviders.MasterUpgradeGKE(namespace, v.GitVersion[1:]) // strip leading 'v'
 }
 return restartComponent(cs, kubeAPIServerLabelName, metav1.NamespaceSystem, map[string]string{clusterComponentKey: kubeAPIServerLabelName})
@@ -4266,7 +4268,7 @@ var _ = common.SIGDescribe("SCTP [LinuxOnly]", func() {
 e2epod.SetNodeSelection(&podSpec.Spec, nodeSelection)
 ginkgo.By(fmt.Sprintf("Launching the pod on node %v", node.Name))
-f.PodClient().CreateSync(podSpec)
+e2epod.NewPodClient(f).CreateSync(podSpec)
 defer func() {
 err := cs.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), podName, metav1.DeleteOptions{})
 framework.ExpectNoError(err, "failed to delete pod: %s in namespace: %s", podName, f.Namespace.Name)


@@ -184,7 +184,7 @@ var _ = common.SIGDescribe("[Feature:Topology Hints]", func() {
 cmd := fmt.Sprintf(`date; for i in $(seq 1 3000); do sleep 1; echo "Date: $(date) Try: ${i}"; curl -q -s --connect-timeout 2 http://%s:80/ ; echo; done`, svc.Name)
 clientPod.Spec.Containers[0].Command = []string{"/bin/sh", "-c", cmd}
 clientPod.Spec.Containers[0].Name = clientPod.Name
-f.PodClient().CreateSync(clientPod)
+e2epod.NewPodClient(f).CreateSync(clientPod)
 framework.Logf("Ensuring that requests from %s pod on %s node stay in %s zone", clientPod.Name, nodeName, fromZone)


@@ -29,7 +29,9 @@ import (
 "k8s.io/apimachinery/pkg/util/intstr"
 "k8s.io/apimachinery/pkg/util/wait"
 "k8s.io/kubernetes/test/e2e/framework"
+e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
 e2enetwork "k8s.io/kubernetes/test/e2e/framework/network"
+e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
 "k8s.io/kubernetes/test/e2e/storage/utils"
 )
@@ -73,7 +75,7 @@ func GetHTTPContentFromTestContainer(config *e2enetwork.NetworkingTestConfig, ho
 // DescribeSvc logs the output of kubectl describe svc for the given namespace
 func DescribeSvc(ns string) {
 framework.Logf("\nOutput of kubectl describe svc:\n")
-desc, _ := framework.RunKubectl(
+desc, _ := e2ekubectl.RunKubectl(
 ns, "describe", "svc", fmt.Sprintf("--namespace=%v", ns))
 framework.Logf(desc)
 }
@@ -117,7 +119,7 @@ func execSourceIPTest(sourcePod v1.Pod, targetAddr string) (string, string) {
 framework.Logf("Waiting up to %v to get response from %s", timeout, targetAddr)
 cmd := fmt.Sprintf(`curl -q -s --connect-timeout 30 %s/clientip`, targetAddr)
 for start := time.Now(); time.Since(start) < timeout; time.Sleep(2 * time.Second) {
-stdout, err = framework.RunHostCmd(sourcePod.Namespace, sourcePod.Name, cmd)
+stdout, err = e2eoutput.RunHostCmd(sourcePod.Namespace, sourcePod.Name, cmd)
 if err != nil {
 framework.Logf("got err: %v, retry until timeout", err)
 continue
@@ -155,7 +157,7 @@ func execHostnameTest(sourcePod v1.Pod, targetAddr, targetHostname string) {
 framework.Logf("Waiting up to %v to get response from %s", timeout, targetAddr)
 cmd := fmt.Sprintf(`curl -q -s --connect-timeout 30 %s/hostname`, targetAddr)
 for start := time.Now(); time.Since(start) < timeout; time.Sleep(2 * time.Second) {
-stdout, err = framework.RunHostCmd(sourcePod.Namespace, sourcePod.Name, cmd)
+stdout, err = e2eoutput.RunHostCmd(sourcePod.Namespace, sourcePod.Name, cmd)
 if err != nil {
 framework.Logf("got err: %v, retry until timeout", err)
 continue


@@ -19,6 +19,7 @@ package node
 import (
 "k8s.io/kubernetes/test/e2e/framework"
 e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
+e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 e2esecurity "k8s.io/kubernetes/test/e2e/framework/security"
 e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
 admissionapi "k8s.io/pod-security-admission/api"
@@ -43,11 +44,11 @@ var _ = SIGDescribe("AppArmor", func() {
 })
 ginkgo.It("should enforce an AppArmor profile", func() {
-e2esecurity.CreateAppArmorTestPod(f.Namespace.Name, f.ClientSet, f.PodClient(), false, true)
+e2esecurity.CreateAppArmorTestPod(f.Namespace.Name, f.ClientSet, e2epod.NewPodClient(f), false, true)
 })
 ginkgo.It("can disable an AppArmor profile, using unconfined", func() {
-e2esecurity.CreateAppArmorTestPod(f.Namespace.Name, f.ClientSet, f.PodClient(), true, true)
+e2esecurity.CreateAppArmorTestPod(f.Namespace.Name, f.ClientSet, e2epod.NewPodClient(f), true, true)
 })
 })
 })


@@ -32,7 +32,9 @@ import (
 commonutils "k8s.io/kubernetes/test/e2e/common"
 "k8s.io/kubernetes/test/e2e/framework"
 e2eauth "k8s.io/kubernetes/test/e2e/framework/auth"
+e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
 e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
+e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
 e2etestfiles "k8s.io/kubernetes/test/e2e/framework/testfiles"
 admissionapi "k8s.io/pod-security-admission/api"
@@ -71,8 +73,8 @@ var _ = SIGDescribe("[Feature:Example]", func() {
 execYaml := readFile(test, "exec-liveness.yaml.in")
 httpYaml := readFile(test, "http-liveness.yaml.in")
-framework.RunKubectlOrDieInput(ns, execYaml, "create", "-f", "-")
-framework.RunKubectlOrDieInput(ns, httpYaml, "create", "-f", "-")
+e2ekubectl.RunKubectlOrDieInput(ns, execYaml, "create", "-f", "-")
+e2ekubectl.RunKubectlOrDieInput(ns, httpYaml, "create", "-f", "-")
 // Since both containers start rapidly, we can easily run this test in parallel.
 var wg sync.WaitGroup
@@ -121,13 +123,13 @@ var _ = SIGDescribe("[Feature:Example]", func() {
 podName := "secret-test-pod"
 ginkgo.By("creating secret and pod")
-framework.RunKubectlOrDieInput(ns, secretYaml, "create", "-f", "-")
-framework.RunKubectlOrDieInput(ns, podYaml, "create", "-f", "-")
+e2ekubectl.RunKubectlOrDieInput(ns, secretYaml, "create", "-f", "-")
+e2ekubectl.RunKubectlOrDieInput(ns, podYaml, "create", "-f", "-")
 err := e2epod.WaitForPodNoLongerRunningInNamespace(c, podName, ns)
 framework.ExpectNoError(err)
 ginkgo.By("checking if secret was read correctly")
-_, err = framework.LookForStringInLog(ns, "secret-test-pod", "test-container", "value-1", serverStartTimeout)
+_, err = e2eoutput.LookForStringInLog(ns, "secret-test-pod", "test-container", "value-1", serverStartTimeout)
 framework.ExpectNoError(err)
 })
 })
@@ -139,14 +141,14 @@ var _ = SIGDescribe("[Feature:Example]", func() {
 podName := "dapi-test-pod"
 ginkgo.By("creating the pod")
-framework.RunKubectlOrDieInput(ns, podYaml, "create", "-f", "-")
+e2ekubectl.RunKubectlOrDieInput(ns, podYaml, "create", "-f", "-")
 err := e2epod.WaitForPodNoLongerRunningInNamespace(c, podName, ns)
 framework.ExpectNoError(err)
 ginkgo.By("checking if name and namespace were passed correctly")
-_, err = framework.LookForStringInLog(ns, podName, "test-container", fmt.Sprintf("MY_POD_NAMESPACE=%v", ns), serverStartTimeout)
+_, err = e2eoutput.LookForStringInLog(ns, podName, "test-container", fmt.Sprintf("MY_POD_NAMESPACE=%v", ns), serverStartTimeout)
 framework.ExpectNoError(err)
-_, err = framework.LookForStringInLog(ns, podName, "test-container", fmt.Sprintf("MY_POD_NAME=%v", podName), serverStartTimeout)
+_, err = e2eoutput.LookForStringInLog(ns, podName, "test-container", fmt.Sprintf("MY_POD_NAME=%v", podName), serverStartTimeout)
 framework.ExpectNoError(err)
 })
 })


@@ -110,7 +110,7 @@ func waitTillNPodsRunningOnNodes(c clientset.Interface, nodeNames sets.String, p
 func restartNfsServer(serverPod *v1.Pod) {
 const startcmd = "/usr/sbin/rpc.nfsd 1"
 ns := fmt.Sprintf("--namespace=%v", serverPod.Namespace)
-framework.RunKubectlOrDie(ns, "exec", ns, serverPod.Name, "--", "/bin/sh", "-c", startcmd)
+e2ekubectl.RunKubectlOrDie(ns, "exec", ns, serverPod.Name, "--", "/bin/sh", "-c", startcmd)
 }
 // Stop the passed-in nfs-server by issuing a `/usr/sbin/rpc.nfsd 0` command in the
@@ -119,7 +119,7 @@ func restartNfsServer(serverPod *v1.Pod) {
 func stopNfsServer(serverPod *v1.Pod) {
 const stopcmd = "/usr/sbin/rpc.nfsd 0"
 ns := fmt.Sprintf("--namespace=%v", serverPod.Namespace)
-framework.RunKubectlOrDie(ns, "exec", ns, serverPod.Name, "--", "/bin/sh", "-c", stopcmd)
+e2ekubectl.RunKubectlOrDie(ns, "exec", ns, serverPod.Name, "--", "/bin/sh", "-c", stopcmd)
 }
 // Creates a pod that mounts an nfs volume that is served by the nfs-server pod. The container
@@ -310,7 +310,7 @@ var _ = SIGDescribe("kubelet", func() {
 }
 for nodeName := range nodeNames {
 for k, v := range nodeLabels {
-framework.AddOrUpdateLabelOnNode(c, nodeName, k, v)
+e2enode.AddOrUpdateLabelOnNode(c, nodeName, k, v)
 }
 }
@@ -334,7 +334,7 @@ var _ = SIGDescribe("kubelet", func() {
 // If we added labels to nodes in this test, remove them now.
 for nodeName := range nodeNames {
 for k := range nodeLabels {
-framework.RemoveLabelOffNode(c, nodeName, k)
+e2enode.RemoveLabelOffNode(c, nodeName, k)
 }
 }
 })


@@ -25,6 +25,7 @@ import (
 "k8s.io/apimachinery/pkg/util/sets"
 "k8s.io/kubernetes/test/e2e/framework"
 e2enode "k8s.io/kubernetes/test/e2e/framework/node"
+e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 "k8s.io/kubernetes/test/e2e/storage/utils"
 imageutils "k8s.io/kubernetes/test/utils/image"
 admissionapi "k8s.io/pod-security-admission/api"
@@ -111,7 +112,7 @@ var _ = SIGDescribe("Mount propagation", func() {
 hostExec.IssueCommand(cleanCmd, node)
 }()
-podClient := f.PodClient()
+podClient := e2epod.NewPodClient(f)
 bidirectional := v1.MountPropagationBidirectional
 master := podClient.CreateSync(preparePod("master", node, &bidirectional, hostDir))
@@ -128,18 +129,18 @@ var _ = SIGDescribe("Mount propagation", func() {
 for _, podName := range podNames {
 for _, dirName := range podNames {
 cmd := fmt.Sprintf("test -d /mnt/test/%s", dirName)
-f.ExecShellInPod(podName, cmd)
+e2epod.ExecShellInPod(f, podName, cmd)
 }
 }
 // Each pod mounts one tmpfs to /mnt/test/<podname> and puts a file there.
 for _, podName := range podNames {
 cmd := fmt.Sprintf("mount -t tmpfs e2e-mount-propagation-%[1]s /mnt/test/%[1]s; echo %[1]s > /mnt/test/%[1]s/file", podName)
-f.ExecShellInPod(podName, cmd)
+e2epod.ExecShellInPod(f, podName, cmd)
 // unmount tmpfs when the test finishes
 cmd = fmt.Sprintf("umount /mnt/test/%s", podName)
-defer f.ExecShellInPod(podName, cmd)
+defer e2epod.ExecShellInPod(f, podName, cmd)
 }
 // The host mounts one tmpfs to testdir/host and puts a file there so we
@@ -170,7 +171,7 @@ var _ = SIGDescribe("Mount propagation", func() {
 for podName, mounts := range expectedMounts {
 for _, mountName := range dirNames {
 cmd := fmt.Sprintf("cat /mnt/test/%s/file", mountName)
-stdout, stderr, err := f.ExecShellInPodWithFullOutput(podName, cmd)
+stdout, stderr, err := e2epod.ExecShellInPodWithFullOutput(f, podName, cmd)
 framework.Logf("pod %s mount %s: stdout: %q, stderr: %q error: %v", podName, mountName, stdout, stderr, err)
 msg := fmt.Sprintf("When checking pod %s and directory %s", podName, mountName)
 shouldBeVisible := mounts.Has(mountName)


@@ -55,9 +55,9 @@ var _ = SIGDescribe("Pods Extended", func() {
 f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline
 ginkgo.Describe("Delete Grace Period", func() {
-var podClient *framework.PodClient
+var podClient *e2epod.PodClient
 ginkgo.BeforeEach(func() {
-podClient = f.PodClient()
+podClient = e2epod.NewPodClient(f)
 })
 /*
@@ -148,9 +148,9 @@ var _ = SIGDescribe("Pods Extended", func() {
 })
 ginkgo.Describe("Pods Set QOS Class", func() {
-var podClient *framework.PodClient
+var podClient *e2epod.PodClient
 ginkgo.BeforeEach(func() {
-podClient = f.PodClient()
+podClient = e2epod.NewPodClient(f)
 })
 /*
@@ -200,9 +200,9 @@ var _ = SIGDescribe("Pods Extended", func() {
 })
 ginkgo.Describe("Pod Container Status", func() {
-var podClient *framework.PodClient
+var podClient *e2epod.PodClient
 ginkgo.BeforeEach(func() {
-podClient = f.PodClient()
+podClient = e2epod.NewPodClient(f)
 })
 ginkgo.It("should never report success for a pending container", func() {
@@ -224,9 +224,9 @@ var _ = SIGDescribe("Pods Extended", func() {
 })
 ginkgo.Describe("Pod Container lifecycle", func() {
-var podClient *framework.PodClient
+var podClient *e2epod.PodClient
 ginkgo.BeforeEach(func() {
-podClient = f.PodClient()
+podClient = e2epod.NewPodClient(f)
 })
 ginkgo.It("should not create extra sandbox if all containers are done", func() {


@@ -155,9 +155,9 @@ func testPreStop(c clientset.Interface, ns string) {
 var _ = SIGDescribe("PreStop", func() {
 f := framework.NewDefaultFramework("prestop")
 f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline
-var podClient *framework.PodClient
+var podClient *e2epod.PodClient
 ginkgo.BeforeEach(func() {
-podClient = f.PodClient()
+podClient = e2epod.NewPodClient(f)
 })
 /*


@@ -19,6 +19,7 @@ package node
 import (
 "context"
 "fmt"
 "k8s.io/pod-security-admission/api"
 v1 "k8s.io/api/core/v1"
@@ -89,9 +90,9 @@ var _ = SIGDescribe("RuntimeClass", func() {
 ginkgo.By("Trying to apply a label on the found node.")
 for key, value := range nodeSelector {
-framework.AddOrUpdateLabelOnNode(f.ClientSet, nodeName, key, value)
-framework.ExpectNodeHasLabel(f.ClientSet, nodeName, key, value)
-defer framework.RemoveLabelOffNode(f.ClientSet, nodeName, key)
+e2enode.AddOrUpdateLabelOnNode(f.ClientSet, nodeName, key, value)
+e2enode.ExpectNodeHasLabel(f.ClientSet, nodeName, key, value)
+defer e2enode.RemoveLabelOffNode(f.ClientSet, nodeName, key)
 }
 ginkgo.By("Trying to apply taint on the found node.")
@@ -101,7 +102,7 @@ var _ = SIGDescribe("RuntimeClass", func() {
 Effect: v1.TaintEffectNoSchedule,
 }
 e2enode.AddOrUpdateTaintOnNode(f.ClientSet, nodeName, taint)
-framework.ExpectNodeHasTaint(f.ClientSet, nodeName, &taint)
+e2enode.ExpectNodeHasTaint(f.ClientSet, nodeName, &taint)
 defer e2enode.RemoveTaintOffNode(f.ClientSet, nodeName, taint)
 ginkgo.By("Trying to create runtimeclass and pod")
@@ -114,7 +115,7 @@ var _ = SIGDescribe("RuntimeClass", func() {
 pod.Spec.NodeSelector = map[string]string{
 labelFooName: "bar",
 }
-pod = f.PodClient().Create(pod)
+pod = e2epod.NewPodClient(f).Create(pod)
 framework.ExpectNoError(e2epod.WaitForPodNotPending(f.ClientSet, f.Namespace.Name, pod.Name))
@@ -145,9 +146,9 @@ var _ = SIGDescribe("RuntimeClass", func() {
 ginkgo.By("Trying to apply a label on the found node.")
 for key, value := range nodeSelector {
-framework.AddOrUpdateLabelOnNode(f.ClientSet, nodeName, key, value)
-framework.ExpectNodeHasLabel(f.ClientSet, nodeName, key, value)
-defer framework.RemoveLabelOffNode(f.ClientSet, nodeName, key)
+e2enode.AddOrUpdateLabelOnNode(f.ClientSet, nodeName, key, value)
+e2enode.ExpectNodeHasLabel(f.ClientSet, nodeName, key, value)
+defer e2enode.RemoveLabelOffNode(f.ClientSet, nodeName, key)
 }
 ginkgo.By("Trying to create runtimeclass and pod")
@@ -160,7 +161,7 @@ var _ = SIGDescribe("RuntimeClass", func() {
 pod.Spec.NodeSelector = map[string]string{
 labelFooName: "bar",
 }
-pod = f.PodClient().Create(pod)
+pod = e2epod.NewPodClient(f).Create(pod)
 framework.ExpectNoError(e2epod.WaitForPodNotPending(f.ClientSet, f.Namespace.Name, pod.Name))


@@ -32,6 +32,7 @@ import (
  "k8s.io/kubernetes/test/e2e/framework"
  e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
  e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
+ e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
  imageutils "k8s.io/kubernetes/test/utils/image"
  admissionapi "k8s.io/pod-security-admission/api"
@@ -73,7 +74,7 @@ var _ = SIGDescribe("Security Context", func() {
  pod.Spec.Containers[0].Command = []string{"id", "-G"}
  pod.Spec.SecurityContext.SupplementalGroups = []int64{1234, 5678}
  groups := []string{"1234", "5678"}
- f.TestContainerOutput("pod.Spec.SecurityContext.SupplementalGroups", pod, 0, groups)
+ e2eoutput.TestContainerOutput(f, "pod.Spec.SecurityContext.SupplementalGroups", pod, 0, groups)
  })
  ginkgo.It("should support pod.Spec.SecurityContext.RunAsUser [LinuxOnly]", func() {
@@ -82,7 +83,7 @@ var _ = SIGDescribe("Security Context", func() {
  pod.Spec.SecurityContext.RunAsUser = &userID
  pod.Spec.Containers[0].Command = []string{"sh", "-c", "id"}
- f.TestContainerOutput("pod.Spec.SecurityContext.RunAsUser", pod, 0, []string{
+ e2eoutput.TestContainerOutput(f, "pod.Spec.SecurityContext.RunAsUser", pod, 0, []string{
  fmt.Sprintf("uid=%v", userID),
  fmt.Sprintf("gid=%v", 0),
  })
@@ -102,7 +103,7 @@ var _ = SIGDescribe("Security Context", func() {
  pod.Spec.SecurityContext.RunAsGroup = &groupID
  pod.Spec.Containers[0].Command = []string{"sh", "-c", "id"}
- f.TestContainerOutput("pod.Spec.SecurityContext.RunAsUser", pod, 0, []string{
+ e2eoutput.TestContainerOutput(f, "pod.Spec.SecurityContext.RunAsUser", pod, 0, []string{
  fmt.Sprintf("uid=%v", userID),
  fmt.Sprintf("gid=%v", groupID),
  })
@@ -117,7 +118,7 @@ var _ = SIGDescribe("Security Context", func() {
  pod.Spec.Containers[0].SecurityContext.RunAsUser = &overrideUserID
  pod.Spec.Containers[0].Command = []string{"sh", "-c", "id"}
- f.TestContainerOutput("pod.Spec.SecurityContext.RunAsUser", pod, 0, []string{
+ e2eoutput.TestContainerOutput(f, "pod.Spec.SecurityContext.RunAsUser", pod, 0, []string{
  fmt.Sprintf("uid=%v", overrideUserID),
  fmt.Sprintf("gid=%v", 0),
  })
@@ -142,7 +143,7 @@ var _ = SIGDescribe("Security Context", func() {
  pod.Spec.Containers[0].SecurityContext.RunAsGroup = &overrideGroupID
  pod.Spec.Containers[0].Command = []string{"sh", "-c", "id"}
- f.TestContainerOutput("pod.Spec.SecurityContext.RunAsUser", pod, 0, []string{
+ e2eoutput.TestContainerOutput(f, "pod.Spec.SecurityContext.RunAsUser", pod, 0, []string{
  fmt.Sprintf("uid=%v", overrideUserID),
  fmt.Sprintf("gid=%v", overrideGroupID),
  })
@@ -165,27 +166,27 @@ var _ = SIGDescribe("Security Context", func() {
  pod.Spec.Containers[0].SecurityContext = &v1.SecurityContext{SeccompProfile: &v1.SeccompProfile{Type: v1.SeccompProfileTypeUnconfined}}
  pod.Spec.SecurityContext = &v1.PodSecurityContext{SeccompProfile: &v1.SeccompProfile{Type: v1.SeccompProfileTypeRuntimeDefault}}
  pod.Spec.Containers[0].Command = []string{"grep", "ecc", "/proc/self/status"}
- f.TestContainerOutput("seccomp unconfined container", pod, 0, []string{"0"}) // seccomp disabled
+ e2eoutput.TestContainerOutput(f, "seccomp unconfined container", pod, 0, []string{"0"}) // seccomp disabled
  })
  ginkgo.It("should support seccomp unconfined on the pod [LinuxOnly]", func() {
  pod := scTestPod(false, false)
  pod.Spec.SecurityContext = &v1.PodSecurityContext{SeccompProfile: &v1.SeccompProfile{Type: v1.SeccompProfileTypeUnconfined}}
  pod.Spec.Containers[0].Command = []string{"grep", "ecc", "/proc/self/status"}
- f.TestContainerOutput("seccomp unconfined pod", pod, 0, []string{"0"}) // seccomp disabled
+ e2eoutput.TestContainerOutput(f, "seccomp unconfined pod", pod, 0, []string{"0"}) // seccomp disabled
  })
  ginkgo.It("should support seccomp runtime/default [LinuxOnly]", func() {
  pod := scTestPod(false, false)
  pod.Spec.Containers[0].SecurityContext = &v1.SecurityContext{SeccompProfile: &v1.SeccompProfile{Type: v1.SeccompProfileTypeRuntimeDefault}}
  pod.Spec.Containers[0].Command = []string{"grep", "ecc", "/proc/self/status"}
- f.TestContainerOutput("seccomp runtime/default", pod, 0, []string{"2"}) // seccomp filtered
+ e2eoutput.TestContainerOutput(f, "seccomp runtime/default", pod, 0, []string{"2"}) // seccomp filtered
  })
  ginkgo.It("should support seccomp default which is unconfined [LinuxOnly]", func() {
  pod := scTestPod(false, false)
  pod.Spec.Containers[0].Command = []string{"grep", "ecc", "/proc/self/status"}
- f.TestContainerOutput("seccomp default unconfined", pod, 0, []string{"0"}) // seccomp disabled
+ e2eoutput.TestContainerOutput(f, "seccomp default unconfined", pod, 0, []string{"0"}) // seccomp disabled
  })
  })
@@ -262,7 +263,7 @@ func testPodSELinuxLabeling(f *framework.Framework, hostIPC bool, hostPID bool)
  pod.Spec.SecurityContext.SELinuxOptions = &v1.SELinuxOptions{
  Level: "s0:c0,c1",
  }
- f.TestContainerOutput("Pod with same MCS label reading test file", pod, 0, []string{testContent})
+ e2eoutput.TestContainerOutput(f, "Pod with same MCS label reading test file", pod, 0, []string{testContent})
  // Confirm that the same pod with a different MCS
  // label cannot access the volume
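All of the hunks in this file follow one pattern: a helper that used to be a method on *framework.Framework is now a package-level function that takes the framework as its first argument. A minimal before/after sketch of a call site, reusing identifiers that appear in the hunks above (the surrounding test body is illustrative only):

    // Before: helpers are methods on the shared framework object f.
    f.TestContainerOutput("seccomp runtime/default", pod, 0, []string{"2"})
    pod = f.PodClient().Create(pod)

    // After: the same helpers live in e2eoutput and e2epod and receive f explicitly.
    e2eoutput.TestContainerOutput(f, "seccomp runtime/default", pod, 0, []string{"2"})
    pod = e2epod.NewPodClient(f).Create(pod)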

View File

@@ -191,7 +191,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() {
  ginkgo.By("Trying to apply a taint on the Node")
  testTaint := getTestTaint()
  e2enode.AddOrUpdateTaintOnNode(cs, nodeName, testTaint)
- framework.ExpectNodeHasTaint(cs, nodeName, &testTaint)
+ e2enode.ExpectNodeHasTaint(cs, nodeName, &testTaint)
  defer e2enode.RemoveTaintOffNode(cs, nodeName, testTaint)
  // Wait a bit
@@ -223,7 +223,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() {
  ginkgo.By("Trying to apply a taint on the Node")
  testTaint := getTestTaint()
  e2enode.AddOrUpdateTaintOnNode(cs, nodeName, testTaint)
- framework.ExpectNodeHasTaint(cs, nodeName, &testTaint)
+ e2enode.ExpectNodeHasTaint(cs, nodeName, &testTaint)
  defer e2enode.RemoveTaintOffNode(cs, nodeName, testTaint)
  // Wait a bit
@@ -256,7 +256,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() {
  ginkgo.By("Trying to apply a taint on the Node")
  testTaint := getTestTaint()
  e2enode.AddOrUpdateTaintOnNode(cs, nodeName, testTaint)
- framework.ExpectNodeHasTaint(cs, nodeName, &testTaint)
+ e2enode.ExpectNodeHasTaint(cs, nodeName, &testTaint)
  defer e2enode.RemoveTaintOffNode(cs, nodeName, testTaint)
  // Wait a bit
@@ -303,7 +303,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() {
  ginkgo.By("Trying to apply a taint on the Node")
  testTaint := getTestTaint()
  e2enode.AddOrUpdateTaintOnNode(cs, nodeName, testTaint)
- framework.ExpectNodeHasTaint(cs, nodeName, &testTaint)
+ e2enode.ExpectNodeHasTaint(cs, nodeName, &testTaint)
  taintRemoved := false
  defer func() {
  if !taintRemoved {
@@ -378,11 +378,11 @@ var _ = SIGDescribe("NoExecuteTaintManager Multiple Pods [Serial]", func() {
  ginkgo.By("Trying to apply a taint on the Nodes")
  testTaint := getTestTaint()
  e2enode.AddOrUpdateTaintOnNode(cs, nodeName1, testTaint)
- framework.ExpectNodeHasTaint(cs, nodeName1, &testTaint)
+ e2enode.ExpectNodeHasTaint(cs, nodeName1, &testTaint)
  defer e2enode.RemoveTaintOffNode(cs, nodeName1, testTaint)
  if nodeName2 != nodeName1 {
  e2enode.AddOrUpdateTaintOnNode(cs, nodeName2, testTaint)
- framework.ExpectNodeHasTaint(cs, nodeName2, &testTaint)
+ e2enode.ExpectNodeHasTaint(cs, nodeName2, &testTaint)
  defer e2enode.RemoveTaintOffNode(cs, nodeName2, testTaint)
  }
@@ -451,7 +451,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Multiple Pods [Serial]", func() {
  ginkgo.By("Trying to apply a taint on the Node")
  testTaint := getTestTaint()
  e2enode.AddOrUpdateTaintOnNode(cs, nodeName, testTaint)
- framework.ExpectNodeHasTaint(cs, nodeName, &testTaint)
+ e2enode.ExpectNodeHasTaint(cs, nodeName, &testTaint)
  defer e2enode.RemoveTaintOffNode(cs, nodeName, testTaint)
  // 3. Wait to see if both pods get evicted in between [5, 25] seconds

View File

@@ -29,6 +29,7 @@ import (
  "k8s.io/apimachinery/pkg/util/uuid"
  extensionsinternal "k8s.io/kubernetes/pkg/apis/extensions"
  "k8s.io/kubernetes/test/e2e/framework"
+ e2edebug "k8s.io/kubernetes/test/e2e/framework/debug"
  e2egpu "k8s.io/kubernetes/test/e2e/framework/gpu"
  e2ejob "k8s.io/kubernetes/test/e2e/framework/job"
  e2emanifest "k8s.io/kubernetes/test/e2e/framework/manifest"
@@ -127,7 +128,7 @@ func getGPUsAvailable(f *framework.Framework) int64 {
  }
  // SetupNVIDIAGPUNode install Nvidia Drivers and wait for Nvidia GPUs to be available on nodes
- func SetupNVIDIAGPUNode(f *framework.Framework, setupResourceGatherer bool) *framework.ContainerResourceGatherer {
+ func SetupNVIDIAGPUNode(f *framework.Framework, setupResourceGatherer bool) *e2edebug.ContainerResourceGatherer {
  logOSImages(f)
  var err error
@@ -161,10 +162,10 @@ func SetupNVIDIAGPUNode(f *framework.Framework, setupResourceGatherer bool) *fra
  pods.Items = append(pods.Items, devicepluginPods.Items...)
  }
- var rsgather *framework.ContainerResourceGatherer
+ var rsgather *e2edebug.ContainerResourceGatherer
  if setupResourceGatherer {
  framework.Logf("Starting ResourceUsageGather for the created DaemonSet pods.")
- rsgather, err = framework.NewResourceUsageGatherer(f.ClientSet, framework.ResourceGathererOptions{InKubemark: false, Nodes: framework.AllNodes, ResourceDataGatheringPeriod: 2 * time.Second, ProbeDuration: 2 * time.Second, PrintVerboseLogs: true}, pods)
+ rsgather, err = e2edebug.NewResourceUsageGatherer(f.ClientSet, e2edebug.ResourceGathererOptions{InKubemark: false, Nodes: e2edebug.AllNodes, ResourceDataGatheringPeriod: 2 * time.Second, ProbeDuration: 2 * time.Second, PrintVerboseLogs: true}, pods)
  framework.ExpectNoError(err, "creating ResourceUsageGather for the daemonset pods")
  go rsgather.StartGatheringData()
  }
@@ -195,17 +196,17 @@ func testNvidiaGPUs(f *framework.Framework) {
  framework.Logf("Creating %d pods and have the pods run a CUDA app", gpuPodNum)
  podList := []*v1.Pod{}
  for i := int64(0); i < gpuPodNum; i++ {
- podList = append(podList, f.PodClient().Create(makeCudaAdditionDevicePluginTestPod()))
+ podList = append(podList, e2epod.NewPodClient(f).Create(makeCudaAdditionDevicePluginTestPod()))
  }
  framework.Logf("Wait for all test pods to succeed")
  // Wait for all pods to succeed
  for _, pod := range podList {
- f.PodClient().WaitForSuccess(pod.Name, 5*time.Minute)
+ e2epod.NewPodClient(f).WaitForSuccess(pod.Name, 5*time.Minute)
  logContainers(f, pod)
  }
  framework.Logf("Stopping ResourceUsageGather")
- constraints := make(map[string]framework.ResourceConstraint)
+ constraints := make(map[string]e2edebug.ResourceConstraint)
  // For now, just gets summary. Can pass valid constraints in the future.
  summary, err := rsgather.StopAndSummarize([]int{50, 90, 100}, constraints)
  f.TestSummaries = append(f.TestSummaries, summary)
@@ -299,7 +300,7 @@ func VerifyJobNCompletions(f *framework.Framework, completions int32) {
  successes := int32(0)
  regex := regexp.MustCompile("PASSED")
  for _, podName := range createdPodNames {
- f.PodClient().WaitForFinish(podName, 5*time.Minute)
+ e2epod.NewPodClient(f).WaitForFinish(podName, 5*time.Minute)
  logs, err := e2epod.GetPodLogs(f.ClientSet, ns, podName, "vector-addition")
  framework.ExpectNoError(err, "Should be able to get logs for pod %v", podName)
  if regex.MatchString(logs) {
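The resource-gatherer hunks change only which package supplies the types; the argument order is untouched. A condensed sketch of the new wiring, with the options struct trimmed for brevity to a subset of the fields shown in the hunk above:

    // Gatherer, options, and the AllNodes constant now come from e2edebug.
    rsgather, err := e2edebug.NewResourceUsageGatherer(f.ClientSet,
        e2edebug.ResourceGathererOptions{InKubemark: false, Nodes: e2edebug.AllNodes},
        pods)
    framework.ExpectNoError(err, "creating ResourceUsageGather for the daemonset pods")
    go rsgather.StartGatheringData()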

View File

@@ -95,7 +95,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
  nodeList = &v1.NodeList{}
  var err error
- framework.AllNodesReady(cs, time.Minute)
+ e2enode.AllNodesReady(cs, time.Minute)
  nodeList, err = e2enode.GetReadySchedulableNodes(cs)
  if err != nil {
@@ -339,8 +339,8 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
  continue
  }
  // Apply node label to each node
- framework.AddOrUpdateLabelOnNode(cs, node.Name, "node", node.Name)
- framework.ExpectNodeHasLabel(cs, node.Name, "node", node.Name)
+ e2enode.AddOrUpdateLabelOnNode(cs, node.Name, "node", node.Name)
+ e2enode.ExpectNodeHasLabel(cs, node.Name, "node", node.Name)
  // Find allocatable amount of CPU.
  allocatable, found := node.Status.Allocatable[v1.ResourceCPU]
  if !found {
@@ -354,7 +354,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
  // Clean up added labels after this test.
  defer func() {
  for nodeName := range nodeToAllocatableMap {
- framework.RemoveLabelOffNode(cs, nodeName, "node")
+ e2enode.RemoveLabelOffNode(cs, nodeName, "node")
  }
  }()
@@ -464,9 +464,9 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
  ginkgo.By("Trying to apply a random label on the found node.")
  k := fmt.Sprintf("kubernetes.io/e2e-%s", string(uuid.NewUUID()))
  v := "42"
- framework.AddOrUpdateLabelOnNode(cs, nodeName, k, v)
- framework.ExpectNodeHasLabel(cs, nodeName, k, v)
- defer framework.RemoveLabelOffNode(cs, nodeName, k)
+ e2enode.AddOrUpdateLabelOnNode(cs, nodeName, k, v)
+ e2enode.ExpectNodeHasLabel(cs, nodeName, k, v)
+ defer e2enode.RemoveLabelOffNode(cs, nodeName, k)
  ginkgo.By("Trying to relaunch the pod, now with labels.")
  labelPodName := "with-labels"
@@ -537,9 +537,9 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
  ginkgo.By("Trying to apply a random label on the found node.")
  k := fmt.Sprintf("kubernetes.io/e2e-%s", string(uuid.NewUUID()))
  v := "42"
- framework.AddOrUpdateLabelOnNode(cs, nodeName, k, v)
- framework.ExpectNodeHasLabel(cs, nodeName, k, v)
- defer framework.RemoveLabelOffNode(cs, nodeName, k)
+ e2enode.AddOrUpdateLabelOnNode(cs, nodeName, k, v)
+ e2enode.ExpectNodeHasLabel(cs, nodeName, k, v)
+ defer e2enode.RemoveLabelOffNode(cs, nodeName, k)
  ginkgo.By("Trying to relaunch the pod, now with labels.")
  labelPodName := "with-labels"
@@ -589,15 +589,15 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
  Effect: v1.TaintEffectNoSchedule,
  }
  e2enode.AddOrUpdateTaintOnNode(cs, nodeName, testTaint)
- framework.ExpectNodeHasTaint(cs, nodeName, &testTaint)
+ e2enode.ExpectNodeHasTaint(cs, nodeName, &testTaint)
  defer e2enode.RemoveTaintOffNode(cs, nodeName, testTaint)
  ginkgo.By("Trying to apply a random label on the found node.")
  labelKey := fmt.Sprintf("kubernetes.io/e2e-label-key-%s", string(uuid.NewUUID()))
  labelValue := "testing-label-value"
- framework.AddOrUpdateLabelOnNode(cs, nodeName, labelKey, labelValue)
- framework.ExpectNodeHasLabel(cs, nodeName, labelKey, labelValue)
- defer framework.RemoveLabelOffNode(cs, nodeName, labelKey)
+ e2enode.AddOrUpdateLabelOnNode(cs, nodeName, labelKey, labelValue)
+ e2enode.ExpectNodeHasLabel(cs, nodeName, labelKey, labelValue)
+ defer e2enode.RemoveLabelOffNode(cs, nodeName, labelKey)
  ginkgo.By("Trying to relaunch the pod, now with tolerations.")
  tolerationPodName := "with-tolerations"
@@ -632,15 +632,15 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
  Effect: v1.TaintEffectNoSchedule,
  }
  e2enode.AddOrUpdateTaintOnNode(cs, nodeName, testTaint)
- framework.ExpectNodeHasTaint(cs, nodeName, &testTaint)
+ e2enode.ExpectNodeHasTaint(cs, nodeName, &testTaint)
  defer e2enode.RemoveTaintOffNode(cs, nodeName, testTaint)
  ginkgo.By("Trying to apply a random label on the found node.")
  labelKey := fmt.Sprintf("kubernetes.io/e2e-label-key-%s", string(uuid.NewUUID()))
  labelValue := "testing-label-value"
- framework.AddOrUpdateLabelOnNode(cs, nodeName, labelKey, labelValue)
- framework.ExpectNodeHasLabel(cs, nodeName, labelKey, labelValue)
- defer framework.RemoveLabelOffNode(cs, nodeName, labelKey)
+ e2enode.AddOrUpdateLabelOnNode(cs, nodeName, labelKey, labelValue)
+ e2enode.ExpectNodeHasLabel(cs, nodeName, labelKey, labelValue)
+ defer e2enode.RemoveLabelOffNode(cs, nodeName, labelKey)
  ginkgo.By("Trying to relaunch the pod, still no tolerations.")
  podNameNoTolerations := "still-no-tolerations"
@@ -674,9 +674,9 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
  nodeSelector := make(map[string]string)
  nodeSelector[k] = v
- framework.AddOrUpdateLabelOnNode(cs, nodeName, k, v)
- framework.ExpectNodeHasLabel(cs, nodeName, k, v)
- defer framework.RemoveLabelOffNode(cs, nodeName, k)
+ e2enode.AddOrUpdateLabelOnNode(cs, nodeName, k, v)
+ e2enode.ExpectNodeHasLabel(cs, nodeName, k, v)
+ defer e2enode.RemoveLabelOffNode(cs, nodeName, k)
  port := int32(54321)
  ginkgo.By(fmt.Sprintf("Trying to create a pod(pod1) with hostport %v and hostIP %s and expect scheduled", port, localhost))
@@ -707,9 +707,9 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
  nodeSelector := make(map[string]string)
  nodeSelector[k] = v
- framework.AddOrUpdateLabelOnNode(cs, nodeName, k, v)
- framework.ExpectNodeHasLabel(cs, nodeName, k, v)
- defer framework.RemoveLabelOffNode(cs, nodeName, k)
+ e2enode.AddOrUpdateLabelOnNode(cs, nodeName, k, v)
+ e2enode.ExpectNodeHasLabel(cs, nodeName, k, v)
+ defer e2enode.RemoveLabelOffNode(cs, nodeName, k)
  port := int32(54322)
  ginkgo.By(fmt.Sprintf("Trying to create a pod(pod4) with hostport %v and hostIP 0.0.0.0(empty string here) and expect scheduled", port))
@@ -731,12 +731,12 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
  nodeNames = Get2NodesThatCanRunPod(f)
  ginkgo.By(fmt.Sprintf("Apply dedicated topologyKey %v for this test on the 2 nodes.", topologyKey))
  for _, nodeName := range nodeNames {
- framework.AddOrUpdateLabelOnNode(cs, nodeName, topologyKey, nodeName)
+ e2enode.AddOrUpdateLabelOnNode(cs, nodeName, topologyKey, nodeName)
  }
  })
  ginkgo.AfterEach(func() {
  for _, nodeName := range nodeNames {
- framework.RemoveLabelOffNode(cs, nodeName, topologyKey)
+ e2enode.RemoveLabelOffNode(cs, nodeName, topologyKey)
  }
  })

View File

@@ -327,7 +327,7 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
  nodeNames = Get2NodesThatCanRunPod(f)
  ginkgo.By(fmt.Sprintf("Apply dedicated topologyKey %v for this test on the 2 nodes.", topologyKey))
  for _, nodeName := range nodeNames {
- framework.AddOrUpdateLabelOnNode(cs, nodeName, topologyKey, nodeName)
+ e2enode.AddOrUpdateLabelOnNode(cs, nodeName, topologyKey, nodeName)
  node, err := cs.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
  framework.ExpectNoError(err)
@@ -342,7 +342,7 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
  })
  ginkgo.AfterEach(func() {
  for _, nodeName := range nodeNames {
- framework.RemoveLabelOffNode(cs, nodeName, topologyKey)
+ e2enode.RemoveLabelOffNode(cs, nodeName, topologyKey)
  }
  for _, node := range nodes {
  nodeCopy := node.DeepCopy()

View File

@@ -142,17 +142,17 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() {
  ginkgo.By("Trying to apply a label on the found node.")
  k = "kubernetes.io/e2e-node-topologyKey"
  v := "topologyvalue1"
- framework.AddOrUpdateLabelOnNode(cs, nodeName, k, v)
- framework.ExpectNodeHasLabel(cs, nodeName, k, v)
- defer framework.RemoveLabelOffNode(cs, nodeName, k)
+ e2enode.AddOrUpdateLabelOnNode(cs, nodeName, k, v)
+ e2enode.ExpectNodeHasLabel(cs, nodeName, k, v)
+ defer e2enode.RemoveLabelOffNode(cs, nodeName, k)
  ginkgo.By("Trying to apply a label on other nodes.")
  v = "topologyvalue2"
  for _, node := range nodeList.Items {
  if node.Name != nodeName {
- framework.AddOrUpdateLabelOnNode(cs, node.Name, k, v)
- framework.ExpectNodeHasLabel(cs, node.Name, k, v)
- defer framework.RemoveLabelOffNode(cs, node.Name, k)
+ e2enode.AddOrUpdateLabelOnNode(cs, node.Name, k, v)
+ e2enode.ExpectNodeHasLabel(cs, node.Name, k, v)
+ defer e2enode.RemoveLabelOffNode(cs, node.Name, k)
  }
  }
  }
@@ -276,12 +276,12 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() {
  nodeNames = Get2NodesThatCanRunPod(f)
  ginkgo.By(fmt.Sprintf("Apply dedicated topologyKey %v for this test on the 2 nodes.", topologyKey))
  for _, nodeName := range nodeNames {
- framework.AddOrUpdateLabelOnNode(cs, nodeName, topologyKey, nodeName)
+ e2enode.AddOrUpdateLabelOnNode(cs, nodeName, topologyKey, nodeName)
  }
  })
  ginkgo.AfterEach(func() {
  for _, nodeName := range nodeNames {
- framework.RemoveLabelOffNode(cs, nodeName, topologyKey)
+ e2enode.RemoveLabelOffNode(cs, nodeName, topologyKey)
  }
  })
@@ -561,5 +561,5 @@ func getRandomTaint() v1.Taint {
  func addTaintToNode(cs clientset.Interface, nodeName string, testTaint v1.Taint) {
  e2enode.AddOrUpdateTaintOnNode(cs, nodeName, testTaint)
- framework.ExpectNodeHasTaint(cs, nodeName, &testTaint)
+ e2enode.ExpectNodeHasTaint(cs, nodeName, &testTaint)
  }

View File

@@ -21,6 +21,7 @@ import (
  "io"
  "k8s.io/kubernetes/test/e2e/framework"
+ e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
  "k8s.io/kubernetes/test/e2e/storage/drivers/csi-test/mock/service"
  )
@@ -87,7 +88,7 @@ func (p PodDirIO) RemoveAll(path string) error {
  }
  func (p PodDirIO) execute(command []string, stdin io.Reader) (string, string, error) {
- return p.F.ExecWithOptions(framework.ExecOptions{
+ return e2epod.ExecWithOptions(p.F, e2epod.ExecOptions{
  Command: command,
  Namespace: p.Namespace,
  PodName: p.PodName,
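ExecWithOptions moves the same way: the former receiver becomes the first parameter and the options struct now lives in e2epod. A sketch of the new call shape, limited to the fields visible in the hunk above (any further option fields are cut off in the hunk and omitted here):

    // p.F is the *framework.Framework held by PodDirIO; the helper returns stdout, stderr, err,
    // matching the execute signature shown above.
    stdout, stderr, err := e2epod.ExecWithOptions(p.F, e2epod.ExecOptions{
        Command:   command,
        Namespace: p.Namespace,
        PodName:   p.PodName,
    })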

View File

@@ -146,7 +146,7 @@ var _ = utils.SIGDescribe("EmptyDir wrapper volumes", func() {
  },
  },
  }
- pod = f.PodClient().CreateSync(pod)
+ pod = e2epod.NewPodClient(f).CreateSync(pod)
  defer func() {
  ginkgo.By("Cleaning up the secret")
@@ -218,7 +218,7 @@ func createGitServer(f *framework.Framework) (gitURL string, gitRepo string, cle
  gitServerPod := e2epod.NewAgnhostPod(f.Namespace.Name, gitServerPodName, nil, nil, []v1.ContainerPort{{ContainerPort: int32(containerPort)}}, "fake-gitserver")
  gitServerPod.ObjectMeta.Labels = labels
- f.PodClient().CreateSync(gitServerPod)
+ e2epod.NewPodClient(f).CreateSync(gitServerPod)
  // Portal IP and port
  httpPort := 2345

View File

@@ -69,7 +69,7 @@ var _ = utils.SIGDescribe("[Feature:Flexvolumes] Mounted flexvolume expand[Slow]
  e2eskipper.SkipUnlessSSHKeyPresent()
  c = f.ClientSet
  ns = f.Namespace.Name
- framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout))
+ framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout))
  node, err = e2enode.GetRandomReadySchedulableNode(f.ClientSet)
  framework.ExpectNoError(err)
@@ -78,8 +78,8 @@ var _ = utils.SIGDescribe("[Feature:Flexvolumes] Mounted flexvolume expand[Slow]
  nodeKey = "mounted_flexvolume_expand_" + ns
  nodeLabelValue = ns
  nodeKeyValueLabel = map[string]string{nodeKey: nodeLabelValue}
- framework.AddOrUpdateLabelOnNode(c, nodeName, nodeKey, nodeLabelValue)
- ginkgo.DeferCleanup(framework.RemoveLabelOffNode, c, nodeName, nodeKey)
+ e2enode.AddOrUpdateLabelOnNode(c, nodeName, nodeKey, nodeLabelValue)
+ ginkgo.DeferCleanup(e2enode.RemoveLabelOffNode, c, nodeName, nodeKey)
  test := testsuites.StorageClassTest{
  Name: "flexvolume-resize",

View File

@@ -63,7 +63,7 @@ var _ = utils.SIGDescribe("[Feature:Flexvolumes] Mounted flexvolume volume expan
  e2eskipper.SkipUnlessSSHKeyPresent()
  c = f.ClientSet
  ns = f.Namespace.Name
- framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout))
+ framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout))
  var err error
  node, err = e2enode.GetRandomReadySchedulableNode(f.ClientSet)
@@ -73,8 +73,8 @@ var _ = utils.SIGDescribe("[Feature:Flexvolumes] Mounted flexvolume volume expan
  nodeKey = "mounted_flexvolume_expand_" + ns
  nodeLabelValue = ns
  nodeKeyValueLabel = map[string]string{nodeKey: nodeLabelValue}
- framework.AddOrUpdateLabelOnNode(c, nodeName, nodeKey, nodeLabelValue)
- ginkgo.DeferCleanup(framework.RemoveLabelOffNode, c, nodeName, nodeKey)
+ e2enode.AddOrUpdateLabelOnNode(c, nodeName, nodeKey, nodeLabelValue)
+ ginkgo.DeferCleanup(e2enode.RemoveLabelOffNode, c, nodeName, nodeKey)
  test := testsuites.StorageClassTest{
  Name: "flexvolume-resize",

View File

@@ -20,10 +20,11 @@ import (
  "fmt"
  "os/exec"
- "k8s.io/api/core/v1"
+ v1 "k8s.io/api/core/v1"
  metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
  "k8s.io/apimachinery/pkg/util/uuid"
  "k8s.io/kubernetes/test/e2e/framework"
+ e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
  e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
  "k8s.io/kubernetes/test/e2e/storage/utils"
  admissionapi "k8s.io/pod-security-admission/api"
@@ -65,7 +66,7 @@ func doTestWriteAndReadToLocalSsd(f *framework.Framework) {
  var msg string
  var out = []string{"hello world"}
- f.TestContainerOutput(msg, pod, 0, out)
+ e2eoutput.TestContainerOutput(f, msg, pod, 0, out)
  }
  func testPodWithSsd(command string) *v1.Pod {

View File

@@ -62,7 +62,7 @@ var _ = utils.SIGDescribe("HostPathType Directory [Slow]", func() {
  ginkgo.By("Create a pod for further testing")
  hostBaseDir = path.Join("/tmp", ns)
  mountBaseDir = "/mnt/test"
- basePod = f.PodClient().CreateSync(newHostPathTypeTestPod(map[string]string{}, hostBaseDir, mountBaseDir, &hostPathDirectoryOrCreate))
+ basePod = e2epod.NewPodClient(f).CreateSync(newHostPathTypeTestPod(map[string]string{}, hostBaseDir, mountBaseDir, &hostPathDirectoryOrCreate))
  ginkgo.By(fmt.Sprintf("running on node %s", basePod.Spec.NodeName))
  targetDir = path.Join(hostBaseDir, "adir")
  ginkgo.By("Should automatically create a new directory 'adir' when HostPathType is HostPathDirectoryOrCreate")
@@ -130,7 +130,7 @@ var _ = utils.SIGDescribe("HostPathType File [Slow]", func() {
  ginkgo.By("Create a pod for further testing")
  hostBaseDir = path.Join("/tmp", ns)
  mountBaseDir = "/mnt/test"
- basePod = f.PodClient().CreateSync(newHostPathTypeTestPod(map[string]string{}, hostBaseDir, mountBaseDir, &hostPathDirectoryOrCreate))
+ basePod = e2epod.NewPodClient(f).CreateSync(newHostPathTypeTestPod(map[string]string{}, hostBaseDir, mountBaseDir, &hostPathDirectoryOrCreate))
  ginkgo.By(fmt.Sprintf("running on node %s", basePod.Spec.NodeName))
  targetFile = path.Join(hostBaseDir, "afile")
  ginkgo.By("Should automatically create a new file 'afile' when HostPathType is HostPathFileOrCreate")
@@ -198,7 +198,7 @@ var _ = utils.SIGDescribe("HostPathType Socket [Slow]", func() {
  ginkgo.By("Create a pod for further testing")
  hostBaseDir = path.Join("/tmp", ns)
  mountBaseDir = "/mnt/test"
- basePod = f.PodClient().CreateSync(newHostPathTypeTestPodWithCommand(map[string]string{}, hostBaseDir, mountBaseDir, &hostPathDirectoryOrCreate, fmt.Sprintf("nc -lU %s", path.Join(mountBaseDir, "asocket"))))
+ basePod = e2epod.NewPodClient(f).CreateSync(newHostPathTypeTestPodWithCommand(map[string]string{}, hostBaseDir, mountBaseDir, &hostPathDirectoryOrCreate, fmt.Sprintf("nc -lU %s", path.Join(mountBaseDir, "asocket"))))
  ginkgo.By(fmt.Sprintf("running on node %s", basePod.Spec.NodeName))
  targetSocket = path.Join(hostBaseDir, "asocket")
  })
@@ -264,7 +264,7 @@ var _ = utils.SIGDescribe("HostPathType Character Device [Slow]", func() {
  ginkgo.By("Create a pod for further testing")
  hostBaseDir = path.Join("/tmp", ns)
  mountBaseDir = "/mnt/test"
- basePod = f.PodClient().CreateSync(newHostPathTypeTestPod(map[string]string{}, hostBaseDir, mountBaseDir, &hostPathDirectoryOrCreate))
+ basePod = e2epod.NewPodClient(f).CreateSync(newHostPathTypeTestPod(map[string]string{}, hostBaseDir, mountBaseDir, &hostPathDirectoryOrCreate))
  ginkgo.By(fmt.Sprintf("running on node %s", basePod.Spec.NodeName))
  targetCharDev = path.Join(hostBaseDir, "achardev")
  ginkgo.By("Create a character device for further testing")
@@ -334,7 +334,7 @@ var _ = utils.SIGDescribe("HostPathType Block Device [Slow]", func() {
  ginkgo.By("Create a pod for further testing")
  hostBaseDir = path.Join("/tmp", ns)
  mountBaseDir = "/mnt/test"
- basePod = f.PodClient().CreateSync(newHostPathTypeTestPod(map[string]string{}, hostBaseDir, mountBaseDir, &hostPathDirectoryOrCreate))
+ basePod = e2epod.NewPodClient(f).CreateSync(newHostPathTypeTestPod(map[string]string{}, hostBaseDir, mountBaseDir, &hostPathDirectoryOrCreate))
  ginkgo.By(fmt.Sprintf("running on node %s", basePod.Spec.NodeName))
  targetBlockDev = path.Join(hostBaseDir, "ablkdev")
  ginkgo.By("Create a block device for further testing")

View File

@@ -63,7 +63,7 @@ var _ = utils.SIGDescribe("Mounted volume expand [Feature:StorageProvider]", fun
  e2eskipper.SkipUnlessProviderIs("aws", "gce")
  c = f.ClientSet
  ns = f.Namespace.Name
- framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout))
+ framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout))
  node, err := e2enode.GetRandomReadySchedulableNode(f.ClientSet)
  framework.ExpectNoError(err)
@@ -72,8 +72,8 @@ var _ = utils.SIGDescribe("Mounted volume expand [Feature:StorageProvider]", fun
  nodeKey = "mounted_volume_expand_" + ns
  nodeLabelValue = ns
  nodeKeyValueLabel = map[string]string{nodeKey: nodeLabelValue}
- framework.AddOrUpdateLabelOnNode(c, nodeName, nodeKey, nodeLabelValue)
- ginkgo.DeferCleanup(framework.RemoveLabelOffNode, c, nodeName, nodeKey)
+ e2enode.AddOrUpdateLabelOnNode(c, nodeName, nodeKey, nodeLabelValue)
+ ginkgo.DeferCleanup(e2enode.RemoveLabelOffNode, c, nodeName, nodeKey)
  test := testsuites.StorageClassTest{
  Name: "default",

View File

@@ -490,7 +490,7 @@ var _ = utils.SIGDescribe("Pod Disks [Feature:StorageProvider]", func() {
  func countReadyNodes(c clientset.Interface, hostName types.NodeName) int {
  e2enode.WaitForNodeToBeReady(c, string(hostName), nodeStatusTimeout)
- framework.WaitForAllNodesSchedulable(c, nodeStatusTimeout)
+ e2enode.WaitForAllNodesSchedulable(c, nodeStatusTimeout)
  nodes, err := e2enode.GetReadySchedulableNodes(c)
  framework.ExpectNoError(err)
  return len(nodes.Items)

View File

@@ -43,6 +43,7 @@ import (
  "k8s.io/kubernetes/test/e2e/framework"
  e2enode "k8s.io/kubernetes/test/e2e/framework/node"
  e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
+ e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
  e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
  e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
  e2estatefulset "k8s.io/kubernetes/test/e2e/framework/statefulset"
@@ -802,7 +803,7 @@ func twoPodsReadWriteSerialTest(f *framework.Framework, config *localTestConfig,
  func createPodWithFsGroupTest(config *localTestConfig, testVol *localTestVolume, fsGroup int64, expectedFsGroup int64) *v1.Pod {
  pod, err := createLocalPod(config, testVol, &fsGroup)
  framework.ExpectNoError(err)
- _, err = framework.LookForStringInPodExec(config.ns, pod.Name, []string{"stat", "-c", "%g", volumeDir}, strconv.FormatInt(expectedFsGroup, 10), time.Second*3)
+ _, err = e2eoutput.LookForStringInPodExec(config.ns, pod.Name, []string{"stat", "-c", "%g", volumeDir}, strconv.FormatInt(expectedFsGroup, 10), time.Second*3)
  framework.ExpectNoError(err, "failed to get expected fsGroup %d on directory %s in pod %s", fsGroup, volumeDir, pod.Name)
  return pod
  }

View File

@@ -30,6 +30,7 @@ import (
  "k8s.io/kubernetes/pkg/util/slice"
  volumeutil "k8s.io/kubernetes/pkg/volume/util"
  "k8s.io/kubernetes/test/e2e/framework"
+ e2enode "k8s.io/kubernetes/test/e2e/framework/node"
  e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
  "k8s.io/kubernetes/test/e2e/storage/utils"
  admissionapi "k8s.io/pod-security-admission/api"
@@ -53,7 +54,7 @@ var _ = utils.SIGDescribe("PV Protection", func() {
  ginkgo.BeforeEach(func() {
  client = f.ClientSet
  nameSpace = f.Namespace.Name
- framework.ExpectNoError(framework.WaitForAllNodesSchedulable(client, framework.TestContext.NodeSchedulableTimeout))
+ framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(client, framework.TestContext.NodeSchedulableTimeout))
  // Enforce binding only within test space via selector labels
  volLabel = labels.Set{e2epv.VolumeSelectorKey: nameSpace}

View File

@@ -31,6 +31,7 @@ import (
  "k8s.io/kubernetes/pkg/util/slice"
  volumeutil "k8s.io/kubernetes/pkg/volume/util"
  "k8s.io/kubernetes/test/e2e/framework"
+ e2enode "k8s.io/kubernetes/test/e2e/framework/node"
  e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
  e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
  "k8s.io/kubernetes/test/e2e/storage/testsuites"
@@ -74,7 +75,7 @@ var _ = utils.SIGDescribe("PVC Protection", func() {
  ginkgo.BeforeEach(func() {
  client = f.ClientSet
  nameSpace = f.Namespace.Name
- framework.ExpectNoError(framework.WaitForAllNodesSchedulable(client, framework.TestContext.NodeSchedulableTimeout))
+ framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(client, framework.TestContext.NodeSchedulableTimeout))
  ginkgo.By("Creating a PVC")
  prefix := "pvc-protection"

View File

@@ -29,6 +29,7 @@ import (
  "k8s.io/kubernetes/test/e2e/framework"
  e2enode "k8s.io/kubernetes/test/e2e/framework/node"
  e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
+ e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
  e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
  e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
  storageframework "k8s.io/kubernetes/test/e2e/storage/framework"
@@ -713,7 +714,7 @@ func TestConcurrentAccessToRelatedVolumes(f *framework.Framework, cs clientset.I
  // Check that all pods have the same content
  index := i + 1
  ginkgo.By(fmt.Sprintf("Checking if the volume in pod%d has expected initial content", index))
- _, err := framework.LookForStringInPodExec(pods[i].Namespace, pods[i].Name, commands, expectedContent, time.Minute)
+ _, err := e2eoutput.LookForStringInPodExec(pods[i].Namespace, pods[i].Name, commands, expectedContent, time.Minute)
  framework.ExpectNoError(err, "failed: finding the contents of the block volume %s.", fileName)
  } else {
  fileName := "/mnt/volume1/index.html"
@@ -721,7 +722,7 @@ func TestConcurrentAccessToRelatedVolumes(f *framework.Framework, cs clientset.I
  // Check that all pods have the same content
  index := i + 1
  ginkgo.By(fmt.Sprintf("Checking if the volume in pod%d has expected initial content", index))
- _, err := framework.LookForStringInPodExec(pods[i].Namespace, pods[i].Name, commands, expectedContent, time.Minute)
+ _, err := e2eoutput.LookForStringInPodExec(pods[i].Namespace, pods[i].Name, commands, expectedContent, time.Minute)
  framework.ExpectNoError(err, "failed: finding the contents of the mounted file %s.", fileName)
  }
  }

View File

@@ -35,6 +35,7 @@ import (
  "k8s.io/component-helpers/storage/ephemeral"
  "k8s.io/kubernetes/test/e2e/framework"
  e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
+ e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
  e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
  e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
  e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
@@ -259,7 +260,7 @@ func (s *snapshottableTestSuite) DefineTests(driver storageframework.TestDriver,
  framework.ExpectNoError(e2epod.WaitTimeoutForPodRunningInNamespace(cs, restoredPod.Name, restoredPod.Namespace, f.Timeouts.PodStartSlow))
  if pattern.VolType != storageframework.GenericEphemeralVolume {
  commands := e2evolume.GenerateReadFileCmd(datapath)
- _, err = framework.LookForStringInPodExec(restoredPod.Namespace, restoredPod.Name, commands, originalMntTestData, time.Minute)
+ _, err = e2eoutput.LookForStringInPodExec(restoredPod.Namespace, restoredPod.Name, commands, originalMntTestData, time.Minute)
  framework.ExpectNoError(err)
  }
@@ -408,7 +409,7 @@ func (s *snapshottableTestSuite) DefineTests(driver storageframework.TestDriver,
  })
  framework.ExpectNoError(e2epod.WaitTimeoutForPodRunningInNamespace(cs, restoredPod.Name, restoredPod.Namespace, f.Timeouts.PodStartSlow))
  commands := e2evolume.GenerateReadFileCmd(datapath)
- _, err = framework.LookForStringInPodExec(restoredPod.Namespace, restoredPod.Name, commands, originalMntTestData, time.Minute)
+ _, err = e2eoutput.LookForStringInPodExec(restoredPod.Namespace, restoredPod.Name, commands, originalMntTestData, time.Minute)
  framework.ExpectNoError(err)
  ginkgo.By("should delete the VolumeSnapshotContent according to its deletion policy")

View File

@@ -35,8 +35,10 @@ import (
  "k8s.io/apimachinery/pkg/util/sets"
  "k8s.io/apimachinery/pkg/util/wait"
  "k8s.io/kubernetes/test/e2e/framework"
+ e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
  e2enode "k8s.io/kubernetes/test/e2e/framework/node"
  e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
+ e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
  e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
  e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
  storageframework "k8s.io/kubernetes/test/e2e/storage/framework"
@@ -487,7 +489,7 @@ func TestBasicSubpathFile(f *framework.Framework, contents string, pod *v1.Pod,
  ginkgo.By(fmt.Sprintf("Creating pod %s", pod.Name))
  removeUnusedContainers(pod)
- f.TestContainerOutput("atomic-volume-subpath", pod, 0, []string{contents})
+ e2eoutput.TestContainerOutput(f, "atomic-volume-subpath", pod, 0, []string{contents})
  ginkgo.By(fmt.Sprintf("Deleting pod %s", pod.Name))
  err := e2epod.DeletePodWithWait(f.ClientSet, pod)
@@ -670,7 +672,7 @@ func addMultipleWrites(container *v1.Container, file1 string, file2 string) {
  func testMultipleReads(f *framework.Framework, pod *v1.Pod, containerIndex int, file1 string, file2 string) {
  ginkgo.By(fmt.Sprintf("Creating pod %s", pod.Name))
  removeUnusedContainers(pod)
- f.TestContainerOutput("multi_subpath", pod, containerIndex, []string{
+ e2eoutput.TestContainerOutput(f, "multi_subpath", pod, containerIndex, []string{
  "content of file \"" + file1 + "\": mount-tester new file",
  "content of file \"" + file2 + "\": mount-tester new file",
  })
@@ -689,7 +691,7 @@ func testReadFile(f *framework.Framework, file string, pod *v1.Pod, containerInd
  ginkgo.By(fmt.Sprintf("Creating pod %s", pod.Name))
  removeUnusedContainers(pod)
- f.TestContainerOutput("subpath", pod, containerIndex, []string{
+ e2eoutput.TestContainerOutput(f, "subpath", pod, containerIndex, []string{
  "content of file \"" + file + "\": mount-tester new file",
  })
@@ -1040,5 +1042,5 @@ func podContainerExec(pod *v1.Pod, containerIndex int, command string) (string,
  shell = "/bin/sh"
  option = "-c"
  }
- return framework.RunKubectl(pod.Namespace, "exec", pod.Name, "--container", pod.Spec.Containers[containerIndex].Name, "--", shell, option, command)
+ return e2ekubectl.RunKubectl(pod.Namespace, "exec", pod.Name, "--container", pod.Spec.Containers[containerIndex].Name, "--", shell, option, command)
  }
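The kubectl helper is split out in the same fashion: framework.RunKubectl becomes e2ekubectl.RunKubectl with the argument list left as is. A sketch of the new call, reusing the arguments from the hunk above and the (string, error) return shape implied by podContainerExec:

    // Run `kubectl exec` through the e2ekubectl package; output and error are returned to the caller.
    out, err := e2ekubectl.RunKubectl(pod.Namespace, "exec", pod.Name,
        "--container", pod.Spec.Containers[containerIndex].Name,
        "--", shell, option, command)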

View File

@@ -32,6 +32,7 @@ import (
  "k8s.io/apimachinery/pkg/util/errors"
  "k8s.io/kubernetes/test/e2e/framework"
  e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
+ e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
  e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
  e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
  storageframework "k8s.io/kubernetes/test/e2e/storage/framework"
@@ -254,7 +255,7 @@ func testScriptInPod(
  }
  e2epod.SetNodeSelection(&pod.Spec, config.ClientNodeSelection)
  ginkgo.By(fmt.Sprintf("Creating pod %s", pod.Name))
- f.TestContainerOutput("exec-volume-test", pod, 0, []string{fileName})
+ e2eoutput.TestContainerOutput(f, "exec-volume-test", pod, 0, []string{fileName})
  ginkgo.By(fmt.Sprintf("Deleting pod %s", pod.Name))
  err := e2epod.DeletePodWithWait(f.ClientSet, pod)

View File

@@ -26,7 +26,7 @@ import (
  "strings"
  "github.com/onsi/ginkgo/v2"
- "k8s.io/api/core/v1"
+ v1 "k8s.io/api/core/v1"
  "k8s.io/apimachinery/pkg/util/uuid"
  "k8s.io/kubernetes/test/e2e/framework"
  )

View File

@@ -18,7 +18,7 @@ package storage
  import (
  "github.com/onsi/ginkgo/v2"
- "k8s.io/api/core/v1"
+ v1 "k8s.io/api/core/v1"
  clientset "k8s.io/client-go/kubernetes"
  v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
  "k8s.io/kubernetes/test/e2e/framework"
@@ -37,7 +37,7 @@ var _ = utils.SIGDescribe("Volume limits", func() {
  ginkgo.BeforeEach(func() {
  e2eskipper.SkipUnlessProviderIs("aws", "gce", "gke")
  c = f.ClientSet
- framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout))
+ framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout))
  })
  ginkgo.It("should verify that all nodes have volume limits", func() {

View File

@@ -18,9 +18,10 @@ package vsphere
  import (
  "context"
+ "sync"
  metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
  "k8s.io/kubernetes/test/e2e/framework"
- "sync"
  )
  var once sync.Once

View File

@@ -27,6 +27,7 @@ import (
  metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
  clientset "k8s.io/client-go/kubernetes"
  "k8s.io/kubernetes/test/e2e/framework"
+ e2enode "k8s.io/kubernetes/test/e2e/framework/node"
  e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
  e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
  e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
@@ -49,7 +50,7 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:vsphere][Feature:ReclaimPo
  ginkgo.BeforeEach(func() {
  c = f.ClientSet
  ns = f.Namespace.Name
- framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout))
+ framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout))
  })
  ginkgo.Describe("persistentvolumereclaim:vsphere [Feature:vsphere]", func() {

View File

@@ -25,6 +25,7 @@ import (
  metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
  clientset "k8s.io/client-go/kubernetes"
  "k8s.io/kubernetes/test/e2e/framework"
+ e2enode "k8s.io/kubernetes/test/e2e/framework/node"
  e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
  e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
  "k8s.io/kubernetes/test/e2e/storage/utils"
@@ -68,7 +69,7 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:vsphere][Feature:LabelSele
  ns = f.Namespace.Name
  Bootstrap(f)
  nodeInfo = GetReadySchedulableRandomNodeInfo()
- framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout))
+ framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout))
  ssdlabels = make(map[string]string)
  ssdlabels["volume-type"] = "ssd"
  vvollabels = make(map[string]string)

View File

@@ -109,7 +109,7 @@ var _ = utils.SIGDescribe("vcp at scale [Feature:vsphere] ", func() {
 nodeSelectorList = createNodeLabels(client, namespace, nodes)
 ginkgo.DeferCleanup(func() {
 for _, node := range nodes.Items {
-framework.RemoveLabelOffNode(client, node.Name, NodeLabelKey)
+e2enode.RemoveLabelOffNode(client, node.Name, NodeLabelKey)
 }
 })
 })
@@ -234,7 +234,7 @@ func createNodeLabels(client clientset.Interface, namespace string, nodes *v1.No
 labelValue: labelVal,
 }
 nodeSelectorList = append(nodeSelectorList, nodeSelector)
-framework.AddOrUpdateLabelOnNode(client, node.Name, NodeLabelKey, labelVal)
+e2enode.AddOrUpdateLabelOnNode(client, node.Name, NodeLabelKey, labelVal)
 }
 return nodeSelectorList
 }
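
Node label manipulation follows the same relocation: AddOrUpdateLabelOnNode and RemoveLabelOffNode are now called through e2enode. A minimal sketch of the label-and-cleanup pattern used by these tests, assuming the e2enode import above and a client/nodeName already in scope (the label value is a placeholder):

    nodeLabelValue := "vsphere_e2e_" + string(uuid.NewUUID())
    // Was framework.AddOrUpdateLabelOnNode / framework.RemoveLabelOffNode.
    e2enode.AddOrUpdateLabelOnNode(client, nodeName, NodeLabelKey, nodeLabelValue)
    ginkgo.DeferCleanup(e2enode.RemoveLabelOffNode, client, nodeName, NodeLabelKey)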


@@ -41,7 +41,9 @@ import (
 "k8s.io/apimachinery/pkg/util/wait"
 clientset "k8s.io/client-go/kubernetes"
 "k8s.io/kubernetes/test/e2e/framework"
+e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
 e2enode "k8s.io/kubernetes/test/e2e/framework/node"
+e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
 e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
 e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
 "k8s.io/kubernetes/test/e2e/storage/utils"
@@ -358,14 +360,14 @@ func getVSpherePodSpecWithVolumePaths(volumePaths []string, keyValuelabel map[st
 func verifyFilesExistOnVSphereVolume(namespace string, podName string, filePaths ...string) {
 for _, filePath := range filePaths {
-_, err := framework.RunKubectl(namespace, "exec", podName, "--", "/bin/ls", filePath)
+_, err := e2ekubectl.RunKubectl(namespace, "exec", podName, "--", "/bin/ls", filePath)
 framework.ExpectNoError(err, fmt.Sprintf("failed to verify file: %q on the pod: %q", filePath, podName))
 }
 }
 func createEmptyFilesOnVSphereVolume(namespace string, podName string, filePaths []string) {
 for _, filePath := range filePaths {
-err := framework.CreateEmptyFileOnPod(namespace, podName, filePath)
+err := e2eoutput.CreateEmptyFileOnPod(namespace, podName, filePath)
 framework.ExpectNoError(err)
 }
 }
@@ -383,7 +385,7 @@ func verifyVSphereVolumesAccessible(c clientset.Interface, pod *v1.Pod, persiste
 }
 // Verify Volumes are accessible
 filepath := filepath.Join("/mnt/", fmt.Sprintf("volume%v", index+1), "/emptyFile.txt")
-_, err = framework.LookForStringInPodExec(namespace, pod.Name, []string{"/bin/touch", filepath}, "", time.Minute)
+_, err = e2eoutput.LookForStringInPodExec(namespace, pod.Name, []string{"/bin/touch", filepath}, "", time.Minute)
 framework.ExpectNoError(err)
 }
 }
@@ -819,7 +821,7 @@ func expectFilesToBeAccessible(namespace string, pods []*v1.Pod, filePaths []str
 // writeContentToPodFile writes the given content to the specified file.
 func writeContentToPodFile(namespace, podName, filePath, content string) error {
-_, err := framework.RunKubectl(namespace, "exec", podName,
+_, err := e2ekubectl.RunKubectl(namespace, "exec", podName,
 "--", "/bin/sh", "-c", fmt.Sprintf("echo '%s' > %s", content, filePath))
 return err
 }
@@ -827,7 +829,7 @@ func writeContentToPodFile(namespace, podName, filePath, content string) error {
 // expectFileContentToMatch checks if a given file contains the specified
 // content, else fails.
 func expectFileContentToMatch(namespace, podName, filePath, content string) {
-_, err := framework.RunKubectl(namespace, "exec", podName,
+_, err := e2ekubectl.RunKubectl(namespace, "exec", podName,
 "--", "/bin/sh", "-c", fmt.Sprintf("grep '%s' %s", content, filePath))
 framework.ExpectNoError(err, fmt.Sprintf("failed to match content of file: %q on the pod: %q", filePath, podName))
 }
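
kubectl invocations and pod-output helpers move the same way: framework.RunKubectl becomes e2ekubectl.RunKubectl, while the file and exec helpers land in e2eoutput. A minimal sketch of the three relocated calls touched above, assuming the e2ekubectl/e2eoutput aliases from the import hunk and placeholder namespace, pod and path values:

    // Was framework.RunKubectl.
    _, err := e2ekubectl.RunKubectl(ns, "exec", podName, "--", "/bin/ls", "/mnt/volume1")
    framework.ExpectNoError(err)

    // Was framework.CreateEmptyFileOnPod.
    framework.ExpectNoError(e2eoutput.CreateEmptyFileOnPod(ns, podName, "/mnt/volume1/emptyFile.txt"))

    // Was framework.LookForStringInPodExec; "ext4" is a placeholder expected string.
    _, err = e2eoutput.LookForStringInPodExec(ns, podName, []string{"/bin/cat", "/mnt/volume1/fstype"}, "ext4", time.Minute)
    framework.ExpectNoError(err)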


@@ -18,6 +18,7 @@ package vsphere
 import (
 "context"
+
 "github.com/onsi/ginkgo/v2"
 "github.com/onsi/gomega"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"


@@ -30,6 +30,7 @@ import (
 "k8s.io/apimachinery/pkg/util/uuid"
 clientset "k8s.io/client-go/kubernetes"
 "k8s.io/kubernetes/test/e2e/framework"
+e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
 e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
@@ -76,8 +77,8 @@ var _ = utils.SIGDescribe("Volume Disk Format [Feature:vsphere]", func() {
 nodeName = GetReadySchedulableRandomNodeInfo().Name
 nodeLabelValue = "vsphere_e2e_" + string(uuid.NewUUID())
 nodeKeyValueLabel = map[string]string{NodeLabelKey: nodeLabelValue}
-framework.AddOrUpdateLabelOnNode(client, nodeName, NodeLabelKey, nodeLabelValue)
-ginkgo.DeferCleanup(framework.RemoveLabelOffNode, client, nodeName, NodeLabelKey)
+e2enode.AddOrUpdateLabelOnNode(client, nodeName, NodeLabelKey, nodeLabelValue)
+ginkgo.DeferCleanup(e2enode.RemoveLabelOffNode, client, nodeName, NodeLabelKey)
 })
 ginkgo.It("verify disk format type - eagerzeroedthick is honored for dynamically provisioned pv using storageclass", func() {


@@ -28,6 +28,7 @@ import (
 clientset "k8s.io/client-go/kubernetes"
 "k8s.io/kubernetes/test/e2e/framework"
 e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
+e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
 e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
 e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
 "k8s.io/kubernetes/test/e2e/storage/utils"
@@ -109,7 +110,7 @@ func invokeTestForFstype(f *framework.Framework, client clientset.Interface, nam
 // Create Pod and verify the persistent volume is accessible
 pod := createPodAndVerifyVolumeAccessible(client, namespace, pvclaim, persistentvolumes)
-_, err := framework.LookForStringInPodExec(namespace, pod.Name, []string{"/bin/cat", "/mnt/volume1/fstype"}, expectedContent, time.Minute)
+_, err := e2eoutput.LookForStringInPodExec(namespace, pod.Name, []string{"/bin/cat", "/mnt/volume1/fstype"}, expectedContent, time.Minute)
 framework.ExpectNoError(err)
 // Detach and delete volume


@@ -26,7 +26,7 @@ import (
 "github.com/onsi/ginkgo/v2"
 "github.com/onsi/gomega"
-"k8s.io/api/core/v1"
+v1 "k8s.io/api/core/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/util/uuid"
 clientset "k8s.io/client-go/kubernetes"
@@ -120,7 +120,7 @@ var _ = utils.SIGDescribe("Volume Attach Verify [Feature:vsphere][Serial][Disrup
 Bootstrap(f)
 client = f.ClientSet
 namespace = f.Namespace.Name
-framework.ExpectNoError(framework.WaitForAllNodesSchedulable(client, framework.TestContext.NodeSchedulableTimeout))
+framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(client, framework.TestContext.NodeSchedulableTimeout))
 nodes, err := e2enode.GetReadySchedulableNodes(client)
 framework.ExpectNoError(err)
@@ -136,7 +136,7 @@ var _ = utils.SIGDescribe("Volume Attach Verify [Feature:vsphere][Serial][Disrup
 nodeKeyValueLabel := make(map[string]string)
 nodeKeyValueLabel[labelKey] = nodeLabelValue
 nodeKeyValueLabelList = append(nodeKeyValueLabelList, nodeKeyValueLabel)
-framework.AddOrUpdateLabelOnNode(client, nodeName, labelKey, nodeLabelValue)
+e2enode.AddOrUpdateLabelOnNode(client, nodeName, labelKey, nodeLabelValue)
 }
 })


@@ -46,7 +46,7 @@ var _ = utils.SIGDescribe("Node Unregister [Feature:vsphere] [Slow] [Disruptive]
 Bootstrap(f)
 client = f.ClientSet
 namespace = f.Namespace.Name
-framework.ExpectNoError(framework.WaitForAllNodesSchedulable(client, framework.TestContext.NodeSchedulableTimeout))
+framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(client, framework.TestContext.NodeSchedulableTimeout))
 framework.ExpectNoError(err)
 workingDir = GetAndExpectStringEnvVar("VSPHERE_WORKING_DIR")
 })


@@ -58,7 +58,7 @@ var _ = utils.SIGDescribe("Node Poweroff [Feature:vsphere] [Slow] [Disruptive]",
 Bootstrap(f)
 client = f.ClientSet
 namespace = f.Namespace.Name
-framework.ExpectNoError(framework.WaitForAllNodesSchedulable(client, framework.TestContext.NodeSchedulableTimeout))
+framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(client, framework.TestContext.NodeSchedulableTimeout))
 nodeList, err := e2enode.GetReadySchedulableNodes(f.ClientSet)
 framework.ExpectNoError(err)
 if len(nodeList.Items) < 2 {


@@ -31,6 +31,7 @@ import (
 "k8s.io/kubernetes/test/e2e/framework"
 e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
+e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
 e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
 "k8s.io/kubernetes/test/e2e/storage/utils"
 admissionapi "k8s.io/pod-security-admission/api"
@@ -58,14 +59,14 @@ var _ = utils.SIGDescribe("Volume Placement [Feature:vsphere]", func() {
 Bootstrap(f)
 c = f.ClientSet
 ns = f.Namespace.Name
-framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout))
+framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout))
 node1Name, node1KeyValueLabel, node2Name, node2KeyValueLabel = testSetupVolumePlacement(c, ns)
 ginkgo.DeferCleanup(func() {
 if len(node1KeyValueLabel) > 0 {
-framework.RemoveLabelOffNode(c, node1Name, NodeLabelKey)
+e2enode.RemoveLabelOffNode(c, node1Name, NodeLabelKey)
 }
 if len(node2KeyValueLabel) > 0 {
-framework.RemoveLabelOffNode(c, node2Name, NodeLabelKey)
+e2enode.RemoveLabelOffNode(c, node2Name, NodeLabelKey)
 }
 })
 nodeInfo = TestContext.NodeMapper.GetNodeInfo(node1Name)
@@ -307,10 +308,10 @@ var _ = utils.SIGDescribe("Volume Placement [Feature:vsphere]", func() {
 // Create empty files on the mounted volumes on the pod to verify volume is writable
 ginkgo.By("Creating empty file on volume mounted on pod-A")
-framework.CreateEmptyFileOnPod(ns, podA.Name, podAFileName)
+e2eoutput.CreateEmptyFileOnPod(ns, podA.Name, podAFileName)
 ginkgo.By("Creating empty file volume mounted on pod-B")
-framework.CreateEmptyFileOnPod(ns, podB.Name, podBFileName)
+e2eoutput.CreateEmptyFileOnPod(ns, podB.Name, podBFileName)
 // Verify newly and previously created files present on the volume mounted on the pod
 ginkgo.By("Verify newly Created file and previously created files present on volume mounted on pod-A")
@@ -337,12 +338,12 @@ func testSetupVolumePlacement(client clientset.Interface, namespace string) (nod
 node1LabelValue := "vsphere_e2e_" + string(uuid.NewUUID())
 node1KeyValueLabel = make(map[string]string)
 node1KeyValueLabel[NodeLabelKey] = node1LabelValue
-framework.AddOrUpdateLabelOnNode(client, node1Name, NodeLabelKey, node1LabelValue)
+e2enode.AddOrUpdateLabelOnNode(client, node1Name, NodeLabelKey, node1LabelValue)
 node2LabelValue := "vsphere_e2e_" + string(uuid.NewUUID())
 node2KeyValueLabel = make(map[string]string)
 node2KeyValueLabel[NodeLabelKey] = node2LabelValue
-framework.AddOrUpdateLabelOnNode(client, node2Name, NodeLabelKey, node2LabelValue)
+e2enode.AddOrUpdateLabelOnNode(client, node2Name, NodeLabelKey, node2LabelValue)
 return node1Name, node1KeyValueLabel, node2Name, node2KeyValueLabel
 }


@@ -80,7 +80,7 @@ var _ = utils.SIGDescribe("Verify Volume Attach Through vpxd Restart [Feature:vs
 Bootstrap(f)
 client = f.ClientSet
 namespace = f.Namespace.Name
-framework.ExpectNoError(framework.WaitForAllNodesSchedulable(client, framework.TestContext.NodeSchedulableTimeout))
+framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(client, framework.TestContext.NodeSchedulableTimeout))
 nodes, err := e2enode.GetReadySchedulableNodes(client)
 framework.ExpectNoError(err)
@@ -91,7 +91,7 @@ var _ = utils.SIGDescribe("Verify Volume Attach Through vpxd Restart [Feature:vs
 nodeInfo := TestContext.NodeMapper.GetNodeInfo(nodes.Items[i].Name)
 nodeName := nodes.Items[i].Name
 nodeLabel := "vsphere_e2e_" + string(uuid.NewUUID())
-framework.AddOrUpdateLabelOnNode(client, nodeName, labelKey, nodeLabel)
+e2enode.AddOrUpdateLabelOnNode(client, nodeName, labelKey, nodeLabel)
 vcHost := nodeInfo.VSphere.Config.Hostname
 vcNodesMap[vcHost] = append(vcNodesMap[vcHost], node{


@@ -35,6 +35,7 @@ import (
 "k8s.io/apimachinery/pkg/util/wait"
 "k8s.io/kubernetes/test/e2e/framework"
+e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
 e2estatefulset "k8s.io/kubernetes/test/e2e/framework/statefulset"
 e2etestfiles "k8s.io/kubernetes/test/e2e/framework/testfiles"
 "k8s.io/kubernetes/test/e2e/upgrades"
@@ -69,7 +70,7 @@ func cassandraKubectlCreate(ns, file string) {
 framework.Fail(err.Error())
 }
 input := string(data)
-framework.RunKubectlOrDieInput(ns, input, "create", "-f", "-")
+e2ekubectl.RunKubectlOrDieInput(ns, input, "create", "-f", "-")
 }
 // Setup creates a Cassandra StatefulSet and a PDB. It also brings up a tester
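
The upgrade tests apply the same substitution for stdin-driven kubectl calls: framework.RunKubectlOrDieInput moves to e2ekubectl. A minimal sketch of a kubectlCreate-style helper after the move (the helper name is illustrative; the body mirrors the hunk above):

    func kubectlCreateFromManifest(ns, file string) {
        data, err := e2etestfiles.Read(file)
        if err != nil {
            framework.Fail(err.Error())
        }
        // Was framework.RunKubectlOrDieInput.
        e2ekubectl.RunKubectlOrDieInput(ns, string(data), "create", "-f", "-")
    }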


@@ -18,6 +18,7 @@ package apps
 import (
 "context"
+
 "github.com/onsi/ginkgo/v2"
 appsv1 "k8s.io/api/apps/v1"


@@ -35,6 +35,7 @@ import (
 "k8s.io/apimachinery/pkg/util/wait"
 "k8s.io/kubernetes/test/e2e/framework"
+e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
 e2estatefulset "k8s.io/kubernetes/test/e2e/framework/statefulset"
 e2etestfiles "k8s.io/kubernetes/test/e2e/framework/testfiles"
 "k8s.io/kubernetes/test/e2e/upgrades"
@@ -68,7 +69,7 @@ func kubectlCreate(ns, file string) {
 framework.Fail(err.Error())
 }
 input := string(data)
-framework.RunKubectlOrDieInput(ns, input, "create", "-f", "-")
+e2ekubectl.RunKubectlOrDieInput(ns, input, "create", "-f", "-")
 }
 // Setup creates etcd statefulset and then verifies that the etcd is writable.


@@ -35,6 +35,7 @@ import (
 "k8s.io/apimachinery/pkg/util/wait"
 "k8s.io/kubernetes/test/e2e/framework"
+e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
 e2estatefulset "k8s.io/kubernetes/test/e2e/framework/statefulset"
 e2etestfiles "k8s.io/kubernetes/test/e2e/framework/testfiles"
 "k8s.io/kubernetes/test/e2e/upgrades"
@@ -70,7 +71,7 @@ func mysqlKubectlCreate(ns, file string) {
 framework.Fail(err.Error())
 }
 input := string(data)
-framework.RunKubectlOrDieInput(ns, input, "create", "-f", "-")
+e2ekubectl.RunKubectlOrDieInput(ns, input, "create", "-f", "-")
 }
 func (t *MySQLUpgradeTest) getServiceIP(f *framework.Framework, ns, svcName string) string {


@@ -19,6 +19,7 @@ package apps
 import (
 "context"
 "fmt"
+
 appsv1 "k8s.io/api/apps/v1"
 v1 "k8s.io/api/core/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"


@@ -18,10 +18,11 @@ package apps
 import (
 "context"
+
 "github.com/onsi/ginkgo/v2"
 appsv1 "k8s.io/api/apps/v1"
-"k8s.io/api/core/v1"
+v1 "k8s.io/api/core/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/util/version"


@@ -18,10 +18,12 @@ package node
 import (
 "context"
 v1 "k8s.io/api/core/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/kubernetes/test/e2e/framework"
 e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
+e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 e2esecurity "k8s.io/kubernetes/test/e2e/framework/security"
 e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
 "k8s.io/kubernetes/test/e2e/upgrades"
@@ -61,7 +63,7 @@ func (t *AppArmorUpgradeTest) Setup(f *framework.Framework) {
 // Create the initial test pod.
 ginkgo.By("Creating a long-running AppArmor enabled pod.")
-t.pod = e2esecurity.CreateAppArmorTestPod(f.Namespace.Name, f.ClientSet, f.PodClient(), false, false)
+t.pod = e2esecurity.CreateAppArmorTestPod(f.Namespace.Name, f.ClientSet, e2epod.NewPodClient(f), false, false)
 // Verify initial state.
 t.verifyNodesAppArmorEnabled(f)
@@ -88,7 +90,7 @@ func (t *AppArmorUpgradeTest) Teardown(f *framework.Framework) {
 func (t *AppArmorUpgradeTest) verifyPodStillUp(f *framework.Framework) {
 ginkgo.By("Verifying an AppArmor profile is continuously enforced for a pod")
-pod, err := f.PodClient().Get(context.TODO(), t.pod.Name, metav1.GetOptions{})
+pod, err := e2epod.NewPodClient(f).Get(context.TODO(), t.pod.Name, metav1.GetOptions{})
 framework.ExpectNoError(err, "Should be able to get pod")
 framework.ExpectEqual(pod.Status.Phase, v1.PodRunning, "Pod should stay running")
 gomega.Expect(pod.Status.ContainerStatuses[0].State.Running).NotTo(gomega.BeNil(), "Container should be running")
@@ -97,7 +99,7 @@ func (t *AppArmorUpgradeTest) verifyPodStillUp(f *framework.Framework) {
 func (t *AppArmorUpgradeTest) verifyNewPodSucceeds(f *framework.Framework) {
 ginkgo.By("Verifying an AppArmor profile is enforced for a new pod")
-e2esecurity.CreateAppArmorTestPod(f.Namespace.Name, f.ClientSet, f.PodClient(), false, true)
+e2esecurity.CreateAppArmorTestPod(f.Namespace.Name, f.ClientSet, e2epod.NewPodClient(f), false, true)
 }
 func (t *AppArmorUpgradeTest) verifyNodesAppArmorEnabled(f *framework.Framework) {
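
Calls to the framework method f.PodClient() are replaced by the constructor e2epod.NewPodClient(f); as the hunks above show, the returned client is used with the same Get-style interface as before. A minimal sketch of reading a pod through the relocated client (the pod name is a placeholder):

    // Was f.PodClient(); the constructor now lives in e2epod.
    podClient := e2epod.NewPodClient(f)
    pod, err := podClient.Get(context.TODO(), "test-pod", metav1.GetOptions{})
    framework.ExpectNoError(err, "Should be able to get pod")
    gomega.Expect(pod.Status.ContainerStatuses).NotTo(gomega.BeEmpty())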

Some files were not shown because too many files have changed in this diff.