Merge pull request #112043 from pohly/e2e-sub-package-continuation

e2e: sub package refactoring
Kubernetes Prow Robot authored on 2022-10-06 13:49:53 -07:00; committed by GitHub
commit 2eda22a653
239 changed files with 2726 additions and 2252 deletions
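The pattern throughout this commit: helpers move out of the monolithic k8s.io/kubernetes/test/e2e/framework package into topic-specific sub-packages imported under e2e* aliases. A minimal sketch of the resulting import block, using only aliases that actually appear in the hunks below:

import (
	"k8s.io/kubernetes/test/e2e/framework"
	e2edebug "k8s.io/kubernetes/test/e2e/framework/debug"
	e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
	e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
	e2eproviders "k8s.io/kubernetes/test/e2e/framework/providers"
)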

View File

@ -67,7 +67,7 @@ dependencies:
match: ETCD_VERSION=
- path: staging/src/k8s.io/sample-apiserver/artifacts/example/deployment.yaml
match: gcr.io/etcd-development/etcd
- path: test/e2e/framework/nodes_util.go
- path: test/e2e/framework/providers/gcp.go
match: const etcdImage
- path: test/utils/image/manifest.go
match: configs\[Etcd\] = Config{list\.GcEtcdRegistry, "etcd", "\d+\.\d+.\d+(-(alpha|beta|rc).\d+)?(-\d+)?"}

View File

@ -42,6 +42,7 @@ import (
"k8s.io/client-go/rest"
"k8s.io/kube-openapi/pkg/validation/spec"
"k8s.io/kubernetes/test/e2e/framework"
e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
"k8s.io/kubernetes/test/utils/crd"
admissionapi "k8s.io/pod-security-admission/api"
)
@ -76,22 +77,22 @@ var _ = SIGDescribe("CustomResourcePublishOpenAPI [Privileged:ClusterAdmin]", fu
ginkgo.By("kubectl validation (kubectl create and apply) allows request with known and required properties")
validCR := fmt.Sprintf(`{%s,"spec":{"bars":[{"name":"test-bar"}]}}`, meta)
if _, err := framework.RunKubectlInput(f.Namespace.Name, validCR, ns, "create", "-f", "-"); err != nil {
if _, err := e2ekubectl.RunKubectlInput(f.Namespace.Name, validCR, ns, "create", "-f", "-"); err != nil {
framework.Failf("failed to create valid CR %s: %v", validCR, err)
}
if _, err := framework.RunKubectl(f.Namespace.Name, ns, "delete", crd.Crd.Spec.Names.Plural, "test-foo"); err != nil {
if _, err := e2ekubectl.RunKubectl(f.Namespace.Name, ns, "delete", crd.Crd.Spec.Names.Plural, "test-foo"); err != nil {
framework.Failf("failed to delete valid CR: %v", err)
}
if _, err := framework.RunKubectlInput(f.Namespace.Name, validCR, ns, "apply", "-f", "-"); err != nil {
if _, err := e2ekubectl.RunKubectlInput(f.Namespace.Name, validCR, ns, "apply", "-f", "-"); err != nil {
framework.Failf("failed to apply valid CR %s: %v", validCR, err)
}
if _, err := framework.RunKubectl(f.Namespace.Name, ns, "delete", crd.Crd.Spec.Names.Plural, "test-foo"); err != nil {
if _, err := e2ekubectl.RunKubectl(f.Namespace.Name, ns, "delete", crd.Crd.Spec.Names.Plural, "test-foo"); err != nil {
framework.Failf("failed to delete valid CR: %v", err)
}
ginkgo.By("kubectl validation (kubectl create and apply) rejects request with value outside defined enum values")
badEnumValueCR := fmt.Sprintf(`{%s,"spec":{"bars":[{"name":"test-bar", "feeling":"NonExistentValue"}]}}`, meta)
if _, err := framework.RunKubectlInput(f.Namespace.Name, badEnumValueCR, ns, "create", "-f", "-"); err == nil || !strings.Contains(err.Error(), `Unsupported value: "NonExistentValue"`) {
if _, err := e2ekubectl.RunKubectlInput(f.Namespace.Name, badEnumValueCR, ns, "create", "-f", "-"); err == nil || !strings.Contains(err.Error(), `Unsupported value: "NonExistentValue"`) {
framework.Failf("unexpected no error when creating CR with unknown enum value: %v", err)
}
@ -99,20 +100,20 @@ var _ = SIGDescribe("CustomResourcePublishOpenAPI [Privileged:ClusterAdmin]", fu
// Because server-side is default in beta but not GA yet, we will produce different behaviors in the default vs GA only conformance tests. We have made the error generic enough to pass both, but should go back and make the error more specific once server-side validation goes GA.
ginkgo.By("kubectl validation (kubectl create and apply) rejects request with unknown properties when disallowed by the schema")
unknownCR := fmt.Sprintf(`{%s,"spec":{"foo":true}}`, meta)
if _, err := framework.RunKubectlInput(f.Namespace.Name, unknownCR, ns, "create", "-f", "-"); err == nil || (!strings.Contains(err.Error(), `unknown field "foo"`) && !strings.Contains(err.Error(), `unknown field "spec.foo"`)) {
if _, err := e2ekubectl.RunKubectlInput(f.Namespace.Name, unknownCR, ns, "create", "-f", "-"); err == nil || (!strings.Contains(err.Error(), `unknown field "foo"`) && !strings.Contains(err.Error(), `unknown field "spec.foo"`)) {
framework.Failf("unexpected no error when creating CR with unknown field: %v", err)
}
if _, err := framework.RunKubectlInput(f.Namespace.Name, unknownCR, ns, "apply", "-f", "-"); err == nil || (!strings.Contains(err.Error(), `unknown field "foo"`) && !strings.Contains(err.Error(), `unknown field "spec.foo"`)) {
if _, err := e2ekubectl.RunKubectlInput(f.Namespace.Name, unknownCR, ns, "apply", "-f", "-"); err == nil || (!strings.Contains(err.Error(), `unknown field "foo"`) && !strings.Contains(err.Error(), `unknown field "spec.foo"`)) {
framework.Failf("unexpected no error when applying CR with unknown field: %v", err)
}
// TODO: see above note, we should check the value of the error once server-side validation is GA.
ginkgo.By("kubectl validation (kubectl create and apply) rejects request without required properties")
noRequireCR := fmt.Sprintf(`{%s,"spec":{"bars":[{"age":"10"}]}}`, meta)
if _, err := framework.RunKubectlInput(f.Namespace.Name, noRequireCR, ns, "create", "-f", "-"); err == nil || (!strings.Contains(err.Error(), `missing required field "name"`) && !strings.Contains(err.Error(), `spec.bars[0].name: Required value`)) {
if _, err := e2ekubectl.RunKubectlInput(f.Namespace.Name, noRequireCR, ns, "create", "-f", "-"); err == nil || (!strings.Contains(err.Error(), `missing required field "name"`) && !strings.Contains(err.Error(), `spec.bars[0].name: Required value`)) {
framework.Failf("unexpected no error when creating CR without required field: %v", err)
}
if _, err := framework.RunKubectlInput(f.Namespace.Name, noRequireCR, ns, "apply", "-f", "-"); err == nil || (!strings.Contains(err.Error(), `missing required field "name"`) && !strings.Contains(err.Error(), `spec.bars[0].name: Required value`)) {
if _, err := e2ekubectl.RunKubectlInput(f.Namespace.Name, noRequireCR, ns, "apply", "-f", "-"); err == nil || (!strings.Contains(err.Error(), `missing required field "name"`) && !strings.Contains(err.Error(), `spec.bars[0].name: Required value`)) {
framework.Failf("unexpected no error when applying CR without required field: %v", err)
}
@ -133,7 +134,7 @@ var _ = SIGDescribe("CustomResourcePublishOpenAPI [Privileged:ClusterAdmin]", fu
}
ginkgo.By("kubectl explain works to return error when explain is called on property that doesn't exist")
if _, err := framework.RunKubectl(f.Namespace.Name, "explain", crd.Crd.Spec.Names.Plural+".spec.bars2"); err == nil || !strings.Contains(err.Error(), `field "bars2" does not exist`) {
if _, err := e2ekubectl.RunKubectl(f.Namespace.Name, "explain", crd.Crd.Spec.Names.Plural+".spec.bars2"); err == nil || !strings.Contains(err.Error(), `field "bars2" does not exist`) {
framework.Failf("unexpected no error when explaining property that doesn't exist: %v", err)
}
@ -160,16 +161,16 @@ var _ = SIGDescribe("CustomResourcePublishOpenAPI [Privileged:ClusterAdmin]", fu
ginkgo.By("kubectl validation (kubectl create and apply) allows request with any unknown properties")
randomCR := fmt.Sprintf(`{%s,"a":{"b":[{"c":"d"}]}}`, meta)
if _, err := framework.RunKubectlInput(f.Namespace.Name, randomCR, ns, "create", "-f", "-"); err != nil {
if _, err := e2ekubectl.RunKubectlInput(f.Namespace.Name, randomCR, ns, "create", "-f", "-"); err != nil {
framework.Failf("failed to create random CR %s for CRD without schema: %v", randomCR, err)
}
if _, err := framework.RunKubectl(f.Namespace.Name, ns, "delete", crd.Crd.Spec.Names.Plural, "test-cr"); err != nil {
if _, err := e2ekubectl.RunKubectl(f.Namespace.Name, ns, "delete", crd.Crd.Spec.Names.Plural, "test-cr"); err != nil {
framework.Failf("failed to delete random CR: %v", err)
}
if _, err := framework.RunKubectlInput(f.Namespace.Name, randomCR, ns, "apply", "-f", "-"); err != nil {
if _, err := e2ekubectl.RunKubectlInput(f.Namespace.Name, randomCR, ns, "apply", "-f", "-"); err != nil {
framework.Failf("failed to apply random CR %s for CRD without schema: %v", randomCR, err)
}
if _, err := framework.RunKubectl(f.Namespace.Name, ns, "delete", crd.Crd.Spec.Names.Plural, "test-cr"); err != nil {
if _, err := e2ekubectl.RunKubectl(f.Namespace.Name, ns, "delete", crd.Crd.Spec.Names.Plural, "test-cr"); err != nil {
framework.Failf("failed to delete random CR: %v", err)
}
@ -201,16 +202,16 @@ var _ = SIGDescribe("CustomResourcePublishOpenAPI [Privileged:ClusterAdmin]", fu
ginkgo.By("kubectl validation (kubectl create and apply) allows request with any unknown properties")
randomCR := fmt.Sprintf(`{%s,"a":{"b":[{"c":"d"}]}}`, meta)
if _, err := framework.RunKubectlInput(f.Namespace.Name, randomCR, ns, "create", "-f", "-"); err != nil {
if _, err := e2ekubectl.RunKubectlInput(f.Namespace.Name, randomCR, ns, "create", "-f", "-"); err != nil {
framework.Failf("failed to create random CR %s for CRD that allows unknown properties at the root: %v", randomCR, err)
}
if _, err := framework.RunKubectl(f.Namespace.Name, ns, "delete", crd.Crd.Spec.Names.Plural, "test-cr"); err != nil {
if _, err := e2ekubectl.RunKubectl(f.Namespace.Name, ns, "delete", crd.Crd.Spec.Names.Plural, "test-cr"); err != nil {
framework.Failf("failed to delete random CR: %v", err)
}
if _, err := framework.RunKubectlInput(f.Namespace.Name, randomCR, ns, "apply", "-f", "-"); err != nil {
if _, err := e2ekubectl.RunKubectlInput(f.Namespace.Name, randomCR, ns, "apply", "-f", "-"); err != nil {
framework.Failf("failed to apply random CR %s for CRD without schema: %v", randomCR, err)
}
if _, err := framework.RunKubectl(f.Namespace.Name, ns, "delete", crd.Crd.Spec.Names.Plural, "test-cr"); err != nil {
if _, err := e2ekubectl.RunKubectl(f.Namespace.Name, ns, "delete", crd.Crd.Spec.Names.Plural, "test-cr"); err != nil {
framework.Failf("failed to delete random CR: %v", err)
}
@ -243,16 +244,16 @@ var _ = SIGDescribe("CustomResourcePublishOpenAPI [Privileged:ClusterAdmin]", fu
ginkgo.By("kubectl validation (kubectl create and apply) allows request with any unknown properties")
randomCR := fmt.Sprintf(`{%s,"spec":{"a":null,"b":[{"c":"d"}]}}`, meta)
if _, err := framework.RunKubectlInput(f.Namespace.Name, randomCR, ns, "create", "-f", "-"); err != nil {
if _, err := e2ekubectl.RunKubectlInput(f.Namespace.Name, randomCR, ns, "create", "-f", "-"); err != nil {
framework.Failf("failed to create random CR %s for CRD that allows unknown properties in a nested object: %v", randomCR, err)
}
if _, err := framework.RunKubectl(f.Namespace.Name, ns, "delete", crd.Crd.Spec.Names.Plural, "test-cr"); err != nil {
if _, err := e2ekubectl.RunKubectl(f.Namespace.Name, ns, "delete", crd.Crd.Spec.Names.Plural, "test-cr"); err != nil {
framework.Failf("failed to delete random CR: %v", err)
}
if _, err := framework.RunKubectlInput(f.Namespace.Name, randomCR, ns, "apply", "-f", "-"); err != nil {
if _, err := e2ekubectl.RunKubectlInput(f.Namespace.Name, randomCR, ns, "apply", "-f", "-"); err != nil {
framework.Failf("failed to apply random CR %s for CRD without schema: %v", randomCR, err)
}
if _, err := framework.RunKubectl(f.Namespace.Name, ns, "delete", crd.Crd.Spec.Names.Plural, "test-cr"); err != nil {
if _, err := e2ekubectl.RunKubectl(f.Namespace.Name, ns, "delete", crd.Crd.Spec.Names.Plural, "test-cr"); err != nil {
framework.Failf("failed to delete random CR: %v", err)
}
@ -715,7 +716,7 @@ func dropDefaults(s *spec.Schema) {
}
func verifyKubectlExplain(ns, name, pattern string) error {
result, err := framework.RunKubectl(ns, "explain", name)
result, err := e2ekubectl.RunKubectl(ns, "explain", name)
if err != nil {
return fmt.Errorf("failed to explain %s: %v", name, err)
}
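The hunks above swap the framework-level kubectl wrappers for the e2ekubectl sub-package with unchanged signatures. A minimal before/after sketch, assuming ns is in scope; manifestJSON is a hypothetical name for the stdin payload:

// Before: out, err := framework.RunKubectl(ns, "get", "pods")
out, err := e2ekubectl.RunKubectl(ns, "get", "pods")
if err != nil {
	framework.Failf("kubectl get failed: %v", err)
}
framework.Logf("pods:\n%s", out)
// Feeding stdin works the same way through RunKubectlInput.
if _, err := e2ekubectl.RunKubectlInput(ns, manifestJSON, "apply", "-f", "-"); err != nil {
	framework.Failf("kubectl apply failed: %v", err)
}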

View File

@ -44,6 +44,7 @@ import (
"k8s.io/client-go/util/retry"
"k8s.io/kubernetes/test/e2e/framework"
e2edeployment "k8s.io/kubernetes/test/e2e/framework/deployment"
e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"k8s.io/kubernetes/test/utils/crd"
imageutils "k8s.io/kubernetes/test/utils/image"
@ -1183,7 +1184,7 @@ func testAttachingPodWebhook(f *framework.Framework) {
ginkgo.By("'kubectl attach' the pod, should be denied by the webhook")
timer := time.NewTimer(30 * time.Second)
defer timer.Stop()
_, err = framework.NewKubectlCommand(f.Namespace.Name, "attach", fmt.Sprintf("--namespace=%v", f.Namespace.Name), pod.Name, "-i", "-c=container1").WithTimeout(timer.C).Exec()
_, err = e2ekubectl.NewKubectlCommand(f.Namespace.Name, "attach", fmt.Sprintf("--namespace=%v", f.Namespace.Name), pod.Name, "-i", "-c=container1").WithTimeout(timer.C).Exec()
framework.ExpectError(err, "'kubectl attach' the pod, should be denied by the webhook")
if e, a := "attaching to pod 'to-be-attached-pod' is not allowed", err.Error(); !strings.Contains(a, e) {
framework.Failf("unexpected 'kubectl attach' error message. expected to contain %q, got %q", e, a)

View File

@ -35,6 +35,7 @@ import (
"k8s.io/kubernetes/pkg/cluster/ports"
kubeschedulerconfig "k8s.io/kubernetes/pkg/scheduler/apis/config"
"k8s.io/kubernetes/test/e2e/framework"
e2edebug "k8s.io/kubernetes/test/e2e/framework/debug"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2erc "k8s.io/kubernetes/test/e2e/framework/rc"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
@ -335,7 +336,7 @@ var _ = SIGDescribe("DaemonRestart [Disruptive]", func() {
}
postRestarts, badNodes := getContainerRestarts(f.ClientSet, ns, labelSelector)
if postRestarts != preRestarts {
framework.DumpNodeDebugInfo(f.ClientSet, badNodes, framework.Logf)
e2edebug.DumpNodeDebugInfo(f.ClientSet, badNodes, framework.Logf)
framework.Failf("Net container restart count went from %v -> %v after kubelet restart on nodes %v \n\n %+v", preRestarts, postRestarts, badNodes, tracker)
}
})
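Node debug dumping now lives in e2edebug with the same (client, node names, log function) shape, so only the package qualifier changes at call sites:

// badNodes holds the names of nodes whose container restart counts changed.
e2edebug.DumpNodeDebugInfo(f.ClientSet, badNodes, framework.Logf)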

View File

@ -53,6 +53,7 @@ import (
"k8s.io/kubernetes/pkg/controller/daemon"
"k8s.io/kubernetes/test/e2e/framework"
e2edaemonset "k8s.io/kubernetes/test/e2e/framework/daemonset"
e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2eresource "k8s.io/kubernetes/test/e2e/framework/resource"
admissionapi "k8s.io/pod-security-admission/api"
@ -770,7 +771,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
return pod.DeletionTimestamp == nil && oldVersion == pod.Spec.Containers[0].Env[0].Value
}); pod != nil {
// make the /tmp/ready file read only, which will cause readiness to fail
if _, err := framework.RunKubectl(pod.Namespace, "exec", "-c", pod.Spec.Containers[0].Name, pod.Name, "--", "/bin/sh", "-ec", "echo 0 > /var/tmp/ready"); err != nil {
if _, err := e2ekubectl.RunKubectl(pod.Namespace, "exec", "-c", pod.Spec.Containers[0].Name, pod.Name, "--", "/bin/sh", "-ec", "echo 0 > /var/tmp/ready"); err != nil {
framework.Logf("Failed to mark pod %s as unready via exec: %v", pod.Name, err)
} else {
framework.Logf("Marked old pod %s as unready", pod.Name)

View File

@ -19,10 +19,11 @@ package apps
import (
"context"
"fmt"
"github.com/onsi/gomega"
"strings"
"time"
"github.com/onsi/gomega"
jsonpatch "github.com/evanphx/json-patch"
"github.com/onsi/ginkgo/v2"

View File

@ -351,7 +351,7 @@ var _ = SIGDescribe("Job", func() {
framework.ExpectNoError(err, "failed to get PodList for job %s in namespace: %s", job.Name, f.Namespace.Name)
gomega.Expect(pods.Items).To(gomega.HaveLen(int(parallelism)))
pod := pods.Items[0]
f.PodClient().Update(pod.Name, func(pod *v1.Pod) {
e2epod.NewPodClient(f).Update(pod.Name, func(pod *v1.Pod) {
pod.OwnerReferences = nil
})
@ -370,7 +370,7 @@ var _ = SIGDescribe("Job", func() {
)).To(gomega.Succeed(), "wait for pod %q to be readopted", pod.Name)
ginkgo.By("Removing the labels from the Job's Pod")
f.PodClient().Update(pod.Name, func(pod *v1.Pod) {
e2epod.NewPodClient(f).Update(pod.Name, func(pod *v1.Pod) {
pod.Labels = nil
})
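f.PodClient() becomes the constructor e2epod.NewPodClient(f); the returned client keeps the same helpers. A sketch of the orphaning pattern used above:

podClient := e2epod.NewPodClient(f)
// Clear the owner references so the controller must re-adopt the pod.
podClient.Update(pod.Name, func(pod *v1.Pod) {
	pod.OwnerReferences = nil
})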

View File

@ -596,7 +596,7 @@ func testReplicationControllerConditionCheck(f *framework.Framework) {
func testRCAdoptMatchingOrphans(f *framework.Framework) {
name := "pod-adoption"
ginkgo.By(fmt.Sprintf("Given a Pod with a 'name' label %s is created", name))
p := f.PodClient().CreateSync(&v1.Pod{
p := e2epod.NewPodClient(f).CreateSync(&v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Labels: map[string]string{

View File

@ -323,7 +323,7 @@ func testReplicaSetConditionCheck(f *framework.Framework) {
func testRSAdoptMatchingAndReleaseNotMatching(f *framework.Framework) {
name := "pod-adoption-release"
ginkgo.By(fmt.Sprintf("Given a Pod with a 'name' label %s is created", name))
p := f.PodClient().CreateSync(&v1.Pod{
p := e2epod.NewPodClient(f).CreateSync(&v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Labels: map[string]string{

View File

@ -45,8 +45,10 @@ import (
watchtools "k8s.io/client-go/tools/watch"
"k8s.io/client-go/util/retry"
"k8s.io/kubernetes/test/e2e/framework"
e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
e2estatefulset "k8s.io/kubernetes/test/e2e/framework/statefulset"
@ -121,7 +123,7 @@ var _ = SIGDescribe("StatefulSet", func() {
ginkgo.AfterEach(func() {
if ginkgo.CurrentSpecReport().Failed() {
framework.DumpDebugInfo(c, ns)
e2eoutput.DumpDebugInfo(c, ns)
}
framework.Logf("Deleting all statefulset in ns %v", ns)
e2estatefulset.DeleteAllStatefulSets(c, ns)
@ -195,7 +197,7 @@ var _ = SIGDescribe("StatefulSet", func() {
framework.ExpectEqual(controllerRef.UID, ss.UID)
ginkgo.By("Orphaning one of the stateful set's pods")
f.PodClient().Update(pod.Name, func(pod *v1.Pod) {
e2epod.NewPodClient(f).Update(pod.Name, func(pod *v1.Pod) {
pod.OwnerReferences = nil
})
@ -215,7 +217,7 @@ var _ = SIGDescribe("StatefulSet", func() {
ginkgo.By("Removing the labels from one of the stateful set's pods")
prevLabels := pod.Labels
f.PodClient().Update(pod.Name, func(pod *v1.Pod) {
e2epod.NewPodClient(f).Update(pod.Name, func(pod *v1.Pod) {
pod.Labels = nil
})
@ -232,7 +234,7 @@ var _ = SIGDescribe("StatefulSet", func() {
// If we don't do this, the test leaks the Pod and PVC.
ginkgo.By("Readding labels to the stateful set's pod")
f.PodClient().Update(pod.Name, func(pod *v1.Pod) {
e2epod.NewPodClient(f).Update(pod.Name, func(pod *v1.Pod) {
pod.Labels = prevLabels
})
@ -1108,7 +1110,7 @@ var _ = SIGDescribe("StatefulSet", func() {
ginkgo.AfterEach(func() {
if ginkgo.CurrentSpecReport().Failed() {
framework.DumpDebugInfo(c, ns)
e2eoutput.DumpDebugInfo(c, ns)
}
framework.Logf("Deleting all statefulset in ns %v", ns)
e2estatefulset.DeleteAllStatefulSets(c, ns)
@ -1201,7 +1203,7 @@ var _ = SIGDescribe("StatefulSet", func() {
e2estatefulset.WaitForStatusAvailableReplicas(c, ss, 2)
ginkgo.By("check availableReplicas are shown in status")
out, err := framework.RunKubectl(ns, "get", "statefulset", ss.Name, "-o=yaml")
out, err := e2ekubectl.RunKubectl(ns, "get", "statefulset", ss.Name, "-o=yaml")
framework.ExpectNoError(err)
if !strings.Contains(out, "availableReplicas: 2") {
framework.Failf("invalid number of availableReplicas: expected=%v received=%v", 2, out)
@ -1231,7 +1233,7 @@ var _ = SIGDescribe("StatefulSet", func() {
ginkgo.AfterEach(func() {
if ginkgo.CurrentSpecReport().Failed() {
framework.DumpDebugInfo(c, ns)
e2eoutput.DumpDebugInfo(c, ns)
}
framework.Logf("Deleting all statefulset in ns %v", ns)
e2estatefulset.DeleteAllStatefulSets(c, ns)
@ -1350,7 +1352,7 @@ var _ = SIGDescribe("StatefulSet", func() {
func kubectlExecWithRetries(ns string, args ...string) (out string) {
var err error
for i := 0; i < 3; i++ {
if out, err = framework.RunKubectl(ns, args...); err == nil {
if out, err = e2ekubectl.RunKubectl(ns, args...); err == nil {
return
}
framework.Logf("Retrying %v:\nerror %v\nstdout %v", args, err, out)
@ -1414,14 +1416,14 @@ func (z *zookeeperTester) write(statefulPodIndex int, kv map[string]string) {
name := fmt.Sprintf("%v-%d", z.ss.Name, statefulPodIndex)
for k, v := range kv {
cmd := fmt.Sprintf("/opt/zookeeper/bin/zkCli.sh create /%v %v", k, v)
framework.Logf(framework.RunKubectlOrDie(z.ss.Namespace, "exec", name, "--", "/bin/sh", "-c", cmd))
framework.Logf(e2ekubectl.RunKubectlOrDie(z.ss.Namespace, "exec", name, "--", "/bin/sh", "-c", cmd))
}
}
func (z *zookeeperTester) read(statefulPodIndex int, key string) string {
name := fmt.Sprintf("%v-%d", z.ss.Name, statefulPodIndex)
cmd := fmt.Sprintf("/opt/zookeeper/bin/zkCli.sh get /%v", key)
return lastLine(framework.RunKubectlOrDie(z.ss.Namespace, "exec", name, "--", "/bin/sh", "-c", cmd))
return lastLine(e2ekubectl.RunKubectlOrDie(z.ss.Namespace, "exec", name, "--", "/bin/sh", "-c", cmd))
}
type mysqlGaleraTester struct {
@ -1478,7 +1480,7 @@ func (m *redisTester) name() string {
func (m *redisTester) redisExec(cmd, ns, podName string) string {
cmd = fmt.Sprintf("/opt/redis/redis-cli -h %v %v", podName, cmd)
return framework.RunKubectlOrDie(ns, "exec", podName, "--", "/bin/sh", "-c", cmd)
return e2ekubectl.RunKubectlOrDie(ns, "exec", podName, "--", "/bin/sh", "-c", cmd)
}
func (m *redisTester) deploy(ns string) *appsv1.StatefulSet {
@ -1509,7 +1511,7 @@ func (c *cockroachDBTester) name() string {
func (c *cockroachDBTester) cockroachDBExec(cmd, ns, podName string) string {
cmd = fmt.Sprintf("/cockroach/cockroach sql --insecure --host %s.cockroachdb -e \"%v\"", podName, cmd)
return framework.RunKubectlOrDie(ns, "exec", podName, "--", "/bin/sh", "-c", cmd)
return e2ekubectl.RunKubectlOrDie(ns, "exec", podName, "--", "/bin/sh", "-c", cmd)
}
func (c *cockroachDBTester) deploy(ns string) *appsv1.StatefulSet {
@ -1710,7 +1712,7 @@ func breakPodHTTPProbe(ss *appsv1.StatefulSet, pod *v1.Pod) error {
}
// Ignore 'mv' errors to make this idempotent.
cmd := fmt.Sprintf("mv -v /usr/local/apache2/htdocs%v /tmp/ || true", path)
stdout, err := framework.RunHostCmdWithRetries(pod.Namespace, pod.Name, cmd, statefulSetPoll, statefulPodTimeout)
stdout, err := e2eoutput.RunHostCmdWithRetries(pod.Namespace, pod.Name, cmd, statefulSetPoll, statefulPodTimeout)
framework.Logf("stdout of %v on %v: %v", cmd, pod.Name, stdout)
return err
}
@ -1734,7 +1736,7 @@ func restorePodHTTPProbe(ss *appsv1.StatefulSet, pod *v1.Pod) error {
}
// Ignore 'mv' errors to make this idempotent.
cmd := fmt.Sprintf("mv -v /tmp%v /usr/local/apache2/htdocs/ || true", path)
stdout, err := framework.RunHostCmdWithRetries(pod.Namespace, pod.Name, cmd, statefulSetPoll, statefulPodTimeout)
stdout, err := e2eoutput.RunHostCmdWithRetries(pod.Namespace, pod.Name, cmd, statefulSetPoll, statefulPodTimeout)
framework.Logf("stdout of %v on %v: %v", cmd, pod.Name, stdout)
return err
}
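Host-command helpers move from framework into the pod/output sub-package (aliased e2eoutput above); the arguments and the poll/timeout pair are unchanged. A hedged sketch; the file path is illustrative:

// Ignore 'mv' errors so the probe-breaking step stays idempotent.
cmd := "mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true"
stdout, err := e2eoutput.RunHostCmdWithRetries(pod.Namespace, pod.Name, cmd, statefulSetPoll, statefulPodTimeout)
framework.Logf("stdout of %v on %v: %v", cmd, pod.Name, stdout)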

View File

@ -37,7 +37,7 @@ var _ = SIGDescribe("Conformance Tests", func() {
*/
framework.ConformanceIt("should have at least two untainted nodes", func() {
ginkgo.By("Getting node addresses")
framework.ExpectNoError(framework.WaitForAllNodesSchedulable(f.ClientSet, 10*time.Minute))
framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(f.ClientSet, 10*time.Minute))
nodeList, err := e2enode.GetReadySchedulableNodes(f.ClientSet)
framework.ExpectNoError(err)
if len(nodeList.Items) < 2 {

View File

@ -26,12 +26,13 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/cluster/ports"
"k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
admissionapi "k8s.io/pod-security-admission/api"
"github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)
var _ = SIGDescribe("[Feature:NodeAuthenticator]", func() {
@ -60,7 +61,7 @@ var _ = SIGDescribe("[Feature:NodeAuthenticator]", func() {
for _, nodeIP := range nodeIPs {
// Anonymous authentication is disabled by default
host := net.JoinHostPort(nodeIP, strconv.Itoa(ports.KubeletPort))
result := framework.RunHostCmdOrDie(ns, pod.Name, fmt.Sprintf("curl -sIk -o /dev/null -w '%s' https://%s/metrics", "%{http_code}", host))
result := e2eoutput.RunHostCmdOrDie(ns, pod.Name, fmt.Sprintf("curl -sIk -o /dev/null -w '%s' https://%s/metrics", "%{http_code}", host))
gomega.Expect(result).To(gomega.Or(gomega.Equal("401"), gomega.Equal("403")), "the kubelet's main port 10250 should reject requests with no credentials")
}
})
@ -82,7 +83,7 @@ var _ = SIGDescribe("[Feature:NodeAuthenticator]", func() {
for _, nodeIP := range nodeIPs {
host := net.JoinHostPort(nodeIP, strconv.Itoa(ports.KubeletPort))
result := framework.RunHostCmdOrDie(ns,
result := e2eoutput.RunHostCmdOrDie(ns,
pod.Name,
fmt.Sprintf("curl -sIk -o /dev/null -w '%s' --header \"Authorization: Bearer `%s`\" https://%s/metrics",
"%{http_code}",
@ -96,5 +97,5 @@ var _ = SIGDescribe("[Feature:NodeAuthenticator]", func() {
func createNodeAuthTestPod(f *framework.Framework) *v1.Pod {
pod := e2epod.NewAgnhostPod(f.Namespace.Name, "agnhost-pod", nil, nil, nil)
pod.ObjectMeta.GenerateName = "test-node-authn-"
return f.PodClient().CreateSync(pod)
return e2epod.NewPodClient(f).CreateSync(pod)
}
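RunHostCmdOrDie makes the same move into pod/output, and pod creation goes through the new constructor. A combined sketch; the pod name and curl target are hypothetical:

pod := e2epod.NewPodClient(f).CreateSync(e2epod.NewAgnhostPod(f.Namespace.Name, "probe-pod", nil, nil, nil))
// Run a command inside the pod and die on failure, returning stdout.
result := e2eoutput.RunHostCmdOrDie(f.Namespace.Name, pod.Name,
	"curl -sIk -o /dev/null -w '%{http_code}' https://example.invalid/metrics")
framework.Logf("status code: %s", result)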

View File

@ -19,9 +19,10 @@ package auth
import (
"context"
"fmt"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"time"
apierrors "k8s.io/apimachinery/pkg/api/errors"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"

View File

@ -41,6 +41,7 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
imageutils "k8s.io/kubernetes/test/utils/image"
admissionapi "k8s.io/pod-security-admission/api"
@ -316,7 +317,7 @@ var _ = SIGDescribe("ServiceAccounts", func() {
fmt.Sprintf("content of file \"%v\": %s", tokenVolumePath, `[A-Za-z0-9-_=]+\.[A-Za-z0-9-_=]+\.?[A-Za-z0-9-_.+/=]*`),
}
f.TestContainerOutputRegexp("service account token: ", pod, 0, output)
e2eoutput.TestContainerOutputRegexp(f, "service account token: ", pod, 0, output)
})
/*
@ -424,7 +425,7 @@ var _ = SIGDescribe("ServiceAccounts", func() {
fmt.Sprintf("owner UID of \"%v\": %d", tokenVolumePath, tc.wantUID),
fmt.Sprintf("owner GID of \"%v\": %d", tokenVolumePath, tc.wantGID),
}
f.TestContainerOutputRegexp("service account token: ", pod, 0, output)
e2eoutput.TestContainerOutputRegexp(f, "service account token: ", pod, 0, output)
}
})

View File

@ -24,7 +24,7 @@ import (
"strings"
"time"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/types"

View File

@ -44,6 +44,7 @@ import (
clientset "k8s.io/client-go/kubernetes"
"k8s.io/klog/v2"
"k8s.io/kubernetes/test/e2e/framework"
e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
e2emanifest "k8s.io/kubernetes/test/e2e/framework/manifest"
e2enetwork "k8s.io/kubernetes/test/e2e/framework/network"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
@ -381,7 +382,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
framework.ExpectNoError(e2enode.WaitForReadyNodes(c, nodeCount+extraNodes, resizeTimeout))
// We wait for nodes to become schedulable to make sure the new nodes
// will be returned by getPoolNodes below.
framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, resizeTimeout))
framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(c, resizeTimeout))
klog.Infof("Not enabling cluster autoscaler for the node pool (on purpose).")
ginkgo.By("Getting memory available on new nodes, so we can account for it when creating RC")
@ -564,7 +565,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
removeLabels := func(nodesToClean sets.String) {
ginkgo.By("Removing labels from nodes")
for node := range nodesToClean {
framework.RemoveLabelOffNode(c, node, labelKey)
e2enode.RemoveLabelOffNode(c, node, labelKey)
}
}
@ -575,7 +576,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
ginkgo.By(fmt.Sprintf("Annotating nodes of the smallest MIG(%s): %v", minMig, nodes))
for node := range nodesSet {
framework.AddOrUpdateLabelOnNode(c, node, labelKey, labelValue)
e2enode.AddOrUpdateLabelOnNode(c, node, labelKey, labelValue)
}
err = scheduling.CreateNodeSelectorPods(f, "node-selector", minSize+1, map[string]string{labelKey: labelValue}, false)
@ -593,7 +594,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
if len(newNodesSet) > 1 {
ginkgo.By(fmt.Sprintf("Spotted following new nodes in %s: %v", minMig, newNodesSet))
klog.Infof("Usually only 1 new node is expected, investigating")
klog.Infof("Kubectl:%s\n", framework.RunKubectlOrDie(f.Namespace.Name, "get", "nodes", "-o", "json"))
klog.Infof("Kubectl:%s\n", e2ekubectl.RunKubectlOrDie(f.Namespace.Name, "get", "nodes", "-o", "json"))
if output, err := exec.Command("gcloud", "compute", "instances", "list",
"--project="+framework.TestContext.CloudConfig.ProjectID,
"--zone="+framework.TestContext.CloudConfig.Zone).Output(); err == nil {
@ -629,7 +630,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
}
ginkgo.By(fmt.Sprintf("Setting labels for registered new nodes: %v", registeredNodes.List()))
for node := range registeredNodes {
framework.AddOrUpdateLabelOnNode(c, node, labelKey, labelValue)
e2enode.AddOrUpdateLabelOnNode(c, node, labelKey, labelValue)
}
defer removeLabels(registeredNodes)
@ -1416,8 +1417,8 @@ func waitForCaPodsReadyInNamespace(f *framework.Framework, c clientset.Interface
klog.Infof("Too many pods are not ready yet: %v", notready)
}
klog.Info("Timeout on waiting for pods being ready")
klog.Info(framework.RunKubectlOrDie(f.Namespace.Name, "get", "pods", "-o", "json", "--all-namespaces"))
klog.Info(framework.RunKubectlOrDie(f.Namespace.Name, "get", "nodes", "-o", "json"))
klog.Info(e2ekubectl.RunKubectlOrDie(f.Namespace.Name, "get", "pods", "-o", "json", "--all-namespaces"))
klog.Info(e2ekubectl.RunKubectlOrDie(f.Namespace.Name, "get", "nodes", "-o", "json"))
// Some pods are still not running.
return fmt.Errorf("Too many pods are still not running: %v", notready)
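Node label manipulation also moves from framework to e2enode with identical argument lists. A sketch, where c is the test's clientset.Interface:

e2enode.AddOrUpdateLabelOnNode(c, nodeName, labelKey, labelValue)
// Clean the label up again when the test is done.
defer e2enode.RemoveLabelOffNode(c, nodeName, labelKey)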

View File

@ -28,6 +28,7 @@ import (
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2eproviders "k8s.io/kubernetes/test/e2e/framework/providers"
"k8s.io/kubernetes/test/e2e/upgrades"
"k8s.io/kubernetes/test/utils/junit"
)
@ -80,7 +81,7 @@ func controlPlaneUpgrade(f *framework.Framework, v string, extraEnvs []string) e
case "gce":
return controlPlaneUpgradeGCE(v, extraEnvs)
case "gke":
return framework.MasterUpgradeGKE(f.Namespace.Name, v)
return e2eproviders.MasterUpgradeGKE(f.Namespace.Name, v)
default:
return fmt.Errorf("controlPlaneUpgrade() is not implemented for provider %s", framework.TestContext.Provider)
}
@ -101,7 +102,7 @@ func controlPlaneUpgradeGCE(rawV string, extraEnvs []string) error {
}
v := "v" + rawV
_, _, err := framework.RunCmdEnv(env, framework.GCEUpgradeScript(), "-M", v)
_, _, err := framework.RunCmdEnv(env, e2eproviders.GCEUpgradeScript(), "-M", v)
return err
}
@ -172,10 +173,10 @@ func nodeUpgradeGCE(rawV, img string, extraEnvs []string) error {
env := append(os.Environ(), extraEnvs...)
if img != "" {
env = append(env, "KUBE_NODE_OS_DISTRIBUTION="+img)
_, _, err := framework.RunCmdEnv(env, framework.GCEUpgradeScript(), "-N", "-o", v)
_, _, err := framework.RunCmdEnv(env, e2eproviders.GCEUpgradeScript(), "-N", "-o", v)
return err
}
_, _, err := framework.RunCmdEnv(env, framework.GCEUpgradeScript(), "-N", v)
_, _, err := framework.RunCmdEnv(env, e2eproviders.GCEUpgradeScript(), "-N", v)
return err
}
@ -191,7 +192,7 @@ func nodeUpgradeGKE(namespace string, v string, img string) error {
"container",
"clusters",
fmt.Sprintf("--project=%s", framework.TestContext.CloudConfig.ProjectID),
framework.LocationParamGKE(),
e2eproviders.LocationParamGKE(),
"upgrade",
framework.TestContext.CloudConfig.Cluster,
fmt.Sprintf("--node-pool=%s", np),
@ -207,7 +208,7 @@ func nodeUpgradeGKE(namespace string, v string, img string) error {
return err
}
framework.WaitForSSHTunnels(namespace)
e2enode.WaitForSSHTunnels(namespace)
}
return nil
}
@ -217,7 +218,7 @@ func nodePoolsGKE() ([]string, error) {
"container",
"node-pools",
fmt.Sprintf("--project=%s", framework.TestContext.CloudConfig.ProjectID),
framework.LocationParamGKE(),
e2eproviders.LocationParamGKE(),
"list",
fmt.Sprintf("--cluster=%s", framework.TestContext.CloudConfig.Cluster),
"--format=get(name)",

View File

@ -183,7 +183,7 @@ var _ = SIGDescribe("HA-master [Feature:HAMaster]", func() {
for _, zone := range additionalNodesZones {
removeWorkerNodes(zone)
}
framework.ExpectNoError(framework.AllNodesReady(c, 5*time.Minute))
framework.ExpectNoError(e2enode.AllNodesReady(c, 5*time.Minute))
// Clean-up additional master replicas if the test execution was broken.
for _, zone := range additionalReplicaZones {
@ -218,7 +218,7 @@ var _ = SIGDescribe("HA-master [Feature:HAMaster]", func() {
additionalNodesZones = removeZoneFromZones(additionalNodesZones, zone)
}
framework.ExpectNoError(waitForMasters(framework.TestContext.CloudConfig.MasterName, c, len(additionalReplicaZones)+1, 10*time.Minute))
framework.ExpectNoError(framework.AllNodesReady(c, 5*time.Minute))
framework.ExpectNoError(e2enode.AllNodesReady(c, 5*time.Minute))
// Verify that API server works correctly with HA master.
rcName := "ha-master-" + strconv.Itoa(len(existingRCs))

View File

@ -26,6 +26,7 @@ import (
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
e2epodoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
imageutils "k8s.io/kubernetes/test/utils/image"
admissionapi "k8s.io/pod-security-admission/api"
@ -79,7 +80,7 @@ var _ = SIGDescribe("ConfigMap", func() {
},
}
f.TestContainerOutput("consume configMaps", pod, 0, []string{
e2epodoutput.TestContainerOutput(f, "consume configMaps", pod, 0, []string{
"CONFIG_DATA_1=value-1",
})
})
@ -123,7 +124,7 @@ var _ = SIGDescribe("ConfigMap", func() {
},
}
f.TestContainerOutput("consume configMaps", pod, 0, []string{
e2epodoutput.TestContainerOutput(f, "consume configMaps", pod, 0, []string{
"data-1=value-1", "data-2=value-2", "data-3=value-3",
"p-data-1=value-1", "p-data-2=value-2", "p-data-3=value-3",
})
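Container-output assertions change from methods on *framework.Framework to package functions in pod/output that take the framework as their first argument; the regexp variant follows the same shape. A sketch with illustrative expectations (outputPatterns is a hypothetical name):

e2epodoutput.TestContainerOutput(f, "consume configMaps", pod, 0, []string{
	"CONFIG_DATA_1=value-1",
})
// The regexp variant matches each expectation as a regular expression.
e2epodoutput.TestContainerOutputRegexp(f, "service account token: ", pod, 0, outputPatterns)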

View File

@ -26,7 +26,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)
const (
@ -44,7 +44,7 @@ type ConformanceContainer struct {
Volumes []v1.Volume
ImagePullSecrets []string
PodClient *framework.PodClient
PodClient *e2epod.PodClient
podName string
PodSecurityContext *v1.PodSecurityContext
}

View File

@ -57,11 +57,11 @@ const (
var _ = SIGDescribe("Probing container", func() {
f := framework.NewDefaultFramework("container-probe")
f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline
var podClient *framework.PodClient
var podClient *e2epod.PodClient
probe := webserverProbeBuilder{}
ginkgo.BeforeEach(func() {
podClient = f.PodClient()
podClient = e2epod.NewPodClient(f)
})
/*
@ -561,7 +561,7 @@ var _ = SIGDescribe("Probing container", func() {
ginkgo.It("should mark readiness on pods to false while pod is in progress of terminating when a pod has a readiness probe", func() {
podName := "probe-test-" + string(uuid.NewUUID())
podClient := f.PodClient()
podClient := e2epod.NewPodClient(f)
terminationGracePeriod := int64(30)
script := `
_term() {
@ -625,7 +625,7 @@ done
ginkgo.It("should mark readiness on pods to false and disable liveness probes while pod is in progress of terminating", func() {
podName := "probe-test-" + string(uuid.NewUUID())
podClient := f.PodClient()
podClient := e2epod.NewPodClient(f)
terminationGracePeriod := int64(30)
script := `
_term() {
@ -937,7 +937,7 @@ func (b webserverProbeBuilder) build() *v1.Probe {
// RunLivenessTest verifies the number of restarts for pod with given expected number of restarts
func RunLivenessTest(f *framework.Framework, pod *v1.Pod, expectNumRestarts int, timeout time.Duration) {
podClient := f.PodClient()
podClient := e2epod.NewPodClient(f)
ns := f.Namespace.Name
gomega.Expect(pod.Spec.Containers).NotTo(gomega.BeEmpty())
containerName := pod.Spec.Containers[0].Name
@ -997,7 +997,7 @@ func RunLivenessTest(f *framework.Framework, pod *v1.Pod, expectNumRestarts int,
}
func runReadinessFailTest(f *framework.Framework, pod *v1.Pod, notReadyUntil time.Duration) {
podClient := f.PodClient()
podClient := e2epod.NewPodClient(f)
ns := f.Namespace.Name
gomega.Expect(pod.Spec.Containers).NotTo(gomega.BeEmpty())

View File

@ -23,6 +23,7 @@ import (
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2epodoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
admissionapi "k8s.io/pod-security-admission/api"
)
@ -38,7 +39,7 @@ var _ = SIGDescribe("Containers", func() {
framework.ConformanceIt("should use the image defaults if command and args are blank [NodeConformance]", func() {
pod := entrypointTestPod(f.Namespace.Name)
pod.Spec.Containers[0].Args = nil
pod = f.PodClient().Create(pod)
pod = e2epod.NewPodClient(f).Create(pod)
err := e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
framework.ExpectNoError(err, "Expected pod %q to be running, got error: %v", pod.Name, err)
pollLogs := func() (string, error) {
@ -57,7 +58,7 @@ var _ = SIGDescribe("Containers", func() {
*/
framework.ConformanceIt("should be able to override the image's default arguments (container cmd) [NodeConformance]", func() {
pod := entrypointTestPod(f.Namespace.Name, "entrypoint-tester", "override", "arguments")
f.TestContainerOutput("override arguments", pod, 0, []string{
e2epodoutput.TestContainerOutput(f, "override arguments", pod, 0, []string{
"[/agnhost entrypoint-tester override arguments]",
})
})
@ -73,7 +74,7 @@ var _ = SIGDescribe("Containers", func() {
pod := entrypointTestPod(f.Namespace.Name, "entrypoint-tester")
pod.Spec.Containers[0].Command = []string{"/agnhost-2"}
f.TestContainerOutput("override command", pod, 0, []string{
e2epodoutput.TestContainerOutput(f, "override command", pod, 0, []string{
"[/agnhost-2 entrypoint-tester]",
})
})
@ -87,7 +88,7 @@ var _ = SIGDescribe("Containers", func() {
pod := entrypointTestPod(f.Namespace.Name, "entrypoint-tester", "override", "arguments")
pod.Spec.Containers[0].Command = []string{"/agnhost-2"}
f.TestContainerOutput("override all", pod, 0, []string{
e2epodoutput.TestContainerOutput(f, "override all", pod, 0, []string{
"[/agnhost-2 entrypoint-tester override arguments]",
})
})

View File

@ -25,6 +25,7 @@ import (
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
e2enetwork "k8s.io/kubernetes/test/e2e/framework/network"
e2epodoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
imageutils "k8s.io/kubernetes/test/utils/image"
admissionapi "k8s.io/pod-security-admission/api"
@ -418,5 +419,5 @@ func testDownwardAPI(f *framework.Framework, podName string, env []v1.EnvVar, ex
}
func testDownwardAPIUsingPod(f *framework.Framework, pod *v1.Pod, env []v1.EnvVar, expectations []string) {
f.TestContainerOutputRegexp("downward api env vars", pod, 0, expectations)
e2epodoutput.TestContainerOutputRegexp(f, "downward api env vars", pod, 0, expectations)
}

View File

@ -34,9 +34,9 @@ import (
var _ = SIGDescribe("Ephemeral Containers [NodeConformance]", func() {
f := framework.NewDefaultFramework("ephemeral-containers-test")
f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline
var podClient *framework.PodClient
var podClient *e2epod.PodClient
ginkgo.BeforeEach(func() {
podClient = f.PodClient()
podClient = e2epod.NewPodClient(f)
})
// Release: 1.25
@ -74,7 +74,7 @@ var _ = SIGDescribe("Ephemeral Containers [NodeConformance]", func() {
ginkgo.By("checking pod container endpoints")
// Can't use anything depending on kubectl here because it's not available in the node test environment
output := f.ExecCommandInContainer(pod.Name, ecName, "/bin/echo", "marco")
output := e2epod.ExecCommandInContainer(f, pod.Name, ecName, "/bin/echo", "marco")
gomega.Expect(output).To(gomega.ContainSubstring("marco"))
log, err := e2epod.GetPodLogs(f.ClientSet, pod.Namespace, pod.Name, ecName)
framework.ExpectNoError(err, "Failed to get logs for pod %q ephemeral container %q", format.Pod(pod), ecName)

View File

@ -22,6 +22,7 @@ import (
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2epodoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
imageutils "k8s.io/kubernetes/test/utils/image"
admissionapi "k8s.io/pod-security-admission/api"
@ -57,7 +58,7 @@ var _ = SIGDescribe("Variable Expansion", func() {
}
pod := newPod([]string{"sh", "-c", "env"}, envVars, nil, nil)
f.TestContainerOutput("env composition", pod, 0, []string{
e2epodoutput.TestContainerOutput(f, "env composition", pod, 0, []string{
"FOO=foo-value",
"BAR=bar-value",
"FOOBAR=foo-value;;bar-value",
@ -78,7 +79,7 @@ var _ = SIGDescribe("Variable Expansion", func() {
}
pod := newPod([]string{"sh", "-c", "TEST_VAR=wrong echo \"$(TEST_VAR)\""}, envVars, nil, nil)
f.TestContainerOutput("substitution in container's command", pod, 0, []string{
e2epodoutput.TestContainerOutput(f, "substitution in container's command", pod, 0, []string{
"test-value",
})
})
@ -98,7 +99,7 @@ var _ = SIGDescribe("Variable Expansion", func() {
pod := newPod([]string{"sh", "-c"}, envVars, nil, nil)
pod.Spec.Containers[0].Args = []string{"TEST_VAR=wrong echo \"$(TEST_VAR)\""}
f.TestContainerOutput("substitution in container's args", pod, 0, []string{
e2epodoutput.TestContainerOutput(f, "substitution in container's args", pod, 0, []string{
"test-value",
})
})
@ -138,7 +139,7 @@ var _ = SIGDescribe("Variable Expansion", func() {
envVars[0].Value = pod.ObjectMeta.Name
pod.Spec.Containers[0].Command = []string{"sh", "-c", "test -d /testcontainer/" + pod.ObjectMeta.Name + ";echo $?"}
f.TestContainerOutput("substitution in volume subpath", pod, 0, []string{
e2epodoutput.TestContainerOutput(f, "substitution in volume subpath", pod, 0, []string{
"0",
})
})
@ -261,7 +262,7 @@ var _ = SIGDescribe("Variable Expansion", func() {
pod.ObjectMeta.Annotations = map[string]string{"notmysubpath": "mypath"}
ginkgo.By("creating the pod with failed condition")
var podClient *framework.PodClient = f.PodClient()
podClient := e2epod.NewPodClient(f)
pod = podClient.Create(pod)
err := e2epod.WaitTimeoutForPodRunningInNamespace(f.ClientSet, pod.Name, pod.Namespace, framework.PodStartShortTimeout)
@ -333,7 +334,7 @@ var _ = SIGDescribe("Variable Expansion", func() {
pod.ObjectMeta.Annotations = map[string]string{"mysubpath": "mypath"}
ginkgo.By("creating the pod")
var podClient *framework.PodClient = f.PodClient()
podClient := e2epod.NewPodClient(f)
pod = podClient.Create(pod)
ginkgo.By("waiting for pod running")
@ -342,14 +343,14 @@ var _ = SIGDescribe("Variable Expansion", func() {
ginkgo.By("creating a file in subpath")
cmd := "touch /volume_mount/mypath/foo/test.log"
_, _, err = f.ExecShellInPodWithFullOutput(pod.Name, cmd)
_, _, err = e2epod.ExecShellInPodWithFullOutput(f, pod.Name, cmd)
if err != nil {
framework.Failf("expected to be able to write to subpath")
}
ginkgo.By("test for file in mounted path")
cmd = "test -f /subpath_mount/test.log"
_, _, err = f.ExecShellInPodWithFullOutput(pod.Name, cmd)
_, _, err = e2epod.ExecShellInPodWithFullOutput(f, pod.Name, cmd)
if err != nil {
framework.Failf("expected to be able to verify file")
}
@ -370,7 +371,7 @@ var _ = SIGDescribe("Variable Expansion", func() {
})
func testPodFailSubpath(f *framework.Framework, pod *v1.Pod) {
var podClient *framework.PodClient = f.PodClient()
podClient := e2epod.NewPodClient(f)
pod = podClient.Create(pod)
defer func() {
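Exec helpers become package functions as well, taking the framework as the first parameter. A sketch, with a hypothetical shell command:

stdout, stderr, err := e2epod.ExecShellInPodWithFullOutput(f, pod.Name, "test -f /subpath_mount/test.log")
if err != nil {
	framework.Failf("exec failed: stdout=%q, stderr=%q, err=%v", stdout, stderr, err)
}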

View File

@ -39,6 +39,7 @@ import (
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/client/conditions"
"k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
imageutils "k8s.io/kubernetes/test/utils/image"
admissionapi "k8s.io/pod-security-admission/api"
)
@ -160,9 +161,9 @@ func initContainersInvariants(pod *v1.Pod) error {
var _ = SIGDescribe("InitContainer [NodeConformance]", func() {
f := framework.NewDefaultFramework("init-container")
f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline
var podClient *framework.PodClient
var podClient *e2epod.PodClient
ginkgo.BeforeEach(func() {
podClient = f.PodClient()
podClient = e2epod.NewPodClient(f)
})
/*

View File

@ -37,9 +37,9 @@ import (
var _ = SIGDescribe("Kubelet", func() {
f := framework.NewDefaultFramework("kubelet-test")
f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline
var podClient *framework.PodClient
var podClient *e2epod.PodClient
ginkgo.BeforeEach(func() {
podClient = f.PodClient()
podClient = e2epod.NewPodClient(f)
})
ginkgo.Context("when scheduling a busybox command in a pod", func() {
podName := "busybox-scheduling-" + string(uuid.NewUUID())

View File

@ -92,12 +92,12 @@ func (config *KubeletManagedHostConfig) setup() {
func (config *KubeletManagedHostConfig) createPodWithoutHostNetwork() {
podSpec := config.createPodSpec(etcHostsPodName)
config.pod = config.f.PodClient().CreateSync(podSpec)
config.pod = e2epod.NewPodClient(config.f).CreateSync(podSpec)
}
func (config *KubeletManagedHostConfig) createPodWithHostNetwork() {
podSpec := config.createPodSpecWithHostNetwork(etcHostsHostNetworkPodName)
config.hostNetworkPod = config.f.PodClient().CreateSync(podSpec)
config.hostNetworkPod = e2epod.NewPodClient(config.f).CreateSync(podSpec)
}
func assertManagedStatus(
@ -148,7 +148,7 @@ func assertManagedStatus(
}
func (config *KubeletManagedHostConfig) getFileContents(podName, containerName, path string) string {
return config.f.ExecCommandInContainer(podName, containerName, "cat", path)
return e2epod.ExecCommandInContainer(config.f, podName, containerName, "cat", path)
}
func (config *KubeletManagedHostConfig) createPodSpec(podName string) *v1.Pod {

View File

@ -37,7 +37,7 @@ import (
var _ = SIGDescribe("Container Lifecycle Hook", func() {
f := framework.NewDefaultFramework("container-lifecycle-hook")
f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline
var podClient *framework.PodClient
var podClient *e2epod.PodClient
const (
podCheckInterval = 1 * time.Second
postStartWaitTimeout = 2 * time.Minute
@ -60,7 +60,7 @@ var _ = SIGDescribe("Container Lifecycle Hook", func() {
e2epod.SetAffinity(&nodeSelection, targetNode)
e2epod.SetNodeSelection(&podHandleHookRequest.Spec, nodeSelection)
podClient = f.PodClient()
podClient = e2epod.NewPodClient(f)
ginkgo.By("create the container to handle the HTTPGet hook request.")
newPod := podClient.CreateSync(podHandleHookRequest)
targetIP = newPod.Status.PodIP
@ -80,7 +80,7 @@ var _ = SIGDescribe("Container Lifecycle Hook", func() {
}, postStartWaitTimeout, podCheckInterval).Should(gomega.BeNil())
}
ginkgo.By("delete the pod with lifecycle hook")
podClient.DeleteSync(podWithHook.Name, *metav1.NewDeleteOptions(15), framework.DefaultPodDeletionTimeout)
podClient.DeleteSync(podWithHook.Name, *metav1.NewDeleteOptions(15), e2epod.DefaultPodDeletionTimeout)
if podWithHook.Spec.Containers[0].Lifecycle.PreStop != nil {
ginkgo.By("check prestop hook")
gomega.Eventually(func() error {

View File

@ -57,7 +57,7 @@ var _ = SIGDescribe("PodOSRejection [NodeConformance]", func() {
NodeName: linuxNode.Name, // Set the node to a node which doesn't support
},
}
pod = f.PodClient().Create(pod)
pod = e2epod.NewPodClient(f).Create(pod)
// Check the pod is still not running
err = e2epod.WaitForPodFailedReason(f.ClientSet, pod, "PodOSNotSupported", f.Timeouts.PodStartShort)
framework.ExpectNoError(err)

View File

@ -51,6 +51,7 @@ import (
"k8s.io/kubernetes/pkg/kubelet"
"k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2epodoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
e2ewebsocket "k8s.io/kubernetes/test/e2e/framework/websocket"
imageutils "k8s.io/kubernetes/test/utils/image"
admissionapi "k8s.io/pod-security-admission/api"
@ -68,7 +69,7 @@ const (
)
// testHostIP tests that a pod gets a host IP
func testHostIP(podClient *framework.PodClient, pod *v1.Pod) {
func testHostIP(podClient *e2epod.PodClient, pod *v1.Pod) {
ginkgo.By("creating pod")
podClient.CreateSync(pod)
@ -91,7 +92,7 @@ func testHostIP(podClient *framework.PodClient, pod *v1.Pod) {
}
}
func startPodAndGetBackOffs(podClient *framework.PodClient, pod *v1.Pod, sleepAmount time.Duration) (time.Duration, time.Duration) {
func startPodAndGetBackOffs(podClient *e2epod.PodClient, pod *v1.Pod, sleepAmount time.Duration) (time.Duration, time.Duration) {
podClient.CreateSync(pod)
time.Sleep(sleepAmount)
gomega.Expect(pod.Spec.Containers).NotTo(gomega.BeEmpty())
@ -118,7 +119,7 @@ func startPodAndGetBackOffs(podClient *framework.PodClient, pod *v1.Pod, sleepAm
return delay1, delay2
}
func getRestartDelay(podClient *framework.PodClient, podName string, containerName string) (time.Duration, error) {
func getRestartDelay(podClient *e2epod.PodClient, podName string, containerName string) (time.Duration, error) {
beginTime := time.Now()
var previousRestartCount int32 = -1
var previousFinishedAt time.Time
@ -187,11 +188,11 @@ func expectNoErrorWithRetries(fn func() error, maxRetries int, explain ...interf
var _ = SIGDescribe("Pods", func() {
f := framework.NewDefaultFramework("pods")
f.NamespacePodSecurityEnforceLevel = admissionapi.LevelRestricted
var podClient *framework.PodClient
var podClient *e2epod.PodClient
var dc dynamic.Interface
ginkgo.BeforeEach(func() {
podClient = f.PodClient()
podClient = e2epod.NewPodClient(f)
dc = f.DynamicClient
})
@ -305,7 +306,7 @@ var _ = SIGDescribe("Pods", func() {
ginkgo.By("verifying pod deletion was observed")
deleted := false
var lastPod *v1.Pod
timer := time.After(framework.DefaultPodDeletionTimeout)
timer := time.After(e2epod.DefaultPodDeletionTimeout)
for !deleted {
select {
case event := <-w.ResultChan():
@ -522,7 +523,7 @@ var _ = SIGDescribe("Pods", func() {
"FOOSERVICE_PORT_8765_TCP_ADDR=",
}
expectNoErrorWithRetries(func() error {
return f.MatchContainerOutput(pod, containerName, expectedVars, gomega.ContainSubstring)
return e2epodoutput.MatchContainerOutput(f, pod, containerName, expectedVars, gomega.ContainSubstring)
}, maxRetries, "Container should have service environment variables set")
})
@ -807,7 +808,7 @@ var _ = SIGDescribe("Pods", func() {
}
ginkgo.By("submitting the pod to kubernetes")
f.PodClient().Create(pod)
e2epod.NewPodClient(f).Create(pod)
e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
if podClient.PodIsReady(podName) {
framework.Failf("Expect pod(%s/%s)'s Ready condition to be false initially.", f.Namespace.Name, pod.Name)

View File

@ -20,9 +20,10 @@ import (
"fmt"
"github.com/onsi/ginkgo/v2"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
imageutils "k8s.io/kubernetes/test/utils/image"
admissionapi "k8s.io/pod-security-admission/api"
)
@ -66,16 +67,16 @@ func (c *PrivilegedPodTestConfig) run(containerName string, expectSuccess bool)
cmd := []string{"ip", "link", "add", "dummy1", "type", "dummy"}
reverseCmd := []string{"ip", "link", "del", "dummy1"}
stdout, stderr, err := c.f.ExecCommandInContainerWithFullOutput(
c.privilegedPod, containerName, cmd...)
stdout, stderr, err := e2epod.ExecCommandInContainerWithFullOutput(
c.f, c.privilegedPod, containerName, cmd...)
msg := fmt.Sprintf("cmd %v, stdout %q, stderr %q", cmd, stdout, stderr)
if expectSuccess {
framework.ExpectNoError(err, msg)
// We need to clean up the dummy link that was created, as it
// leaks out into the node level -- yuck.
_, _, err := c.f.ExecCommandInContainerWithFullOutput(
c.privilegedPod, containerName, reverseCmd...)
_, _, err := e2epod.ExecCommandInContainerWithFullOutput(
c.f, c.privilegedPod, containerName, reverseCmd...)
framework.ExpectNoError(err,
fmt.Sprintf("could not remove dummy1 link: %v", err))
} else {
@ -115,5 +116,5 @@ func (c *PrivilegedPodTestConfig) createPodsSpec() *v1.Pod {
func (c *PrivilegedPodTestConfig) createPods() {
podSpec := c.createPodsSpec()
c.pod = c.f.PodClient().CreateSync(podSpec)
c.pod = e2epod.NewPodClient(c.f).CreateSync(podSpec)
}

View File

@ -28,6 +28,7 @@ import (
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/pkg/kubelet/images"
"k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
imageutils "k8s.io/kubernetes/test/utils/image"
admissionapi "k8s.io/pod-security-admission/api"
@ -98,7 +99,7 @@ while true; do sleep 1; done
testContainer.Name = testCase.Name
testContainer.Command = []string{"sh", "-c", tmpCmd}
terminateContainer := ConformanceContainer{
PodClient: f.PodClient(),
PodClient: e2epod.NewPodClient(f),
Container: testContainer,
RestartPolicy: testCase.RestartPolicy,
Volumes: testVolumes,
@ -143,7 +144,7 @@ while true; do sleep 1; done
matchTerminationMessage := func(container v1.Container, expectedPhase v1.PodPhase, expectedMsg gomegatypes.GomegaMatcher) {
container.Name = "termination-message-container"
c := ConformanceContainer{
PodClient: f.PodClient(),
PodClient: e2epod.NewPodClient(f),
Container: container,
RestartPolicy: v1.RestartPolicyNever,
}
@ -268,7 +269,7 @@ while true; do sleep 1; done
command = []string{"ping", "-t", "localhost"}
}
container := ConformanceContainer{
PodClient: f.PodClient(),
PodClient: e2epod.NewPodClient(f),
Container: v1.Container{
Name: "image-pull-test",
Image: image,

View File

@ -62,7 +62,7 @@ var _ = SIGDescribe("RuntimeClass", func() {
handler := f.Namespace.Name + "-handler"
rcName := createRuntimeClass(f, "unconfigured-handler", handler, nil)
defer deleteRuntimeClass(f, rcName)
pod := f.PodClient().Create(e2enode.NewRuntimeClassPod(rcName))
pod := e2epod.NewPodClient(f).Create(e2enode.NewRuntimeClassPod(rcName))
eventSelector := fields.Set{
"involvedObject.kind": "Pod",
"involvedObject.name": pod.Name,
@ -89,7 +89,7 @@ var _ = SIGDescribe("RuntimeClass", func() {
rcName := createRuntimeClass(f, "preconfigured-handler", e2enode.PreconfiguredRuntimeClassHandler, nil)
defer deleteRuntimeClass(f, rcName)
pod := f.PodClient().Create(e2enode.NewRuntimeClassPod(rcName))
pod := e2epod.NewPodClient(f).Create(e2enode.NewRuntimeClassPod(rcName))
expectPodSuccess(f, pod)
})
@ -104,7 +104,7 @@ var _ = SIGDescribe("RuntimeClass", func() {
framework.ConformanceIt("should schedule a Pod requesting a RuntimeClass without PodOverhead [NodeConformance]", func() {
rcName := createRuntimeClass(f, "preconfigured-handler", e2enode.PreconfiguredRuntimeClassHandler, nil)
defer deleteRuntimeClass(f, rcName)
pod := f.PodClient().Create(e2enode.NewRuntimeClassPod(rcName))
pod := e2epod.NewPodClient(f).Create(e2enode.NewRuntimeClassPod(rcName))
// there is only one pod in the namespace
label := labels.SelectorFromSet(labels.Set(map[string]string{}))
pods, err := e2epod.WaitForPodsWithLabelScheduled(f.ClientSet, f.Namespace.Name, label)
@ -134,7 +134,7 @@ var _ = SIGDescribe("RuntimeClass", func() {
},
})
defer deleteRuntimeClass(f, rcName)
pod := f.PodClient().Create(e2enode.NewRuntimeClassPod(rcName))
pod := e2epod.NewPodClient(f).Create(e2enode.NewRuntimeClassPod(rcName))
// there is only one pod in the namespace
label := labels.SelectorFromSet(labels.Set(map[string]string{}))
pods, err := e2epod.WaitForPodsWithLabelScheduled(f.ClientSet, f.Namespace.Name, label)

View File

@ -29,6 +29,7 @@ import (
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
e2epodoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
imageutils "k8s.io/kubernetes/test/utils/image"
admissionapi "k8s.io/pod-security-admission/api"
)
@ -81,7 +82,7 @@ var _ = SIGDescribe("Secrets", func() {
},
}
f.TestContainerOutput("consume secrets", pod, 0, []string{
e2epodoutput.TestContainerOutput(f, "consume secrets", pod, 0, []string{
"SECRET_DATA=value-1",
})
})
@ -125,7 +126,7 @@ var _ = SIGDescribe("Secrets", func() {
},
}
f.TestContainerOutput("consume secrets", pod, 0, []string{
e2epodoutput.TestContainerOutput(f, "consume secrets", pod, 0, []string{
"data-1=value-1", "data-2=value-2", "data-3=value-3",
"p-data-1=value-1", "p-data-2=value-2", "p-data-3=value-3",
})

View File

@ -28,6 +28,7 @@ import (
"k8s.io/kubernetes/pkg/kubelet/events"
"k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2epodoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
imageutils "k8s.io/kubernetes/test/utils/image"
admissionapi "k8s.io/pod-security-admission/api"
@ -45,9 +46,9 @@ var (
var _ = SIGDescribe("Security Context", func() {
f := framework.NewDefaultFramework("security-context-test")
f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged
var podClient *framework.PodClient
var podClient *e2epod.PodClient
ginkgo.BeforeEach(func() {
podClient = f.PodClient()
podClient = e2epod.NewPodClient(f)
})
ginkgo.Context("When creating a pod with HostUsers", func() {
@ -73,14 +74,14 @@ var _ = SIGDescribe("Security Context", func() {
ginkgo.It("must create the user namespace if set to false [LinuxOnly] [Feature:UserNamespacesStatelessPodsSupport]", func() {
// with hostUsers=false the pod must use a new user namespace
podClient := f.PodClientNS(f.Namespace.Name)
podClient := e2epod.PodClientNS(f, f.Namespace.Name)
createdPod1 := podClient.Create(makePod(false))
createdPod2 := podClient.Create(makePod(false))
defer func() {
ginkgo.By("delete the pods")
podClient.DeleteSync(createdPod1.Name, metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
podClient.DeleteSync(createdPod2.Name, metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
podClient.DeleteSync(createdPod1.Name, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout)
podClient.DeleteSync(createdPod2.Name, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout)
}()
getLogs := func(pod *v1.Pod) (string, error) {
err := e2epod.WaitForPodSuccessInNamespaceTimeout(f.ClientSet, createdPod1.Name, f.Namespace.Name, f.Timeouts.PodStart)
@ -115,7 +116,7 @@ var _ = SIGDescribe("Security Context", func() {
// When running in the host's user namespace, the /proc/self/uid_map file content looks like:
// 0 0 4294967295
// Verify the value 4294967295 is present in the output.
f.TestContainerOutput("read namespace", pod, 0, []string{
e2epodoutput.TestContainerOutput(f, "read namespace", pod, 0, []string{
"4294967295",
})
})
@ -239,7 +240,7 @@ var _ = SIGDescribe("Security Context", func() {
// Each line should be "=0" that means root inside the container is the owner of the file.
downwardAPIVolFiles := 1
projectedFiles := len(secret.Data) + downwardAPIVolFiles
f.TestContainerOutput("check file permissions", pod, 0, []string{
e2epodoutput.TestContainerOutput(f, "check file permissions", pod, 0, []string{
strings.Repeat("=0\n", len(secret.Data)+len(configMap.Data)+downwardAPIVolFiles+projectedFiles),
})
})
@ -299,7 +300,7 @@ var _ = SIGDescribe("Security Context", func() {
// Expect one line for each file on all the volumes.
// Each line should be "=200" (fsGroup) that means it was mapped to the
// right user inside the container.
f.TestContainerOutput("check FSGroup is mapped correctly", pod, 0, []string{
e2epodoutput.TestContainerOutput(f, "check FSGroup is mapped correctly", pod, 0, []string{
strings.Repeat(fmt.Sprintf("=%v\n", fsGroup), len(configMap.Data)),
})
})

View File

@ -41,7 +41,7 @@ var _ = SIGDescribe("Sysctls [LinuxOnly] [NodeConformance]", func() {
f := framework.NewDefaultFramework("sysctl")
f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged
var podClient *framework.PodClient
var podClient *e2epod.PodClient
testPod := func() *v1.Pod {
podName := "sysctl-" + string(uuid.NewUUID())
@ -65,7 +65,7 @@ var _ = SIGDescribe("Sysctls [LinuxOnly] [NodeConformance]", func() {
}
ginkgo.BeforeEach(func() {
podClient = f.PodClient()
podClient = e2epod.NewPodClient(f)
})
/*
@ -93,7 +93,7 @@ var _ = SIGDescribe("Sysctls [LinuxOnly] [NodeConformance]", func() {
// watch for events instead of termination of pod because the kubelet deletes
// failed pods without running containers. This would create a race as the pod
// might have already been deleted here.
ev, err := f.PodClient().WaitForErrorEventOrSuccess(pod)
ev, err := e2epod.NewPodClient(f).WaitForErrorEventOrSuccess(pod)
framework.ExpectNoError(err)
gomega.Expect(ev).To(gomega.BeNil())
@ -201,7 +201,7 @@ var _ = SIGDescribe("Sysctls [LinuxOnly] [NodeConformance]", func() {
// watch for events instead of termination of pod because the kubelet deletes
// failed pods without running containers. This would create a race as the pod
// might have already been deleted here.
ev, err := f.PodClient().WaitForErrorEventOrSuccess(pod)
ev, err := e2epod.NewPodClient(f).WaitForErrorEventOrSuccess(pod)
framework.ExpectNoError(err)
gomega.Expect(ev).To(gomega.BeNil())

View File

@ -29,6 +29,7 @@ import (
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2epodoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
imageutils "k8s.io/kubernetes/test/utils/image"
admissionapi "k8s.io/pod-security-admission/api"
@ -148,7 +149,7 @@ var _ = SIGDescribe("ConfigMap", func() {
"--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/configmap-volume/data-1")
ginkgo.By("Creating the pod")
f.PodClient().CreateSync(pod)
e2epod.NewPodClient(f).CreateSync(pod)
pollLogs := func() (string, error) {
return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name)
@ -215,7 +216,7 @@ var _ = SIGDescribe("ConfigMap", func() {
})
ginkgo.By("Creating the pod")
f.PodClient().Create(pod)
e2epod.NewPodClient(f).Create(pod)
e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
pollLogs1 := func() (string, error) {
@ -374,7 +375,7 @@ var _ = SIGDescribe("ConfigMap", func() {
},
}
ginkgo.By("Creating the pod")
f.PodClient().CreateSync(pod)
e2epod.NewPodClient(f).CreateSync(pod)
pollCreateLogs := func() (string, error) {
return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, createContainerName)
@ -485,7 +486,7 @@ var _ = SIGDescribe("ConfigMap", func() {
},
}
f.TestContainerOutput("consume configMaps", pod, 0, []string{
e2epodoutput.TestContainerOutput(f, "consume configMaps", pod, 0, []string{
"content of file \"/etc/configmap-volume/data-1\": value-1",
})
@ -621,7 +622,7 @@ func doConfigMapE2EWithoutMappings(f *framework.Framework, asUser bool, fsGroup
"content of file \"/etc/configmap-volume/data-1\": value-1",
fileModeRegexp,
}
f.TestContainerOutputRegexp("consume configMaps", pod, 0, output)
e2epodoutput.TestContainerOutputRegexp(f, "consume configMaps", pod, 0, output)
}
func doConfigMapE2EWithMappings(f *framework.Framework, asUser bool, fsGroup int64, itemMode *int32) {
@ -673,7 +674,7 @@ func doConfigMapE2EWithMappings(f *framework.Framework, asUser bool, fsGroup int
fileModeRegexp := getFileModeRegex("/etc/configmap-volume/path/to/data-2", itemMode)
output = append(output, fileModeRegexp)
}
f.TestContainerOutputRegexp("consume configMaps", pod, 0, output)
e2epodoutput.TestContainerOutputRegexp(f, "consume configMaps", pod, 0, output)
}
func createNonOptionalConfigMapPod(f *framework.Framework, volumeMountPath string) (*v1.Pod, error) {
@ -690,7 +691,7 @@ func createNonOptionalConfigMapPod(f *framework.Framework, volumeMountPath strin
pod.Spec.Volumes[0].VolumeSource.ConfigMap.Optional = &falseValue
ginkgo.By("Creating the pod")
pod = f.PodClient().Create(pod)
pod = e2epod.NewPodClient(f).Create(pod)
return pod, e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
}
@ -720,7 +721,7 @@ func createNonOptionalConfigMapPodWithConfig(f *framework.Framework, volumeMount
}
ginkgo.By("Creating the pod")
pod = f.PodClient().Create(pod)
pod = e2epod.NewPodClient(f).Create(pod)
return pod, e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
}

View File

@ -24,6 +24,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
e2epodoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
imageutils "k8s.io/kubernetes/test/utils/image"
admissionapi "k8s.io/pod-security-admission/api"
@ -133,5 +134,5 @@ func testDownwardAPIForEphemeralStorage(f *framework.Framework, podName string,
}
func testDownwardAPIUsingPod(f *framework.Framework, pod *v1.Pod, env []v1.EnvVar, expectations []string) {
f.TestContainerOutputRegexp("downward api env vars", pod, 0, expectations)
e2epodoutput.TestContainerOutputRegexp(f, "downward api env vars", pod, 0, expectations)
}

View File

@ -26,6 +26,7 @@ import (
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2epodoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
imageutils "k8s.io/kubernetes/test/utils/image"
admissionapi "k8s.io/pod-security-admission/api"
@ -39,9 +40,9 @@ var _ = SIGDescribe("Downward API volume", func() {
const podLogTimeout = 3 * time.Minute
f := framework.NewDefaultFramework("downward-api")
f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline
var podClient *framework.PodClient
var podClient *e2epod.PodClient
ginkgo.BeforeEach(func() {
podClient = f.PodClient()
podClient = e2epod.NewPodClient(f)
})
/*
@ -53,7 +54,7 @@ var _ = SIGDescribe("Downward API volume", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
pod := downwardAPIVolumePodForSimpleTest(podName, "/etc/podinfo/podname")
f.TestContainerOutput("downward API volume plugin", pod, 0, []string{
e2epodoutput.TestContainerOutput(f, "downward API volume plugin", pod, 0, []string{
fmt.Sprintf("%s\n", podName),
})
})
@ -69,7 +70,7 @@ var _ = SIGDescribe("Downward API volume", func() {
defaultMode := int32(0400)
pod := downwardAPIVolumePodForModeTest(podName, "/etc/podinfo/podname", nil, &defaultMode)
f.TestContainerOutput("downward API volume plugin", pod, 0, []string{
e2epodoutput.TestContainerOutput(f, "downward API volume plugin", pod, 0, []string{
"mode of file \"/etc/podinfo/podname\": -r--------",
})
})
@ -85,7 +86,7 @@ var _ = SIGDescribe("Downward API volume", func() {
mode := int32(0400)
pod := downwardAPIVolumePodForModeTest(podName, "/etc/podinfo/podname", &mode, nil)
f.TestContainerOutput("downward API volume plugin", pod, 0, []string{
e2epodoutput.TestContainerOutput(f, "downward API volume plugin", pod, 0, []string{
"mode of file \"/etc/podinfo/podname\": -r--------",
})
})
@ -100,7 +101,7 @@ var _ = SIGDescribe("Downward API volume", func() {
FSGroup: &gid,
}
setPodNonRootUser(pod)
f.TestContainerOutput("downward API volume plugin", pod, 0, []string{
e2epodoutput.TestContainerOutput(f, "downward API volume plugin", pod, 0, []string{
fmt.Sprintf("%s\n", podName),
})
})
@ -116,7 +117,7 @@ var _ = SIGDescribe("Downward API volume", func() {
FSGroup: &gid,
}
setPodNonRootUser(pod)
f.TestContainerOutput("downward API volume plugin", pod, 0, []string{
e2epodoutput.TestContainerOutput(f, "downward API volume plugin", pod, 0, []string{
"mode of file \"/etc/podinfo/podname\": -r--r-----",
})
})
@ -193,7 +194,7 @@ var _ = SIGDescribe("Downward API volume", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
pod := downwardAPIVolumeForContainerResources(podName, "/etc/podinfo/cpu_limit")
f.TestContainerOutput("downward API volume plugin", pod, 0, []string{
e2epodoutput.TestContainerOutput(f, "downward API volume plugin", pod, 0, []string{
fmt.Sprintf("2\n"),
})
})
@ -207,7 +208,7 @@ var _ = SIGDescribe("Downward API volume", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
pod := downwardAPIVolumeForContainerResources(podName, "/etc/podinfo/memory_limit")
f.TestContainerOutput("downward API volume plugin", pod, 0, []string{
e2epodoutput.TestContainerOutput(f, "downward API volume plugin", pod, 0, []string{
fmt.Sprintf("67108864\n"),
})
})
@ -221,7 +222,7 @@ var _ = SIGDescribe("Downward API volume", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
pod := downwardAPIVolumeForContainerResources(podName, "/etc/podinfo/cpu_request")
f.TestContainerOutput("downward API volume plugin", pod, 0, []string{
e2epodoutput.TestContainerOutput(f, "downward API volume plugin", pod, 0, []string{
fmt.Sprintf("1\n"),
})
})
@ -235,7 +236,7 @@ var _ = SIGDescribe("Downward API volume", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
pod := downwardAPIVolumeForContainerResources(podName, "/etc/podinfo/memory_request")
f.TestContainerOutput("downward API volume plugin", pod, 0, []string{
e2epodoutput.TestContainerOutput(f, "downward API volume plugin", pod, 0, []string{
fmt.Sprintf("33554432\n"),
})
})
@ -249,7 +250,7 @@ var _ = SIGDescribe("Downward API volume", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
pod := downwardAPIVolumeForDefaultContainerResources(podName, "/etc/podinfo/cpu_limit")
f.TestContainerOutputRegexp("downward API volume plugin", pod, 0, []string{"[1-9]"})
e2epodoutput.TestContainerOutputRegexp(f, "downward API volume plugin", pod, 0, []string{"[1-9]"})
})
/*
@ -261,7 +262,7 @@ var _ = SIGDescribe("Downward API volume", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
pod := downwardAPIVolumeForDefaultContainerResources(podName, "/etc/podinfo/memory_limit")
f.TestContainerOutputRegexp("downward API volume plugin", pod, 0, []string{"[1-9]"})
e2epodoutput.TestContainerOutputRegexp(f, "downward API volume plugin", pod, 0, []string{"[1-9]"})
})
})

View File

@ -28,6 +28,7 @@ import (
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2epodoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
imageutils "k8s.io/kubernetes/test/utils/image"
admissionapi "k8s.io/pod-security-admission/api"
@ -282,11 +283,11 @@ var _ = SIGDescribe("EmptyDir volumes", func() {
}
ginkgo.By("Creating Pod")
f.PodClient().Create(pod)
e2epod.NewPodClient(f).Create(pod)
e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
ginkgo.By("Reading file content from the nginx-container")
result := f.ExecShellInContainer(pod.Name, busyBoxMainContainerName, fmt.Sprintf("cat %s", busyBoxMainVolumeFilePath))
result := e2epod.ExecShellInContainer(f, pod.Name, busyBoxMainContainerName, fmt.Sprintf("cat %s", busyBoxMainVolumeFilePath))
framework.ExpectEqual(result, message, "failed to match expected string %s with %s", message, resultString)
})
@ -342,18 +343,18 @@ var _ = SIGDescribe("EmptyDir volumes", func() {
var err error
ginkgo.By("Creating Pod")
pod = f.PodClient().CreateSync(pod)
pod = e2epod.NewPodClient(f).CreateSync(pod)
ginkgo.By("Waiting for the pod running")
err = e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
framework.ExpectNoError(err, "failed to deploy pod %s", pod.Name)
ginkgo.By("Getting the pod")
pod, err = f.PodClient().Get(context.TODO(), pod.Name, metav1.GetOptions{})
pod, err = e2epod.NewPodClient(f).Get(context.TODO(), pod.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "failed to get pod %s", pod.Name)
ginkgo.By("Reading empty dir size")
result := f.ExecShellInContainer(pod.Name, busyBoxMainContainerName, fmt.Sprintf("df | grep %s | awk '{print $2}'", busyBoxMainVolumeMountPath))
result := e2epod.ExecShellInContainer(f, pod.Name, busyBoxMainContainerName, fmt.Sprintf("df | grep %s | awk '{print $2}'", busyBoxMainVolumeMountPath))
framework.ExpectEqual(result, expectedResult, "failed to match expected string %s with %s", expectedResult, result)
})
})
@ -390,7 +391,7 @@ func doTestSetgidFSGroup(f *framework.Framework, uid int64, medium v1.StorageMed
if medium == v1.StorageMediumMemory {
out = append(out, "mount type of \"/test-volume\": tmpfs")
}
f.TestContainerOutput(msg, pod, 0, out)
e2epodoutput.TestContainerOutput(f, msg, pod, 0, out)
}
func doTestSubPathFSGroup(f *framework.Framework, uid int64, medium v1.StorageMedium) {
@ -423,7 +424,7 @@ func doTestSubPathFSGroup(f *framework.Framework, uid int64, medium v1.StorageMe
if medium == v1.StorageMediumMemory {
out = append(out, "mount type of \"/test-volume\": tmpfs")
}
f.TestContainerOutput(msg, pod, 0, out)
e2epodoutput.TestContainerOutput(f, msg, pod, 0, out)
}
func doTestVolumeModeFSGroup(f *framework.Framework, uid int64, medium v1.StorageMedium) {
@ -448,7 +449,7 @@ func doTestVolumeModeFSGroup(f *framework.Framework, uid int64, medium v1.Storag
if medium == v1.StorageMediumMemory {
out = append(out, "mount type of \"/test-volume\": tmpfs")
}
f.TestContainerOutput(msg, pod, 0, out)
e2epodoutput.TestContainerOutput(f, msg, pod, 0, out)
}
func doTest0644FSGroup(f *framework.Framework, uid int64, medium v1.StorageMedium) {
@ -476,7 +477,7 @@ func doTest0644FSGroup(f *framework.Framework, uid int64, medium v1.StorageMediu
if medium == v1.StorageMediumMemory {
out = append(out, "mount type of \"/test-volume\": tmpfs")
}
f.TestContainerOutput(msg, pod, 0, out)
e2epodoutput.TestContainerOutput(f, msg, pod, 0, out)
}
func doTestVolumeMode(f *framework.Framework, uid int64, medium v1.StorageMedium) {
@ -498,7 +499,7 @@ func doTestVolumeMode(f *framework.Framework, uid int64, medium v1.StorageMedium
if medium == v1.StorageMediumMemory {
out = append(out, "mount type of \"/test-volume\": tmpfs")
}
f.TestContainerOutput(msg, pod, 0, out)
e2epodoutput.TestContainerOutput(f, msg, pod, 0, out)
}
func doTest0644(f *framework.Framework, uid int64, medium v1.StorageMedium) {
@ -523,7 +524,7 @@ func doTest0644(f *framework.Framework, uid int64, medium v1.StorageMedium) {
if medium == v1.StorageMediumMemory {
out = append(out, "mount type of \"/test-volume\": tmpfs")
}
f.TestContainerOutput(msg, pod, 0, out)
e2epodoutput.TestContainerOutput(f, msg, pod, 0, out)
}
func doTest0666(f *framework.Framework, uid int64, medium v1.StorageMedium) {
@ -548,7 +549,7 @@ func doTest0666(f *framework.Framework, uid int64, medium v1.StorageMedium) {
if medium == v1.StorageMediumMemory {
out = append(out, "mount type of \"/test-volume\": tmpfs")
}
f.TestContainerOutput(msg, pod, 0, out)
e2epodoutput.TestContainerOutput(f, msg, pod, 0, out)
}
func doTest0777(f *framework.Framework, uid int64, medium v1.StorageMedium) {
@ -573,7 +574,7 @@ func doTest0777(f *framework.Framework, uid int64, medium v1.StorageMedium) {
if medium == v1.StorageMediumMemory {
out = append(out, "mount type of \"/test-volume\": tmpfs")
}
f.TestContainerOutput(msg, pod, 0, out)
e2epodoutput.TestContainerOutput(f, msg, pod, 0, out)
}
func formatMedium(medium v1.StorageMedium) string {

View File

@ -21,9 +21,10 @@ import (
"os"
"path"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/test/e2e/framework"
e2epodoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
imageutils "k8s.io/kubernetes/test/utils/image"
admissionapi "k8s.io/pod-security-admission/api"
@ -58,7 +59,7 @@ var _ = SIGDescribe("HostPath", func() {
fmt.Sprintf("--fs_type=%v", volumePath),
fmt.Sprintf("--file_mode=%v", volumePath),
}
f.TestContainerOutputRegexp("hostPath mode", pod, 0, []string{
e2epodoutput.TestContainerOutputRegexp(f, "hostPath mode", pod, 0, []string{
"mode of file \"/test-volume\": dg?trwxrwx", // we expect the sticky bit (mode flag t) to be set for the dir
})
})
@ -87,7 +88,7 @@ var _ = SIGDescribe("HostPath", func() {
}
//Read the content of the file with the second container to
//verify volumes being shared properly among containers within the pod.
f.TestContainerOutput("hostPath r/w", pod, 1, []string{
e2epodoutput.TestContainerOutput(f, "hostPath r/w", pod, 1, []string{
"content of file \"/test-volume/test-file\": mount-tester new file",
})
})
@ -124,7 +125,7 @@ var _ = SIGDescribe("HostPath", func() {
fmt.Sprintf("--retry_time=%d", retryDuration),
}
f.TestContainerOutput("hostPath subPath", pod, 1, []string{
e2epodoutput.TestContainerOutput(f, "hostPath subPath", pod, 1, []string{
"content of file \"" + filePathInReader + "\": mount-tester new file",
})
})

View File

@ -24,6 +24,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
e2epodoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
imageutils "k8s.io/kubernetes/test/utils/image"
admissionapi "k8s.io/pod-security-admission/api"
@ -88,7 +89,7 @@ var _ = SIGDescribe("Projected combined", func() {
},
},
}
f.TestContainerOutput("Check all projections for projected volume plugin", pod, 0, []string{
e2epodoutput.TestContainerOutput(f, "Check all projections for projected volume plugin", pod, 0, []string{
podName,
"secret-value-1",
"configmap-value-1",

View File

@ -26,6 +26,7 @@ import (
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2epodoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
imageutils "k8s.io/kubernetes/test/utils/image"
admissionapi "k8s.io/pod-security-admission/api"
@ -147,7 +148,7 @@ var _ = SIGDescribe("Projected configMap", func() {
"--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/projected-configmap-volume/data-1")
ginkgo.By("Creating the pod")
f.PodClient().CreateSync(pod)
e2epod.NewPodClient(f).CreateSync(pod)
pollLogs := func() (string, error) {
return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name)
@ -326,7 +327,7 @@ var _ = SIGDescribe("Projected configMap", func() {
},
}
ginkgo.By("Creating the pod")
f.PodClient().CreateSync(pod)
e2epod.NewPodClient(f).CreateSync(pod)
pollCreateLogs := func() (string, error) {
return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, createContainerName)
@ -450,7 +451,7 @@ var _ = SIGDescribe("Projected configMap", func() {
},
}
f.TestContainerOutput("consume configMaps", pod, 0, []string{
e2epodoutput.TestContainerOutput(f, "consume configMaps", pod, 0, []string{
"content of file \"/etc/projected-configmap-volume/data-1\": value-1",
})
@ -512,7 +513,7 @@ func doProjectedConfigMapE2EWithoutMappings(f *framework.Framework, asUser bool,
"content of file \"/etc/projected-configmap-volume/data-1\": value-1",
fileModeRegexp,
}
f.TestContainerOutputRegexp("consume configMaps", pod, 0, output)
e2epodoutput.TestContainerOutputRegexp(f, "consume configMaps", pod, 0, output)
}
func doProjectedConfigMapE2EWithMappings(f *framework.Framework, asUser bool, fsGroup int64, itemMode *int32) {
@ -563,7 +564,7 @@ func doProjectedConfigMapE2EWithMappings(f *framework.Framework, asUser bool, fs
fileModeRegexp := getFileModeRegex("/etc/projected-configmap-volume/path/to/data-2", itemMode)
output = append(output, fileModeRegexp)
}
f.TestContainerOutputRegexp("consume configMaps", pod, 0, output)
e2epodoutput.TestContainerOutputRegexp(f, "consume configMaps", pod, 0, output)
}
func createProjectedConfigMapMounttestPod(namespace, volumeName, referenceName, mountPath string, mounttestArgs ...string) *v1.Pod {

View File

@ -25,6 +25,7 @@ import (
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2epodoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
imageutils "k8s.io/kubernetes/test/utils/image"
admissionapi "k8s.io/pod-security-admission/api"
@ -39,9 +40,9 @@ var _ = SIGDescribe("Projected downwardAPI", func() {
// How long to wait for a log pod to be displayed
const podLogTimeout = 2 * time.Minute
var podClient *framework.PodClient
var podClient *e2epod.PodClient
ginkgo.BeforeEach(func() {
podClient = f.PodClient()
podClient = e2epod.NewPodClient(f)
})
/*
@ -53,7 +54,7 @@ var _ = SIGDescribe("Projected downwardAPI", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
pod := downwardAPIVolumePodForSimpleTest(podName, "/etc/podinfo/podname")
f.TestContainerOutput("downward API volume plugin", pod, 0, []string{
e2epodoutput.TestContainerOutput(f, "downward API volume plugin", pod, 0, []string{
fmt.Sprintf("%s\n", podName),
})
})
@ -69,7 +70,7 @@ var _ = SIGDescribe("Projected downwardAPI", func() {
defaultMode := int32(0400)
pod := projectedDownwardAPIVolumePodForModeTest(podName, "/etc/podinfo/podname", nil, &defaultMode)
f.TestContainerOutput("downward API volume plugin", pod, 0, []string{
e2epodoutput.TestContainerOutput(f, "downward API volume plugin", pod, 0, []string{
"mode of file \"/etc/podinfo/podname\": -r--------",
})
})
@ -85,7 +86,7 @@ var _ = SIGDescribe("Projected downwardAPI", func() {
mode := int32(0400)
pod := projectedDownwardAPIVolumePodForModeTest(podName, "/etc/podinfo/podname", &mode, nil)
f.TestContainerOutput("downward API volume plugin", pod, 0, []string{
e2epodoutput.TestContainerOutput(f, "downward API volume plugin", pod, 0, []string{
"mode of file \"/etc/podinfo/podname\": -r--------",
})
})
@ -100,7 +101,7 @@ var _ = SIGDescribe("Projected downwardAPI", func() {
FSGroup: &gid,
}
setPodNonRootUser(pod)
f.TestContainerOutput("downward API volume plugin", pod, 0, []string{
e2epodoutput.TestContainerOutput(f, "downward API volume plugin", pod, 0, []string{
fmt.Sprintf("%s\n", podName),
})
})
@ -116,7 +117,7 @@ var _ = SIGDescribe("Projected downwardAPI", func() {
FSGroup: &gid,
}
setPodNonRootUser(pod)
f.TestContainerOutput("downward API volume plugin", pod, 0, []string{
e2epodoutput.TestContainerOutput(f, "downward API volume plugin", pod, 0, []string{
"mode of file \"/etc/podinfo/podname\": -r--r-----",
})
})
@ -193,7 +194,7 @@ var _ = SIGDescribe("Projected downwardAPI", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
pod := downwardAPIVolumeForContainerResources(podName, "/etc/podinfo/cpu_limit")
f.TestContainerOutput("downward API volume plugin", pod, 0, []string{
e2epodoutput.TestContainerOutput(f, "downward API volume plugin", pod, 0, []string{
fmt.Sprintf("2\n"),
})
})
@ -207,7 +208,7 @@ var _ = SIGDescribe("Projected downwardAPI", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
pod := downwardAPIVolumeForContainerResources(podName, "/etc/podinfo/memory_limit")
f.TestContainerOutput("downward API volume plugin", pod, 0, []string{
e2epodoutput.TestContainerOutput(f, "downward API volume plugin", pod, 0, []string{
fmt.Sprintf("67108864\n"),
})
})
@ -221,7 +222,7 @@ var _ = SIGDescribe("Projected downwardAPI", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
pod := downwardAPIVolumeForContainerResources(podName, "/etc/podinfo/cpu_request")
f.TestContainerOutput("downward API volume plugin", pod, 0, []string{
e2epodoutput.TestContainerOutput(f, "downward API volume plugin", pod, 0, []string{
fmt.Sprintf("1\n"),
})
})
@ -235,7 +236,7 @@ var _ = SIGDescribe("Projected downwardAPI", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
pod := downwardAPIVolumeForContainerResources(podName, "/etc/podinfo/memory_request")
f.TestContainerOutput("downward API volume plugin", pod, 0, []string{
e2epodoutput.TestContainerOutput(f, "downward API volume plugin", pod, 0, []string{
fmt.Sprintf("33554432\n"),
})
})
@ -249,7 +250,7 @@ var _ = SIGDescribe("Projected downwardAPI", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
pod := downwardAPIVolumeForDefaultContainerResources(podName, "/etc/podinfo/cpu_limit")
f.TestContainerOutputRegexp("downward API volume plugin", pod, 0, []string{"[1-9]"})
e2epodoutput.TestContainerOutputRegexp(f, "downward API volume plugin", pod, 0, []string{"[1-9]"})
})
/*
@ -261,7 +262,7 @@ var _ = SIGDescribe("Projected downwardAPI", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
pod := downwardAPIVolumeForDefaultContainerResources(podName, "/etc/podinfo/memory_limit")
f.TestContainerOutputRegexp("downward API volume plugin", pod, 0, []string{"[1-9]"})
e2epodoutput.TestContainerOutputRegexp(f, "downward API volume plugin", pod, 0, []string{"[1-9]"})
})
})

View File

@ -26,6 +26,7 @@ import (
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2epodoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
imageutils "k8s.io/kubernetes/test/utils/image"
admissionapi "k8s.io/pod-security-admission/api"
@ -200,7 +201,7 @@ var _ = SIGDescribe("Projected secret", func() {
}
fileModeRegexp := getFileModeRegex("/etc/projected-secret-volume/data-1", nil)
f.TestContainerOutputRegexp("consume secrets", pod, 0, []string{
e2epodoutput.TestContainerOutputRegexp(f, "consume secrets", pod, 0, []string{
"content of file \"/etc/projected-secret-volume/data-1\": value-1",
fileModeRegexp,
})
@ -367,7 +368,7 @@ var _ = SIGDescribe("Projected secret", func() {
},
}
ginkgo.By("Creating the pod")
f.PodClient().CreateSync(pod)
e2epod.NewPodClient(f).CreateSync(pod)
pollCreateLogs := func() (string, error) {
return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, createContainerName)
@ -504,7 +505,7 @@ func doProjectedSecretE2EWithoutMapping(f *framework.Framework, defaultMode *int
fileModeRegexp,
}
f.TestContainerOutputRegexp("consume secrets", pod, 0, expectedOutput)
e2epodoutput.TestContainerOutputRegexp(f, "consume secrets", pod, 0, expectedOutput)
}
func doProjectedSecretE2EWithMapping(f *framework.Framework, mode *int32) {
@ -581,5 +582,5 @@ func doProjectedSecretE2EWithMapping(f *framework.Framework, mode *int32) {
fileModeRegexp,
}
f.TestContainerOutputRegexp("consume secrets", pod, 0, expectedOutput)
e2epodoutput.TestContainerOutputRegexp(f, "consume secrets", pod, 0, expectedOutput)
}

View File

@ -27,6 +27,7 @@ import (
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2epodoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
imageutils "k8s.io/kubernetes/test/utils/image"
admissionapi "k8s.io/pod-security-admission/api"
@ -190,7 +191,7 @@ var _ = SIGDescribe("Secrets", func() {
}
fileModeRegexp := getFileModeRegex("/etc/secret-volume/data-1", nil)
f.TestContainerOutputRegexp("consume secrets", pod, 0, []string{
e2epodoutput.TestContainerOutputRegexp(f, "consume secrets", pod, 0, []string{
"content of file \"/etc/secret-volume/data-1\": value-1",
fileModeRegexp,
})
@ -333,7 +334,7 @@ var _ = SIGDescribe("Secrets", func() {
},
}
ginkgo.By("Creating the pod")
f.PodClient().CreateSync(pod)
e2epod.NewPodClient(f).CreateSync(pod)
pollCreateLogs := func() (string, error) {
return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, createContainerName)
@ -534,7 +535,7 @@ func doSecretE2EWithoutMapping(f *framework.Framework, defaultMode *int32, secre
fileModeRegexp,
}
f.TestContainerOutputRegexp("consume secrets", pod, 0, expectedOutput)
e2epodoutput.TestContainerOutputRegexp(f, "consume secrets", pod, 0, expectedOutput)
}
func doSecretE2EWithMapping(f *framework.Framework, mode *int32) {
@ -602,7 +603,7 @@ func doSecretE2EWithMapping(f *framework.Framework, mode *int32) {
fileModeRegexp,
}
f.TestContainerOutputRegexp("consume secrets", pod, 0, expectedOutput)
e2epodoutput.TestContainerOutputRegexp(f, "consume secrets", pod, 0, expectedOutput)
}
func createNonOptionalSecretPod(f *framework.Framework, volumeMountPath, podName string) error {
@ -649,7 +650,7 @@ func createNonOptionalSecretPod(f *framework.Framework, volumeMountPath, podName
},
}
ginkgo.By("Creating the pod")
pod = f.PodClient().Create(pod)
pod = e2epod.NewPodClient(f).Create(pod)
return e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
}
@ -710,6 +711,6 @@ func createNonOptionalSecretPodWithSecret(f *framework.Framework, volumeMountPat
},
}
ginkgo.By("Creating the pod")
pod = f.PodClient().Create(pod)
pod = e2epod.NewPodClient(f).Create(pod)
return e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
}

View File

@ -41,6 +41,7 @@ import (
commontest "k8s.io/kubernetes/test/e2e/common"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/framework/daemonset"
e2edebug "k8s.io/kubernetes/test/e2e/framework/debug"
e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
@ -223,7 +224,7 @@ func setupSuite() {
// In large clusters we may get to this point but still have a bunch
// of nodes without Routes created. Since this would make a node
// unschedulable, we need to wait until all of them are schedulable.
framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout))
framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout))
// If NumNodes is not specified then auto-detect how many are schedulable and not tainted
if framework.TestContext.CloudConfig.NumNodes == framework.DefaultNumNodes {
@ -242,7 +243,7 @@ func setupSuite() {
// wasting the whole run), we allow for some not-ready pods (with the
// number equal to the number of allowed not-ready nodes).
if err := e2epod.WaitForPodsRunningReady(c, metav1.NamespaceSystem, int32(framework.TestContext.MinStartupPods), int32(framework.TestContext.AllowedNotReadyNodes), podStartupTimeout, map[string]string{}); err != nil {
framework.DumpAllNamespaceInfo(c, metav1.NamespaceSystem)
e2edebug.DumpAllNamespaceInfo(c, metav1.NamespaceSystem)
e2ekubectl.LogFailedContainers(c, metav1.NamespaceSystem, framework.Logf)
framework.Failf("Error waiting for all pods to be running and ready: %v", err)
}
@ -270,7 +271,7 @@ func setupSuite() {
}
if framework.TestContext.NodeKiller.Enabled {
nodeKiller := framework.NewNodeKiller(framework.TestContext.NodeKiller, c, framework.TestContext.Provider)
nodeKiller := e2enode.NewNodeKiller(framework.TestContext.NodeKiller, c, framework.TestContext.Provider)
go nodeKiller.Run(framework.TestContext.NodeKiller.NodeKillerStopCh)
}
}

View File

@ -62,6 +62,11 @@ import (
_ "k8s.io/kubernetes/test/e2e/storage"
_ "k8s.io/kubernetes/test/e2e/storage/external"
_ "k8s.io/kubernetes/test/e2e/windows"
// reconfigure framework
_ "k8s.io/kubernetes/test/e2e/framework/debug/init"
_ "k8s.io/kubernetes/test/e2e/framework/metrics/init"
_ "k8s.io/kubernetes/test/e2e/framework/node/init"
)
// handleFlags sets up all flags and parses the command line.

View File

@ -0,0 +1,88 @@
# Overview
The Kubernetes E2E framework simplifies writing Ginkgo test suites. Its main
use is for these test suites in the Kubernetes repository itself:
- test/e2e: runs as a client for a Kubernetes cluster. The e2e.test binary is
used for conformance testing.
- test/e2e_node: runs on the same node as a kubelet instance. Used for testing
kubelet.
- test/e2e_kubeadm: test suite for kubeadm.
Usage of the framework outside of Kubernetes is possible, but not encouraged.
Downstream users have to be prepared to deal with API changes.
# Code Organization
The core framework is the `k8s.io/kubernetes/test/e2e/framework` package. It
contains functionality that all E2E suites are expected to need:
- connecting to the apiserver
- managing per-test namespaces
- logging (`Logf`)
- failure handling (`Fail`, `Failf`)
- writing concise JUnit test results
It also contains a `TestContext` with settings that can be controlled via
command line flags. For historic reasons, this also contains settings for
individual tests or packages that are not part of the core framework.
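In test code these settings are read from the global `TestContext` instance. The
fields shown below are ones used elsewhere in this PR; the helper function itself
is only an illustrative sketch:
```
package mytests

import "k8s.io/kubernetes/test/e2e/framework"

// logTestContext shows how tests consume settings that were populated
// from command line flags before the Describe callbacks ran.
func logTestContext() {
    framework.Logf("provider: %s", framework.TestContext.Provider)
    framework.Logf("node schedulable timeout: %v", framework.TestContext.NodeSchedulableTimeout)
}
```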
Optional functionality is placed in sub packages like
`test/e2e/framework/pod`. The core framework does not depend on those. Sub
packages may depend on the core framework.
The advantages of splitting the code like this are:
- leaner go doc packages by grouping related functions together
- not forcing all E2E suites to import all functionality
- avoiding import cycles
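As a concrete illustration, a test file typically imports the core framework plus
only the sub packages it needs. The import paths and aliases below match the ones
used throughout this PR; the test body is a hypothetical sketch and `"some-pod"`
is a placeholder name:
```
package mytests

import (
    "github.com/onsi/ginkgo/v2"

    "k8s.io/kubernetes/test/e2e/framework"            // core framework
    e2epod "k8s.io/kubernetes/test/e2e/framework/pod" // optional sub package
    admissionapi "k8s.io/pod-security-admission/api"
)

var _ = ginkgo.Describe("pods example", func() {
    f := framework.NewDefaultFramework("pods-example")
    f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline

    ginkgo.It("waits for a pod to run", func() {
        // Pod helpers live in the pod sub package instead of being
        // methods on the core framework.
        err := e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, "some-pod", f.Namespace.Name)
        framework.ExpectNoError(err)
    })
})
```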
# Execution Flow
When a test suite gets invoked, the top-level `Describe` calls register the
callbacks that define individual tests, but do not invoke them yet. After
that init phase, command line flags are parsed and the `Describe` callbacks are
invoked. Those then define the actual tests for the test suite. Command line
flags can be used to influence the test definitions.
Now `Context/BeforeEach/AfterEach/It` define code that will be called later
when executing a specific test. During this setup phase, `f :=
framework.NewDefaultFramework("some tests")` creates a `Framework` instance for
one or more tests. `NewDefaultFramework` initializes that instance anew for
each test with a `BeforeEach` callback. Starting with Kubernetes 1.26, that
instance gets cleaned up after all other code for a test has been invoked, so
the following code is correct:
```
f := framework.NewDefaultFramework("some tests")
ginkgo.AfterEach(func() {
    // Do something with f.ClientSet.
})

ginkgo.It("test something", func() {
    // The actual test.
})
```
Optional functionality can be injected into each test by adding a callback to
`NewFrameworkExtensions` in an init function. `NewDefaultFramework` will invoke
those callbacks as if the corresponding code had been added to each test like this:
```
f := framework.NewDefaultFramework("some tests")
optional.SomeCallback(f)
```
`SomeCallback` then can register additional `BeforeEach` or `AfterEach`
callbacks that use the test's `Framework` instance.
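For example, the new `debug/init` package added by this PR registers its callback
roughly like this (a condensed sketch of the real `init` function that appears
later in this diff):
```
// Package init wires the optional debug functionality into every test.
package init

import (
    "k8s.io/kubernetes/test/e2e/framework"
    e2edebug "k8s.io/kubernetes/test/e2e/framework/debug"
)

func init() {
    framework.NewFrameworkExtensions = append(framework.NewFrameworkExtensions,
        func(f *framework.Framework) {
            // Invoked by NewDefaultFramework for each test; the callback may
            // set fields on f or register additional BeforeEach/AfterEach/
            // DeferCleanup callbacks.
            f.DumpAllNamespaceInfo = func(f *framework.Framework, ns string) {
                e2edebug.DumpAllNamespaceInfo(f.ClientSet, ns)
            }
        },
    )
}
```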
When a test runs, callbacks defined for it with `BeforeEach` and `AfterEach`
are called in first-in-first-out order. Since the migration to ginkgo v2 in
Kubernetes 1.25, the `AfterEach` callback is also called when there has been a
test failure. This can be used to run cleanup code for a test
reliably. However,
[`ginkgo.DeferCleanup`](https://onsi.github.io/ginkgo/#spec-cleanup-aftereach-and-defercleanup)
is often a better alternative. Its callbacks are executed in first-in-last-out
order.
`test/e2e/framework/internal/unittests/cleanup/cleanup.go` shows how these
different callbacks can be used and in which order they are going to run.
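As a quick illustration of the ordering (a hypothetical sketch, not taken from
that file), the comments below show the sequence in which the callbacks fire for
a passing test:
```
package mytests

import (
    "github.com/onsi/ginkgo/v2"

    "k8s.io/kubernetes/test/e2e/framework"
)

var _ = ginkgo.Describe("cleanup ordering", func() {
    f := framework.NewDefaultFramework("cleanup-ordering")

    ginkgo.BeforeEach(func() {
        framework.Logf("setup")                                     // 1st
        ginkgo.DeferCleanup(func() { framework.Logf("cleanup A") }) // 5th (first-in-last-out)
        ginkgo.DeferCleanup(func() { framework.Logf("cleanup B") }) // 4th
    })

    ginkgo.AfterEach(func() {
        framework.Logf("after each") // 3rd, also runs when the test fails
    })

    ginkgo.It("runs the test body", func() {
        framework.Logf("test body in namespace %s", f.Namespace.Name) // 2nd
    })
})
```
The framework's own cleanup (for example, deleting the test namespace) happens
after all of the callbacks above, per the Kubernetes 1.26 behavior described
earlier.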

View File

@ -29,7 +29,7 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
v1authorization "k8s.io/client-go/kubernetes/typed/authorization/v1"
v1rbac "k8s.io/client-go/kubernetes/typed/rbac/v1"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
"k8s.io/kubernetes/test/e2e/framework"
)
const (
@ -152,13 +152,13 @@ func IsRBACEnabled(crGetter v1rbac.ClusterRolesGetter) bool {
isRBACEnabledOnce.Do(func() {
crs, err := crGetter.ClusterRoles().List(context.TODO(), metav1.ListOptions{})
if err != nil {
e2elog.Logf("Error listing ClusterRoles; assuming RBAC is disabled: %v", err)
framework.Logf("Error listing ClusterRoles; assuming RBAC is disabled: %v", err)
isRBACEnabled = false
} else if crs == nil || len(crs.Items) == 0 {
e2elog.Logf("No ClusterRoles found; assuming RBAC is disabled.")
framework.Logf("No ClusterRoles found; assuming RBAC is disabled.")
isRBACEnabled = false
} else {
e2elog.Logf("Found ClusterRoles; assuming RBAC is enabled.")
framework.Logf("Found ClusterRoles; assuming RBAC is enabled.")
isRBACEnabled = true
}
})

View File

@ -40,6 +40,7 @@ import (
clientset "k8s.io/client-go/kubernetes"
scaleclient "k8s.io/client-go/scale"
"k8s.io/kubernetes/test/e2e/framework"
e2edebug "k8s.io/kubernetes/test/e2e/framework/debug"
e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
e2erc "k8s.io/kubernetes/test/e2e/framework/rc"
e2eresource "k8s.io/kubernetes/test/e2e/framework/resource"
@ -628,7 +629,7 @@ func runServiceAndWorkloadForResourceConsumer(c clientset.Interface, resourceCli
dpConfig := testutils.DeploymentConfig{
RCConfig: rcConfig,
}
dpConfig.NodeDumpFunc = framework.DumpNodeDebugInfo
dpConfig.NodeDumpFunc = e2edebug.DumpNodeDebugInfo
dpConfig.ContainerDumpFunc = e2ekubectl.LogFailedContainers
switch kind {
@ -736,7 +737,7 @@ func DeleteHorizontalPodAutoscaler(rc *ResourceConsumer, autoscalerName string)
// runReplicaSet launches (and verifies correctness) of a replicaset.
func runReplicaSet(config testutils.ReplicaSetConfig) error {
ginkgo.By(fmt.Sprintf("creating replicaset %s in namespace %s", config.Name, config.Namespace))
config.NodeDumpFunc = framework.DumpNodeDebugInfo
config.NodeDumpFunc = e2edebug.DumpNodeDebugInfo
config.ContainerDumpFunc = e2ekubectl.LogFailedContainers
return testutils.RunReplicaSet(config)
}

View File

@ -0,0 +1,187 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package debug
import (
"context"
"fmt"
"sort"
"time"
"github.com/onsi/ginkgo/v2"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
clientset "k8s.io/client-go/kubernetes"
restclient "k8s.io/client-go/rest"
"k8s.io/kubernetes/test/e2e/framework"
e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)
// EventsLister is a func that lists events.
type EventsLister func(opts metav1.ListOptions, ns string) (*v1.EventList, error)
// dumpEventsInNamespace dumps events in the given namespace.
func dumpEventsInNamespace(eventsLister EventsLister, namespace string) {
ginkgo.By(fmt.Sprintf("Collecting events from namespace %q.", namespace))
events, err := eventsLister(metav1.ListOptions{}, namespace)
framework.ExpectNoError(err, "failed to list events in namespace %q", namespace)
ginkgo.By(fmt.Sprintf("Found %d events.", len(events.Items)))
// Sort events by their first timestamp
sortedEvents := events.Items
if len(sortedEvents) > 1 {
sort.Sort(byFirstTimestamp(sortedEvents))
}
for _, e := range sortedEvents {
framework.Logf("At %v - event for %v: %v %v: %v", e.FirstTimestamp, e.InvolvedObject.Name, e.Source, e.Reason, e.Message)
}
// Note that we don't wait for any Cleanup to propagate, which means
// that if you delete a bunch of pods right before ending your test,
// you may or may not see the killing/deletion/Cleanup events.
}
// DumpAllNamespaceInfo dumps events, pods and nodes information in the given namespace.
func DumpAllNamespaceInfo(c clientset.Interface, namespace string) {
dumpEventsInNamespace(func(opts metav1.ListOptions, ns string) (*v1.EventList, error) {
return c.CoreV1().Events(ns).List(context.TODO(), opts)
}, namespace)
e2epod.DumpAllPodInfoForNamespace(c, namespace, framework.TestContext.ReportDir)
// If cluster is large, then the following logs are basically useless, because:
// 1. it takes tens of minutes or hours to grab all of them
// 2. there are so many of them that working with them is mostly impossible
// So we dump them only if the cluster is relatively small.
maxNodesForDump := framework.TestContext.MaxNodesToGather
nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
if err != nil {
framework.Logf("unable to fetch node list: %v", err)
return
}
if len(nodes.Items) <= maxNodesForDump {
dumpAllNodeInfo(c, nodes)
} else {
framework.Logf("skipping dumping cluster info - cluster too large")
}
}
// byFirstTimestamp sorts a slice of events by first timestamp, using their involvedObject's name as a tie breaker.
type byFirstTimestamp []v1.Event
func (o byFirstTimestamp) Len() int { return len(o) }
func (o byFirstTimestamp) Swap(i, j int) { o[i], o[j] = o[j], o[i] }
func (o byFirstTimestamp) Less(i, j int) bool {
if o[i].FirstTimestamp.Equal(&o[j].FirstTimestamp) {
return o[i].InvolvedObject.Name < o[j].InvolvedObject.Name
}
return o[i].FirstTimestamp.Before(&o[j].FirstTimestamp)
}
func dumpAllNodeInfo(c clientset.Interface, nodes *v1.NodeList) {
names := make([]string, len(nodes.Items))
for ix := range nodes.Items {
names[ix] = nodes.Items[ix].Name
}
DumpNodeDebugInfo(c, names, framework.Logf)
}
// DumpNodeDebugInfo dumps debug information of the given nodes.
func DumpNodeDebugInfo(c clientset.Interface, nodeNames []string, logFunc func(fmt string, args ...interface{})) {
for _, n := range nodeNames {
logFunc("\nLogging node info for node %v", n)
node, err := c.CoreV1().Nodes().Get(context.TODO(), n, metav1.GetOptions{})
if err != nil {
logFunc("Error getting node info %v", err)
}
logFunc("Node Info: %v", node)
logFunc("\nLogging kubelet events for node %v", n)
for _, e := range getNodeEvents(c, n) {
logFunc("source %v type %v message %v reason %v first ts %v last ts %v, involved obj %+v",
e.Source, e.Type, e.Message, e.Reason, e.FirstTimestamp, e.LastTimestamp, e.InvolvedObject)
}
logFunc("\nLogging pods the kubelet thinks is on node %v", n)
podList, err := getKubeletPods(c, n)
if err != nil {
logFunc("Unable to retrieve kubelet pods for node %v: %v", n, err)
continue
}
for _, p := range podList.Items {
logFunc("%v started at %v (%d+%d container statuses recorded)", p.Name, p.Status.StartTime, len(p.Status.InitContainerStatuses), len(p.Status.ContainerStatuses))
for _, c := range p.Status.InitContainerStatuses {
logFunc("\tInit container %v ready: %v, restart count %v",
c.Name, c.Ready, c.RestartCount)
}
for _, c := range p.Status.ContainerStatuses {
logFunc("\tContainer %v ready: %v, restart count %v",
c.Name, c.Ready, c.RestartCount)
}
}
e2emetrics.HighLatencyKubeletOperations(c, 10*time.Second, n, logFunc)
// TODO: Log node resource info
}
}
// getKubeletPods retrieves the list of pods on the kubelet.
func getKubeletPods(c clientset.Interface, node string) (*v1.PodList, error) {
var client restclient.Result
finished := make(chan struct{}, 1)
go func() {
// call chain tends to hang in some cases when Node is not ready. Add an artificial timeout for this call. #22165
client = c.CoreV1().RESTClient().Get().
Resource("nodes").
SubResource("proxy").
Name(fmt.Sprintf("%v:%v", node, framework.KubeletPort)).
Suffix("pods").
Do(context.TODO())
finished <- struct{}{}
}()
select {
case <-finished:
result := &v1.PodList{}
if err := client.Into(result); err != nil {
return &v1.PodList{}, err
}
return result, nil
case <-time.After(framework.PodGetTimeout):
return &v1.PodList{}, fmt.Errorf("Waiting up to %v for getting the list of pods", framework.PodGetTimeout)
}
}
// getNodeEvents returns kubelet events from the given node. This includes kubelet
// restart and node unhealthy events. Note that listing events like this will mess
// with latency metrics, beware of calling it during a test.
func getNodeEvents(c clientset.Interface, nodeName string) []v1.Event {
selector := fields.Set{
"involvedObject.kind": "Node",
"involvedObject.name": nodeName,
"involvedObject.namespace": metav1.NamespaceAll,
"source": "kubelet",
}.AsSelector().String()
options := metav1.ListOptions{FieldSelector: selector}
events, err := c.CoreV1().Events(metav1.NamespaceSystem).List(context.TODO(), options)
if err != nil {
framework.Logf("Unexpected error retrieving node events %v", err)
return []v1.Event{}
}
return events.Items
}

View File

@ -0,0 +1,101 @@
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package init sets debug.DumpAllNamespaceInfo as implementation in the framework
// and enables log size verification and resource gathering.
package init
import (
"sync"
"time"
"github.com/onsi/ginkgo/v2"
"k8s.io/kubernetes/test/e2e/framework"
e2edebug "k8s.io/kubernetes/test/e2e/framework/debug"
)
func init() {
framework.NewFrameworkExtensions = append(framework.NewFrameworkExtensions,
func(f *framework.Framework) {
f.DumpAllNamespaceInfo = func(f *framework.Framework, ns string) {
e2edebug.DumpAllNamespaceInfo(f.ClientSet, ns)
}
if framework.TestContext.GatherLogsSizes {
var (
wg sync.WaitGroup
closeChannel chan bool
verifier *e2edebug.LogsSizeVerifier
)
ginkgo.BeforeEach(func() {
wg.Add(1)
closeChannel = make(chan bool)
verifier = e2edebug.NewLogsVerifier(f.ClientSet, closeChannel)
go func() {
defer wg.Done()
verifier.Run()
}()
ginkgo.DeferCleanup(func() {
ginkgo.By("Gathering log sizes data", func() {
close(closeChannel)
wg.Wait()
f.TestSummaries = append(f.TestSummaries, verifier.GetSummary())
})
})
})
}
if framework.TestContext.GatherKubeSystemResourceUsageData != "false" &&
framework.TestContext.GatherKubeSystemResourceUsageData != "none" {
ginkgo.BeforeEach(func() {
var nodeMode e2edebug.NodesSet
switch framework.TestContext.GatherKubeSystemResourceUsageData {
case "master":
nodeMode = e2edebug.MasterNodes
case "masteranddns":
nodeMode = e2edebug.MasterAndDNSNodes
default:
nodeMode = e2edebug.AllNodes
}
gatherer, err := e2edebug.NewResourceUsageGatherer(f.ClientSet, e2edebug.ResourceGathererOptions{
InKubemark: framework.ProviderIs("kubemark"),
Nodes: nodeMode,
ResourceDataGatheringPeriod: 60 * time.Second,
ProbeDuration: 15 * time.Second,
PrintVerboseLogs: false,
}, nil)
if err != nil {
framework.Logf("Error while creating NewResourceUsageGatherer: %v", err)
return
}
go gatherer.StartGatheringData()
ginkgo.DeferCleanup(func() {
ginkgo.By("Collecting resource usage data", func() {
summary, resourceViolationError := gatherer.StopAndSummarize([]int{90, 99, 100}, nil /* no constraints */)
// Always record the summary, even if there was an error.
f.TestSummaries = append(f.TestSummaries, summary)
// Now fail if there was an error.
framework.ExpectNoError(resourceViolationError)
})
})
})
}
},
)
}

View File

@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
package debug
import (
"bytes"
@ -27,7 +27,7 @@ import (
clientset "k8s.io/client-go/kubernetes"
// TODO: Remove the following imports (ref: https://github.com/kubernetes/kubernetes/issues/81245)
"k8s.io/kubernetes/test/e2e/framework"
e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
)
@ -109,7 +109,7 @@ func (s *LogsSizeDataSummary) PrintHumanReadable() string {
// PrintJSON returns the summary of log size data with JSON format.
func (s *LogsSizeDataSummary) PrintJSON() string {
return PrettyPrintJSON(*s)
return framework.PrettyPrintJSON(*s)
}
// SummaryKind returns the summary of log size data summary.
@ -158,8 +158,8 @@ func (d *LogsSizeData) addNewData(ip, path string, timestamp time.Time, size int
// NewLogsVerifier creates a new LogsSizeVerifier which will stop when stopChannel is closed
func NewLogsVerifier(c clientset.Interface, stopChannel chan bool) *LogsSizeVerifier {
nodeAddresses, err := e2essh.NodeSSHHosts(c)
ExpectNoError(err)
instanceAddress := APIAddress() + ":22"
framework.ExpectNoError(err)
instanceAddress := framework.APIAddress() + ":22"
workChannel := make(chan WorkItem, len(nodeAddresses)+1)
workers := make([]*LogSizeGatherer, workersNo)
@ -256,13 +256,13 @@ func (g *LogSizeGatherer) Work() bool {
sshResult, err := e2essh.SSH(
fmt.Sprintf("ls -l %v | awk '{print $9, $5}' | tr '\n' ' '", strings.Join(workItem.paths, " ")),
workItem.ip,
TestContext.Provider,
framework.TestContext.Provider,
)
if err != nil {
Logf("Error while trying to SSH to %v, skipping probe. Error: %v", workItem.ip, err)
framework.Logf("Error while trying to SSH to %v, skipping probe. Error: %v", workItem.ip, err)
// In case of repeated error give up.
if workItem.backoffMultiplier >= 128 {
Logf("Failed to ssh to a node %v multiple times in a row. Giving up.", workItem.ip)
framework.Logf("Failed to ssh to a node %v multiple times in a row. Giving up.", workItem.ip)
g.wg.Done()
return false
}
@ -278,7 +278,7 @@ func (g *LogSizeGatherer) Work() bool {
path := results[i]
size, err := strconv.Atoi(results[i+1])
if err != nil {
Logf("Error during conversion to int: %v, skipping data. Error: %v", results[i+1], err)
framework.Logf("Error during conversion to int: %v, skipping data. Error: %v", results[i+1], err)
continue
}
g.data.addNewData(workItem.ip, path, now, size)

View File

@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
package debug
import (
"bufio"
@ -38,7 +38,7 @@ import (
clientset "k8s.io/client-go/kubernetes"
kubeletstatsv1alpha1 "k8s.io/kubelet/pkg/apis/stats/v1alpha1"
// TODO: Remove the following imports (ref: https://github.com/kubernetes/kubernetes/issues/81245)
"k8s.io/kubernetes/test/e2e/framework"
e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
)
@ -91,7 +91,7 @@ func (s *ResourceUsageSummary) PrintHumanReadable() string {
// PrintJSON prints resource usage summary in JSON.
func (s *ResourceUsageSummary) PrintJSON() string {
return PrettyPrintJSON(*s)
return framework.PrettyPrintJSON(*s)
}
// SummaryKind returns string of ResourceUsageSummary
@ -198,13 +198,13 @@ func (w *resourceGatherWorker) singleProbe() {
} else {
nodeUsage, err := getOneTimeResourceUsageOnNode(w.c, w.nodeName, w.probeDuration, func() []string { return w.containerIDs })
if err != nil {
Logf("Error while reading data from %v: %v", w.nodeName, err)
framework.Logf("Error while reading data from %v: %v", w.nodeName, err)
return
}
for k, v := range nodeUsage {
data[k] = v
if w.printVerboseLogs {
Logf("Get container %v usage on node %v. CPUUsageInCores: %v, MemoryUsageInBytes: %v, MemoryWorkingSetInBytes: %v", k, w.nodeName, v.CPUUsageInCores, v.MemoryUsageInBytes, v.MemoryWorkingSetInBytes)
framework.Logf("Get container %v usage on node %v. CPUUsageInCores: %v, MemoryUsageInBytes: %v, MemoryWorkingSetInBytes: %v", k, w.nodeName, v.CPUUsageInCores, v.MemoryUsageInBytes, v.MemoryWorkingSetInBytes)
}
}
}
@ -290,13 +290,13 @@ func getOneTimeResourceUsageOnNode(
// getStatsSummary contacts kubelet for the container information.
func getStatsSummary(c clientset.Interface, nodeName string) (*kubeletstatsv1alpha1.Summary, error) {
ctx, cancel := context.WithTimeout(context.Background(), SingleCallTimeout)
ctx, cancel := context.WithTimeout(context.Background(), framework.SingleCallTimeout)
defer cancel()
data, err := c.CoreV1().RESTClient().Get().
Resource("nodes").
SubResource("proxy").
Name(fmt.Sprintf("%v:%v", nodeName, KubeletPort)).
Name(fmt.Sprintf("%v:%v", nodeName, framework.KubeletPort)).
Suffix("stats/summary").
Do(ctx).Raw()
@ -322,7 +322,7 @@ func removeUint64Ptr(ptr *uint64) uint64 {
func (w *resourceGatherWorker) gather(initialSleep time.Duration) {
defer utilruntime.HandleCrash()
defer w.wg.Done()
defer Logf("Closing worker for %v", w.nodeName)
defer framework.Logf("Closing worker for %v", w.nodeName)
defer func() { w.finished = true }()
select {
case <-time.After(initialSleep):
@ -384,7 +384,7 @@ func nodeHasControlPlanePods(c clientset.Interface, nodeName string) (bool, erro
return false, err
}
if len(podList.Items) < 1 {
Logf("Can't find any pods in namespace %s to grab metrics from", metav1.NamespaceSystem)
framework.Logf("Can't find any pods in namespace %s to grab metrics from", metav1.NamespaceSystem)
}
for _, pod := range podList.Items {
if regKubeScheduler.MatchString(pod.Name) || regKubeControllerManager.MatchString(pod.Name) {
@ -422,7 +422,7 @@ func NewResourceUsageGatherer(c clientset.Interface, options ResourceGathererOpt
if pods == nil {
pods, err = c.CoreV1().Pods("kube-system").List(context.TODO(), metav1.ListOptions{})
if err != nil {
Logf("Error while listing Pods: %v", err)
framework.Logf("Error while listing Pods: %v", err)
return nil, err
}
}
@ -458,7 +458,7 @@ func NewResourceUsageGatherer(c clientset.Interface, options ResourceGathererOpt
}
nodeList, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
if err != nil {
Logf("Error while listing Nodes: %v", err)
framework.Logf("Error while listing Nodes: %v", err)
return nil, err
}
@ -510,7 +510,7 @@ func (g *ContainerResourceGatherer) StartGatheringData() {
// specified resource constraints.
func (g *ContainerResourceGatherer) StopAndSummarize(percentiles []int, constraints map[string]ResourceConstraint) (*ResourceUsageSummary, error) {
close(g.stopCh)
Logf("Closed stop channel. Waiting for %v workers", len(g.workers))
framework.Logf("Closed stop channel. Waiting for %v workers", len(g.workers))
finished := make(chan struct{}, 1)
go func() {
g.workerWg.Wait()
@ -518,7 +518,7 @@ func (g *ContainerResourceGatherer) StopAndSummarize(percentiles []int, constrai
}()
select {
case <-finished:
Logf("Waitgroup finished.")
framework.Logf("Waitgroup finished.")
case <-time.After(2 * time.Minute):
unfinished := make([]string, 0)
for i := range g.workers {
@ -526,11 +526,11 @@ func (g *ContainerResourceGatherer) StopAndSummarize(percentiles []int, constrai
unfinished = append(unfinished, g.workers[i].nodeName)
}
}
Logf("Timed out while waiting for waitgroup, some workers failed to finish: %v", unfinished)
framework.Logf("Timed out while waiting for waitgroup, some workers failed to finish: %v", unfinished)
}
if len(percentiles) == 0 {
Logf("Warning! Empty percentile list for stopAndPrintData.")
framework.Logf("Warning! Empty percentile list for stopAndPrintData.")
return &ResourceUsageSummary{}, fmt.Errorf("Failed to get any resource usage data")
}
data := make(map[int]ResourceUsagePerContainer)
@ -604,7 +604,7 @@ type kubemarkResourceUsage struct {
}
func getMasterUsageByPrefix(prefix string) (string, error) {
sshResult, err := e2essh.SSH(fmt.Sprintf("ps ax -o %%cpu,rss,command | tail -n +2 | grep %v | sed 's/\\s+/ /g'", prefix), APIAddress()+":22", TestContext.Provider)
sshResult, err := e2essh.SSH(fmt.Sprintf("ps ax -o %%cpu,rss,command | tail -n +2 | grep %v | sed 's/\\s+/ /g'", prefix), framework.APIAddress()+":22", framework.TestContext.Provider)
if err != nil {
return "", err
}
@ -617,7 +617,7 @@ func getKubemarkMasterComponentsResourceUsage() map[string]*kubemarkResourceUsag
// Get kubernetes component resource usage
sshResult, err := getMasterUsageByPrefix("kube")
if err != nil {
Logf("Error when trying to SSH to master machine. Skipping probe. %v", err)
framework.Logf("Error when trying to SSH to master machine. Skipping probe. %v", err)
return nil
}
scanner := bufio.NewScanner(strings.NewReader(sshResult))
@ -635,7 +635,7 @@ func getKubemarkMasterComponentsResourceUsage() map[string]*kubemarkResourceUsag
// Get etcd resource usage
sshResult, err = getMasterUsageByPrefix("bin/etcd")
if err != nil {
Logf("Error when trying to SSH to master machine. Skipping probe")
framework.Logf("Error when trying to SSH to master machine. Skipping probe")
return nil
}
scanner = bufio.NewScanner(strings.NewReader(sshResult))

View File

@ -28,7 +28,6 @@ import (
"os"
"path"
"strings"
"sync"
"time"
"k8s.io/apimachinery/pkg/runtime"
@ -50,10 +49,6 @@ import (
admissionapi "k8s.io/pod-security-admission/api"
"github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega"
// TODO: Remove the following imports (ref: https://github.com/kubernetes/kubernetes/issues/81245)
e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
)
const (
@ -61,6 +56,25 @@ const (
DefaultNamespaceDeletionTimeout = 5 * time.Minute
)
var (
// NewFrameworkExtensions lists functions that get called by
// NewFramework after constructing a new framework and after
// calling ginkgo.BeforeEach for the framework.
//
// This can be used by extensions of the core framework to modify
// settings in the framework instance or to add additional callbacks
// with ginkgo.BeforeEach/AfterEach/DeferCleanup.
//
// When a test runs, functions will be invoked in this order:
// - f.BeforeEach
// - all BeforeEaches in the order in which they were defined (first-in-first-out)
// - It callback
// - all AfterEaches in the order in which they were defined
// - all DeferCleanups with the order reversed (first-in-last-out)
// - f.AfterEach
NewFrameworkExtensions []func(f *Framework)
)
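// A minimal sketch, as written from a consuming test package, of the
// "modify settings" use case described above; the enforcement level chosen
// here is only illustrative:
//
//	framework.NewFrameworkExtensions = append(framework.NewFrameworkExtensions,
//		func(f *framework.Framework) {
//			f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged
//		},
//	)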
// Framework supports common operations used by e2e tests; it will keep a client & a namespace for you.
// Eventual goal is to merge this with integration test framework.
type Framework struct {
@ -85,16 +99,6 @@ type Framework struct {
NamespaceDeletionTimeout time.Duration
NamespacePodSecurityEnforceLevel admissionapi.Level // The pod security enforcement level for namespaces to be applied.
gatherer *ContainerResourceGatherer
// Constraints that passed to a check which is executed after data is gathered to
// see if 99% of results are within acceptable bounds. It has to be injected in the test,
// as expectations vary greatly. Constraints are grouped by the container names.
AddonResourceConstraints map[string]ResourceConstraint
logsSizeWaitGroup sync.WaitGroup
logsSizeCloseChannel chan bool
logsSizeVerifier *LogsSizeVerifier
// Flaky operation failures in an e2e test can be captured through this.
flakeReport *FlakeReport
@ -105,13 +109,18 @@ type Framework struct {
// or stdout if ReportDir is not set once test ends.
TestSummaries []TestDataSummary
// Place to keep ClusterAutoscaler metrics from before test in order to compute delta.
clusterAutoscalerMetricsBeforeTest e2emetrics.Collection
// Timeouts contains the custom timeouts used during the test execution.
Timeouts *TimeoutContext
// DumpAllNamespaceInfo is invoked by the framework to record
// information about a namespace after a test failure.
DumpAllNamespaceInfo DumpAllNamespaceInfoAction
}
// DumpAllNamespaceInfoAction is called after each failed test for namespaces
// created for the test.
type DumpAllNamespaceInfoAction func(f *Framework, namespace string)
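// A minimal sketch of wiring this hook from a consuming package; the
// dumpNamespaceInfo helper is a placeholder for whatever dump implementation
// a suite provides:
//
//	framework.NewFrameworkExtensions = append(framework.NewFrameworkExtensions,
//		func(f *framework.Framework) {
//			f.DumpAllNamespaceInfo = func(f *framework.Framework, ns string) {
//				dumpNamespaceInfo(f.ClientSet, ns) // placeholder helper
//			}
//		},
//	)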
// TestDataSummary is an interface for managing test data.
type TestDataSummary interface {
SummaryKind() string
@ -148,14 +157,19 @@ func NewDefaultFramework(baseName string) *Framework {
// NewFramework creates a test framework.
func NewFramework(baseName string, options Options, client clientset.Interface) *Framework {
f := &Framework{
BaseName: baseName,
AddonResourceConstraints: make(map[string]ResourceConstraint),
Options: options,
ClientSet: client,
Timeouts: NewTimeoutContextWithDefaults(),
BaseName: baseName,
Options: options,
ClientSet: client,
Timeouts: NewTimeoutContextWithDefaults(),
}
// The order is important here: if the extension calls ginkgo.BeforeEach
// itself, then it can be sure that f.BeforeEach already ran when its
// own callback gets invoked.
ginkgo.BeforeEach(f.BeforeEach, AnnotatedLocation("set up framework"))
for _, extension := range NewFrameworkExtensions {
extension(f)
}
return f
}
@ -235,59 +249,6 @@ func (f *Framework) BeforeEach() {
f.UniqueName = fmt.Sprintf("%s-%08x", f.BaseName, rand.Int31())
}
if TestContext.GatherKubeSystemResourceUsageData != "false" && TestContext.GatherKubeSystemResourceUsageData != "none" {
var err error
var nodeMode NodesSet
switch TestContext.GatherKubeSystemResourceUsageData {
case "master":
nodeMode = MasterNodes
case "masteranddns":
nodeMode = MasterAndDNSNodes
default:
nodeMode = AllNodes
}
f.gatherer, err = NewResourceUsageGatherer(f.ClientSet, ResourceGathererOptions{
InKubemark: ProviderIs("kubemark"),
Nodes: nodeMode,
ResourceDataGatheringPeriod: 60 * time.Second,
ProbeDuration: 15 * time.Second,
PrintVerboseLogs: false,
}, nil)
if err != nil {
Logf("Error while creating NewResourceUsageGatherer: %v", err)
} else {
go f.gatherer.StartGatheringData()
}
}
if TestContext.GatherLogsSizes {
f.logsSizeWaitGroup = sync.WaitGroup{}
f.logsSizeWaitGroup.Add(1)
f.logsSizeCloseChannel = make(chan bool)
f.logsSizeVerifier = NewLogsVerifier(f.ClientSet, f.logsSizeCloseChannel)
go func() {
f.logsSizeVerifier.Run()
f.logsSizeWaitGroup.Done()
}()
}
gatherMetricsAfterTest := TestContext.GatherMetricsAfterTest == "true" || TestContext.GatherMetricsAfterTest == "master"
if gatherMetricsAfterTest && TestContext.IncludeClusterAutoscalerMetrics {
grabber, err := e2emetrics.NewMetricsGrabber(f.ClientSet, f.KubemarkExternalClusterClientSet, f.ClientConfig(), !ProviderIs("kubemark"), false, false, false, TestContext.IncludeClusterAutoscalerMetrics, false)
if err != nil {
Logf("Failed to create MetricsGrabber (skipping ClusterAutoscaler metrics gathering before test): %v", err)
} else {
f.clusterAutoscalerMetricsBeforeTest, err = grabber.Grab()
if err != nil {
Logf("MetricsGrabber failed to grab CA metrics before test (skipping metrics gathering): %v", err)
} else {
Logf("Gathered ClusterAutoscaler metrics before test")
}
}
}
f.flakeReport = NewFlakeReport()
}
@ -301,7 +262,7 @@ func (f *Framework) dumpNamespaceInfo() {
ginkgo.By("dump namespace information after failure", func() {
if !f.SkipNamespaceCreation {
for _, ns := range f.namespacesToDelete {
DumpAllNamespaceInfo(f.ClientSet, ns.Name)
f.DumpAllNamespaceInfo(f, ns.Name)
}
}
})
@ -367,8 +328,8 @@ func (f *Framework) AfterEach() {
nsDeletionErrors[ns.Name] = err
// Dump namespace if we are unable to delete the namespace and the dump was not already performed.
if !ginkgo.CurrentSpecReport().Failed() && TestContext.DumpLogsOnFailure {
DumpAllNamespaceInfo(f.ClientSet, ns.Name)
if !ginkgo.CurrentSpecReport().Failed() && TestContext.DumpLogsOnFailure && f.DumpAllNamespaceInfo != nil {
f.DumpAllNamespaceInfo(f, ns.Name)
}
} else {
Logf("Namespace %v was already deleted", ns.Name)
@ -399,37 +360,6 @@ func (f *Framework) AfterEach() {
}
}()
if TestContext.GatherKubeSystemResourceUsageData != "false" && TestContext.GatherKubeSystemResourceUsageData != "none" && f.gatherer != nil {
ginkgo.By("Collecting resource usage data")
summary, resourceViolationError := f.gatherer.StopAndSummarize([]int{90, 99, 100}, f.AddonResourceConstraints)
defer ExpectNoError(resourceViolationError)
f.TestSummaries = append(f.TestSummaries, summary)
}
if TestContext.GatherLogsSizes {
ginkgo.By("Gathering log sizes data")
close(f.logsSizeCloseChannel)
f.logsSizeWaitGroup.Wait()
f.TestSummaries = append(f.TestSummaries, f.logsSizeVerifier.GetSummary())
}
if TestContext.GatherMetricsAfterTest != "false" {
ginkgo.By("Gathering metrics")
// Grab apiserver, scheduler, controller-manager metrics and (optionally) nodes' kubelet metrics.
grabMetricsFromKubelets := TestContext.GatherMetricsAfterTest != "master" && !ProviderIs("kubemark")
grabber, err := e2emetrics.NewMetricsGrabber(f.ClientSet, f.KubemarkExternalClusterClientSet, f.ClientConfig(), grabMetricsFromKubelets, true, true, true, TestContext.IncludeClusterAutoscalerMetrics, false)
if err != nil {
Logf("Failed to create MetricsGrabber (skipping metrics gathering): %v", err)
} else {
received, err := grabber.Grab()
if err != nil {
Logf("MetricsGrabber failed to grab some of the metrics: %v", err)
}
(*e2emetrics.ComponentCollection)(&received).ComputeClusterAutoscalerMetricsDelta(f.clusterAutoscalerMetricsBeforeTest)
f.TestSummaries = append(f.TestSummaries, (*e2emetrics.ComponentCollection)(&received))
}
}
TestContext.CloudConfig.Provider.FrameworkAfterEach(f)
// Report any flakes that were observed in the e2e test and reset.
@ -439,13 +369,6 @@ func (f *Framework) AfterEach() {
}
printSummaries(f.TestSummaries, f.BaseName)
// Check whether all nodes are ready after the test.
// This is explicitly done at the very end of the test, to avoid
// e.g. not removing namespace in case of this failure.
if err := AllNodesReady(f.ClientSet, 3*time.Minute); err != nil {
Failf("All nodes should be ready after test, %v", err)
}
}
// DeleteNamespace can be used to delete a namespace. Additionally it can be used to
@ -474,8 +397,8 @@ func (f *Framework) DeleteNamespace(name string) {
}
}()
// if current test failed then we should dump namespace information
if !f.SkipNamespaceCreation && ginkgo.CurrentSpecReport().Failed() && TestContext.DumpLogsOnFailure {
DumpAllNamespaceInfo(f.ClientSet, name)
if !f.SkipNamespaceCreation && ginkgo.CurrentSpecReport().Failed() && TestContext.DumpLogsOnFailure && f.DumpAllNamespaceInfo != nil {
f.DumpAllNamespaceInfo(f, name)
}
}
@ -538,20 +461,6 @@ func (f *Framework) ClientConfig() *rest.Config {
return ret
}
// TestContainerOutput runs the given pod in the given namespace and waits
// for all of the containers in the podSpec to move into the 'Success' status, and tests
// the specified container log against the given expected output using a substring matcher.
func (f *Framework) TestContainerOutput(scenarioName string, pod *v1.Pod, containerIndex int, expectedOutput []string) {
f.testContainerOutputMatcher(scenarioName, pod, containerIndex, expectedOutput, gomega.ContainSubstring)
}
// TestContainerOutputRegexp runs the given pod in the given namespace and waits
// for all of the containers in the podSpec to move into the 'Success' status, and tests
// the specified container log against the given expected output using a regexp matcher.
func (f *Framework) TestContainerOutputRegexp(scenarioName string, pod *v1.Pod, containerIndex int, expectedOutput []string) {
f.testContainerOutputMatcher(scenarioName, pod, containerIndex, expectedOutput, gomega.MatchRegexp)
}
// KubeUser is a struct for managing kubernetes user info.
type KubeUser struct {
Name string `yaml:"name"`

View File

@ -1,106 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package ginkgowrapper wraps Ginkgo Fail and Skip functions to panic
// with structured data instead of a constant string.
package ginkgowrapper
import (
"bufio"
"bytes"
"regexp"
"runtime"
"runtime/debug"
"strings"
"github.com/onsi/ginkgo/v2"
)
// FailurePanic is the value that will be panicked from Fail.
type FailurePanic struct {
Message string // The failure message passed to Fail
Filename string // The filename that is the source of the failure
Line int // The line number of the filename that is the source of the failure
FullStackTrace string // A full stack trace starting at the source of the failure
}
const ginkgoFailurePanic = `
Your test failed.
Ginkgo panics to prevent subsequent assertions from running.
Normally Ginkgo rescues this panic so you shouldn't see it.
But, if you make an assertion in a goroutine, Ginkgo can't capture the panic.
To circumvent this, you should call
defer GinkgoRecover()
at the top of the goroutine that caused this panic.
`
// String makes FailurePanic look like the old Ginkgo panic when printed.
func (FailurePanic) String() string { return ginkgoFailurePanic }
// Fail wraps ginkgo.Fail so that it panics with more useful
// information about the failure. This function will panic with a
// FailurePanic.
func Fail(message string, callerSkip ...int) {
skip := 1
if len(callerSkip) > 0 {
skip += callerSkip[0]
}
_, file, line, _ := runtime.Caller(skip)
fp := FailurePanic{
Message: message,
Filename: file,
Line: line,
FullStackTrace: pruneStack(skip),
}
defer func() {
e := recover()
if e != nil {
panic(fp)
}
}()
ginkgo.Fail(message, skip)
}
// ginkgo adds a lot of test running infrastructure to the stack, so
// we filter those out
var stackSkipPattern = regexp.MustCompile(`onsi/ginkgo/v2`)
func pruneStack(skip int) string {
skip += 2 // one for pruneStack and one for debug.Stack
stack := debug.Stack()
scanner := bufio.NewScanner(bytes.NewBuffer(stack))
var prunedStack []string
// skip the top of the stack
for i := 0; i < 2*skip+1; i++ {
scanner.Scan()
}
for scanner.Scan() {
if stackSkipPattern.Match(scanner.Bytes()) {
scanner.Scan() // these come in pairs
} else {
prunedStack = append(prunedStack, scanner.Text())
scanner.Scan() // these come in pairs
prunedStack = append(prunedStack, scanner.Text())
}
}
return strings.Join(prunedStack, "\n")
}

View File

@ -59,6 +59,7 @@ import (
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/kubernetes/test/e2e/framework"
e2edeployment "k8s.io/kubernetes/test/e2e/framework/deployment"
e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
e2etestfiles "k8s.io/kubernetes/test/e2e/framework/testfiles"
testutils "k8s.io/kubernetes/test/utils"
@ -465,10 +466,10 @@ func (j *TestJig) CreateIngress(manifestPath, ns string, ingAnnotations map[stri
}
j.Logger.Infof("creating replication controller")
framework.RunKubectlOrDieInput(ns, read("rc.yaml"), "create", "-f", "-")
e2ekubectl.RunKubectlOrDieInput(ns, read("rc.yaml"), "create", "-f", "-")
j.Logger.Infof("creating service")
framework.RunKubectlOrDieInput(ns, read("svc.yaml"), "create", "-f", "-")
e2ekubectl.RunKubectlOrDieInput(ns, read("svc.yaml"), "create", "-f", "-")
if len(svcAnnotations) > 0 {
svcList, err := j.Client.CoreV1().Services(ns).List(context.TODO(), metav1.ListOptions{})
framework.ExpectNoError(err)
@ -481,7 +482,7 @@ func (j *TestJig) CreateIngress(manifestPath, ns string, ingAnnotations map[stri
if exists("secret.yaml") {
j.Logger.Infof("creating secret")
framework.RunKubectlOrDieInput(ns, read("secret.yaml"), "create", "-f", "-")
e2ekubectl.RunKubectlOrDieInput(ns, read("secret.yaml"), "create", "-f", "-")
}
j.Logger.Infof("Parsing ingress from %v", filepath.Join(manifestPath, "ing.yaml"))
@ -550,7 +551,7 @@ func (j *TestJig) runCreate(ing *networkingv1.Ingress) (*networkingv1.Ingress, e
if err := ingressToManifest(ing, filePath); err != nil {
return nil, err
}
_, err := framework.RunKubemciWithKubeconfig("create", ing.Name, fmt.Sprintf("--ingress=%s", filePath))
_, err := e2ekubectl.RunKubemciWithKubeconfig("create", ing.Name, fmt.Sprintf("--ingress=%s", filePath))
return ing, err
}
@ -565,14 +566,14 @@ func (j *TestJig) runUpdate(ing *networkingv1.Ingress) (*networkingv1.Ingress, e
if err := ingressToManifest(ing, filePath); err != nil {
return nil, err
}
_, err := framework.RunKubemciWithKubeconfig("create", ing.Name, fmt.Sprintf("--ingress=%s", filePath), "--force")
_, err := e2ekubectl.RunKubemciWithKubeconfig("create", ing.Name, fmt.Sprintf("--ingress=%s", filePath), "--force")
return ing, err
}
// DescribeIng describes information of ingress by running kubectl describe ing.
func DescribeIng(ns string) {
framework.Logf("\nOutput of kubectl describe ing:\n")
desc, _ := framework.RunKubectl(
desc, _ := e2ekubectl.RunKubectl(
ns, "describe", "ing")
framework.Logf(desc)
}
@ -680,7 +681,7 @@ func (j *TestJig) runDelete(ing *networkingv1.Ingress) error {
if err := ingressToManifest(ing, filePath); err != nil {
return err
}
_, err := framework.RunKubemciWithKubeconfig("delete", ing.Name, fmt.Sprintf("--ingress=%s", filePath))
_, err := e2ekubectl.RunKubemciWithKubeconfig("delete", ing.Name, fmt.Sprintf("--ingress=%s", filePath))
return err
}
@ -688,7 +689,7 @@ func (j *TestJig) runDelete(ing *networkingv1.Ingress) error {
// TODO(nikhiljindal): Update this to be able to return hostname as well.
func getIngressAddressFromKubemci(name string) ([]string, error) {
var addresses []string
out, err := framework.RunKubemciCmd("get-status", name)
out, err := e2ekubectl.RunKubemciCmd("get-status", name)
if err != nil {
return addresses, err
}
@ -1032,7 +1033,7 @@ func (cont *NginxIngressController) Init() {
}
framework.Logf("initializing nginx ingress controller")
framework.RunKubectlOrDieInput(cont.Ns, read("rc.yaml"), "create", "-f", "-")
e2ekubectl.RunKubectlOrDieInput(cont.Ns, read("rc.yaml"), "create", "-f", "-")
rc, err := cont.Client.CoreV1().ReplicationControllers(cont.Ns).Get(context.TODO(), "nginx-ingress-controller", metav1.GetOptions{})
framework.ExpectNoError(err)

View File

@ -56,13 +56,27 @@ var _ = ginkgo.Describe("e2e", func() {
f := framework.NewDefaultFramework("test-namespace")
// BeforeEach/AfterEach run in first-in-first-out order.
ginkgo.BeforeEach(func() {
framework.Logf("before #1")
})
ginkgo.BeforeEach(func() {
framework.Logf("before #2")
})
ginkgo.AfterEach(func() {
framework.Logf("after")
framework.Logf("after #1")
if f.ClientSet == nil {
framework.Fail("Wrong order of cleanup operations: framework.AfterEach already ran and cleared f.ClientSet.")
}
})
ginkgo.AfterEach(func() {
framework.Logf("after #2")
})
ginkgo.It("works", func() {
// DeferCleanup invokes in first-in-last-out order
ginkgo.DeferCleanup(func() {
@ -74,6 +88,16 @@ var _ = ginkgo.Describe("e2e", func() {
})
})
func init() {
framework.NewFrameworkExtensions = append(framework.NewFrameworkExtensions,
// This callback runs directly after NewDefaultFramework is done.
func(f *framework.Framework) {
ginkgo.BeforeEach(func() { framework.Logf("extension before") })
ginkgo.AfterEach(func() { framework.Logf("extension after") })
},
)
}
const (
ginkgoOutput = `[BeforeEach] e2e
cleanup_test.go:53
@ -84,16 +108,31 @@ STEP: Creating a kubernetes client
INFO: >>> kubeConfig: yyy/kube.config
STEP: Building a namespace api object, basename test-namespace
INFO: Skipping waiting for service account
[BeforeEach] e2e
cleanup_test.go:95
INFO: extension before
[BeforeEach] e2e
cleanup_test.go:61
INFO: before #1
[BeforeEach] e2e
cleanup_test.go:65
INFO: before #2
[It] works
cleanup_test.go:66
cleanup_test.go:80
[AfterEach] e2e
cleanup_test.go:59
INFO: after
cleanup_test.go:96
INFO: extension after
[AfterEach] e2e
cleanup_test.go:69
INFO: after #1
[AfterEach] e2e
cleanup_test.go:76
INFO: after #2
[DeferCleanup] e2e
cleanup_test.go:71
cleanup_test.go:85
INFO: cleanup first
[DeferCleanup] e2e
cleanup_test.go:68
cleanup_test.go:82
INFO: cleanup last
[DeferCleanup] e2e
dump namespaces | framework.go:xxx

View File

@ -18,7 +18,7 @@ package job
import (
batchv1 "k8s.io/api/batch/v1"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/rand"
"k8s.io/kubernetes/test/e2e/framework"

View File

@ -18,6 +18,7 @@ package job
import (
"context"
batchv1 "k8s.io/api/batch/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

View File

@ -21,7 +21,7 @@ import (
"time"
batchv1 "k8s.io/api/batch/v1"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"

View File

@ -0,0 +1,191 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kubectl
import (
"bytes"
"fmt"
"io"
"net"
"net/url"
"os/exec"
"strings"
"syscall"
"time"
"k8s.io/client-go/tools/clientcmd"
uexec "k8s.io/utils/exec"
"k8s.io/kubernetes/test/e2e/framework"
)
// KubectlBuilder is used to build, customize and execute a kubectl Command.
// Add more functions to customize the builder as needed.
type KubectlBuilder struct {
cmd *exec.Cmd
timeout <-chan time.Time
}
// NewKubectlCommand returns a KubectlBuilder for running kubectl.
func NewKubectlCommand(namespace string, args ...string) *KubectlBuilder {
b := new(KubectlBuilder)
tk := NewTestKubeconfig(framework.TestContext.CertDir, framework.TestContext.Host, framework.TestContext.KubeConfig, framework.TestContext.KubeContext, framework.TestContext.KubectlPath, namespace)
b.cmd = tk.KubectlCmd(args...)
return b
}
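// Usage sketch (ns and manifest are placeholders; the timeout is arbitrary):
//
//	out := NewKubectlCommand(ns, "apply", "-f", "-").
//		WithStdinData(manifest).
//		WithTimeout(time.After(30 * time.Second)).
//		ExecOrDie(ns)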
// WithEnv sets the given environment and returns itself.
func (b *KubectlBuilder) WithEnv(env []string) *KubectlBuilder {
b.cmd.Env = env
return b
}
// WithTimeout sets the given timeout and returns itself.
func (b *KubectlBuilder) WithTimeout(t <-chan time.Time) *KubectlBuilder {
b.timeout = t
return b
}
// WithStdinData sets the given data to stdin and returns itself.
func (b KubectlBuilder) WithStdinData(data string) *KubectlBuilder {
b.cmd.Stdin = strings.NewReader(data)
return &b
}
// WithStdinReader sets the given reader and returns itself.
func (b KubectlBuilder) WithStdinReader(reader io.Reader) *KubectlBuilder {
b.cmd.Stdin = reader
return &b
}
// ExecOrDie runs the kubectl executable or dies if error occurs.
func (b KubectlBuilder) ExecOrDie(namespace string) string {
str, err := b.Exec()
// In case of i/o timeout error, try talking to the apiserver again after 2s before dying.
// Note that we're still dying after retrying so that we can get visibility to triage it further.
if isTimeout(err) {
framework.Logf("Hit i/o timeout error, talking to the server 2s later to see if it's temporary.")
time.Sleep(2 * time.Second)
retryStr, retryErr := RunKubectl(namespace, "version")
framework.Logf("stdout: %q", retryStr)
framework.Logf("err: %v", retryErr)
}
framework.ExpectNoError(err)
return str
}
func isTimeout(err error) bool {
switch err := err.(type) {
case *url.Error:
if err, ok := err.Err.(net.Error); ok && err.Timeout() {
return true
}
case net.Error:
if err.Timeout() {
return true
}
}
return false
}
// Exec runs the kubectl executable.
func (b KubectlBuilder) Exec() (string, error) {
stdout, _, err := b.ExecWithFullOutput()
return stdout, err
}
// ExecWithFullOutput runs the kubectl executable, and returns the stdout and stderr.
func (b KubectlBuilder) ExecWithFullOutput() (string, string, error) {
var stdout, stderr bytes.Buffer
cmd := b.cmd
cmd.Stdout, cmd.Stderr = &stdout, &stderr
framework.Logf("Running '%s %s'", cmd.Path, strings.Join(cmd.Args[1:], " ")) // skip arg[0] as it is printed separately
if err := cmd.Start(); err != nil {
return "", "", fmt.Errorf("error starting %v:\nCommand stdout:\n%v\nstderr:\n%v\nerror:\n%v", cmd, cmd.Stdout, cmd.Stderr, err)
}
errCh := make(chan error, 1)
go func() {
errCh <- cmd.Wait()
}()
select {
case err := <-errCh:
if err != nil {
var rc = 127
if ee, ok := err.(*exec.ExitError); ok {
rc = int(ee.Sys().(syscall.WaitStatus).ExitStatus())
framework.Logf("rc: %d", rc)
}
return stdout.String(), stderr.String(), uexec.CodeExitError{
Err: fmt.Errorf("error running %v:\nCommand stdout:\n%v\nstderr:\n%v\nerror:\n%v", cmd, cmd.Stdout, cmd.Stderr, err),
Code: rc,
}
}
case <-b.timeout:
b.cmd.Process.Kill()
return "", "", fmt.Errorf("timed out waiting for command %v:\nCommand stdout:\n%v\nstderr:\n%v", cmd, cmd.Stdout, cmd.Stderr)
}
framework.Logf("stderr: %q", stderr.String())
framework.Logf("stdout: %q", stdout.String())
return stdout.String(), stderr.String(), nil
}
// RunKubectlOrDie is a convenience wrapper over kubectlBuilder
func RunKubectlOrDie(namespace string, args ...string) string {
return NewKubectlCommand(namespace, args...).ExecOrDie(namespace)
}
// RunKubectl is a convenience wrapper over kubectlBuilder
func RunKubectl(namespace string, args ...string) (string, error) {
return NewKubectlCommand(namespace, args...).Exec()
}
// RunKubectlWithFullOutput is a convenience wrapper over kubectlBuilder
// It will also return the command's stderr.
func RunKubectlWithFullOutput(namespace string, args ...string) (string, string, error) {
return NewKubectlCommand(namespace, args...).ExecWithFullOutput()
}
// RunKubectlOrDieInput is a convenience wrapper over kubectlBuilder that takes input to stdin
func RunKubectlOrDieInput(namespace string, data string, args ...string) string {
return NewKubectlCommand(namespace, args...).WithStdinData(data).ExecOrDie(namespace)
}
// RunKubectlInput is a convenience wrapper over kubectlBuilder that takes input to stdin
func RunKubectlInput(namespace string, data string, args ...string) (string, error) {
return NewKubectlCommand(namespace, args...).WithStdinData(data).Exec()
}
// RunKubemciWithKubeconfig is a convenience wrapper over RunKubemciCmd
func RunKubemciWithKubeconfig(args ...string) (string, error) {
if framework.TestContext.KubeConfig != "" {
args = append(args, "--"+clientcmd.RecommendedConfigPathFlag+"="+framework.TestContext.KubeConfig)
}
return RunKubemciCmd(args...)
}
// RunKubemciCmd is a convenience wrapper over kubectlBuilder to run kubemci.
// It assumes that kubemci exists in PATH.
func RunKubemciCmd(args ...string) (string, error) {
// kubemci is assumed to be in PATH.
kubemci := "kubemci"
b := new(KubectlBuilder)
args = append(args, "--gcp-project="+framework.TestContext.CloudConfig.ProjectID)
b.cmd = exec.Command(kubemci, args...)
return b.Exec()
}

View File

@ -29,7 +29,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
"k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
testutils "k8s.io/kubernetes/test/utils"
@ -109,7 +109,7 @@ func LogFailedContainers(c clientset.Interface, ns string, logFunc func(ftm stri
logFunc("Running kubectl logs on non-ready containers in %v", ns)
for _, pod := range podList.Items {
if res, err := testutils.PodRunningReady(&pod); !res || err != nil {
kubectlLogPod(c, pod, "", e2elog.Logf)
kubectlLogPod(c, pod, "", framework.Logf)
}
}
}
@ -144,7 +144,7 @@ func (tk *TestKubeconfig) WriteFileViaContainer(podName, containerName string, p
command := fmt.Sprintf("echo '%s' > '%s'; sync", contents, path)
stdout, stderr, err := tk.kubectlExecWithRetry(tk.Namespace, podName, containerName, "--", "/bin/sh", "-c", command)
if err != nil {
e2elog.Logf("error running kubectl exec to write file: %v\nstdout=%v\nstderr=%v)", err, string(stdout), string(stderr))
framework.Logf("error running kubectl exec to write file: %v\nstdout=%v\nstderr=%v)", err, string(stdout), string(stderr))
}
return err
}
@ -155,7 +155,7 @@ func (tk *TestKubeconfig) ReadFileViaContainer(podName, containerName string, pa
stdout, stderr, err := tk.kubectlExecWithRetry(tk.Namespace, podName, containerName, "--", "cat", path)
if err != nil {
e2elog.Logf("error running kubectl exec to read file: %v\nstdout=%v\nstderr=%v)", err, string(stdout), string(stderr))
framework.Logf("error running kubectl exec to read file: %v\nstdout=%v\nstderr=%v)", err, string(stdout), string(stderr))
}
return string(stdout), err
}
@ -163,19 +163,19 @@ func (tk *TestKubeconfig) ReadFileViaContainer(podName, containerName string, pa
func (tk *TestKubeconfig) kubectlExecWithRetry(namespace string, podName, containerName string, args ...string) ([]byte, []byte, error) {
for numRetries := 0; numRetries < maxKubectlExecRetries; numRetries++ {
if numRetries > 0 {
e2elog.Logf("Retrying kubectl exec (retry count=%v/%v)", numRetries+1, maxKubectlExecRetries)
framework.Logf("Retrying kubectl exec (retry count=%v/%v)", numRetries+1, maxKubectlExecRetries)
}
stdOutBytes, stdErrBytes, err := tk.kubectlExec(namespace, podName, containerName, args...)
if err != nil {
if strings.Contains(strings.ToLower(string(stdErrBytes)), "i/o timeout") {
// Retry on "i/o timeout" errors
e2elog.Logf("Warning: kubectl exec encountered i/o timeout.\nerr=%v\nstdout=%v\nstderr=%v)", err, string(stdOutBytes), string(stdErrBytes))
framework.Logf("Warning: kubectl exec encountered i/o timeout.\nerr=%v\nstdout=%v\nstderr=%v)", err, string(stdOutBytes), string(stdErrBytes))
continue
}
if strings.Contains(strings.ToLower(string(stdErrBytes)), "container not found") {
// Retry on "container not found" errors
e2elog.Logf("Warning: kubectl exec encountered container not found.\nerr=%v\nstdout=%v\nstderr=%v)", err, string(stdOutBytes), string(stdErrBytes))
framework.Logf("Warning: kubectl exec encountered container not found.\nerr=%v\nstdout=%v\nstderr=%v)", err, string(stdOutBytes), string(stdErrBytes))
time.Sleep(2 * time.Second)
continue
}
@ -200,7 +200,7 @@ func (tk *TestKubeconfig) kubectlExec(namespace string, podName, containerName s
cmd := tk.KubectlCmd(cmdArgs...)
cmd.Stdout, cmd.Stderr = &stdout, &stderr
e2elog.Logf("Running '%s %s'", cmd.Path, strings.Join(cmdArgs, " "))
framework.Logf("Running '%s %s'", cmd.Path, strings.Join(cmdArgs, " "))
err := cmd.Run()
return stdout.Bytes(), stderr.Bytes(), err
}

View File

@ -20,13 +20,12 @@ import (
"bytes"
"fmt"
"regexp"
"runtime"
"runtime/debug"
"time"
"github.com/onsi/ginkgo/v2"
// TODO: Remove the following imports (ref: https://github.com/kubernetes/kubernetes/issues/81245)
e2eginkgowrapper "k8s.io/kubernetes/test/e2e/framework/ginkgowrapper"
)
func nowStamp() string {
@ -48,7 +47,7 @@ func Failf(format string, args ...interface{}) {
msg := fmt.Sprintf(format, args...)
skip := 2
log("FAIL", "%s\n\nFull Stack Trace\n%s", msg, PrunedStack(skip))
e2eginkgowrapper.Fail(nowStamp()+": "+msg, skip)
fail(nowStamp()+": "+msg, skip)
panic("unreachable")
}
@ -60,7 +59,55 @@ func Fail(msg string, callerSkip ...int) {
skip += callerSkip[0]
}
log("FAIL", "%s\n\nFull Stack Trace\n%s", msg, PrunedStack(skip))
e2eginkgowrapper.Fail(nowStamp()+": "+msg, skip)
fail(nowStamp()+": "+msg, skip)
}
// FailurePanic is the value that will be panicked from Fail.
type FailurePanic struct {
Message string // The failure message passed to Fail
Filename string // The filename that is the source of the failure
Line int // The line number of the filename that is the source of the failure
FullStackTrace string // A full stack trace starting at the source of the failure
}
const ginkgoFailurePanic = `
Your test failed.
Ginkgo panics to prevent subsequent assertions from running.
Normally Ginkgo rescues this panic so you shouldn't see it.
But, if you make an assertion in a goroutine, Ginkgo can't capture the panic.
To circumvent this, you should call
defer GinkgoRecover()
at the top of the goroutine that caused this panic.
`
// String makes FailurePanic look like the old Ginkgo panic when printed.
func (FailurePanic) String() string { return ginkgoFailurePanic }
// fail wraps ginkgo.Fail so that it panics with more useful
// information about the failure. This function will panic with a
// FailurePanic.
func fail(message string, callerSkip ...int) {
skip := 1
if len(callerSkip) > 0 {
skip += callerSkip[0]
}
_, file, line, _ := runtime.Caller(skip)
fp := FailurePanic{
Message: message,
Filename: file,
Line: line,
FullStackTrace: string(PrunedStack(skip)),
}
defer func() {
e := recover()
if e != nil {
panic(fp)
}
}()
ginkgo.Fail(message, skip)
}
var codeFilterRE = regexp.MustCompile(`/github.com/onsi/ginkgo/v2/`)

View File

@ -1,54 +0,0 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package log will be removed after switching to use core framework log.
// Do not make further changes here!
package log
import (
"fmt"
"time"
"github.com/onsi/ginkgo/v2"
e2eginkgowrapper "k8s.io/kubernetes/test/e2e/framework/ginkgowrapper"
)
func nowStamp() string {
return time.Now().Format(time.StampMilli)
}
func log(level string, format string, args ...interface{}) {
fmt.Fprintf(ginkgo.GinkgoWriter, nowStamp()+": "+level+": "+format+"\n", args...)
}
// Logf logs the info.
func Logf(format string, args ...interface{}) {
log("INFO", format, args...)
}
// Failf logs the fail info.
func Failf(format string, args ...interface{}) {
FailfWithOffset(1, format, args...)
}
// FailfWithOffset calls "Fail" and logs the error at "offset" levels above its caller
// (for example, for call chain f -> g -> FailfWithOffset(1, ...) error would be logged for "f").
func FailfWithOffset(offset int, format string, args ...interface{}) {
msg := fmt.Sprintf(format, args...)
log("FAIL", msg)
e2eginkgowrapper.Fail(nowStamp()+": "+msg, 1+offset)
}

View File

@ -23,7 +23,7 @@ import (
"time"
appsv1 "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
utilyaml "k8s.io/apimachinery/pkg/util/yaml"

View File

@ -22,7 +22,7 @@ import (
"fmt"
"k8s.io/component-base/metrics/testutil"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
"k8s.io/kubernetes/test/e2e/framework"
)
const (
@ -94,12 +94,12 @@ func (m *ComponentCollection) PrintHumanReadable() string {
func PrettyPrintJSON(metrics interface{}) string {
output := &bytes.Buffer{}
if err := json.NewEncoder(output).Encode(metrics); err != nil {
e2elog.Logf("Error building encoder: %v", err)
framework.Logf("Error building encoder: %v", err)
return ""
}
formatted := &bytes.Buffer{}
if err := json.Indent(formatted, output.Bytes(), "", " "); err != nil {
e2elog.Logf("Error indenting: %v", err)
framework.Logf("Error indenting: %v", err)
return ""
}
return string(formatted.Bytes())

View File

@ -0,0 +1,73 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package metrics
import (
"github.com/onsi/ginkgo/v2"
"k8s.io/kubernetes/test/e2e/framework"
)
func GrabBeforeEach(f *framework.Framework) (result *Collection) {
gatherMetricsAfterTest := framework.TestContext.GatherMetricsAfterTest == "true" || framework.TestContext.GatherMetricsAfterTest == "master"
if !gatherMetricsAfterTest || !framework.TestContext.IncludeClusterAutoscalerMetrics {
return nil
}
ginkgo.By("Gathering metrics before test", func() {
grabber, err := NewMetricsGrabber(f.ClientSet, f.KubemarkExternalClusterClientSet, f.ClientConfig(), !framework.ProviderIs("kubemark"), false, false, false, framework.TestContext.IncludeClusterAutoscalerMetrics, false)
if err != nil {
framework.Logf("Failed to create MetricsGrabber (skipping ClusterAutoscaler metrics gathering before test): %v", err)
return
}
metrics, err := grabber.Grab()
if err != nil {
framework.Logf("MetricsGrabber failed to grab CA metrics before test (skipping metrics gathering): %v", err)
return
}
framework.Logf("Gathered ClusterAutoscaler metrics before test")
result = &metrics
})
return
}
func GrabAfterEach(f *framework.Framework, before *Collection) {
if framework.TestContext.GatherMetricsAfterTest == "false" {
return
}
ginkgo.By("Gathering metrics after test", func() {
// Grab apiserver, scheduler, controller-manager metrics and (optionally) nodes' kubelet metrics.
grabMetricsFromKubelets := framework.TestContext.GatherMetricsAfterTest != "master" && !framework.ProviderIs("kubemark")
grabber, err := NewMetricsGrabber(f.ClientSet, f.KubemarkExternalClusterClientSet, f.ClientConfig(), grabMetricsFromKubelets, true, true, true, framework.TestContext.IncludeClusterAutoscalerMetrics, false)
if err != nil {
framework.Logf("Failed to create MetricsGrabber (skipping metrics gathering): %v", err)
return
}
received, err := grabber.Grab()
if err != nil {
framework.Logf("MetricsGrabber failed to grab some of the metrics: %v", err)
return
}
if before == nil {
before = &Collection{}
}
(*ComponentCollection)(&received).ComputeClusterAutoscalerMetricsDelta(*before)
f.TestSummaries = append(f.TestSummaries, (*ComponentCollection)(&received))
})
}

View File

@ -0,0 +1,39 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package init installs GrabBeforeEach and GrabAfterEach as callbacks
// for gathering data before and after a test.
package init
import (
"github.com/onsi/ginkgo/v2"
"k8s.io/kubernetes/test/e2e/framework"
e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
)
func init() {
framework.NewFrameworkExtensions = append(framework.NewFrameworkExtensions,
func(f *framework.Framework) {
ginkgo.BeforeEach(func() {
metrics := e2emetrics.GrabBeforeEach(f)
ginkgo.DeferCleanup(func() {
e2emetrics.GrabAfterEach(f, metrics)
})
})
},
)
}
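// A test binary opts into this hook by importing the package for its side
// effects only; the import path below is assumed from the package location:
//
//	import _ "k8s.io/kubernetes/test/e2e/framework/metrics/init"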

View File

@ -29,7 +29,7 @@ import (
"k8s.io/apimachinery/pkg/util/sets"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/component-base/metrics/testutil"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
"k8s.io/kubernetes/test/e2e/framework"
)
const (
@ -226,7 +226,7 @@ func HighLatencyKubeletOperations(c clientset.Interface, threshold time.Duration
for _, m := range latencyMetrics {
if m.Latency > threshold {
badMetrics = append(badMetrics, m)
e2elog.Logf("%+v", m)
framework.Logf("%+v", m)
}
}
return badMetrics, nil

View File

@ -41,8 +41,10 @@ import (
clientset "k8s.io/client-go/kubernetes"
coreclientset "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/kubernetes/test/e2e/framework"
e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2epodoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
imageutils "k8s.io/kubernetes/test/utils/image"
@ -174,7 +176,7 @@ type NetworkingTestConfig struct {
// 1 pod per node running the netexecImage.
EndpointPods []*v1.Pod
f *framework.Framework
podClient *framework.PodClient
podClient *e2epod.PodClient
// NodePortService is a Service with Type=NodePort spanning over all
// endpointPods.
NodePortService *v1.Service
@ -248,7 +250,7 @@ func (config *NetworkingTestConfig) diagnoseMissingEndpoints(foundEndpoints sets
continue
}
framework.Logf("\nOutput of kubectl describe pod %v/%v:\n", e.Namespace, e.Name)
desc, _ := framework.RunKubectl(
desc, _ := e2ekubectl.RunKubectl(
e.Namespace, "describe", "pod", e.Name, fmt.Sprintf("--namespace=%v", e.Namespace))
framework.Logf(desc)
}
@ -356,7 +358,7 @@ func (config *NetworkingTestConfig) GetEndpointsFromContainer(protocol, containe
eps := sets.NewString()
for i := 0; i < tries; i++ {
stdout, stderr, err := config.f.ExecShellInPodWithFullOutput(config.TestContainerPod.Name, cmd)
stdout, stderr, err := e2epod.ExecShellInPodWithFullOutput(config.f, config.TestContainerPod.Name, cmd)
if err != nil {
// A failure to kubectl exec counts as a try, not a hard fail.
// Also note that we will keep failing for maxTries in tests where
@ -391,7 +393,7 @@ func (config *NetworkingTestConfig) GetResponseFromContainer(protocol, dialComma
ipPort := net.JoinHostPort(containerIP, strconv.Itoa(containerHTTPPort))
cmd := makeCURLDialCommand(ipPort, dialCommand, protocol, targetIP, targetPort)
stdout, stderr, err := config.f.ExecShellInPodWithFullOutput(config.TestContainerPod.Name, cmd)
stdout, stderr, err := e2epod.ExecShellInPodWithFullOutput(config.f, config.TestContainerPod.Name, cmd)
if err != nil {
return NetexecDialResponse{}, fmt.Errorf("failed to execute %q: %v, stdout: %q, stderr: %q", cmd, err, stdout, stderr)
}
@ -415,7 +417,7 @@ func (config *NetworkingTestConfig) GetHTTPCodeFromTestContainer(path, targetIP
targetIP,
targetPort,
path)
stdout, stderr, err := config.f.ExecShellInPodWithFullOutput(config.TestContainerPod.Name, cmd)
stdout, stderr, err := e2epod.ExecShellInPodWithFullOutput(config.f, config.TestContainerPod.Name, cmd)
// We only care about the status code reported by curl,
// and want to return any other errors, such as cannot execute command in the Pod.
// If curl failed to connect to host, it would exit with code 7, which makes `ExecShellInPodWithFullOutput`
@ -463,7 +465,7 @@ func (config *NetworkingTestConfig) DialFromNode(protocol, targetIP string, targ
filterCmd := fmt.Sprintf("%s | grep -v '^\\s*$'", cmd)
framework.Logf("Going to poll %v on port %v at least %v times, with a maximum of %v tries before failing", targetIP, targetPort, minTries, maxTries)
for i := 0; i < maxTries; i++ {
stdout, stderr, err := config.f.ExecShellInPodWithFullOutput(config.HostTestContainerPod.Name, filterCmd)
stdout, stderr, err := e2epod.ExecShellInPodWithFullOutput(config.f, config.HostTestContainerPod.Name, filterCmd)
if err != nil || len(stderr) > 0 {
// A failure to exec command counts as a try, not a hard fail.
// Also note that we will keep failing for maxTries in tests where
@ -519,7 +521,7 @@ func (config *NetworkingTestConfig) executeCurlCmd(cmd string, expected string)
podName := config.HostTestContainerPod.Name
var msg string
if pollErr := wait.PollImmediate(retryInterval, retryTimeout, func() (bool, error) {
stdout, err := framework.RunHostCmd(config.Namespace, podName, cmd)
stdout, err := e2epodoutput.RunHostCmd(config.Namespace, podName, cmd)
if err != nil {
msg = fmt.Sprintf("failed executing cmd %v in %v/%v: %v", cmd, config.Namespace, podName, err)
framework.Logf(msg)
@ -533,7 +535,7 @@ func (config *NetworkingTestConfig) executeCurlCmd(cmd string, expected string)
return true, nil
}); pollErr != nil {
framework.Logf("\nOutput of kubectl describe pod %v/%v:\n", config.Namespace, podName)
desc, _ := framework.RunKubectl(
desc, _ := e2ekubectl.RunKubectl(
config.Namespace, "describe", "pod", podName, fmt.Sprintf("--namespace=%v", config.Namespace))
framework.Logf("%s", desc)
framework.Failf("Timed out in %v: %v", retryTimeout, msg)
@ -776,7 +778,7 @@ func (config *NetworkingTestConfig) setup(selector map[string]string) {
config.setupCore(selector)
ginkgo.By("Getting node addresses")
framework.ExpectNoError(framework.WaitForAllNodesSchedulable(config.f.ClientSet, 10*time.Minute))
framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(config.f.ClientSet, 10*time.Minute))
nodeList, err := e2enode.GetReadySchedulableNodes(config.f.ClientSet)
framework.ExpectNoError(err)
@ -836,7 +838,7 @@ func (config *NetworkingTestConfig) setup(selector map[string]string) {
}
func (config *NetworkingTestConfig) createNetProxyPods(podName string, selector map[string]string) []*v1.Pod {
framework.ExpectNoError(framework.WaitForAllNodesSchedulable(config.f.ClientSet, 10*time.Minute))
framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(config.f.ClientSet, 10*time.Minute))
nodeList, err := e2enode.GetBoundedReadySchedulableNodes(config.f.ClientSet, maxNetProxyPodsCount)
framework.ExpectNoError(err)
nodes := nodeList.Items
@ -893,9 +895,9 @@ func (config *NetworkingTestConfig) createPod(pod *v1.Pod) *v1.Pod {
return config.getPodClient().Create(pod)
}
func (config *NetworkingTestConfig) getPodClient() *framework.PodClient {
func (config *NetworkingTestConfig) getPodClient() *e2epod.PodClient {
if config.podClient == nil {
config.podClient = config.f.PodClient()
config.podClient = e2epod.NewPodClient(config.f)
}
return config.podClient
}

View File

@ -0,0 +1,165 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package node
import (
"context"
"fmt"
"time"
"github.com/onsi/ginkgo/v2"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
testutils "k8s.io/kubernetes/test/utils"
"k8s.io/kubernetes/test/e2e/framework"
)
const (
// Minimal number of nodes for the cluster to be considered large.
largeClusterThreshold = 100
)
// WaitForAllNodesSchedulable waits up to timeout for all nodes
// (except up to TestContext.AllowedNotReadyNodes of them) to become schedulable.
func WaitForAllNodesSchedulable(c clientset.Interface, timeout time.Duration) error {
if framework.TestContext.AllowedNotReadyNodes == -1 {
return nil
}
framework.Logf("Waiting up to %v for all (but %d) nodes to be schedulable", timeout, framework.TestContext.AllowedNotReadyNodes)
return wait.PollImmediate(
30*time.Second,
timeout,
CheckReadyForTests(c, framework.TestContext.NonblockingTaints, framework.TestContext.AllowedNotReadyNodes, largeClusterThreshold),
)
}
// AddOrUpdateLabelOnNode adds the given label key and value to the given node or updates value.
func AddOrUpdateLabelOnNode(c clientset.Interface, nodeName string, labelKey, labelValue string) {
framework.ExpectNoError(testutils.AddLabelsToNode(c, nodeName, map[string]string{labelKey: labelValue}))
}
// ExpectNodeHasLabel expects that the given node has the given label pair.
func ExpectNodeHasLabel(c clientset.Interface, nodeName string, labelKey string, labelValue string) {
ginkgo.By("verifying the node has the label " + labelKey + " " + labelValue)
node, err := c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
framework.ExpectNoError(err)
framework.ExpectEqual(node.Labels[labelKey], labelValue)
}
// RemoveLabelOffNode is for cleaning up labels temporarily added to a node;
// it won't fail if the target label doesn't exist or has already been removed.
func RemoveLabelOffNode(c clientset.Interface, nodeName string, labelKey string) {
ginkgo.By("removing the label " + labelKey + " off the node " + nodeName)
framework.ExpectNoError(testutils.RemoveLabelOffNode(c, nodeName, []string{labelKey}))
ginkgo.By("verifying the node doesn't have the label " + labelKey)
framework.ExpectNoError(testutils.VerifyLabelsRemoved(c, nodeName, []string{labelKey}))
}
// ExpectNodeHasTaint expects that the node has the given taint.
func ExpectNodeHasTaint(c clientset.Interface, nodeName string, taint *v1.Taint) {
ginkgo.By("verifying the node has the taint " + taint.ToString())
if has, err := NodeHasTaint(c, nodeName, taint); !has {
framework.ExpectNoError(err)
framework.Failf("Failed to find taint %s on node %s", taint.ToString(), nodeName)
}
}
// NodeHasTaint returns true if the node has the given taint, else returns false.
func NodeHasTaint(c clientset.Interface, nodeName string, taint *v1.Taint) (bool, error) {
node, err := c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
if err != nil {
return false, err
}
nodeTaints := node.Spec.Taints
if len(nodeTaints) == 0 || !taintExists(nodeTaints, taint) {
return false, nil
}
return true, nil
}
// AllNodesReady checks whether all registered nodes are ready. Setting -1 on
// framework.TestContext.AllowedNotReadyNodes will bypass the post test node readiness check.
// TODO: we should change the AllNodesReady call in AfterEach to WaitForAllNodesHealthy,
// and figure out how to do it in a configurable way, as we can't expect all setups to run
// default test add-ons.
func AllNodesReady(c clientset.Interface, timeout time.Duration) error {
if err := allNodesReady(c, timeout); err != nil {
return fmt.Errorf("checking for ready nodes: %v", err)
}
return nil
}
func allNodesReady(c clientset.Interface, timeout time.Duration) error {
if framework.TestContext.AllowedNotReadyNodes == -1 {
return nil
}
framework.Logf("Waiting up to %v for all (but %d) nodes to be ready", timeout, framework.TestContext.AllowedNotReadyNodes)
var notReady []*v1.Node
err := wait.PollImmediate(framework.Poll, timeout, func() (bool, error) {
notReady = nil
// It should be OK to list unschedulable Nodes here.
nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
if err != nil {
return false, err
}
for i := range nodes.Items {
node := &nodes.Items[i]
if !IsConditionSetAsExpected(node, v1.NodeReady, true) {
notReady = append(notReady, node)
}
}
// The framework allows up to <TestContext.AllowedNotReadyNodes> nodes to be non-ready,
// e.g. to tolerate incorrect deployment of some small percentage of nodes
// (which we allow in cluster validation). Some nodes that are not
// provisioned correctly at startup will never become ready (e.g. when something
// won't install correctly), so we can't expect them to be ready at any point.
return len(notReady) <= framework.TestContext.AllowedNotReadyNodes, nil
})
if err != nil && err != wait.ErrWaitTimeout {
return err
}
if len(notReady) > framework.TestContext.AllowedNotReadyNodes {
msg := ""
for _, node := range notReady {
msg = fmt.Sprintf("%s, %s", msg, node.Name)
}
return fmt.Errorf("Not ready nodes: %#v", msg)
}
return nil
}
// taintExists checks if the given taint exists in the list of taints. Returns true if it exists, false otherwise.
func taintExists(taints []v1.Taint, taintToFind *v1.Taint) bool {
for _, taint := range taints {
if taint.MatchTaint(taintToFind) {
return true
}
}
return false
}

View File

@ -0,0 +1,37 @@
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package init registers node.AllNodesReady.
package init
import (
"time"
"github.com/onsi/ginkgo/v2"
"k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
)
func init() {
framework.NewFrameworkExtensions = append(framework.NewFrameworkExtensions,
func(f *framework.Framework) {
ginkgo.AfterEach(func() {
e2enode.AllNodesReady(f.ClientSet, 3*time.Minute)
})
},
)
}

View File

@ -0,0 +1,93 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package node
import (
"sync"
"time"
"github.com/onsi/ginkgo/v2"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
)
// NodeKiller is a utility to simulate node failures.
type NodeKiller struct {
config framework.NodeKillerConfig
client clientset.Interface
provider string
}
// NewNodeKiller creates a new NodeKiller.
func NewNodeKiller(config framework.NodeKillerConfig, client clientset.Interface, provider string) *NodeKiller {
config.NodeKillerStopCh = make(chan struct{})
return &NodeKiller{config, client, provider}
}
// Run starts NodeKiller until stopCh is closed.
func (k *NodeKiller) Run(stopCh <-chan struct{}) {
// wait.JitterUntil starts work immediately, so wait first.
time.Sleep(wait.Jitter(k.config.Interval, k.config.JitterFactor))
wait.JitterUntil(func() {
nodes := k.pickNodes()
k.kill(nodes)
}, k.config.Interval, k.config.JitterFactor, true, stopCh)
}
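// Usage sketch (client is a clientset.Interface placeholder; ratio, interval
// and downtime values are illustrative):
//
//	stopCh := make(chan struct{})
//	killer := NewNodeKiller(framework.NodeKillerConfig{
//		FailureRatio:      0.1,
//		Interval:          1 * time.Minute,
//		JitterFactor:      2.0,
//		SimulatedDowntime: 30 * time.Second,
//	}, client, framework.TestContext.Provider)
//	go killer.Run(stopCh)
//	// ... test runs while nodes are periodically killed ...
//	close(stopCh)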
func (k *NodeKiller) pickNodes() []v1.Node {
nodes, err := GetReadySchedulableNodes(k.client)
framework.ExpectNoError(err)
numNodes := int(k.config.FailureRatio * float64(len(nodes.Items)))
nodes, err = GetBoundedReadySchedulableNodes(k.client, numNodes)
framework.ExpectNoError(err)
return nodes.Items
}
func (k *NodeKiller) kill(nodes []v1.Node) {
wg := sync.WaitGroup{}
wg.Add(len(nodes))
for _, node := range nodes {
node := node
go func() {
defer ginkgo.GinkgoRecover()
defer wg.Done()
framework.Logf("Stopping docker and kubelet on %q to simulate failure", node.Name)
err := e2essh.IssueSSHCommand("sudo systemctl stop docker kubelet", k.provider, &node)
if err != nil {
framework.Logf("ERROR while stopping node %q: %v", node.Name, err)
return
}
time.Sleep(k.config.SimulatedDowntime)
framework.Logf("Rebooting %q to repair the node", node.Name)
err = e2essh.IssueSSHCommand("sudo reboot", k.provider, &node)
if err != nil {
framework.Logf("ERROR while rebooting node %q: %v", node.Name, err)
return
}
}()
}
wg.Wait()
}

View File

@ -40,7 +40,7 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
clientretry "k8s.io/client-go/util/retry"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
"k8s.io/kubernetes/test/e2e/framework"
netutil "k8s.io/utils/net"
)
@ -128,7 +128,7 @@ func isNodeConditionSetAsExpected(node *v1.Node, conditionType v1.NodeConditionT
conditionType, node.Name, cond.Status == v1.ConditionTrue, taints)
}
if !silent {
e2elog.Logf(msg)
framework.Logf(msg)
}
return false
}
@ -137,7 +137,7 @@ func isNodeConditionSetAsExpected(node *v1.Node, conditionType v1.NodeConditionT
return true
}
if !silent {
e2elog.Logf("Condition %s of node %s is %v instead of %t. Reason: %v, message: %v",
framework.Logf("Condition %s of node %s is %v instead of %t. Reason: %v, message: %v",
conditionType, node.Name, cond.Status == v1.ConditionTrue, wantTrue, cond.Reason, cond.Message)
}
return false
@ -146,7 +146,7 @@ func isNodeConditionSetAsExpected(node *v1.Node, conditionType v1.NodeConditionT
return true
}
if !silent {
e2elog.Logf("Condition %s of node %s is %v instead of %t. Reason: %v, message: %v",
framework.Logf("Condition %s of node %s is %v instead of %t. Reason: %v, message: %v",
conditionType, node.Name, cond.Status == v1.ConditionTrue, wantTrue, cond.Reason, cond.Message)
}
return false
@ -154,7 +154,7 @@ func isNodeConditionSetAsExpected(node *v1.Node, conditionType v1.NodeConditionT
}
if !silent {
e2elog.Logf("Couldn't find condition %v on node %v", conditionType, node.Name)
framework.Logf("Couldn't find condition %v on node %v", conditionType, node.Name)
}
return false
}
@ -196,7 +196,7 @@ func Filter(nodeList *v1.NodeList, fn func(node v1.Node) bool) {
func TotalRegistered(c clientset.Interface) (int, error) {
nodes, err := waitListSchedulableNodes(c)
if err != nil {
e2elog.Logf("Failed to list nodes: %v", err)
framework.Logf("Failed to list nodes: %v", err)
return 0, err
}
return len(nodes.Items), nil
@ -206,7 +206,7 @@ func TotalRegistered(c clientset.Interface) (int, error) {
func TotalReady(c clientset.Interface) (int, error) {
nodes, err := waitListSchedulableNodes(c)
if err != nil {
e2elog.Logf("Failed to list nodes: %v", err)
framework.Logf("Failed to list nodes: %v", err)
return 0, err
}
@ -220,7 +220,7 @@ func TotalReady(c clientset.Interface) (int, error) {
// GetExternalIP returns node external IP concatenated with port 22 for ssh
// e.g. 1.2.3.4:22
func GetExternalIP(node *v1.Node) (string, error) {
e2elog.Logf("Getting external IP address for %s", node.Name)
framework.Logf("Getting external IP address for %s", node.Name)
host := ""
for _, a := range node.Status.Addresses {
if a.Type == v1.NodeExternalIP && a.Address != "" {
@ -628,7 +628,7 @@ func CreatePodsPerNodeForSimpleApp(c clientset.Interface, namespace, appName str
"app": appName + "-pod",
}
for i, node := range nodes.Items {
e2elog.Logf("%v/%v : Creating container with label app=%v-pod", i, maxCount, appName)
framework.Logf("%v/%v : Creating container with label app=%v-pod", i, maxCount, appName)
_, err := c.CoreV1().Pods(namespace).Create(context.TODO(), &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf(appName+"-pod-%v", i),
@ -884,16 +884,6 @@ func verifyThatTaintIsGone(c clientset.Interface, nodeName string, taint *v1.Tai
// TODO use wrapper methods in expect.go after removing core e2e dependency on node
gomega.ExpectWithOffset(2, err).NotTo(gomega.HaveOccurred())
if taintExists(nodeUpdated.Spec.Taints, taint) {
e2elog.Failf("Failed removing taint " + taint.ToString() + " of the node " + nodeName)
framework.Failf("Failed removing taint " + taint.ToString() + " of the node " + nodeName)
}
}
// taintExists checks if the given taint exists in list of taints. Returns true if exists false otherwise.
func taintExists(taints []v1.Taint, taintToFind *v1.Taint) bool {
for _, taint := range taints {
if taint.MatchTaint(taintToFind) {
return true
}
}
return false
}
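The remaining hunks in this file follow the same mechanical substitution, so one sketch of the call-site shape is enough; the helper name and message below are illustrative only.
package node
import (
	// Previously these helpers logged through the alias
	//   e2elog "k8s.io/kubernetes/test/e2e/framework/log"
	// and now call the parent framework package directly.
	"k8s.io/kubernetes/test/e2e/framework"
)
// logListFailure is illustrative; only the package qualifier of the log call changes.
func logListFailure(err error) {
	// Before: e2elog.Logf("Failed to list nodes: %v", err)
	framework.Logf("Failed to list nodes: %v", err)
}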

View File

@ -0,0 +1,42 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package node
import (
"time"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/framework"
e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
)
// WaitForSSHTunnels waits for an SSH tunnel to a busybox pod to be established.
func WaitForSSHTunnels(namespace string) {
framework.Logf("Waiting for SSH tunnels to establish")
e2ekubectl.RunKubectl(namespace, "run", "ssh-tunnel-test",
"--image=busybox",
"--restart=Never",
"--command", "--",
"echo", "Hello")
defer e2ekubectl.RunKubectl(namespace, "delete", "pod", "ssh-tunnel-test")
// allow up to a minute for new ssh tunnels to establish
wait.PollImmediate(5*time.Second, time.Minute, func() (bool, error) {
_, err := e2ekubectl.RunKubectl(namespace, "logs", "ssh-tunnel-test")
return err == nil, nil
})
}
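A hypothetical call site for the relocated helper; the surrounding function is assumed, only e2enode.WaitForSSHTunnels comes from the file above.
package example
import e2enode "k8s.io/kubernetes/test/e2e/framework/node"
// afterMasterUpgrade is illustrative, not part of this PR.
func afterMasterUpgrade(namespace string) {
	// Blocks for up to a minute while the ssh-tunnel-test pod's logs become readable.
	e2enode.WaitForSSHTunnels(namespace)
}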

View File

@ -27,7 +27,7 @@ import (
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
"k8s.io/kubernetes/test/e2e/framework"
)
const sleepTime = 20 * time.Second
@ -47,7 +47,7 @@ func WaitForReadyNodes(c clientset.Interface, size int, timeout time.Duration) e
// WaitForTotalHealthy checks whether all registered nodes are ready and all required Pods are running on them.
func WaitForTotalHealthy(c clientset.Interface, timeout time.Duration) error {
e2elog.Logf("Waiting up to %v for all nodes to be ready", timeout)
framework.Logf("Waiting up to %v for all nodes to be ready", timeout)
var notReady []v1.Node
var missingPodsPerNode map[string][]string
@ -115,11 +115,11 @@ func WaitForTotalHealthy(c clientset.Interface, timeout time.Duration) error {
// is ConditionTrue; if it's false, it ensures the node condition is in any state
// other than ConditionTrue (e.g. not true or unknown).
func WaitConditionToBe(c clientset.Interface, name string, conditionType v1.NodeConditionType, wantTrue bool, timeout time.Duration) bool {
e2elog.Logf("Waiting up to %v for node %s condition %s to be %t", timeout, name, conditionType, wantTrue)
framework.Logf("Waiting up to %v for node %s condition %s to be %t", timeout, name, conditionType, wantTrue)
for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) {
node, err := c.CoreV1().Nodes().Get(context.TODO(), name, metav1.GetOptions{})
if err != nil {
e2elog.Logf("Couldn't get node %s", name)
framework.Logf("Couldn't get node %s", name)
continue
}
@ -127,7 +127,7 @@ func WaitConditionToBe(c clientset.Interface, name string, conditionType v1.Node
return true
}
}
e2elog.Logf("Node %s didn't reach desired %s condition status (%t) within %v", name, conditionType, wantTrue, timeout)
framework.Logf("Node %s didn't reach desired %s condition status (%t) within %v", name, conditionType, wantTrue, timeout)
return false
}
@ -149,7 +149,7 @@ func CheckReady(c clientset.Interface, size int, timeout time.Duration) ([]v1.No
for start := time.Now(); time.Since(start) < timeout; time.Sleep(sleepTime) {
nodes, err := waitListSchedulableNodes(c)
if err != nil {
e2elog.Logf("Failed to list nodes: %v", err)
framework.Logf("Failed to list nodes: %v", err)
continue
}
numNodes := len(nodes.Items)
@ -163,10 +163,10 @@ func CheckReady(c clientset.Interface, size int, timeout time.Duration) ([]v1.No
numReady := len(nodes.Items)
if numNodes == size && numReady == size {
e2elog.Logf("Cluster has reached the desired number of ready nodes %d", size)
framework.Logf("Cluster has reached the desired number of ready nodes %d", size)
return nodes.Items, nil
}
e2elog.Logf("Waiting for ready nodes %d, current ready %d, not ready nodes %d", size, numReady, numNodes-numReady)
framework.Logf("Waiting for ready nodes %d, current ready %d, not ready nodes %d", size, numReady, numNodes-numReady)
}
return nil, fmt.Errorf("timeout waiting %v for number of ready nodes to be %d", timeout, size)
}
@ -215,7 +215,7 @@ func CheckReadyForTests(c clientset.Interface, nonblockingTaints string, allowed
allNodes, err := c.CoreV1().Nodes().List(context.TODO(), opts)
if err != nil {
var terminalListNodesErr error
e2elog.Logf("Unexpected error listing nodes: %v", err)
framework.Logf("Unexpected error listing nodes: %v", err)
if attempt >= 3 {
terminalListNodesErr = err
}
@ -236,9 +236,9 @@ func CheckReadyForTests(c clientset.Interface, nonblockingTaints string, allowed
if len(nodesNotReadyYet) > 0 {
// In large clusters, log them only every 10th pass.
if len(nodesNotReadyYet) < largeClusterThreshold || attempt%10 == 0 {
e2elog.Logf("Unschedulable nodes= %v, maximum value for starting tests= %v", len(nodesNotReadyYet), allowedNotReadyNodes)
framework.Logf("Unschedulable nodes= %v, maximum value for starting tests= %v", len(nodesNotReadyYet), allowedNotReadyNodes)
for _, node := range nodesNotReadyYet {
e2elog.Logf(" -> Node %s [[[ Ready=%t, Network(available)=%t, Taints=%v, NonblockingTaints=%v ]]]",
framework.Logf(" -> Node %s [[[ Ready=%t, Network(available)=%t, Taints=%v, NonblockingTaints=%v ]]]",
node.Name,
IsConditionSetAsExpectedSilent(&node, v1.NodeReady, true),
IsConditionSetAsExpectedSilent(&node, v1.NodeNetworkUnavailable, false),
@ -250,7 +250,7 @@ func CheckReadyForTests(c clientset.Interface, nonblockingTaints string, allowed
if len(nodesNotReadyYet) > allowedNotReadyNodes {
ready := len(allNodes.Items) - len(nodesNotReadyYet)
remaining := len(nodesNotReadyYet) - allowedNotReadyNodes
e2elog.Logf("==== node wait: %v out of %v nodes are ready, max notReady allowed %v. Need %v more before starting.", ready, len(allNodes.Items), allowedNotReadyNodes, remaining)
framework.Logf("==== node wait: %v out of %v nodes are ready, max notReady allowed %v. Need %v more before starting.", ready, len(allNodes.Items), allowedNotReadyNodes, remaining)
}
}
}
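For orientation, a sketch of how a suite typically consumes these wait helpers; the ten-minute timeout and the helper name are illustrative assumptions.
package example
import (
	"time"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
)
// waitForCluster is illustrative, not part of this PR.
func waitForCluster(c clientset.Interface, size int) {
	if err := e2enode.WaitForReadyNodes(c, size, 10*time.Minute); err != nil {
		framework.Failf("cluster never reached %d ready nodes: %v", size, err)
	}
}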

View File

@ -16,56 +16,6 @@ limitations under the License.
package framework
import (
"fmt"
"os"
"path"
"sync"
"time"
"github.com/onsi/ginkgo/v2"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
// TODO: Remove the following imports (ref: https://github.com/kubernetes/kubernetes/issues/81245)
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
)
const etcdImage = "3.5.5-0"
// EtcdUpgrade upgrades etcd on GCE.
func EtcdUpgrade(targetStorage, targetVersion string) error {
switch TestContext.Provider {
case "gce":
return etcdUpgradeGCE(targetStorage, targetVersion)
default:
return fmt.Errorf("EtcdUpgrade() is not implemented for provider %s", TestContext.Provider)
}
}
func etcdUpgradeGCE(targetStorage, targetVersion string) error {
env := append(
os.Environ(),
"TEST_ETCD_VERSION="+targetVersion,
"STORAGE_BACKEND="+targetStorage,
"TEST_ETCD_IMAGE="+etcdImage)
_, _, err := RunCmdEnv(env, GCEUpgradeScript(), "-l", "-M")
return err
}
// LocationParamGKE returns parameter related to location for gcloud command.
func LocationParamGKE() string {
if TestContext.CloudConfig.MultiMaster {
// GKE Regional Clusters are being tested.
return fmt.Sprintf("--region=%s", TestContext.CloudConfig.Region)
}
return fmt.Sprintf("--zone=%s", TestContext.CloudConfig.Zone)
}
// AppendContainerCommandGroupIfNeeded returns container command group parameter if necessary.
func AppendContainerCommandGroupIfNeeded(args []string) []string {
if TestContext.CloudConfig.Region != "" {
@ -74,114 +24,3 @@ func AppendContainerCommandGroupIfNeeded(args []string) []string {
}
return args
}
// MasterUpgradeGKE upgrades master node to the specified version on GKE.
func MasterUpgradeGKE(namespace string, v string) error {
Logf("Upgrading master to %q", v)
args := []string{
"container",
"clusters",
fmt.Sprintf("--project=%s", TestContext.CloudConfig.ProjectID),
LocationParamGKE(),
"upgrade",
TestContext.CloudConfig.Cluster,
"--master",
fmt.Sprintf("--cluster-version=%s", v),
"--quiet",
}
_, _, err := RunCmd("gcloud", AppendContainerCommandGroupIfNeeded(args)...)
if err != nil {
return err
}
WaitForSSHTunnels(namespace)
return nil
}
// GCEUpgradeScript returns path of script for upgrading on GCE.
func GCEUpgradeScript() string {
if len(TestContext.GCEUpgradeScript) == 0 {
return path.Join(TestContext.RepoRoot, "cluster/gce/upgrade.sh")
}
return TestContext.GCEUpgradeScript
}
// WaitForSSHTunnels waits for establishing SSH tunnel to busybox pod.
func WaitForSSHTunnels(namespace string) {
Logf("Waiting for SSH tunnels to establish")
RunKubectl(namespace, "run", "ssh-tunnel-test",
"--image=busybox",
"--restart=Never",
"--command", "--",
"echo", "Hello")
defer RunKubectl(namespace, "delete", "pod", "ssh-tunnel-test")
// allow up to a minute for new ssh tunnels to establish
wait.PollImmediate(5*time.Second, time.Minute, func() (bool, error) {
_, err := RunKubectl(namespace, "logs", "ssh-tunnel-test")
return err == nil, nil
})
}
// NodeKiller is a utility to simulate node failures.
type NodeKiller struct {
config NodeKillerConfig
client clientset.Interface
provider string
}
// NewNodeKiller creates new NodeKiller.
func NewNodeKiller(config NodeKillerConfig, client clientset.Interface, provider string) *NodeKiller {
config.NodeKillerStopCh = make(chan struct{})
return &NodeKiller{config, client, provider}
}
// Run starts NodeKiller until stopCh is closed.
func (k *NodeKiller) Run(stopCh <-chan struct{}) {
// wait.JitterUntil starts work immediately, so wait first.
time.Sleep(wait.Jitter(k.config.Interval, k.config.JitterFactor))
wait.JitterUntil(func() {
nodes := k.pickNodes()
k.kill(nodes)
}, k.config.Interval, k.config.JitterFactor, true, stopCh)
}
func (k *NodeKiller) pickNodes() []v1.Node {
nodes, err := e2enode.GetReadySchedulableNodes(k.client)
ExpectNoError(err)
numNodes := int(k.config.FailureRatio * float64(len(nodes.Items)))
nodes, err = e2enode.GetBoundedReadySchedulableNodes(k.client, numNodes)
ExpectNoError(err)
return nodes.Items
}
func (k *NodeKiller) kill(nodes []v1.Node) {
wg := sync.WaitGroup{}
wg.Add(len(nodes))
for _, node := range nodes {
node := node
go func() {
defer ginkgo.GinkgoRecover()
defer wg.Done()
Logf("Stopping docker and kubelet on %q to simulate failure", node.Name)
err := e2essh.IssueSSHCommand("sudo systemctl stop docker kubelet", k.provider, &node)
if err != nil {
Logf("ERROR while stopping node %q: %v", node.Name, err)
return
}
time.Sleep(k.config.SimulatedDowntime)
Logf("Rebooting %q to repair the node", node.Name)
err = e2essh.IssueSSHCommand("sudo reboot", k.provider, &node)
if err != nil {
Logf("ERROR while rebooting node %q: %v", node.Name, err)
return
}
}()
}
wg.Wait()
}

View File

@ -27,7 +27,7 @@ import (
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
"k8s.io/kubernetes/test/e2e/framework"
)
const (
@ -59,7 +59,7 @@ func DeletePodWithWait(c clientset.Interface, pod *v1.Pod) error {
// DeletePodWithWaitByName deletes the named and namespaced pod and waits for the pod to be terminated. Resilient to the pod
// not existing.
func DeletePodWithWaitByName(c clientset.Interface, podName, podNamespace string) error {
e2elog.Logf("Deleting pod %q in namespace %q", podName, podNamespace)
framework.Logf("Deleting pod %q in namespace %q", podName, podNamespace)
err := c.CoreV1().Pods(podNamespace).Delete(context.TODO(), podName, metav1.DeleteOptions{})
if err != nil {
if apierrors.IsNotFound(err) {
@ -67,7 +67,7 @@ func DeletePodWithWaitByName(c clientset.Interface, podName, podNamespace string
}
return fmt.Errorf("pod Delete API error: %v", err)
}
e2elog.Logf("Wait up to %v for pod %q to be fully deleted", PodDeleteTimeout, podName)
framework.Logf("Wait up to %v for pod %q to be fully deleted", PodDeleteTimeout, podName)
err = WaitForPodNotFoundInNamespace(c, podName, podNamespace, PodDeleteTimeout)
if err != nil {
return fmt.Errorf("pod %q was not deleted: %v", podName, err)
@ -92,7 +92,7 @@ func DeletePodsWithGracePeriod(c clientset.Interface, pods []v1.Pod, grace int64
// DeletePodWithGracePeriodByName deletes a pod by name and namespace. Resilient to the pod not existing.
func DeletePodWithGracePeriodByName(c clientset.Interface, podName, podNamespace string, grace int64) error {
e2elog.Logf("Deleting pod %q in namespace %q", podName, podNamespace)
framework.Logf("Deleting pod %q in namespace %q", podName, podNamespace)
err := c.CoreV1().Pods(podNamespace).Delete(context.TODO(), podName, *metav1.NewDeleteOptions(grace))
if err != nil {
if apierrors.IsNotFound(err) {

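A minimal, hypothetical cleanup call site for the delete helpers touched above; the helper name is an assumption.
package example
import (
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)
// cleanupPod is illustrative, not part of this PR.
func cleanupPod(c clientset.Interface, podName, namespace string) {
	// DeletePodWithWaitByName tolerates the pod already being gone and waits for full deletion.
	framework.ExpectNoError(e2epod.DeletePodWithWaitByName(c, podName, namespace))
}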
View File

@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
package pod
import (
"bytes"
@ -28,6 +28,7 @@ import (
"k8s.io/client-go/kubernetes/scheme"
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/tools/remotecommand"
"k8s.io/kubernetes/test/e2e/framework"
"github.com/onsi/gomega"
)
@ -49,16 +50,16 @@ type ExecOptions struct {
// ExecWithOptions executes a command in the specified container,
// returning stdout, stderr and error. `options` allowed for
// additional parameters to be passed.
func (f *Framework) ExecWithOptions(options ExecOptions) (string, string, error) {
func ExecWithOptions(f *framework.Framework, options ExecOptions) (string, string, error) {
if !options.Quiet {
Logf("ExecWithOptions %+v", options)
framework.Logf("ExecWithOptions %+v", options)
}
config, err := LoadConfig()
ExpectNoError(err, "failed to load restclient config")
config, err := framework.LoadConfig()
framework.ExpectNoError(err, "failed to load restclient config")
const tty = false
Logf("ExecWithOptions: Clientset creation")
framework.Logf("ExecWithOptions: Clientset creation")
req := f.ClientSet.CoreV1().RESTClient().Post().
Resource("pods").
Name(options.PodName).
@ -75,7 +76,7 @@ func (f *Framework) ExecWithOptions(options ExecOptions) (string, string, error)
}, scheme.ParameterCodec)
var stdout, stderr bytes.Buffer
Logf("ExecWithOptions: execute(POST %s)", req.URL())
framework.Logf("ExecWithOptions: execute(POST %s)", req.URL())
err = execute("POST", req.URL(), config, options.Stdin, &stdout, &stderr, tty)
if options.PreserveWhitespace {
return stdout.String(), stderr.String(), err
@ -85,8 +86,8 @@ func (f *Framework) ExecWithOptions(options ExecOptions) (string, string, error)
// ExecCommandInContainerWithFullOutput executes a command in the
// specified container and return stdout, stderr and error
func (f *Framework) ExecCommandInContainerWithFullOutput(podName, containerName string, cmd ...string) (string, string, error) {
return f.ExecWithOptions(ExecOptions{
func ExecCommandInContainerWithFullOutput(f *framework.Framework, podName, containerName string, cmd ...string) (string, string, error) {
return ExecWithOptions(f, ExecOptions{
Command: cmd,
Namespace: f.Namespace.Name,
PodName: podName,
@ -99,42 +100,42 @@ func (f *Framework) ExecCommandInContainerWithFullOutput(podName, containerName
}
// ExecCommandInContainer executes a command in the specified container.
func (f *Framework) ExecCommandInContainer(podName, containerName string, cmd ...string) string {
stdout, stderr, err := f.ExecCommandInContainerWithFullOutput(podName, containerName, cmd...)
Logf("Exec stderr: %q", stderr)
ExpectNoError(err,
func ExecCommandInContainer(f *framework.Framework, podName, containerName string, cmd ...string) string {
stdout, stderr, err := ExecCommandInContainerWithFullOutput(f, podName, containerName, cmd...)
framework.Logf("Exec stderr: %q", stderr)
framework.ExpectNoError(err,
"failed to execute command in pod %v, container %v: %v",
podName, containerName, err)
return stdout
}
// ExecShellInContainer executes the specified command on the pod's container.
func (f *Framework) ExecShellInContainer(podName, containerName string, cmd string) string {
return f.ExecCommandInContainer(podName, containerName, "/bin/sh", "-c", cmd)
func ExecShellInContainer(f *framework.Framework, podName, containerName string, cmd string) string {
return ExecCommandInContainer(f, podName, containerName, "/bin/sh", "-c", cmd)
}
func (f *Framework) execCommandInPod(podName string, cmd ...string) string {
pod, err := f.PodClient().Get(context.TODO(), podName, metav1.GetOptions{})
ExpectNoError(err, "failed to get pod %v", podName)
func execCommandInPod(f *framework.Framework, podName string, cmd ...string) string {
pod, err := NewPodClient(f).Get(context.TODO(), podName, metav1.GetOptions{})
framework.ExpectNoError(err, "failed to get pod %v", podName)
gomega.Expect(pod.Spec.Containers).NotTo(gomega.BeEmpty())
return f.ExecCommandInContainer(podName, pod.Spec.Containers[0].Name, cmd...)
return ExecCommandInContainer(f, podName, pod.Spec.Containers[0].Name, cmd...)
}
func (f *Framework) execCommandInPodWithFullOutput(podName string, cmd ...string) (string, string, error) {
pod, err := f.PodClient().Get(context.TODO(), podName, metav1.GetOptions{})
ExpectNoError(err, "failed to get pod %v", podName)
func execCommandInPodWithFullOutput(f *framework.Framework, podName string, cmd ...string) (string, string, error) {
pod, err := NewPodClient(f).Get(context.TODO(), podName, metav1.GetOptions{})
framework.ExpectNoError(err, "failed to get pod %v", podName)
gomega.Expect(pod.Spec.Containers).NotTo(gomega.BeEmpty())
return f.ExecCommandInContainerWithFullOutput(podName, pod.Spec.Containers[0].Name, cmd...)
return ExecCommandInContainerWithFullOutput(f, podName, pod.Spec.Containers[0].Name, cmd...)
}
// ExecShellInPod executes the specified command on the pod.
func (f *Framework) ExecShellInPod(podName string, cmd string) string {
return f.execCommandInPod(podName, "/bin/sh", "-c", cmd)
func ExecShellInPod(f *framework.Framework, podName string, cmd string) string {
return execCommandInPod(f, podName, "/bin/sh", "-c", cmd)
}
// ExecShellInPodWithFullOutput executes the specified command on the Pod and returns stdout, stderr and error.
func (f *Framework) ExecShellInPodWithFullOutput(podName string, cmd string) (string, string, error) {
return f.execCommandInPodWithFullOutput(podName, "/bin/sh", "-c", cmd)
func ExecShellInPodWithFullOutput(f *framework.Framework, podName string, cmd string) (string, string, error) {
return execCommandInPodWithFullOutput(f, podName, "/bin/sh", "-c", cmd)
}
func execute(method string, url *url.URL, config *restclient.Config, stdin io.Reader, stdout, stderr io.Writer, tty bool) error {

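For callers, the practical effect is that the former *framework.Framework methods become package functions taking the framework as their first argument; a sketch with an assumed command:
package example
import (
	"k8s.io/kubernetes/test/e2e/framework"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)
// readOSRelease is illustrative, not part of this PR.
func readOSRelease(f *framework.Framework, podName, containerName string) string {
	// Before: f.ExecCommandInContainer(podName, containerName, "cat", "/etc/os-release")
	return e2epod.ExecCommandInContainer(f, podName, containerName, "cat", "/etc/os-release")
}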
View File

@ -0,0 +1,238 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package output
import (
"context"
"fmt"
"strings"
"time"
"github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega"
gomegatypes "github.com/onsi/gomega/types"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
clientset "k8s.io/client-go/kubernetes"
apiv1pod "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/test/e2e/framework"
e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)
// DEPRECATED constants. Use the timeouts in framework.Framework instead.
const (
// Poll is how often to Poll pods, nodes and claims.
Poll = 2 * time.Second
)
// LookForStringInPodExec looks for the given string in the output of a command
// executed in the first container of the specified pod.
func LookForStringInPodExec(ns, podName string, command []string, expectedString string, timeout time.Duration) (result string, err error) {
return LookForStringInPodExecToContainer(ns, podName, "", command, expectedString, timeout)
}
// LookForStringInPodExecToContainer looks for the given string in the output of a
// command executed in the specified pod container, or in the first container if not specified.
func LookForStringInPodExecToContainer(ns, podName, containerName string, command []string, expectedString string, timeout time.Duration) (result string, err error) {
return lookForString(expectedString, timeout, func() string {
args := []string{"exec", podName, fmt.Sprintf("--namespace=%v", ns)}
if len(containerName) > 0 {
args = append(args, fmt.Sprintf("--container=%s", containerName))
}
args = append(args, "--")
args = append(args, command...)
return e2ekubectl.RunKubectlOrDie(ns, args...)
})
}
// lookForString looks for the given string in the output of fn, repeatedly calling fn until
// the timeout is reached or the string is found. It returns the last output and an
// error if the string was not found.
func lookForString(expectedString string, timeout time.Duration, fn func() string) (result string, err error) {
for t := time.Now(); time.Since(t) < timeout; time.Sleep(Poll) {
result = fn()
if strings.Contains(result, expectedString) {
return
}
}
err = fmt.Errorf("Failed to find \"%s\", last result: \"%s\"", expectedString, result)
return
}
// RunHostCmd runs the given cmd in the context of the given pod using `kubectl exec`
// inside of a shell.
func RunHostCmd(ns, name, cmd string) (string, error) {
return e2ekubectl.RunKubectl(ns, "exec", name, "--", "/bin/sh", "-x", "-c", cmd)
}
// RunHostCmdWithFullOutput runs the given cmd in the context of the given pod using `kubectl exec`
// inside of a shell. It will also return the command's stderr.
func RunHostCmdWithFullOutput(ns, name, cmd string) (string, string, error) {
return e2ekubectl.RunKubectlWithFullOutput(ns, "exec", name, "--", "/bin/sh", "-x", "-c", cmd)
}
// RunHostCmdOrDie calls RunHostCmd and dies on error.
func RunHostCmdOrDie(ns, name, cmd string) string {
stdout, err := RunHostCmd(ns, name, cmd)
framework.Logf("stdout: %v", stdout)
framework.ExpectNoError(err)
return stdout
}
// RunHostCmdWithRetries calls RunHostCmd and retries all errors
// until it succeeds or the specified timeout expires.
// This can be used with idempotent commands to deflake transient Node issues.
func RunHostCmdWithRetries(ns, name, cmd string, interval, timeout time.Duration) (string, error) {
start := time.Now()
for {
out, err := RunHostCmd(ns, name, cmd)
if err == nil {
return out, nil
}
if elapsed := time.Since(start); elapsed > timeout {
return out, fmt.Errorf("RunHostCmd still failed after %v: %v", elapsed, err)
}
framework.Logf("Waiting %v to retry failed RunHostCmd: %v", interval, err)
time.Sleep(interval)
}
}
// LookForStringInLog looks for the given string in the log of a specific pod container
func LookForStringInLog(ns, podName, container, expectedString string, timeout time.Duration) (result string, err error) {
return lookForString(expectedString, timeout, func() string {
return e2ekubectl.RunKubectlOrDie(ns, "logs", podName, container)
})
}
// CreateEmptyFileOnPod creates an empty file at the given path on the pod.
func CreateEmptyFileOnPod(namespace string, podName string, filePath string) error {
_, err := e2ekubectl.RunKubectl(namespace, "exec", podName, "--", "/bin/sh", "-c", fmt.Sprintf("touch %s", filePath))
return err
}
// DumpDebugInfo dumps debug info of tests.
func DumpDebugInfo(c clientset.Interface, ns string) {
sl, _ := c.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{LabelSelector: labels.Everything().String()})
for _, s := range sl.Items {
desc, _ := e2ekubectl.RunKubectl(ns, "describe", "po", s.Name)
framework.Logf("\nOutput of kubectl describe %v:\n%v", s.Name, desc)
l, _ := e2ekubectl.RunKubectl(ns, "logs", s.Name, "--tail=100")
framework.Logf("\nLast 100 log lines of %v:\n%v", s.Name, l)
}
}
// MatchContainerOutput creates a pod and waits for all of its containers to exit with success.
// It then checks, using the given matcher, that the output of the specified container matches each entry in expectedOutput.
func MatchContainerOutput(
f *framework.Framework,
pod *v1.Pod,
containerName string,
expectedOutput []string,
matcher func(string, ...interface{}) gomegatypes.GomegaMatcher) error {
ns := pod.ObjectMeta.Namespace
if ns == "" {
ns = f.Namespace.Name
}
podClient := e2epod.PodClientNS(f, ns)
createdPod := podClient.Create(pod)
defer func() {
ginkgo.By("delete the pod")
podClient.DeleteSync(createdPod.Name, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout)
}()
// Wait for client pod to complete.
podErr := e2epod.WaitForPodSuccessInNamespaceTimeout(f.ClientSet, createdPod.Name, ns, f.Timeouts.PodStart)
// Grab its logs. Get host first.
podStatus, err := podClient.Get(context.TODO(), createdPod.Name, metav1.GetOptions{})
if err != nil {
return fmt.Errorf("failed to get pod status: %v", err)
}
if podErr != nil {
// Pod failed. Dump all logs from all containers to see what's wrong
_ = apiv1pod.VisitContainers(&podStatus.Spec, apiv1pod.AllFeatureEnabledContainers(), func(c *v1.Container, containerType apiv1pod.ContainerType) bool {
logs, err := e2epod.GetPodLogs(f.ClientSet, ns, podStatus.Name, c.Name)
if err != nil {
framework.Logf("Failed to get logs from node %q pod %q container %q: %v",
podStatus.Spec.NodeName, podStatus.Name, c.Name, err)
} else {
framework.Logf("Output of node %q pod %q container %q: %s", podStatus.Spec.NodeName, podStatus.Name, c.Name, logs)
}
return true
})
return fmt.Errorf("expected pod %q success: %v", createdPod.Name, podErr)
}
framework.Logf("Trying to get logs from node %s pod %s container %s: %v",
podStatus.Spec.NodeName, podStatus.Name, containerName, err)
// Sometimes the actual containers take a second to get started, try to get logs for 60s
logs, err := e2epod.GetPodLogs(f.ClientSet, ns, podStatus.Name, containerName)
if err != nil {
framework.Logf("Failed to get logs from node %q pod %q container %q. %v",
podStatus.Spec.NodeName, podStatus.Name, containerName, err)
return fmt.Errorf("failed to get logs from %s for %s: %v", podStatus.Name, containerName, err)
}
for _, expected := range expectedOutput {
m := matcher(expected)
matches, err := m.Match(logs)
if err != nil {
return fmt.Errorf("expected %q in container output: %v", expected, err)
} else if !matches {
return fmt.Errorf("expected %q in container output: %s", expected, m.FailureMessage(logs))
}
}
return nil
}
// TestContainerOutput runs the given pod in the given namespace and waits
// for all of the containers in the podSpec to move into the 'Success' status, and tests
// the specified container log against the given expected output using a substring matcher.
func TestContainerOutput(f *framework.Framework, scenarioName string, pod *v1.Pod, containerIndex int, expectedOutput []string) {
TestContainerOutputMatcher(f, scenarioName, pod, containerIndex, expectedOutput, gomega.ContainSubstring)
}
// TestContainerOutputRegexp runs the given pod in the given namespace and waits
// for all of the containers in the podSpec to move into the 'Success' status, and tests
// the specified container log against the given expected output using a regexp matcher.
func TestContainerOutputRegexp(f *framework.Framework, scenarioName string, pod *v1.Pod, containerIndex int, expectedOutput []string) {
TestContainerOutputMatcher(f, scenarioName, pod, containerIndex, expectedOutput, gomega.MatchRegexp)
}
// TestContainerOutputMatcher runs the given pod in the given namespace and waits
// for all of the containers in the podSpec to move into the 'Success' status, and tests
// the specified container log against the given expected output using the given matcher.
func TestContainerOutputMatcher(f *framework.Framework,
scenarioName string,
pod *v1.Pod,
containerIndex int,
expectedOutput []string,
matcher func(string, ...interface{}) gomegatypes.GomegaMatcher) {
ginkgo.By(fmt.Sprintf("Creating a pod to test %v", scenarioName))
if containerIndex < 0 || containerIndex >= len(pod.Spec.Containers) {
framework.Failf("Invalid container index: %d", containerIndex)
}
framework.ExpectNoError(MatchContainerOutput(f, pod, pod.Spec.Containers[containerIndex].Name, expectedOutput, matcher))
}
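A hypothetical use of the new output helpers. The import path is assumed from the package name above (test/e2e/framework/pod/output); the scenario name, pod spec, and expected strings are placeholders.
package example
import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/kubernetes/test/e2e/framework"
	// Import path assumed from "package output" above.
	e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
)
// checkEchoOutput is illustrative, not part of this PR.
func checkEchoOutput(f *framework.Framework, pod *v1.Pod) {
	// Runs the pod, waits for success, and checks container 0's log for the substring.
	e2eoutput.TestContainerOutput(f, "echo scenario", pod, 0, []string{"Hello"})
}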

View File

@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
package pod
import (
"context"
@ -39,9 +39,8 @@ import (
"github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega"
// TODO: Remove the following imports (ref: https://github.com/kubernetes/kubernetes/issues/81245)
"k8s.io/kubernetes/pkg/kubelet/util/format"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"k8s.io/kubernetes/test/e2e/framework"
)
const (
@ -66,10 +65,10 @@ const (
// node e2e test.
var ImagePrePullList sets.String
// PodClient is a convenience method for getting a pod client interface in the framework's namespace,
// NewPodClient is a convenience method for getting a pod client interface in the framework's namespace,
// possibly applying test-suite specific transformations to the pod spec, e.g. for
// node e2e pod scheduling.
func (f *Framework) PodClient() *PodClient {
func NewPodClient(f *framework.Framework) *PodClient {
return &PodClient{
f: f,
PodInterface: f.ClientSet.CoreV1().Pods(f.Namespace.Name),
@ -79,7 +78,7 @@ func (f *Framework) PodClient() *PodClient {
// PodClientNS is a convenience method for getting a pod client interface in an alternative namespace,
// possibly applying test-suite specific transformations to the pod spec, e.g. for
// node e2e pod scheduling.
func (f *Framework) PodClientNS(namespace string) *PodClient {
func PodClientNS(f *framework.Framework, namespace string) *PodClient {
return &PodClient{
f: f,
PodInterface: f.ClientSet.CoreV1().Pods(namespace),
@ -88,7 +87,7 @@ func (f *Framework) PodClientNS(namespace string) *PodClient {
// PodClient is a struct for pod client.
type PodClient struct {
f *Framework
f *framework.Framework
v1core.PodInterface
}
@ -96,7 +95,7 @@ type PodClient struct {
func (c *PodClient) Create(pod *v1.Pod) *v1.Pod {
c.mungeSpec(pod)
p, err := c.PodInterface.Create(context.TODO(), pod, metav1.CreateOptions{})
ExpectNoError(err, "Error creating Pod")
framework.ExpectNoError(err, "Error creating Pod")
return p
}
@ -104,10 +103,10 @@ func (c *PodClient) Create(pod *v1.Pod) *v1.Pod {
func (c *PodClient) CreateSync(pod *v1.Pod) *v1.Pod {
namespace := c.f.Namespace.Name
p := c.Create(pod)
ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(c.f.ClientSet, p.Name, namespace, PodStartTimeout))
framework.ExpectNoError(WaitTimeoutForPodReadyInNamespace(c.f.ClientSet, p.Name, namespace, framework.PodStartTimeout))
// Get the newest pod after it becomes running and ready, some status may change after pod created, such as pod ip.
p, err := c.Get(context.TODO(), p.Name, metav1.GetOptions{})
ExpectNoError(err)
framework.ExpectNoError(err)
return p
}
@ -131,7 +130,7 @@ func (c *PodClient) CreateBatch(pods []*v1.Pod) []*v1.Pod {
// there is any other apierrors. name is the pod name, updateFn is the function updating the
// pod object.
func (c *PodClient) Update(name string, updateFn func(pod *v1.Pod)) {
ExpectNoError(wait.Poll(time.Millisecond*500, time.Second*30, func() (bool, error) {
framework.ExpectNoError(wait.Poll(time.Millisecond*500, time.Second*30, func() (bool, error) {
pod, err := c.PodInterface.Get(context.TODO(), name, metav1.GetOptions{})
if err != nil {
return false, fmt.Errorf("failed to get pod %q: %v", name, err)
@ -139,11 +138,11 @@ func (c *PodClient) Update(name string, updateFn func(pod *v1.Pod)) {
updateFn(pod)
_, err = c.PodInterface.Update(context.TODO(), pod, metav1.UpdateOptions{})
if err == nil {
Logf("Successfully updated pod %q", name)
framework.Logf("Successfully updated pod %q", name)
return true, nil
}
if apierrors.IsConflict(err) {
Logf("Conflicting update to pod %q, re-get and re-update: %v", name, err)
framework.Logf("Conflicting update to pod %q, re-get and re-update: %v", name, err)
return false, nil
}
return false, fmt.Errorf("failed to update pod %q: %v", name, err)
@ -155,22 +154,22 @@ func (c *PodClient) AddEphemeralContainerSync(pod *v1.Pod, ec *v1.EphemeralConta
namespace := c.f.Namespace.Name
podJS, err := json.Marshal(pod)
ExpectNoError(err, "error creating JSON for pod %q", format.Pod(pod))
framework.ExpectNoError(err, "error creating JSON for pod %q", format.Pod(pod))
ecPod := pod.DeepCopy()
ecPod.Spec.EphemeralContainers = append(ecPod.Spec.EphemeralContainers, *ec)
ecJS, err := json.Marshal(ecPod)
ExpectNoError(err, "error creating JSON for pod with ephemeral container %q", format.Pod(pod))
framework.ExpectNoError(err, "error creating JSON for pod with ephemeral container %q", format.Pod(pod))
patch, err := strategicpatch.CreateTwoWayMergePatch(podJS, ecJS, pod)
ExpectNoError(err, "error creating patch to add ephemeral container %q", format.Pod(pod))
framework.ExpectNoError(err, "error creating patch to add ephemeral container %q", format.Pod(pod))
// Clients may optimistically attempt to add an ephemeral container to determine whether the EphemeralContainers feature is enabled.
if _, err := c.Patch(context.TODO(), pod.Name, types.StrategicMergePatchType, patch, metav1.PatchOptions{}, "ephemeralcontainers"); err != nil {
return err
}
ExpectNoError(e2epod.WaitForContainerRunning(c.f.ClientSet, namespace, pod.Name, ec.Name, timeout))
framework.ExpectNoError(WaitForContainerRunning(c.f.ClientSet, namespace, pod.Name, ec.Name, timeout))
return nil
}
@ -180,27 +179,27 @@ func (c *PodClient) DeleteSync(name string, options metav1.DeleteOptions, timeou
namespace := c.f.Namespace.Name
err := c.Delete(context.TODO(), name, options)
if err != nil && !apierrors.IsNotFound(err) {
Failf("Failed to delete pod %q: %v", name, err)
framework.Failf("Failed to delete pod %q: %v", name, err)
}
gomega.Expect(e2epod.WaitForPodToDisappear(c.f.ClientSet, namespace, name, labels.Everything(),
gomega.Expect(WaitForPodToDisappear(c.f.ClientSet, namespace, name, labels.Everything(),
2*time.Second, timeout)).To(gomega.Succeed(), "wait for pod %q to disappear", name)
}
// mungeSpec applies test-suite specific transformations to the pod spec.
func (c *PodClient) mungeSpec(pod *v1.Pod) {
if !TestContext.NodeE2E {
if !framework.TestContext.NodeE2E {
return
}
gomega.Expect(pod.Spec.NodeName).To(gomega.Or(gomega.BeZero(), gomega.Equal(TestContext.NodeName)), "Test misconfigured")
pod.Spec.NodeName = TestContext.NodeName
gomega.Expect(pod.Spec.NodeName).To(gomega.Or(gomega.BeZero(), gomega.Equal(framework.TestContext.NodeName)), "Test misconfigured")
pod.Spec.NodeName = framework.TestContext.NodeName
// Node e2e does not support the default DNSClusterFirst policy. Set
// the policy to DNSDefault, which is configured per node.
pod.Spec.DNSPolicy = v1.DNSDefault
// PrepullImages only works for node e2e now. For cluster e2e, image prepull is not enforced,
// we should not munge ImagePullPolicy for cluster e2e pods.
if !TestContext.PrepullImages {
if !framework.TestContext.PrepullImages {
return
}
// If prepull is enabled, munge the container spec to make sure the images are not pulled
@ -226,7 +225,7 @@ func (c *PodClient) mungeSpec(pod *v1.Pod) {
// TODO(random-liu): Move pod wait function into this file
func (c *PodClient) WaitForSuccess(name string, timeout time.Duration) {
f := c.f
gomega.Expect(e2epod.WaitForPodCondition(f.ClientSet, f.Namespace.Name, name, fmt.Sprintf("%s or %s", v1.PodSucceeded, v1.PodFailed), timeout,
gomega.Expect(WaitForPodCondition(f.ClientSet, f.Namespace.Name, name, fmt.Sprintf("%s or %s", v1.PodSucceeded, v1.PodFailed), timeout,
func(pod *v1.Pod) (bool, error) {
switch pod.Status.Phase {
case v1.PodFailed:
@ -243,7 +242,7 @@ func (c *PodClient) WaitForSuccess(name string, timeout time.Duration) {
// WaitForFinish waits for pod to finish running, regardless of success or failure.
func (c *PodClient) WaitForFinish(name string, timeout time.Duration) {
f := c.f
gomega.Expect(e2epod.WaitForPodCondition(f.ClientSet, f.Namespace.Name, name, fmt.Sprintf("%s or %s", v1.PodSucceeded, v1.PodFailed), timeout,
gomega.Expect(WaitForPodCondition(f.ClientSet, f.Namespace.Name, name, fmt.Sprintf("%s or %s", v1.PodSucceeded, v1.PodFailed), timeout,
func(pod *v1.Pod) (bool, error) {
switch pod.Status.Phase {
case v1.PodFailed:
@ -260,7 +259,7 @@ func (c *PodClient) WaitForFinish(name string, timeout time.Duration) {
// WaitForErrorEventOrSuccess waits for pod to succeed or an error event for that pod.
func (c *PodClient) WaitForErrorEventOrSuccess(pod *v1.Pod) (*v1.Event, error) {
var ev *v1.Event
err := wait.Poll(Poll, PodStartTimeout, func() (bool, error) {
err := wait.Poll(framework.Poll, framework.PodStartTimeout, func() (bool, error) {
evnts, err := c.f.ClientSet.CoreV1().Events(pod.Namespace).Search(scheme.Scheme, pod)
if err != nil {
return false, fmt.Errorf("error in listing events: %s", err)
@ -284,7 +283,7 @@ func (c *PodClient) WaitForErrorEventOrSuccess(pod *v1.Pod) (*v1.Event, error) {
// MatchContainerOutput gets output of a container and match expected regexp in the output.
func (c *PodClient) MatchContainerOutput(name string, containerName string, expectedRegexp string) error {
f := c.f
output, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, name, containerName)
output, err := GetPodLogs(f.ClientSet, f.Namespace.Name, name, containerName)
if err != nil {
return fmt.Errorf("failed to get output for container %q of pod %q", containerName, name)
}
@ -301,6 +300,6 @@ func (c *PodClient) MatchContainerOutput(name string, containerName string, expe
// PodIsReady returns true if the specified pod is ready. Otherwise false.
func (c *PodClient) PodIsReady(name string) bool {
pod, err := c.Get(context.TODO(), name, metav1.GetOptions{})
ExpectNoError(err)
framework.ExpectNoError(err)
return podutils.IsPodReady(pod)
}
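The corresponding change for callers, sketched: pod clients are now built through the package constructor instead of a Framework method. The helper name is an assumption.
package example
import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/kubernetes/test/e2e/framework"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)
// createAndWait is illustrative, not part of this PR.
func createAndWait(f *framework.Framework, pod *v1.Pod) *v1.Pod {
	// Before: f.PodClient().CreateSync(pod)
	return e2epod.NewPodClient(f).CreateSync(pod)
}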

View File

@ -35,7 +35,7 @@ import (
clientset "k8s.io/client-go/kubernetes"
"k8s.io/klog/v2"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
"k8s.io/kubernetes/test/e2e/framework"
testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
)
@ -64,7 +64,7 @@ func expectNoError(err error, explain ...interface{}) {
// (for example, for call chain f -> g -> expectNoErrorWithOffset(1, ...) error would be logged for "f").
func expectNoErrorWithOffset(offset int, err error, explain ...interface{}) {
if err != nil {
e2elog.Logf("Unexpected error occurred: %v", err)
framework.Logf("Unexpected error occurred: %v", err)
}
gomega.ExpectWithOffset(1+offset, err).NotTo(gomega.HaveOccurred(), explain...)
}
@ -122,10 +122,10 @@ func (r ProxyResponseChecker) CheckAllResponses() (done bool, err error) {
if ctx.Err() != nil {
// We may encounter errors here because of a race between the pod readiness and apiserver
// proxy. So, we log the error and retry if this occurs.
e2elog.Logf("Controller %s: Failed to Get from replica %d [%s]: %v\n pod status: %#v", r.controllerName, i+1, pod.Name, err, pod.Status)
framework.Logf("Controller %s: Failed to Get from replica %d [%s]: %v\n pod status: %#v", r.controllerName, i+1, pod.Name, err, pod.Status)
return false, nil
}
e2elog.Logf("Controller %s: Failed to GET from replica %d [%s]: %v\npod status: %#v", r.controllerName, i+1, pod.Name, err, pod.Status)
framework.Logf("Controller %s: Failed to GET from replica %d [%s]: %v\npod status: %#v", r.controllerName, i+1, pod.Name, err, pod.Status)
continue
}
// The response checker expects the pod's name unless !respondName, in
@ -136,20 +136,20 @@ func (r ProxyResponseChecker) CheckAllResponses() (done bool, err error) {
what = "expected"
want := pod.Name
if got != want {
e2elog.Logf("Controller %s: Replica %d [%s] expected response %q but got %q",
framework.Logf("Controller %s: Replica %d [%s] expected response %q but got %q",
r.controllerName, i+1, pod.Name, want, got)
continue
}
} else {
what = "non-empty"
if len(got) == 0 {
e2elog.Logf("Controller %s: Replica %d [%s] expected non-empty response",
framework.Logf("Controller %s: Replica %d [%s] expected non-empty response",
r.controllerName, i+1, pod.Name)
continue
}
}
successes++
e2elog.Logf("Controller %s: Got %s result from replica %d [%s]: %q, %d of %d required successes so far",
framework.Logf("Controller %s: Got %s result from replica %d [%s]: %q, %d of %d required successes so far",
r.controllerName, what, i+1, pod.Name, got, successes, len(r.pods.Items))
}
if successes < len(r.pods.Items) {
@ -183,7 +183,7 @@ func PodsCreatedByLabel(c clientset.Interface, ns, name string, replicas int32,
}
created = append(created, pod)
}
e2elog.Logf("Pod name %s: Found %d pods out of %d", name, len(created), replicas)
framework.Logf("Pod name %s: Found %d pods out of %d", name, len(created), replicas)
if int32(len(created)) == replicas {
pods.Items = created
@ -267,17 +267,17 @@ func LogPodStates(pods []v1.Pod) {
maxGraceW++
// Log pod info. * does space padding, - makes them left-aligned.
e2elog.Logf("%-[1]*[2]s %-[3]*[4]s %-[5]*[6]s %-[7]*[8]s %[9]s",
framework.Logf("%-[1]*[2]s %-[3]*[4]s %-[5]*[6]s %-[7]*[8]s %[9]s",
maxPodW, "POD", maxNodeW, "NODE", maxPhaseW, "PHASE", maxGraceW, "GRACE", "CONDITIONS")
for _, pod := range pods {
grace := ""
if pod.DeletionGracePeriodSeconds != nil {
grace = fmt.Sprintf("%ds", *pod.DeletionGracePeriodSeconds)
}
e2elog.Logf("%-[1]*[2]s %-[3]*[4]s %-[5]*[6]s %-[7]*[8]s %[9]s",
framework.Logf("%-[1]*[2]s %-[3]*[4]s %-[5]*[6]s %-[7]*[8]s %[9]s",
maxPodW, pod.ObjectMeta.Name, maxNodeW, pod.Spec.NodeName, maxPhaseW, pod.Status.Phase, maxGraceW, grace, pod.Status.Conditions)
}
e2elog.Logf("") // Final empty line helps for readability.
framework.Logf("") // Final empty line helps for readability.
}
// logPodTerminationMessages logs termination messages for failing pods. It's a short snippet (much smaller than full logs), but it often shows
@ -286,12 +286,12 @@ func logPodTerminationMessages(pods []v1.Pod) {
for _, pod := range pods {
for _, status := range pod.Status.InitContainerStatuses {
if status.LastTerminationState.Terminated != nil && len(status.LastTerminationState.Terminated.Message) > 0 {
e2elog.Logf("%s[%s].initContainer[%s]=%s", pod.Name, pod.Namespace, status.Name, status.LastTerminationState.Terminated.Message)
framework.Logf("%s[%s].initContainer[%s]=%s", pod.Name, pod.Namespace, status.Name, status.LastTerminationState.Terminated.Message)
}
}
for _, status := range pod.Status.ContainerStatuses {
if status.LastTerminationState.Terminated != nil && len(status.LastTerminationState.Terminated.Message) > 0 {
e2elog.Logf("%s[%s].container[%s]=%s", pod.Name, pod.Namespace, status.Name, status.LastTerminationState.Terminated.Message)
framework.Logf("%s[%s].container[%s]=%s", pod.Name, pod.Namespace, status.Name, status.LastTerminationState.Terminated.Message)
}
}
}
@ -330,21 +330,21 @@ func logPodLogs(c clientset.Interface, namespace string, pods []v1.Pod, reportDi
for _, container := range pod.Spec.Containers {
logs, err := getPodLogsInternal(c, namespace, pod.Name, container.Name, false, nil, &tailLen)
if err != nil {
e2elog.Logf("Unable to fetch %s/%s/%s logs: %v", pod.Namespace, pod.Name, container.Name, err)
framework.Logf("Unable to fetch %s/%s/%s logs: %v", pod.Namespace, pod.Name, container.Name, err)
continue
}
logDir := filepath.Join(reportDir, namespace, pod.Name, container.Name)
err = os.MkdirAll(logDir, 0755)
if err != nil {
e2elog.Logf("Unable to create path '%s'. Err: %v", logDir, err)
framework.Logf("Unable to create path '%s'. Err: %v", logDir, err)
continue
}
logPath := filepath.Join(logDir, "logs.txt")
err = os.WriteFile(logPath, []byte(logs), 0644)
if err != nil {
e2elog.Logf("Could not write the container logs in: %s. Err: %v", logPath, err)
framework.Logf("Could not write the container logs in: %s. Err: %v", logPath, err)
}
}
}
@ -354,7 +354,7 @@ func logPodLogs(c clientset.Interface, namespace string, pods []v1.Pod, reportDi
func DumpAllPodInfoForNamespace(c clientset.Interface, namespace, reportDir string) {
pods, err := c.CoreV1().Pods(namespace).List(context.TODO(), metav1.ListOptions{})
if err != nil {
e2elog.Logf("unable to fetch pod debug info: %v", err)
framework.Logf("unable to fetch pod debug info: %v", err)
}
LogPodStates(pods.Items)
logPodTerminationMessages(pods.Items)
@ -443,7 +443,7 @@ func newExecPodSpec(ns, generateName string) *v1.Pod {
// CreateExecPodOrFail creates an agnhost pause pod used as a vessel for kubectl exec commands.
// Pod name is uniquely generated.
func CreateExecPodOrFail(client clientset.Interface, ns, generateName string, tweak func(*v1.Pod)) *v1.Pod {
e2elog.Logf("Creating new exec pod")
framework.Logf("Creating new exec pod")
pod := newExecPodSpec(ns, generateName)
if tweak != nil {
tweak(pod)
@ -495,7 +495,7 @@ func CheckPodsRunningReadyOrSucceeded(c clientset.Interface, ns string, podNames
// in namespace ns are in the condition, using c and waiting at most timeout.
func checkPodsCondition(c clientset.Interface, ns string, podNames []string, timeout time.Duration, condition podCondition, desc string) bool {
np := len(podNames)
e2elog.Logf("Waiting up to %v for %d pods to be %s: %s", timeout, np, desc, podNames)
framework.Logf("Waiting up to %v for %d pods to be %s: %s", timeout, np, desc, podNames)
type waitPodResult struct {
success bool
podName string
@ -513,11 +513,11 @@ func checkPodsCondition(c clientset.Interface, ns string, podNames []string, tim
for range podNames {
res := <-result
if !res.success {
e2elog.Logf("Pod %[1]s failed to be %[2]s.", res.podName, desc)
framework.Logf("Pod %[1]s failed to be %[2]s.", res.podName, desc)
success = false
}
}
e2elog.Logf("Wanted all %d pods to be %s. Result: %t. Pods: %v", np, desc, success, podNames)
framework.Logf("Wanted all %d pods to be %s. Result: %t. Pods: %v", np, desc, success, podNames)
return success
}
@ -600,7 +600,7 @@ func GetPodSecretUpdateTimeout(c clientset.Interface) time.Duration {
// secret(configmap) that's based on cluster size + additional time as a fudge factor.
secretTTL, err := getNodeTTLAnnotationValue(c)
if err != nil {
e2elog.Logf("Couldn't get node TTL annotation (using default value of 0): %v", err)
framework.Logf("Couldn't get node TTL annotation (using default value of 0): %v", err)
}
podLogTimeout := 240*time.Second + secretTTL
return podLogTimeout

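A hypothetical call site for the exec-pod helper touched above; the generateName prefix and helper name are placeholders.
package example
import (
	clientset "k8s.io/client-go/kubernetes"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)
// newExecPod is illustrative, not part of this PR.
func newExecPod(c clientset.Interface, namespace string) string {
	// The tweak callback may be nil when no spec customization is needed.
	pod := e2epod.CreateExecPodOrFail(c, namespace, "exec-pod-", nil)
	return pod.Name
}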
View File

@ -33,7 +33,7 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubectl/pkg/util/podutils"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
"k8s.io/kubernetes/test/e2e/framework"
testutils "k8s.io/kubernetes/test/utils"
)
@ -186,7 +186,7 @@ func WaitForPodsRunningReady(c clientset.Interface, ns string, minPods, allowedN
ignoreSelector := labels.SelectorFromSet(map[string]string{})
start := time.Now()
e2elog.Logf("Waiting up to %v for all pods (need at least %d) in namespace '%s' to be running and ready",
framework.Logf("Waiting up to %v for all pods (need at least %d) in namespace '%s' to be running and ready",
timeout, minPods, ns)
var ignoreNotReady bool
badPods := []v1.Pod{}
@ -241,25 +241,25 @@ func WaitForPodsRunningReady(c clientset.Interface, ns string, minPods, allowedN
case res && err == nil:
nOk++
case pod.Status.Phase == v1.PodSucceeded:
e2elog.Logf("The status of Pod %s is Succeeded, skipping waiting", pod.ObjectMeta.Name)
framework.Logf("The status of Pod %s is Succeeded, skipping waiting", pod.ObjectMeta.Name)
// it doesn't make sense to wait for this pod
continue
case pod.Status.Phase != v1.PodFailed:
e2elog.Logf("The status of Pod %s is %s (Ready = false), waiting for it to be either Running (with Ready = true) or Failed", pod.ObjectMeta.Name, pod.Status.Phase)
framework.Logf("The status of Pod %s is %s (Ready = false), waiting for it to be either Running (with Ready = true) or Failed", pod.ObjectMeta.Name, pod.Status.Phase)
notReady++
badPods = append(badPods, pod)
default:
if metav1.GetControllerOf(&pod) == nil {
e2elog.Logf("Pod %s is Failed, but it's not controlled by a controller", pod.ObjectMeta.Name)
framework.Logf("Pod %s is Failed, but it's not controlled by a controller", pod.ObjectMeta.Name)
badPods = append(badPods, pod)
}
// Ignore failed pods that are controlled by some controller.
}
}
e2elog.Logf("%d / %d pods in namespace '%s' are running and ready (%d seconds elapsed)",
framework.Logf("%d / %d pods in namespace '%s' are running and ready (%d seconds elapsed)",
nOk, len(podList.Items), ns, int(time.Since(start).Seconds()))
e2elog.Logf("expected %d pod replicas in namespace '%s', %d are Running and Ready.", replicas, ns, replicaOk)
framework.Logf("expected %d pod replicas in namespace '%s', %d are Running and Ready.", replicas, ns, replicaOk)
if replicaOk == replicas && nOk >= minPods && len(badPods) == 0 {
return true, nil
@ -271,7 +271,7 @@ func WaitForPodsRunningReady(c clientset.Interface, ns string, minPods, allowedN
if !ignoreNotReady {
return errorBadPodsStates(badPods, desiredPods, ns, "RUNNING and READY", timeout, lastAPIError)
}
e2elog.Logf("Number of not-ready pods (%d) is below the allowed threshold (%d).", notReady, allowedNotReadyPods)
framework.Logf("Number of not-ready pods (%d) is below the allowed threshold (%d).", notReady, allowedNotReadyPods)
}
return nil
}
@ -280,7 +280,7 @@ func WaitForPodsRunningReady(c clientset.Interface, ns string, minPods, allowedN
// If the condition callback returns an error that matches FinalErr (checked with IsFinal),
// then polling aborts early.
func WaitForPodCondition(c clientset.Interface, ns, podName, conditionDesc string, timeout time.Duration, condition podCondition) error {
e2elog.Logf("Waiting up to %v for pod %q in namespace %q to be %q", timeout, podName, ns, conditionDesc)
framework.Logf("Waiting up to %v for pod %q in namespace %q to be %q", timeout, podName, ns, conditionDesc)
var (
lastPodError error
lastPod *v1.Pod
@ -295,15 +295,15 @@ func WaitForPodCondition(c clientset.Interface, ns, podName, conditionDesc strin
lastPod = pod // Don't overwrite if an error occurs after successfully retrieving.
// log now so that current pod info is reported before calling `condition()`
e2elog.Logf("Pod %q: Phase=%q, Reason=%q, readiness=%t. Elapsed: %v",
framework.Logf("Pod %q: Phase=%q, Reason=%q, readiness=%t. Elapsed: %v",
podName, pod.Status.Phase, pod.Status.Reason, podutils.IsPodReady(pod), time.Since(start))
if done, err := condition(pod); done {
if err == nil {
e2elog.Logf("Pod %q satisfied condition %q", podName, conditionDesc)
framework.Logf("Pod %q satisfied condition %q", podName, conditionDesc)
}
return true, err
} else if err != nil {
e2elog.Logf("Error evaluating pod condition %s: %v", conditionDesc, err)
framework.Logf("Error evaluating pod condition %s: %v", conditionDesc, err)
if IsFinal(err) {
return false, err
}
@ -329,7 +329,7 @@ func WaitForPodCondition(c clientset.Interface, ns, podName, conditionDesc strin
// WaitForAllPodsCondition waits for the listed pods to match the given condition.
// To succeed, at least minPods must be listed, and all listed pods must match the condition.
func WaitForAllPodsCondition(c clientset.Interface, ns string, opts metav1.ListOptions, minPods int, conditionDesc string, timeout time.Duration, condition podCondition) (*v1.PodList, error) {
e2elog.Logf("Waiting up to %v for at least %d pods in namespace %s to be %s", timeout, minPods, ns, conditionDesc)
framework.Logf("Waiting up to %v for at least %d pods in namespace %s to be %s", timeout, minPods, ns, conditionDesc)
var pods *v1.PodList
matched := 0
err := wait.PollImmediate(poll, timeout, func() (done bool, err error) {
@ -338,7 +338,7 @@ func WaitForAllPodsCondition(c clientset.Interface, ns string, opts metav1.ListO
return handleWaitingAPIError(err, true, "listing pods")
}
if len(pods.Items) < minPods {
e2elog.Logf("found %d pods, waiting for at least %d", len(pods.Items), minPods)
framework.Logf("found %d pods, waiting for at least %d", len(pods.Items), minPods)
return false, nil
}
@ -356,7 +356,7 @@ func WaitForAllPodsCondition(c clientset.Interface, ns string, opts metav1.ListO
if len(nonMatchingPods) <= 0 {
return true, nil // All pods match.
}
e2elog.Logf("%d pods are not %s: %v", len(nonMatchingPods), conditionDesc, nonMatchingPods)
framework.Logf("%d pods are not %s: %v", len(nonMatchingPods), conditionDesc, nonMatchingPods)
return false, nil
})
return pods, maybeTimeoutError(err, "waiting for at least %d pods to be %s (matched %d)", minPods, conditionDesc, matched)
@ -481,16 +481,16 @@ func WaitTimeoutForPodReadyInNamespace(c clientset.Interface, podName, namespace
return WaitForPodCondition(c, namespace, podName, "running and ready", timeout, func(pod *v1.Pod) (bool, error) {
switch pod.Status.Phase {
case v1.PodFailed:
e2elog.Logf("The phase of Pod %s is %s which is unexpected, pod status: %#v", pod.Name, pod.Status.Phase, pod.Status)
framework.Logf("The phase of Pod %s is %s which is unexpected, pod status: %#v", pod.Name, pod.Status.Phase, pod.Status)
return false, errPodFailed
case v1.PodSucceeded:
e2elog.Logf("The phase of Pod %s is %s which is unexpected, pod status: %#v", pod.Name, pod.Status.Phase, pod.Status)
framework.Logf("The phase of Pod %s is %s which is unexpected, pod status: %#v", pod.Name, pod.Status.Phase, pod.Status)
return false, errPodCompleted
case v1.PodRunning:
e2elog.Logf("The phase of Pod %s is %s (Ready = %v)", pod.Name, pod.Status.Phase, podutils.IsPodReady(pod))
framework.Logf("The phase of Pod %s is %s (Ready = %v)", pod.Name, pod.Status.Phase, podutils.IsPodReady(pod))
return podutils.IsPodReady(pod), nil
}
e2elog.Logf("The phase of Pod %s is %s, waiting for it to be Running (with Ready = true)", pod.Name, pod.Status.Phase)
framework.Logf("The phase of Pod %s is %s, waiting for it to be Running (with Ready = true)", pod.Name, pod.Status.Phase)
return false, nil
})
}
@ -551,7 +551,7 @@ func WaitForPodNotFoundInNamespace(c clientset.Interface, podName, ns string, ti
func WaitForPodToDisappear(c clientset.Interface, ns, podName string, label labels.Selector, interval, timeout time.Duration) error {
var lastPod *v1.Pod
err := wait.PollImmediate(interval, timeout, func() (bool, error) {
e2elog.Logf("Waiting for pod %s to disappear", podName)
framework.Logf("Waiting for pod %s to disappear", podName)
options := metav1.ListOptions{LabelSelector: label.String()}
pods, err := c.CoreV1().Pods(ns).List(context.TODO(), options)
if err != nil {
@ -560,14 +560,14 @@ func WaitForPodToDisappear(c clientset.Interface, ns, podName string, label labe
found := false
for i, pod := range pods.Items {
if pod.Name == podName {
e2elog.Logf("Pod %s still exists", podName)
framework.Logf("Pod %s still exists", podName)
found = true
lastPod = &(pods.Items[i])
break
}
}
if !found {
e2elog.Logf("Pod %s no longer exists", podName)
framework.Logf("Pod %s no longer exists", podName)
return true, nil
}
return false, nil
@ -644,7 +644,7 @@ func WaitForNRestartablePods(ps *testutils.PodStore, expect int, timeout time.Du
pods = FilterNonRestartablePods(allPods)
if len(pods) != expect {
errLast = fmt.Errorf("expected to find %d pods but found only %d", expect, len(pods))
e2elog.Logf("Error getting pods: %v", errLast)
framework.Logf("Error getting pods: %v", errLast)
return false, nil
}
return true, nil
@ -734,17 +734,17 @@ func WaitForContainerRunning(c clientset.Interface, namespace, podName, containe
func handleWaitingAPIError(err error, retryNotFound bool, taskFormat string, taskArgs ...interface{}) (bool, error) {
taskDescription := fmt.Sprintf(taskFormat, taskArgs...)
if retryNotFound && apierrors.IsNotFound(err) {
e2elog.Logf("Ignoring NotFound error while " + taskDescription)
framework.Logf("Ignoring NotFound error while " + taskDescription)
return false, nil
}
if retry, delay := shouldRetry(err); retry {
e2elog.Logf("Retryable error while %s, retrying after %v: %v", taskDescription, delay, err)
framework.Logf("Retryable error while %s, retrying after %v: %v", taskDescription, delay, err)
if delay > 0 {
time.Sleep(delay)
}
return false, nil
}
e2elog.Logf("Encountered non-retryable error while %s: %v", taskDescription, err)
framework.Logf("Encountered non-retryable error while %s: %v", taskDescription, err)
return false, err
}
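handleWaitingAPIError is the shared retry gate for API errors inside these poll loops. A minimal caller sketch, mirroring the listing call above; the pod-get task and the surrounding names (poll, timeout, clientset) are assumptions, not part of this diff:

err := wait.PollImmediate(poll, timeout, func() (bool, error) {
	pod, err := c.CoreV1().Pods(ns).Get(context.TODO(), podName, metav1.GetOptions{})
	if err != nil {
		// Retry NotFound and transient errors; abort on anything else.
		return handleWaitingAPIError(err, true, "getting pod %s", podName)
	}
	return pod.Status.Phase == v1.PodRunning, nil
})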

View File

@ -0,0 +1,90 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package providers
import (
"fmt"
"os"
"path"
"k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
)
const etcdImage = "3.5.5-0"
// EtcdUpgrade upgrades etcd on GCE.
func EtcdUpgrade(targetStorage, targetVersion string) error {
switch framework.TestContext.Provider {
case "gce":
return etcdUpgradeGCE(targetStorage, targetVersion)
default:
return fmt.Errorf("EtcdUpgrade() is not implemented for provider %s", framework.TestContext.Provider)
}
}
func etcdUpgradeGCE(targetStorage, targetVersion string) error {
env := append(
os.Environ(),
"TEST_ETCD_VERSION="+targetVersion,
"STORAGE_BACKEND="+targetStorage,
"TEST_ETCD_IMAGE="+etcdImage)
_, _, err := framework.RunCmdEnv(env, GCEUpgradeScript(), "-l", "-M")
return err
}
// LocationParamGKE returns the location parameter for the gcloud command.
func LocationParamGKE() string {
if framework.TestContext.CloudConfig.MultiMaster {
// GKE Regional Clusters are being tested.
return fmt.Sprintf("--region=%s", framework.TestContext.CloudConfig.Region)
}
return fmt.Sprintf("--zone=%s", framework.TestContext.CloudConfig.Zone)
}
// MasterUpgradeGKE upgrades the master node to the specified version on GKE.
func MasterUpgradeGKE(namespace string, v string) error {
framework.Logf("Upgrading master to %q", v)
args := []string{
"container",
"clusters",
fmt.Sprintf("--project=%s", framework.TestContext.CloudConfig.ProjectID),
LocationParamGKE(),
"upgrade",
framework.TestContext.CloudConfig.Cluster,
"--master",
fmt.Sprintf("--cluster-version=%s", v),
"--quiet",
}
_, _, err := framework.RunCmd("gcloud", framework.AppendContainerCommandGroupIfNeeded(args)...)
if err != nil {
return err
}
e2enode.WaitForSSHTunnels(namespace)
return nil
}
// GCEUpgradeScript returns the path of the script used for upgrading on GCE.
func GCEUpgradeScript() string {
if len(framework.TestContext.GCEUpgradeScript) == 0 {
return path.Join(framework.TestContext.RepoRoot, "cluster/gce/upgrade.sh")
}
return framework.TestContext.GCEUpgradeScript
}
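A hedged usage sketch for these provider helpers, as an upgrade test might call them; the storage backend, version strings, and ns are illustrative assumptions only:

// Upgrade etcd on GCE, then the GKE master; fail the test on error.
if err := EtcdUpgrade("etcd3", "3.5.5"); err != nil {
	framework.Failf("etcd upgrade failed: %v", err)
}
if err := MasterUpgradeGKE(ns, "1.25.2"); err != nil {
	framework.Failf("master upgrade failed: %v", err)
}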

View File

@ -26,7 +26,7 @@ import (
"k8s.io/apimachinery/pkg/runtime/schema"
clientset "k8s.io/client-go/kubernetes"
scaleclient "k8s.io/client-go/scale"
"k8s.io/kubernetes/test/e2e/framework"
e2edebug "k8s.io/kubernetes/test/e2e/framework/debug"
e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
e2eresource "k8s.io/kubernetes/test/e2e/framework/resource"
testutils "k8s.io/kubernetes/test/utils"
@ -83,7 +83,7 @@ func ScaleRC(clientset clientset.Interface, scalesGetter scaleclient.ScalesGette
// and will wait for all pods it spawns to become "Running".
func RunRC(config testutils.RCConfig) error {
ginkgo.By(fmt.Sprintf("creating replication controller %s in namespace %s", config.Name, config.Namespace))
config.NodeDumpFunc = framework.DumpNodeDebugInfo
config.NodeDumpFunc = e2edebug.DumpNodeDebugInfo
config.ContainerDumpFunc = e2ekubectl.LogFailedContainers
return testutils.RunRC(config)
}
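For context, a minimal caller sketch; the RCConfig fields shown come from test/utils and the values are hypothetical:

config := testutils.RCConfig{
	Client:    clientset, // assumed to be in scope
	Name:      "example-rc",
	Namespace: ns,
	Image:     imageutils.GetPauseImageName(),
	Replicas:  3,
	Timeout:   5 * time.Minute,
}
framework.ExpectNoError(RunRC(config))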

View File

@ -47,7 +47,7 @@ func LoadAppArmorProfiles(nsName string, clientset clientset.Interface) {
// CreateAppArmorTestPod creates a pod that tests apparmor profile enforcement. The pod exits with
// an error code if the profile is incorrectly enforced. If runOnce is true the pod will exit after
// a single test, otherwise it will repeat the test every 1 second until failure.
func CreateAppArmorTestPod(nsName string, clientset clientset.Interface, podClient *framework.PodClient, unconfined bool, runOnce bool) *v1.Pod {
func CreateAppArmorTestPod(nsName string, clientset clientset.Interface, podClient *e2epod.PodClient, unconfined bool, runOnce bool) *v1.Pod {
profile := "localhost/" + appArmorProfilePrefix + nsName
testCmd := fmt.Sprintf(`
if touch %[1]s; then

View File

@ -45,6 +45,7 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2epodoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
e2erc "k8s.io/kubernetes/test/e2e/framework/rc"
testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
@ -910,7 +911,7 @@ func testEndpointReachability(endpoint string, port int32, protocol v1.Protocol,
}
err := wait.PollImmediate(1*time.Second, ServiceReachabilityShortPollTimeout, func() (bool, error) {
_, err := framework.RunHostCmd(execPod.Namespace, execPod.Name, cmd)
_, err := e2epodoutput.RunHostCmd(execPod.Namespace, execPod.Name, cmd)
if err != nil {
framework.Logf("Service reachability failing with error: %v\nRetrying...", err)
return false, nil
@ -1002,7 +1003,7 @@ func (j *TestJig) checkExternalServiceReachability(svc *v1.Service, pod *v1.Pod)
// Service must resolve to IP
cmd := fmt.Sprintf("nslookup %s", svcName)
return wait.PollImmediate(framework.Poll, ServiceReachabilityShortPollTimeout, func() (done bool, err error) {
_, stderr, err := framework.RunHostCmdWithFullOutput(pod.Namespace, pod.Name, cmd)
_, stderr, err := e2epodoutput.RunHostCmdWithFullOutput(pod.Namespace, pod.Name, cmd)
// NOTE(claudiub): nslookup may return 0 on Windows, even though the DNS name was not found. In this case,
// we can check stderr for the error.
if err != nil || (framework.NodeOSDistroIs("windows") && strings.Contains(stderr, fmt.Sprintf("can't find %s", svcName))) {

View File

@ -35,7 +35,7 @@ import (
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
"k8s.io/kubernetes/test/e2e/framework"
)
const (
@ -125,7 +125,7 @@ func NodeSSHHosts(c clientset.Interface) ([]string, error) {
hosts := nodeAddresses(nodelist, v1.NodeExternalIP)
// If ExternalIPs aren't available for all nodes, try falling back to the InternalIPs.
if len(hosts) < len(nodelist.Items) {
e2elog.Logf("No external IP address on nodes, falling back to internal IPs")
framework.Logf("No external IP address on nodes, falling back to internal IPs")
hosts = nodeAddresses(nodelist, v1.NodeInternalIP)
}
@ -146,12 +146,12 @@ func NodeSSHHosts(c clientset.Interface) ([]string, error) {
go func(host string) {
defer wg.Done()
if canConnect(host) {
e2elog.Logf("Assuming SSH on host %s", host)
framework.Logf("Assuming SSH on host %s", host)
sshHostsLock.Lock()
sshHosts = append(sshHosts, net.JoinHostPort(host, SSHPort))
sshHostsLock.Unlock()
} else {
e2elog.Logf("Skipping host %s because it does not run anything on port %s", host, SSHPort)
framework.Logf("Skipping host %s because it does not run anything on port %s", host, SSHPort)
}
}(host)
}
@ -168,7 +168,7 @@ func canConnect(host string) bool {
hostPort := net.JoinHostPort(host, SSHPort)
conn, err := net.DialTimeout("tcp", hostPort, 3*time.Second)
if err != nil {
e2elog.Logf("cannot dial %s: %v", hostPort, err)
framework.Logf("cannot dial %s: %v", hostPort, err)
return false
}
conn.Close()
@ -352,15 +352,15 @@ func runSSHCommandViaBastion(cmd, user, bastion, host string, signer ssh.Signer)
// LogResult logs the result of an SSH command.
func LogResult(result Result) {
remote := fmt.Sprintf("%s@%s", result.User, result.Host)
e2elog.Logf("ssh %s: command: %s", remote, result.Cmd)
e2elog.Logf("ssh %s: stdout: %q", remote, result.Stdout)
e2elog.Logf("ssh %s: stderr: %q", remote, result.Stderr)
e2elog.Logf("ssh %s: exit code: %d", remote, result.Code)
framework.Logf("ssh %s: command: %s", remote, result.Cmd)
framework.Logf("ssh %s: stdout: %q", remote, result.Stdout)
framework.Logf("ssh %s: stderr: %q", remote, result.Stderr)
framework.Logf("ssh %s: exit code: %d", remote, result.Code)
}
// IssueSSHCommandWithResult tries to execute an SSH command and returns the execution result
func IssueSSHCommandWithResult(cmd, provider string, node *v1.Node) (*Result, error) {
e2elog.Logf("Getting external IP address for %s", node.Name)
framework.Logf("Getting external IP address for %s", node.Name)
host := ""
for _, a := range node.Status.Addresses {
if a.Type == v1.NodeExternalIP && a.Address != "" {
@ -383,7 +383,7 @@ func IssueSSHCommandWithResult(cmd, provider string, node *v1.Node) (*Result, er
return nil, fmt.Errorf("couldn't find any IP address for node %s", node.Name)
}
e2elog.Logf("SSH %q on %s(%s)", cmd, node.Name, host)
framework.Logf("SSH %q on %s(%s)", cmd, node.Name, host)
result, err := SSH(cmd, host, provider)
LogResult(result)
@ -454,7 +454,7 @@ func expectNoError(err error, explain ...interface{}) {
// (for example, for call chain f -> g -> ExpectNoErrorWithOffset(1, ...) error would be logged for "f").
func expectNoErrorWithOffset(offset int, err error, explain ...interface{}) {
if err != nil {
e2elog.Logf("Unexpected error occurred: %v", err)
framework.Logf("Unexpected error occurred: %v", err)
}
gomega.ExpectWithOffset(1+offset, err).NotTo(gomega.HaveOccurred(), explain...)
}

View File

@ -30,6 +30,7 @@ import (
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubectl/pkg/util/podutils"
"k8s.io/kubernetes/test/e2e/framework"
e2epodoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
imageutils "k8s.io/kubernetes/test/utils/image"
)
@ -165,7 +166,7 @@ func ResumeNextPod(c clientset.Interface, ss *appsv1.StatefulSet) {
if resumedPod != "" {
framework.Failf("Found multiple paused stateful pods: %v and %v", pod.Name, resumedPod)
}
_, err := framework.RunHostCmdWithRetries(pod.Namespace, pod.Name, "dd if=/dev/zero of=/data/statefulset-continue bs=1 count=1 conv=fsync", StatefulSetPoll, StatefulPodTimeout)
_, err := e2epodoutput.RunHostCmdWithRetries(pod.Namespace, pod.Name, "dd if=/dev/zero of=/data/statefulset-continue bs=1 count=1 conv=fsync", StatefulSetPoll, StatefulPodTimeout)
framework.ExpectNoError(err)
framework.Logf("Resumed pod %v", pod.Name)
resumedPod = pod.Name

View File

@ -34,6 +34,7 @@ import (
"k8s.io/kubectl/pkg/util/podutils"
"k8s.io/kubernetes/test/e2e/framework"
e2emanifest "k8s.io/kubernetes/test/e2e/framework/manifest"
e2epodoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
)
// CreateStatefulSet creates a StatefulSet from the manifest at manifestPath in the Namespace ns using kubectl create.
@ -192,7 +193,7 @@ func CheckHostname(c clientset.Interface, ss *appsv1.StatefulSet) error {
cmd := "printf $(hostname)"
podList := GetPodList(c, ss)
for _, statefulPod := range podList.Items {
hostname, err := framework.RunHostCmdWithRetries(statefulPod.Namespace, statefulPod.Name, cmd, StatefulSetPoll, StatefulPodTimeout)
hostname, err := e2epodoutput.RunHostCmdWithRetries(statefulPod.Namespace, statefulPod.Name, cmd, StatefulSetPoll, StatefulPodTimeout)
if err != nil {
return err
}
@ -236,7 +237,7 @@ func CheckServiceName(ss *appsv1.StatefulSet, expectedServiceName string) error
func ExecInStatefulPods(c clientset.Interface, ss *appsv1.StatefulSet, cmd string) error {
podList := GetPodList(c, ss)
for _, statefulPod := range podList.Items {
stdout, err := framework.RunHostCmdWithRetries(statefulPod.Namespace, statefulPod.Name, cmd, StatefulSetPoll, StatefulPodTimeout)
stdout, err := e2epodoutput.RunHostCmdWithRetries(statefulPod.Namespace, statefulPod.Name, cmd, StatefulSetPoll, StatefulPodTimeout)
framework.Logf("stdout of %v on %v: %v", cmd, statefulPod.Name, stdout)
if err != nil {
return err

View File

@ -197,6 +197,9 @@ type TestContextType struct {
// NodeKillerConfig describes configuration of NodeKiller -- a utility to
// simulate node failures.
//
// TODO: move this and the corresponding command line flags into
// test/e2e/framework/node.
type NodeKillerConfig struct {
// Enabled determines whether NodeKiller should do anything at all.
// All other options below are ignored if Enabled = false.

View File

@ -22,9 +22,10 @@ import (
"bytes"
"fmt"
"sync"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/perftype"
"sync"
)
var now = time.Now

View File

@ -23,27 +23,22 @@ import (
"fmt"
"io"
"math/rand"
"net"
"net/url"
"os"
"os/exec"
"path"
"sort"
"strconv"
"strings"
"sync"
"syscall"
"time"
"github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega"
gomegatypes "github.com/onsi/gomega/types"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/sets"
@ -57,28 +52,13 @@ import (
"k8s.io/client-go/tools/clientcmd"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
watchtools "k8s.io/client-go/tools/watch"
testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
uexec "k8s.io/utils/exec"
netutils "k8s.io/utils/net"
// TODO: Remove the following imports (ref: https://github.com/kubernetes/kubernetes/issues/81245)
e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)
const (
// Minimal number of nodes for the cluster to be considered large.
largeClusterThreshold = 100
// TODO(justinsb): Avoid hardcoding this.
awsMasterIP = "172.20.0.9"
// AllContainers specifies that all containers be visited
// Copied from pkg/api/v1/pod to avoid pulling extra dependencies
AllContainers = InitContainers | Containers | EphemeralContainers
)
// DEPRECATED constants. Use the timeouts in framework.Framework instead.
@ -543,199 +523,6 @@ func RandomSuffix() string {
return strconv.Itoa(rand.Intn(10000))
}
// LookForStringInPodExec looks for the given string in the output of a command
// executed in the first container of the specified pod.
// TODO(alejandrox1): move to pod/ subpkg once kubectl methods are refactored.
func LookForStringInPodExec(ns, podName string, command []string, expectedString string, timeout time.Duration) (result string, err error) {
return LookForStringInPodExecToContainer(ns, podName, "", command, expectedString, timeout)
}
// LookForStringInPodExecToContainer looks for the given string in the output of a
// command executed in the specified pod container, or the first container if not specified.
func LookForStringInPodExecToContainer(ns, podName, containerName string, command []string, expectedString string, timeout time.Duration) (result string, err error) {
return lookForString(expectedString, timeout, func() string {
args := []string{"exec", podName, fmt.Sprintf("--namespace=%v", ns)}
if len(containerName) > 0 {
args = append(args, fmt.Sprintf("--container=%s", containerName))
}
args = append(args, "--")
args = append(args, command...)
return RunKubectlOrDie(ns, args...)
})
}
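These exec helpers are deleted from the framework package here; call sites later in this diff repoint at test/e2e/framework/pod/output. A post-move sketch with a hypothetical pod name and marker string:

out, err := e2epodoutput.LookForStringInPodExecToContainer(
	ns, "busybox-pod", "", []string{"cat", "/tmp/ready"}, "ready", time.Minute)
framework.ExpectNoError(err, "last output: %s", out)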
// lookForString looks for the given string in the output of fn, repeatedly calling fn until
// the timeout is reached or the string is found. It returns the last output and an
// error if the string was not found.
// TODO(alejandrox1): move to pod/ subpkg once kubectl methods are refactored.
func lookForString(expectedString string, timeout time.Duration, fn func() string) (result string, err error) {
for t := time.Now(); time.Since(t) < timeout; time.Sleep(Poll) {
result = fn()
if strings.Contains(result, expectedString) {
return
}
}
err = fmt.Errorf("Failed to find \"%s\", last result: \"%s\"", expectedString, result)
return
}
// KubectlBuilder is used to build, customize and execute a kubectl Command.
// Add more functions to customize the builder as needed.
type KubectlBuilder struct {
cmd *exec.Cmd
timeout <-chan time.Time
}
// NewKubectlCommand returns a KubectlBuilder for running kubectl.
func NewKubectlCommand(namespace string, args ...string) *KubectlBuilder {
b := new(KubectlBuilder)
tk := e2ekubectl.NewTestKubeconfig(TestContext.CertDir, TestContext.Host, TestContext.KubeConfig, TestContext.KubeContext, TestContext.KubectlPath, namespace)
b.cmd = tk.KubectlCmd(args...)
return b
}
// WithEnv sets the given environment and returns itself.
func (b *KubectlBuilder) WithEnv(env []string) *KubectlBuilder {
b.cmd.Env = env
return b
}
// WithTimeout sets the given timeout and returns itself.
func (b *KubectlBuilder) WithTimeout(t <-chan time.Time) *KubectlBuilder {
b.timeout = t
return b
}
// WithStdinData sets the given data to stdin and returns itself.
func (b KubectlBuilder) WithStdinData(data string) *KubectlBuilder {
b.cmd.Stdin = strings.NewReader(data)
return &b
}
// WithStdinReader sets the given reader and returns itself.
func (b KubectlBuilder) WithStdinReader(reader io.Reader) *KubectlBuilder {
b.cmd.Stdin = reader
return &b
}
// ExecOrDie runs the kubectl executable or dies if an error occurs.
func (b KubectlBuilder) ExecOrDie(namespace string) string {
str, err := b.Exec()
// In case of i/o timeout error, try talking to the apiserver again after 2s before dying.
// Note that we're still dying after retrying so that we can get visibility to triage it further.
if isTimeout(err) {
Logf("Hit i/o timeout error, talking to the server 2s later to see if it's temporary.")
time.Sleep(2 * time.Second)
retryStr, retryErr := RunKubectl(namespace, "version")
Logf("stdout: %q", retryStr)
Logf("err: %v", retryErr)
}
ExpectNoError(err)
return str
}
func isTimeout(err error) bool {
switch err := err.(type) {
case *url.Error:
if err, ok := err.Err.(net.Error); ok && err.Timeout() {
return true
}
case net.Error:
if err.Timeout() {
return true
}
}
return false
}
// Exec runs the kubectl executable.
func (b KubectlBuilder) Exec() (string, error) {
stdout, _, err := b.ExecWithFullOutput()
return stdout, err
}
// ExecWithFullOutput runs the kubectl executable, and returns the stdout and stderr.
func (b KubectlBuilder) ExecWithFullOutput() (string, string, error) {
var stdout, stderr bytes.Buffer
cmd := b.cmd
cmd.Stdout, cmd.Stderr = &stdout, &stderr
Logf("Running '%s %s'", cmd.Path, strings.Join(cmd.Args[1:], " ")) // skip arg[0] as it is printed separately
if err := cmd.Start(); err != nil {
return "", "", fmt.Errorf("error starting %v:\nCommand stdout:\n%v\nstderr:\n%v\nerror:\n%v", cmd, cmd.Stdout, cmd.Stderr, err)
}
errCh := make(chan error, 1)
go func() {
errCh <- cmd.Wait()
}()
select {
case err := <-errCh:
if err != nil {
var rc = 127
if ee, ok := err.(*exec.ExitError); ok {
rc = int(ee.Sys().(syscall.WaitStatus).ExitStatus())
Logf("rc: %d", rc)
}
return stdout.String(), stderr.String(), uexec.CodeExitError{
Err: fmt.Errorf("error running %v:\nCommand stdout:\n%v\nstderr:\n%v\nerror:\n%v", cmd, cmd.Stdout, cmd.Stderr, err),
Code: rc,
}
}
case <-b.timeout:
b.cmd.Process.Kill()
return "", "", fmt.Errorf("timed out waiting for command %v:\nCommand stdout:\n%v\nstderr:\n%v", cmd, cmd.Stdout, cmd.Stderr)
}
Logf("stderr: %q", stderr.String())
Logf("stdout: %q", stdout.String())
return stdout.String(), stderr.String(), nil
}
// RunKubectlOrDie is a convenience wrapper over kubectlBuilder
func RunKubectlOrDie(namespace string, args ...string) string {
return NewKubectlCommand(namespace, args...).ExecOrDie(namespace)
}
// RunKubectl is a convenience wrapper over kubectlBuilder
func RunKubectl(namespace string, args ...string) (string, error) {
return NewKubectlCommand(namespace, args...).Exec()
}
// RunKubectlWithFullOutput is a convenience wrapper over kubectlBuilder
// It will also return the command's stderr.
func RunKubectlWithFullOutput(namespace string, args ...string) (string, string, error) {
return NewKubectlCommand(namespace, args...).ExecWithFullOutput()
}
// RunKubectlOrDieInput is a convenience wrapper over kubectlBuilder that takes input to stdin
func RunKubectlOrDieInput(namespace string, data string, args ...string) string {
return NewKubectlCommand(namespace, args...).WithStdinData(data).ExecOrDie(namespace)
}
// RunKubectlInput is a convenience wrapper over kubectlBuilder that takes input to stdin
func RunKubectlInput(namespace string, data string, args ...string) (string, error) {
return NewKubectlCommand(namespace, args...).WithStdinData(data).Exec()
}
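The Run* wrappers likewise move to e2ekubectl (test/e2e/framework/kubectl), as the updated call sites elsewhere in this diff show. A post-move usage sketch:

out, err := e2ekubectl.RunKubectl(ns, "get", "pods", "-o", "name")
if err != nil {
	framework.Failf("kubectl get pods failed: %v", err)
}
framework.Logf("pods:\n%s", out)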
// RunKubemciWithKubeconfig is a convenience wrapper over RunKubemciCmd
func RunKubemciWithKubeconfig(args ...string) (string, error) {
if TestContext.KubeConfig != "" {
args = append(args, "--"+clientcmd.RecommendedConfigPathFlag+"="+TestContext.KubeConfig)
}
return RunKubemciCmd(args...)
}
// RunKubemciCmd is a convenience wrapper over kubectlBuilder to run kubemci.
// It assumes that kubemci exists in PATH.
func RunKubemciCmd(args ...string) (string, error) {
// kubemci is assumed to be in PATH.
kubemci := "kubemci"
b := new(KubectlBuilder)
args = append(args, "--gcp-project="+TestContext.CloudConfig.ProjectID)
b.cmd = exec.Command(kubemci, args...)
return b.Exec()
}
// StartCmdAndStreamOutput returns stdout and stderr after starting the given cmd.
func StartCmdAndStreamOutput(cmd *exec.Cmd) (stdout, stderr io.ReadCloser, err error) {
stdout, err = cmd.StdoutPipe()
@ -758,449 +545,6 @@ func TryKill(cmd *exec.Cmd) {
}
}
// testContainerOutputMatcher runs the given pod in the given namespace and waits
// for all of the containers in the podSpec to move into the 'Success' status, and tests
// the specified container log against the given expected output using the given matcher.
func (f *Framework) testContainerOutputMatcher(scenarioName string,
pod *v1.Pod,
containerIndex int,
expectedOutput []string,
matcher func(string, ...interface{}) gomegatypes.GomegaMatcher) {
ginkgo.By(fmt.Sprintf("Creating a pod to test %v", scenarioName))
if containerIndex < 0 || containerIndex >= len(pod.Spec.Containers) {
Failf("Invalid container index: %d", containerIndex)
}
ExpectNoError(f.MatchContainerOutput(pod, pod.Spec.Containers[containerIndex].Name, expectedOutput, matcher))
}
// ContainerType signifies container type
type ContainerType int
const (
// Containers is for normal containers
Containers ContainerType = 1 << iota
// InitContainers is for init containers
InitContainers
// EphemeralContainers is for ephemeral containers
EphemeralContainers
)
// allFeatureEnabledContainers returns a ContainerType mask which includes all container
// types except for the ones guarded by feature gate.
// Copied from pkg/api/v1/pod to avoid pulling extra dependencies
func allFeatureEnabledContainers() ContainerType {
return AllContainers
}
// ContainerVisitor is called with each container spec, and returns true
// if visiting should continue.
// Copied from pkg/api/v1/pod to avoid pulling extra dependencies
type ContainerVisitor func(container *v1.Container, containerType ContainerType) (shouldContinue bool)
// visitContainers invokes the visitor function with a pointer to every container
// spec in the given pod spec with type set in mask. If visitor returns false,
// visiting is short-circuited. visitContainers returns true if visiting completes,
// false if visiting was short-circuited.
// Copied from pkg/api/v1/pod to avoid pulling extra dependencies
func visitContainers(podSpec *v1.PodSpec, mask ContainerType, visitor ContainerVisitor) bool {
if mask&InitContainers != 0 {
for i := range podSpec.InitContainers {
if !visitor(&podSpec.InitContainers[i], InitContainers) {
return false
}
}
}
if mask&Containers != 0 {
for i := range podSpec.Containers {
if !visitor(&podSpec.Containers[i], Containers) {
return false
}
}
}
if mask&EphemeralContainers != 0 {
for i := range podSpec.EphemeralContainers {
if !visitor((*v1.Container)(&podSpec.EphemeralContainers[i].EphemeralContainerCommon), EphemeralContainers) {
return false
}
}
}
return true
}
// MatchContainerOutput creates a pod and waits for all its containers to exit with success.
// It then tests that the matcher with each expectedOutput matches the output of the specified container.
func (f *Framework) MatchContainerOutput(
pod *v1.Pod,
containerName string,
expectedOutput []string,
matcher func(string, ...interface{}) gomegatypes.GomegaMatcher) error {
ns := pod.ObjectMeta.Namespace
if ns == "" {
ns = f.Namespace.Name
}
podClient := f.PodClientNS(ns)
createdPod := podClient.Create(pod)
defer func() {
ginkgo.By("delete the pod")
podClient.DeleteSync(createdPod.Name, metav1.DeleteOptions{}, DefaultPodDeletionTimeout)
}()
// Wait for client pod to complete.
podErr := e2epod.WaitForPodSuccessInNamespaceTimeout(f.ClientSet, createdPod.Name, ns, f.Timeouts.PodStart)
// Grab its logs. Get host first.
podStatus, err := podClient.Get(context.TODO(), createdPod.Name, metav1.GetOptions{})
if err != nil {
return fmt.Errorf("failed to get pod status: %v", err)
}
if podErr != nil {
// Pod failed. Dump all logs from all containers to see what's wrong
_ = visitContainers(&podStatus.Spec, allFeatureEnabledContainers(), func(c *v1.Container, containerType ContainerType) bool {
logs, err := e2epod.GetPodLogs(f.ClientSet, ns, podStatus.Name, c.Name)
if err != nil {
Logf("Failed to get logs from node %q pod %q container %q: %v",
podStatus.Spec.NodeName, podStatus.Name, c.Name, err)
} else {
Logf("Output of node %q pod %q container %q: %s", podStatus.Spec.NodeName, podStatus.Name, c.Name, logs)
}
return true
})
return fmt.Errorf("expected pod %q success: %v", createdPod.Name, podErr)
}
Logf("Trying to get logs from node %s pod %s container %s: %v",
podStatus.Spec.NodeName, podStatus.Name, containerName, err)
// Sometimes the actual containers take a second to get started, try to get logs for 60s
logs, err := e2epod.GetPodLogs(f.ClientSet, ns, podStatus.Name, containerName)
if err != nil {
Logf("Failed to get logs from node %q pod %q container %q. %v",
podStatus.Spec.NodeName, podStatus.Name, containerName, err)
return fmt.Errorf("failed to get logs from %s for %s: %v", podStatus.Name, containerName, err)
}
for _, expected := range expectedOutput {
m := matcher(expected)
matches, err := m.Match(logs)
if err != nil {
return fmt.Errorf("expected %q in container output: %v", expected, err)
} else if !matches {
return fmt.Errorf("expected %q in container output: %s", expected, m.FailureMessage(logs))
}
}
return nil
}
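A caller sketch, assuming a pod whose first container prints the expected strings and then exits successfully; gomega.ContainSubstring satisfies the matcher parameter:

err := f.MatchContainerOutput(pod, pod.Spec.Containers[0].Name,
	[]string{"hello", "world"}, gomega.ContainSubstring)
framework.ExpectNoError(err)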
// EventsLister is a func that lists events.
type EventsLister func(opts metav1.ListOptions, ns string) (*v1.EventList, error)
// dumpEventsInNamespace dumps events in the given namespace.
func dumpEventsInNamespace(eventsLister EventsLister, namespace string) {
ginkgo.By(fmt.Sprintf("Collecting events from namespace %q.", namespace))
events, err := eventsLister(metav1.ListOptions{}, namespace)
ExpectNoError(err, "failed to list events in namespace %q", namespace)
ginkgo.By(fmt.Sprintf("Found %d events.", len(events.Items)))
// Sort events by their first timestamp
sortedEvents := events.Items
if len(sortedEvents) > 1 {
sort.Sort(byFirstTimestamp(sortedEvents))
}
for _, e := range sortedEvents {
Logf("At %v - event for %v: %v %v: %v", e.FirstTimestamp, e.InvolvedObject.Name, e.Source, e.Reason, e.Message)
}
// Note that we don't wait for any Cleanup to propagate, which means
// that if you delete a bunch of pods right before ending your test,
// you may or may not see the killing/deletion/Cleanup events.
}
// DumpAllNamespaceInfo dumps events, pods, and node information in the given namespace.
func DumpAllNamespaceInfo(c clientset.Interface, namespace string) {
dumpEventsInNamespace(func(opts metav1.ListOptions, ns string) (*v1.EventList, error) {
return c.CoreV1().Events(ns).List(context.TODO(), opts)
}, namespace)
e2epod.DumpAllPodInfoForNamespace(c, namespace, TestContext.ReportDir)
// If the cluster is large, the following logs are basically useless, because:
// 1. it takes tens of minutes or hours to grab all of them
// 2. there are so many of them that working with them is mostly impossible
// So we dump them only if the cluster is relatively small.
maxNodesForDump := TestContext.MaxNodesToGather
nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
if err != nil {
Logf("unable to fetch node list: %v", err)
return
}
if len(nodes.Items) <= maxNodesForDump {
dumpAllNodeInfo(c, nodes)
} else {
Logf("skipping dumping cluster info - cluster too large")
}
}
// byFirstTimestamp sorts a slice of events by first timestamp, using their involvedObject's name as a tie breaker.
type byFirstTimestamp []v1.Event
func (o byFirstTimestamp) Len() int { return len(o) }
func (o byFirstTimestamp) Swap(i, j int) { o[i], o[j] = o[j], o[i] }
func (o byFirstTimestamp) Less(i, j int) bool {
if o[i].FirstTimestamp.Equal(&o[j].FirstTimestamp) {
return o[i].InvolvedObject.Name < o[j].InvolvedObject.Name
}
return o[i].FirstTimestamp.Before(&o[j].FirstTimestamp)
}
func dumpAllNodeInfo(c clientset.Interface, nodes *v1.NodeList) {
names := make([]string, len(nodes.Items))
for ix := range nodes.Items {
names[ix] = nodes.Items[ix].Name
}
DumpNodeDebugInfo(c, names, Logf)
}
// DumpNodeDebugInfo dumps debug information of the given nodes.
func DumpNodeDebugInfo(c clientset.Interface, nodeNames []string, logFunc func(fmt string, args ...interface{})) {
for _, n := range nodeNames {
logFunc("\nLogging node info for node %v", n)
node, err := c.CoreV1().Nodes().Get(context.TODO(), n, metav1.GetOptions{})
if err != nil {
logFunc("Error getting node info %v", err)
}
logFunc("Node Info: %v", node)
logFunc("\nLogging kubelet events for node %v", n)
for _, e := range getNodeEvents(c, n) {
logFunc("source %v type %v message %v reason %v first ts %v last ts %v, involved obj %+v",
e.Source, e.Type, e.Message, e.Reason, e.FirstTimestamp, e.LastTimestamp, e.InvolvedObject)
}
logFunc("\nLogging pods the kubelet thinks is on node %v", n)
podList, err := getKubeletPods(c, n)
if err != nil {
logFunc("Unable to retrieve kubelet pods for node %v: %v", n, err)
continue
}
for _, p := range podList.Items {
logFunc("%v started at %v (%d+%d container statuses recorded)", p.Name, p.Status.StartTime, len(p.Status.InitContainerStatuses), len(p.Status.ContainerStatuses))
for _, c := range p.Status.InitContainerStatuses {
logFunc("\tInit container %v ready: %v, restart count %v",
c.Name, c.Ready, c.RestartCount)
}
for _, c := range p.Status.ContainerStatuses {
logFunc("\tContainer %v ready: %v, restart count %v",
c.Name, c.Ready, c.RestartCount)
}
}
e2emetrics.HighLatencyKubeletOperations(c, 10*time.Second, n, logFunc)
// TODO: Log node resource info
}
}
// getKubeletPods retrieves the list of pods on the kubelet.
func getKubeletPods(c clientset.Interface, node string) (*v1.PodList, error) {
var client restclient.Result
finished := make(chan struct{}, 1)
go func() {
// call chain tends to hang in some cases when Node is not ready. Add an artificial timeout for this call. #22165
client = c.CoreV1().RESTClient().Get().
Resource("nodes").
SubResource("proxy").
Name(fmt.Sprintf("%v:%v", node, KubeletPort)).
Suffix("pods").
Do(context.TODO())
finished <- struct{}{}
}()
select {
case <-finished:
result := &v1.PodList{}
if err := client.Into(result); err != nil {
return &v1.PodList{}, err
}
return result, nil
case <-time.After(PodGetTimeout):
return &v1.PodList{}, fmt.Errorf("Waiting up to %v for getting the list of pods", PodGetTimeout)
}
}
// getNodeEvents returns kubelet events from the given node. This includes kubelet
// restart and node unhealthy events. Note that listing events like this will mess
// with latency metrics, beware of calling it during a test.
func getNodeEvents(c clientset.Interface, nodeName string) []v1.Event {
selector := fields.Set{
"involvedObject.kind": "Node",
"involvedObject.name": nodeName,
"involvedObject.namespace": metav1.NamespaceAll,
"source": "kubelet",
}.AsSelector().String()
options := metav1.ListOptions{FieldSelector: selector}
events, err := c.CoreV1().Events(metav1.NamespaceSystem).List(context.TODO(), options)
if err != nil {
Logf("Unexpected error retrieving node events %v", err)
return []v1.Event{}
}
return events.Items
}
// WaitForAllNodesSchedulable waits up to timeout for all
// (but TestContext.AllowedNotReadyNodes) nodes to become schedulable.
func WaitForAllNodesSchedulable(c clientset.Interface, timeout time.Duration) error {
if TestContext.AllowedNotReadyNodes == -1 {
return nil
}
Logf("Waiting up to %v for all (but %d) nodes to be schedulable", timeout, TestContext.AllowedNotReadyNodes)
return wait.PollImmediate(
30*time.Second,
timeout,
e2enode.CheckReadyForTests(c, TestContext.NonblockingTaints, TestContext.AllowedNotReadyNodes, largeClusterThreshold),
)
}
// AddOrUpdateLabelOnNode adds the given label key and value to the given node or updates value.
func AddOrUpdateLabelOnNode(c clientset.Interface, nodeName string, labelKey, labelValue string) {
ExpectNoError(testutils.AddLabelsToNode(c, nodeName, map[string]string{labelKey: labelValue}))
}
// ExpectNodeHasLabel expects that the given node has the given label pair.
func ExpectNodeHasLabel(c clientset.Interface, nodeName string, labelKey string, labelValue string) {
ginkgo.By("verifying the node has the label " + labelKey + " " + labelValue)
node, err := c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
ExpectNoError(err)
ExpectEqual(node.Labels[labelKey], labelValue)
}
// RemoveLabelOffNode is for cleaning up labels temporarily added to a node;
// it won't fail if the target label doesn't exist or has already been removed.
func RemoveLabelOffNode(c clientset.Interface, nodeName string, labelKey string) {
ginkgo.By("removing the label " + labelKey + " off the node " + nodeName)
ExpectNoError(testutils.RemoveLabelOffNode(c, nodeName, []string{labelKey}))
ginkgo.By("verifying the node doesn't have the label " + labelKey)
ExpectNoError(testutils.VerifyLabelsRemoved(c, nodeName, []string{labelKey}))
}
// ExpectNodeHasTaint expects that the node has the given taint.
func ExpectNodeHasTaint(c clientset.Interface, nodeName string, taint *v1.Taint) {
ginkgo.By("verifying the node has the taint " + taint.ToString())
if has, err := NodeHasTaint(c, nodeName, taint); !has {
ExpectNoError(err)
Failf("Failed to find taint %s on node %s", taint.ToString(), nodeName)
}
}
// NodeHasTaint returns true if the node has the given taint, else returns false.
func NodeHasTaint(c clientset.Interface, nodeName string, taint *v1.Taint) (bool, error) {
node, err := c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
if err != nil {
return false, err
}
nodeTaints := node.Spec.Taints
if len(nodeTaints) == 0 || !taintExists(nodeTaints, taint) {
return false, nil
}
return true, nil
}
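A short sketch of the pair, with a hypothetical taint:

taint := &v1.Taint{Key: "example.com/dedicated", Value: "gpu", Effect: v1.TaintEffectNoSchedule}
ExpectNodeHasTaint(c, nodeName, taint) // fails the test if the taint is missing
has, err := NodeHasTaint(c, nodeName, taint) // non-fatal variant
ExpectNoError(err)
Logf("node %s has taint: %v", nodeName, has)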
// RunHostCmd runs the given cmd in the context of the given pod using `kubectl exec`
// inside of a shell.
func RunHostCmd(ns, name, cmd string) (string, error) {
return RunKubectl(ns, "exec", name, "--", "/bin/sh", "-x", "-c", cmd)
}
// RunHostCmdWithFullOutput runs the given cmd in the context of the given pod using `kubectl exec`
// inside of a shell. It will also return the command's stderr.
func RunHostCmdWithFullOutput(ns, name, cmd string) (string, string, error) {
return RunKubectlWithFullOutput(ns, "exec", name, "--", "/bin/sh", "-x", "-c", cmd)
}
// RunHostCmdOrDie calls RunHostCmd and dies on error.
func RunHostCmdOrDie(ns, name, cmd string) string {
stdout, err := RunHostCmd(ns, name, cmd)
Logf("stdout: %v", stdout)
ExpectNoError(err)
return stdout
}
// RunHostCmdWithRetries calls RunHostCmd and retries all errors
// until it succeeds or the specified timeout expires.
// This can be used with idempotent commands to deflake transient Node issues.
func RunHostCmdWithRetries(ns, name, cmd string, interval, timeout time.Duration) (string, error) {
start := time.Now()
for {
out, err := RunHostCmd(ns, name, cmd)
if err == nil {
return out, nil
}
if elapsed := time.Since(start); elapsed > timeout {
return out, fmt.Errorf("RunHostCmd still failed after %v: %v", elapsed, err)
}
Logf("Waiting %v to retry failed RunHostCmd: %v", interval, err)
time.Sleep(interval)
}
}
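As with the other host-command helpers, later hunks in this diff repoint callers at e2epodoutput. A post-move sketch with an idempotent command; the interval and timeout values are illustrative:

out, err := e2epodoutput.RunHostCmdWithRetries(ns, podName,
	"cat /etc/hostname", 5*time.Second, 2*time.Minute)
framework.ExpectNoError(err)
framework.Logf("hostname: %s", out)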
// AllNodesReady checks whether all registered nodes are ready. Setting -1 on
// TestContext.AllowedNotReadyNodes will bypass the post-test node readiness check.
// TODO: we should change the AllNodesReady call in AfterEach to WaitForAllNodesHealthy,
// and figure out how to do it in a configurable way, as we can't expect all setups to run
// default test add-ons.
func AllNodesReady(c clientset.Interface, timeout time.Duration) error {
if TestContext.AllowedNotReadyNodes == -1 {
return nil
}
Logf("Waiting up to %v for all (but %d) nodes to be ready", timeout, TestContext.AllowedNotReadyNodes)
var notReady []*v1.Node
err := wait.PollImmediate(Poll, timeout, func() (bool, error) {
notReady = nil
// It should be OK to list unschedulable Nodes here.
nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
if err != nil {
return false, err
}
for i := range nodes.Items {
node := &nodes.Items[i]
if !e2enode.IsConditionSetAsExpected(node, v1.NodeReady, true) {
notReady = append(notReady, node)
}
}
// The framework allows up to TestContext.AllowedNotReadyNodes nodes to be non-ready,
// e.g. to tolerate incorrect deployment of some small percentage
// of nodes (which we allow in cluster validation). Some nodes that are not
// provisioned correctly at startup will never become ready (e.g. when something
// won't install correctly), so we can't expect them to be ready at any point.
return len(notReady) <= TestContext.AllowedNotReadyNodes, nil
})
if err != nil && err != wait.ErrWaitTimeout {
return err
}
if len(notReady) > TestContext.AllowedNotReadyNodes {
msg := ""
for _, node := range notReady {
msg = fmt.Sprintf("%s, %s", msg, node.Name)
}
return fmt.Errorf("Not ready nodes: %#v", msg)
}
return nil
}
// LookForStringInLog looks for the given string in the log of a specific pod container
func LookForStringInLog(ns, podName, container, expectedString string, timeout time.Duration) (result string, err error) {
return lookForString(expectedString, timeout, func() string {
return RunKubectlOrDie(ns, "logs", podName, container)
})
}
// EnsureLoadBalancerResourcesDeleted ensures that cloud load balancer resources that were created
// are actually cleaned up. Currently only implemented for GCE/GKE.
func EnsureLoadBalancerResourcesDeleted(ip, portRange string) error {
@ -1323,25 +667,6 @@ func GetControlPlaneAddresses(c clientset.Interface) []string {
return ips.List()
}
// CreateEmptyFileOnPod creates an empty file at the given path in the pod.
// TODO(alejandrox1): move to subpkg pod once kubectl methods have been refactored.
func CreateEmptyFileOnPod(namespace string, podName string, filePath string) error {
_, err := RunKubectl(namespace, "exec", podName, "--", "/bin/sh", "-c", fmt.Sprintf("touch %s", filePath))
return err
}
// DumpDebugInfo dumps debug info of tests.
func DumpDebugInfo(c clientset.Interface, ns string) {
sl, _ := c.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{LabelSelector: labels.Everything().String()})
for _, s := range sl.Items {
desc, _ := RunKubectl(ns, "describe", "po", s.Name)
Logf("\nOutput of kubectl describe %v:\n%v", s.Name, desc)
l, _ := RunKubectl(ns, "logs", s.Name, "--tail=100")
Logf("\nLast 100 log lines of %v:\n%v", s.Name, l)
}
}
// PrettyPrintJSON converts metrics to JSON format.
func PrettyPrintJSON(metrics interface{}) string {
output := &bytes.Buffer{}
@ -1357,16 +682,6 @@ func PrettyPrintJSON(metrics interface{}) string {
return formatted.String()
}
// taintExists checks if the given taint exists in the list of taints. Returns true if it exists, false otherwise.
func taintExists(taints []v1.Taint, taintToFind *v1.Taint) bool {
for _, taint := range taints {
if taint.MatchTaint(taintToFind) {
return true
}
}
return false
}
// WatchEventSequenceVerifier ...
// manages a watch for a given resource, ensures that events take place in a given order, retries the test on failure
//

View File

@ -56,7 +56,9 @@ import (
clientset "k8s.io/client-go/kubernetes"
clientexec "k8s.io/client-go/util/exec"
"k8s.io/kubernetes/test/e2e/framework"
e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2epodoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
imageutils "k8s.io/kubernetes/test/utils/image"
uexec "k8s.io/utils/exec"
@ -354,7 +356,7 @@ func startVolumeServer(client clientset.Interface, config TestConfig) *v1.Pod {
}
}
if config.ServerReadyMessage != "" {
_, err := framework.LookForStringInLog(pod.Namespace, pod.Name, serverPodName, config.ServerReadyMessage, VolumeServerPodStartupTimeout)
_, err := e2epodoutput.LookForStringInLog(pod.Namespace, pod.Name, serverPodName, config.ServerReadyMessage, VolumeServerPodStartupTimeout)
framework.ExpectNoError(err, "Failed to find %q in pod logs: %s", config.ServerReadyMessage, err)
}
return pod
@ -475,7 +477,7 @@ func testVolumeContent(f *framework.Framework, pod *v1.Pod, containerName string
// Block: check content
deviceName := fmt.Sprintf("/opt/%d", i)
commands := GenerateReadBlockCmd(deviceName, len(test.ExpectedContent))
_, err := framework.LookForStringInPodExecToContainer(pod.Namespace, pod.Name, containerName, commands, test.ExpectedContent, time.Minute)
_, err := e2epodoutput.LookForStringInPodExecToContainer(pod.Namespace, pod.Name, containerName, commands, test.ExpectedContent, time.Minute)
framework.ExpectNoError(err, "failed: finding the contents of the block device %s.", deviceName)
// Check that it's a real block device
@ -484,7 +486,7 @@ func testVolumeContent(f *framework.Framework, pod *v1.Pod, containerName string
// Filesystem: check content
fileName := fmt.Sprintf("/opt/%d/%s", i, test.File)
commands := GenerateReadFileCmd(fileName)
_, err := framework.LookForStringInPodExecToContainer(pod.Namespace, pod.Name, containerName, commands, test.ExpectedContent, time.Minute)
_, err := e2epodoutput.LookForStringInPodExecToContainer(pod.Namespace, pod.Name, containerName, commands, test.ExpectedContent, time.Minute)
framework.ExpectNoError(err, "failed: finding the contents of the mounted file %s.", fileName)
// Check that a directory has been mounted
@ -495,14 +497,14 @@ func testVolumeContent(f *framework.Framework, pod *v1.Pod, containerName string
// Filesystem: check fsgroup
if fsGroup != nil {
ginkgo.By("Checking fsGroup is correct.")
_, err = framework.LookForStringInPodExecToContainer(pod.Namespace, pod.Name, containerName, []string{"ls", "-ld", dirName}, strconv.Itoa(int(*fsGroup)), time.Minute)
_, err = e2epodoutput.LookForStringInPodExecToContainer(pod.Namespace, pod.Name, containerName, []string{"ls", "-ld", dirName}, strconv.Itoa(int(*fsGroup)), time.Minute)
framework.ExpectNoError(err, "failed: getting the right privileges in the file %v", int(*fsGroup))
}
// Filesystem: check fsType
if fsType != "" {
ginkgo.By("Checking fsType is correct.")
_, err = framework.LookForStringInPodExecToContainer(pod.Namespace, pod.Name, containerName, []string{"grep", " " + dirName + " ", "/proc/mounts"}, fsType, time.Minute)
_, err = e2epodoutput.LookForStringInPodExecToContainer(pod.Namespace, pod.Name, containerName, []string{"grep", " " + dirName + " ", "/proc/mounts"}, fsType, time.Minute)
framework.ExpectNoError(err, "failed: getting the right fsType %s", fsType)
}
}
@ -549,7 +551,7 @@ func testVolumeClient(f *framework.Framework, config TestConfig, fsGroup *int64,
}
ec.Resources = v1.ResourceRequirements{}
ec.Name = "volume-ephemeral-container"
err = f.PodClient().AddEphemeralContainerSync(clientPod, ec, timeouts.PodStart)
err = e2epod.NewPodClient(f).AddEphemeralContainerSync(clientPod, ec, timeouts.PodStart)
// The API server will return NotFound for the subresource when the feature is disabled
framework.ExpectNoError(err, "failed to add ephemeral container for re-test")
testVolumeContent(f, clientPod, ec.Name, fsGroup, fsType, tests)
@ -587,7 +589,7 @@ func InjectContent(f *framework.Framework, config TestConfig, fsGroup *int64, fs
fileName := fmt.Sprintf("/opt/%d/%s", i, test.File)
commands = append(commands, generateWriteFileCmd(test.ExpectedContent, fileName)...)
}
out, err := framework.RunKubectl(injectorPod.Namespace, commands...)
out, err := e2ekubectl.RunKubectl(injectorPod.Namespace, commands...)
framework.ExpectNoError(err, "failed: writing the contents: %s", out)
}
@ -648,7 +650,7 @@ func CheckVolumeModeOfPath(f *framework.Framework, pod *v1.Pod, volMode v1.Persi
// TODO: put this under e2epod once https://github.com/kubernetes/kubernetes/issues/81245
// is resolved. Otherwise there will be a dependency issue.
func PodExec(f *framework.Framework, pod *v1.Pod, shExec string) (string, string, error) {
return f.ExecCommandInContainerWithFullOutput(pod.Name, pod.Spec.Containers[0].Name, "/bin/sh", "-c", shExec)
return e2epod.ExecCommandInContainerWithFullOutput(f, pod.Name, pod.Spec.Containers[0].Name, "/bin/sh", "-c", shExec)
}
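A usage sketch for PodExec, assuming a running test pod with /bin/sh in its first container:

stdout, stderr, err := PodExec(f, pod, "ls -l /opt/0")
framework.ExpectNoError(err, "stderr: %q", stderr)
framework.Logf("listing: %s", stdout)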
// VerifyExecInPodSucceed verifies shell cmd in target pod succeed

View File

@ -23,10 +23,11 @@ import (
"sync"
"time"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/kubernetes/test/e2e/framework"
e2econfig "k8s.io/kubernetes/test/e2e/framework/config"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
instrumentation "k8s.io/kubernetes/test/e2e/instrumentation/common"
imageutils "k8s.io/kubernetes/test/utils/image"
admissionapi "k8s.io/pod-security-admission/api"
@ -118,7 +119,7 @@ func RunLogPodsWithSleepOf(f *framework.Framework, sleep time.Duration, podname
// we don't validate total log data, since there is no guarantee all logs will be stored forever.
// instead, we just validate that some logs are being created in std out.
Verify: func(p v1.Pod) (bool, error) {
s, err := framework.LookForStringInLog(f.Namespace.Name, p.Name, "logging-soak", "logs-123", 1*time.Second)
s, err := e2eoutput.LookForStringInLog(f.Namespace.Name, p.Name, "logging-soak", "logs-123", 1*time.Second)
return s != "", err
},
},

View File

@ -95,7 +95,7 @@ func (p *loadLoggingPod) Name() string {
func (p *loadLoggingPod) Start(f *framework.Framework) error {
framework.Logf("Starting load logging pod %s", p.name)
f.PodClient().Create(&v1.Pod{
e2epod.NewPodClient(f).Create(&v1.Pod{
ObjectMeta: meta_v1.ObjectMeta{
Name: p.name,
},
@ -161,7 +161,7 @@ func (p *execLoggingPod) Name() string {
func (p *execLoggingPod) Start(f *framework.Framework) error {
framework.Logf("Starting repeating logging pod %s", p.name)
f.PodClient().Create(&v1.Pod{
e2epod.NewPodClient(f).Create(&v1.Pod{
ObjectMeta: meta_v1.ObjectMeta{
Name: p.name,
},

View File

@ -27,6 +27,7 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/framework"
e2egpu "k8s.io/kubernetes/test/e2e/framework/gpu"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
instrumentation "k8s.io/kubernetes/test/e2e/instrumentation/common"
"k8s.io/kubernetes/test/e2e/scheduling"
@ -81,7 +82,7 @@ func testStackdriverAcceleratorMonitoring(f *framework.Framework) {
scheduling.SetupNVIDIAGPUNode(f, false)
f.PodClient().Create(&v1.Pod{
e2epod.NewPodClient(f).Create(&v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: rcName,
},

Some files were not shown because too many files have changed in this diff.