e2e refactor: clean up Logf from framework/util

SataQiu 2019-05-24 15:59:55 +08:00
parent a07b027261
commit d3a902ff5b
65 changed files with 646 additions and 585 deletions
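
Every file in this commit follows the same mechanical pattern: call sites stop using framework.Logf (or the package-local Logf inside the framework package itself) and instead call e2elog.Logf from the new test/e2e/framework/log package, imported under the e2elog alias; BUILD files gain the matching //test/e2e/framework/log:go_default_library dependency. A minimal sketch of a converted caller is shown below (the package name and helper function are illustrative, not taken from the diff):

package example // hypothetical package, for illustration only

import (
	// Dedicated log package introduced by this refactor; Logf keeps the
	// familiar printf-style signature.
	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
)

// logPodReady is a hypothetical helper showing the call-site change:
// framework.Logf(...) becomes e2elog.Logf(...); the arguments are unchanged.
func logPodReady(podName string, ready bool) {
	// Before: framework.Logf("pod %q ready: %v", podName, ready)
	e2elog.Logf("pod %q ready: %v", podName, ready)
}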

View File

@@ -490,19 +490,19 @@ var _ = SIGDescribe("ServiceAccounts", func() {
 pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
 framework.ExpectNoError(err)
-framework.Logf("created pod")
+e2elog.Logf("created pod")
 if !framework.CheckPodsRunningReady(f.ClientSet, f.Namespace.Name, []string{pod.Name}, time.Minute) {
 framework.Failf("pod %q in ns %q never became ready", pod.Name, f.Namespace.Name)
 }
-framework.Logf("pod is ready")
+e2elog.Logf("pod is ready")
 var logs string
 if err := wait.Poll(1*time.Minute, 20*time.Minute, func() (done bool, err error) {
-framework.Logf("polling logs")
+e2elog.Logf("polling logs")
 logs, err = framework.GetPodLogs(f.ClientSet, f.Namespace.Name, "inclusterclient", "inclusterclient")
 if err != nil {
-framework.Logf("Error pulling logs: %v", err)
+e2elog.Logf("Error pulling logs: %v", err)
 return false, nil
 }
 tokenCount, err := parseInClusterClientLogs(logs)
@@ -510,7 +510,7 @@ var _ = SIGDescribe("ServiceAccounts", func() {
 return false, fmt.Errorf("inclusterclient reported an error: %v", err)
 }
 if tokenCount < 2 {
-framework.Logf("Retrying. Still waiting to see more unique tokens: got=%d, want=2", tokenCount)
+e2elog.Logf("Retrying. Still waiting to see more unique tokens: got=%d, want=2", tokenCount)
 return false, nil
 }
 return true, nil

View File

@@ -109,6 +109,7 @@ go_library(
 "//staging/src/k8s.io/component-base/cli/flag:go_default_library",
 "//test/e2e/framework/auth:go_default_library",
 "//test/e2e/framework/ginkgowrapper:go_default_library",
+"//test/e2e/framework/log:go_default_library",
 "//test/e2e/framework/metrics:go_default_library",
 "//test/e2e/framework/ssh:go_default_library",
 "//test/e2e/framework/testfiles:go_default_library",

View File

@@ -33,6 +33,7 @@ import (
 "k8s.io/apimachinery/pkg/runtime/schema"
 "k8s.io/client-go/kubernetes/scheme"
 "k8s.io/client-go/tools/cache"
+e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 "k8s.io/kubernetes/test/e2e/framework/testfiles"
 )
@@ -114,7 +115,7 @@ func visitManifests(cb func([]byte) error, files ...string) error {
 func (f *Framework) PatchItems(items ...interface{}) error {
 for _, item := range items {
 // Uncomment when debugging the loading and patching of items.
-// Logf("patching original content of %T:\n%s", item, PrettyPrint(item))
+// e2elog.Logf("patching original content of %T:\n%s", item, PrettyPrint(item))
 if err := f.patchItemRecursively(item); err != nil {
 return err
 }
@@ -153,7 +154,7 @@ func (f *Framework) CreateItems(items ...interface{}) (func(), error) {
 // to non-namespaced items.
 for _, destructor := range destructors {
 if err := destructor(); err != nil && !apierrs.IsNotFound(err) {
-Logf("deleting failed: %s", err)
+e2elog.Logf("deleting failed: %s", err)
 }
 }
 }
@@ -166,12 +167,12 @@ func (f *Framework) CreateItems(items ...interface{}) (func(), error) {
 description := DescribeItem(item)
 // Uncomment this line to get a full dump of the entire item.
 // description = fmt.Sprintf("%s:\n%s", description, PrettyPrint(item))
-Logf("creating %s", description)
+e2elog.Logf("creating %s", description)
 for _, factory := range factories {
 destructor, err := factory.Create(f, item)
 if destructor != nil {
 destructors = append(destructors, func() error {
-Logf("deleting %s", description)
+e2elog.Logf("deleting %s", description)
 return destructor()
 })
 }
@@ -399,7 +400,7 @@ func (*clusterRoleFactory) Create(f *Framework, i interface{}) (func() error, er
 return nil, errorItemNotSupported
 }
-Logf("Define cluster role %v", item.GetName())
+e2elog.Logf("Define cluster role %v", item.GetName())
 client := f.ClientSet.RbacV1().ClusterRoles()
 if _, err := client.Create(item); err != nil {
 return nil, errors.Wrap(err, "create ClusterRole")

View File

@@ -30,6 +30,7 @@ import (
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 clientset "k8s.io/client-go/kubernetes"
 "k8s.io/kubernetes/test/e2e/framework"
+e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 )
 const (
@@ -42,12 +43,12 @@ func WaitForEndpoint(c clientset.Interface, ns, name string) error {
 for t := time.Now(); time.Since(t) < registerTimeout; time.Sleep(framework.Poll) {
 endpoint, err := c.CoreV1().Endpoints(ns).Get(name, metav1.GetOptions{})
 if apierrs.IsNotFound(err) {
-framework.Logf("Endpoint %s/%s is not ready yet", ns, name)
+e2elog.Logf("Endpoint %s/%s is not ready yet", ns, name)
 continue
 }
 framework.ExpectNoError(err, "Failed to get endpoints for %s/%s", ns, name)
 if len(endpoint.Subsets) == 0 || len(endpoint.Subsets[0].Addresses) == 0 {
-framework.Logf("Endpoint %s/%s is not ready yet", ns, name)
+e2elog.Logf("Endpoint %s/%s is not ready yet", ns, name)
 continue
 }
 return nil

View File

@@ -27,6 +27,7 @@ import (
 "k8s.io/client-go/kubernetes/scheme"
 restclient "k8s.io/client-go/rest"
 "k8s.io/client-go/tools/remotecommand"
+e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 "github.com/onsi/gomega"
 )
@@ -48,7 +49,7 @@ type ExecOptions struct {
 // returning stdout, stderr and error. `options` allowed for
 // additional parameters to be passed.
 func (f *Framework) ExecWithOptions(options ExecOptions) (string, string, error) {
-Logf("ExecWithOptions %+v", options)
+e2elog.Logf("ExecWithOptions %+v", options)
 config, err := LoadConfig()
 ExpectNoError(err, "failed to load restclient config")
@@ -97,7 +98,7 @@ func (f *Framework) ExecCommandInContainerWithFullOutput(podName, containerName
 // ExecCommandInContainer executes a command in the specified container.
 func (f *Framework) ExecCommandInContainer(podName, containerName string, cmd ...string) string {
 stdout, stderr, err := f.ExecCommandInContainerWithFullOutput(podName, containerName, cmd...)
-Logf("Exec stderr: %q", stderr)
+e2elog.Logf("Exec stderr: %q", stderr)
 ExpectNoError(err,
 "failed to execute command in pod %v, container %v: %v",
 podName, containerName, err)

View File

@@ -20,6 +20,8 @@ import (
 "bytes"
 "fmt"
 "sync"
+e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 )
 // FlakeReport is a struct for managing the flake report.
@@ -57,7 +59,7 @@ func (f *FlakeReport) RecordFlakeIfError(err error, optionalDescription ...inter
 if desc != "" {
 msg = fmt.Sprintf("%v (Description: %v)", msg, desc)
 }
-Logf(msg)
+e2elog.Logf(msg)
 f.lock.Lock()
 defer f.lock.Unlock()
 f.Flakes = append(f.Flakes, msg)

View File

@@ -46,6 +46,7 @@ import (
 "k8s.io/client-go/rest"
 "k8s.io/client-go/restmapper"
 scaleclient "k8s.io/client-go/scale"
+e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 "k8s.io/kubernetes/test/e2e/framework/metrics"
 testutils "k8s.io/kubernetes/test/utils"
@@ -222,7 +223,7 @@ func (f *Framework) BeforeEach() {
 err = WaitForDefaultServiceAccountInNamespace(f.ClientSet, namespace.Name)
 ExpectNoError(err)
 } else {
-Logf("Skipping waiting for service account")
+e2elog.Logf("Skipping waiting for service account")
 }
 f.UniqueName = f.Namespace.GetName()
 } else {
@@ -250,7 +251,7 @@ func (f *Framework) BeforeEach() {
 PrintVerboseLogs: false,
 }, nil)
 if err != nil {
-Logf("Error while creating NewResourceUsageGatherer: %v", err)
+e2elog.Logf("Error while creating NewResourceUsageGatherer: %v", err)
 } else {
 go f.gatherer.StartGatheringData()
 }
@@ -271,13 +272,13 @@ func (f *Framework) BeforeEach() {
 if gatherMetricsAfterTest && TestContext.IncludeClusterAutoscalerMetrics {
 grabber, err := metrics.NewMetricsGrabber(f.ClientSet, f.KubemarkExternalClusterClientSet, !ProviderIs("kubemark"), false, false, false, TestContext.IncludeClusterAutoscalerMetrics)
 if err != nil {
-Logf("Failed to create MetricsGrabber (skipping ClusterAutoscaler metrics gathering before test): %v", err)
+e2elog.Logf("Failed to create MetricsGrabber (skipping ClusterAutoscaler metrics gathering before test): %v", err)
 } else {
 f.clusterAutoscalerMetricsBeforeTest, err = grabber.Grab()
 if err != nil {
-Logf("MetricsGrabber failed to grab CA metrics before test (skipping metrics gathering): %v", err)
+e2elog.Logf("MetricsGrabber failed to grab CA metrics before test (skipping metrics gathering): %v", err)
 } else {
-Logf("Gathered ClusterAutoscaler metrics before test")
+e2elog.Logf("Gathered ClusterAutoscaler metrics before test")
 }
 }
@@ -308,15 +309,15 @@ func (f *Framework) AfterEach() {
 if !apierrors.IsNotFound(err) {
 nsDeletionErrors[ns.Name] = err
 } else {
-Logf("Namespace %v was already deleted", ns.Name)
+e2elog.Logf("Namespace %v was already deleted", ns.Name)
 }
 }
 }
 } else {
 if !TestContext.DeleteNamespace {
-Logf("Found DeleteNamespace=false, skipping namespace deletion!")
+e2elog.Logf("Found DeleteNamespace=false, skipping namespace deletion!")
 } else {
-Logf("Found DeleteNamespaceOnFailure=false and current test failed, skipping namespace deletion!")
+e2elog.Logf("Found DeleteNamespaceOnFailure=false and current test failed, skipping namespace deletion!")
 }
 }
@@ -363,11 +364,11 @@ func (f *Framework) AfterEach() {
 grabMetricsFromKubelets := TestContext.GatherMetricsAfterTest != "master" && !ProviderIs("kubemark")
 grabber, err := metrics.NewMetricsGrabber(f.ClientSet, f.KubemarkExternalClusterClientSet, grabMetricsFromKubelets, true, true, true, TestContext.IncludeClusterAutoscalerMetrics)
 if err != nil {
-Logf("Failed to create MetricsGrabber (skipping metrics gathering): %v", err)
+e2elog.Logf("Failed to create MetricsGrabber (skipping metrics gathering): %v", err)
 } else {
 received, err := grabber.Grab()
 if err != nil {
-Logf("MetricsGrabber failed to grab some of the metrics: %v", err)
+e2elog.Logf("MetricsGrabber failed to grab some of the metrics: %v", err)
 }
 (*MetricsForE2E)(&received).computeClusterAutoscalerMetricsDelta(f.clusterAutoscalerMetricsBeforeTest)
 f.TestSummaries = append(f.TestSummaries, (*MetricsForE2E)(&received))
@@ -487,7 +488,7 @@ func (f *Framework) WriteFileViaContainer(podName, containerName string, path st
 command := fmt.Sprintf("echo '%s' > '%s'", contents, path)
 stdout, stderr, err := kubectlExecWithRetry(f.Namespace.Name, podName, containerName, "--", "/bin/sh", "-c", command)
 if err != nil {
-Logf("error running kubectl exec to write file: %v\nstdout=%v\nstderr=%v)", err, string(stdout), string(stderr))
+e2elog.Logf("error running kubectl exec to write file: %v\nstdout=%v\nstderr=%v)", err, string(stdout), string(stderr))
 }
 return err
 }
@@ -498,7 +499,7 @@ func (f *Framework) ReadFileViaContainer(podName, containerName string, path str
 stdout, stderr, err := kubectlExecWithRetry(f.Namespace.Name, podName, containerName, "--", "cat", path)
 if err != nil {
-Logf("error running kubectl exec to read file: %v\nstdout=%v\nstderr=%v)", err, string(stdout), string(stderr))
+e2elog.Logf("error running kubectl exec to read file: %v\nstdout=%v\nstderr=%v)", err, string(stdout), string(stderr))
 }
 return string(stdout), err
 }
@@ -509,7 +510,7 @@ func (f *Framework) CheckFileSizeViaContainer(podName, containerName, path strin
 stdout, stderr, err := kubectlExecWithRetry(f.Namespace.Name, podName, containerName, "--", "ls", "-l", path)
 if err != nil {
-Logf("error running kubectl exec to read file: %v\nstdout=%v\nstderr=%v)", err, string(stdout), string(stderr))
+e2elog.Logf("error running kubectl exec to read file: %v\nstdout=%v\nstderr=%v)", err, string(stdout), string(stderr))
 }
 return string(stdout), err
 }
@@ -546,7 +547,7 @@ func (f *Framework) CreateServiceForSimpleApp(contPort, svcPort int, appName str
 TargetPort: intstr.FromInt(contPort),
 }}
 }
-Logf("Creating a service-for-%v for selecting app=%v-pod", appName, appName)
+e2elog.Logf("Creating a service-for-%v for selecting app=%v-pod", appName, appName)
 service, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(&v1.Service{
 ObjectMeta: metav1.ObjectMeta{
 Name: "service-for-" + appName,
@@ -572,7 +573,7 @@ func (f *Framework) CreatePodsPerNodeForSimpleApp(appName string, podSpec func(n
 for i, node := range nodes.Items {
 // one per node, but no more than maxCount.
 if i <= maxCount {
-Logf("%v/%v : Creating container with label app=%v-pod", i, maxCount, appName)
+e2elog.Logf("%v/%v : Creating container with label app=%v-pod", i, maxCount, appName)
 _, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(&v1.Pod{
 ObjectMeta: metav1.ObjectMeta{
 Name: fmt.Sprintf(appName+"-pod-%v", i),
@@ -643,19 +644,19 @@ func (kc *KubeConfig) FindCluster(name string) *KubeCluster {
 func kubectlExecWithRetry(namespace string, podName, containerName string, args ...string) ([]byte, []byte, error) {
 for numRetries := 0; numRetries < maxKubectlExecRetries; numRetries++ {
 if numRetries > 0 {
-Logf("Retrying kubectl exec (retry count=%v/%v)", numRetries+1, maxKubectlExecRetries)
+e2elog.Logf("Retrying kubectl exec (retry count=%v/%v)", numRetries+1, maxKubectlExecRetries)
 }
 stdOutBytes, stdErrBytes, err := kubectlExec(namespace, podName, containerName, args...)
 if err != nil {
 if strings.Contains(strings.ToLower(string(stdErrBytes)), "i/o timeout") {
 // Retry on "i/o timeout" errors
-Logf("Warning: kubectl exec encountered i/o timeout.\nerr=%v\nstdout=%v\nstderr=%v)", err, string(stdOutBytes), string(stdErrBytes))
+e2elog.Logf("Warning: kubectl exec encountered i/o timeout.\nerr=%v\nstdout=%v\nstderr=%v)", err, string(stdOutBytes), string(stdErrBytes))
 continue
 }
 if strings.Contains(strings.ToLower(string(stdErrBytes)), "container not found") {
 // Retry on "container not found" errors
-Logf("Warning: kubectl exec encountered container not found.\nerr=%v\nstdout=%v\nstderr=%v)", err, string(stdOutBytes), string(stdErrBytes))
+e2elog.Logf("Warning: kubectl exec encountered container not found.\nerr=%v\nstdout=%v\nstderr=%v)", err, string(stdOutBytes), string(stdErrBytes))
 time.Sleep(2 * time.Second)
 continue
 }
@@ -680,7 +681,7 @@ func kubectlExec(namespace string, podName, containerName string, args ...string
 cmd := KubectlCmd(cmdArgs...)
 cmd.Stdout, cmd.Stderr = &stdout, &stderr
-Logf("Running '%s %s'", cmd.Path, strings.Join(cmdArgs, " "))
+e2elog.Logf("Running '%s %s'", cmd.Path, strings.Join(cmdArgs, " "))
 err := cmd.Run()
 return stdout.Bytes(), stderr.Bytes(), err
 }
@@ -787,7 +788,7 @@ func (p *PodStateVerification) filter(c clientset.Interface, namespace *v1.Names
 ns := namespace.Name
 pl, err := filterLabels(p.Selectors, c, ns) // Build an v1.PodList to operate against.
-Logf("Selector matched %v pods for %v", len(pl.Items), p.Selectors)
+e2elog.Logf("Selector matched %v pods for %v", len(pl.Items), p.Selectors)
 if len(pl.Items) == 0 || err != nil {
 return pl.Items, err
 }
@@ -802,7 +803,7 @@ ReturnPodsSoFar:
 }
 passesVerify, err := passesVerifyFilter(pod, p.Verify)
 if err != nil {
-Logf("Error detected on %v : %v !", pod.Name, err)
+e2elog.Logf("Error detected on %v : %v !", pod.Name, err)
 break ReturnPodsSoFar
 }
 if passesVerify {
@@ -823,12 +824,12 @@ func (cl *ClusterVerification) WaitFor(atLeast int, timeout time.Duration) ([]v1
 // Failure
 if returnedErr != nil {
-Logf("Cutting polling short: We got an error from the pod filtering layer.")
+e2elog.Logf("Cutting polling short: We got an error from the pod filtering layer.")
 // stop polling if the pod filtering returns an error. that should never happen.
 // it indicates, for example, that the client is broken or something non-pod related.
 return false, returnedErr
 }
-Logf("Found %v / %v", len(pods), atLeast)
+e2elog.Logf("Found %v / %v", len(pods), atLeast)
 // Success
 if len(pods) >= atLeast {
@@ -837,7 +838,7 @@ func (cl *ClusterVerification) WaitFor(atLeast int, timeout time.Duration) ([]v1
 // Keep trying...
 return false, nil
 })
-Logf("WaitFor completed with timeout %v. Pods found = %v out of %v", timeout, len(pods), atLeast)
+e2elog.Logf("WaitFor completed with timeout %v. Pods found = %v out of %v", timeout, len(pods), atLeast)
 return pods, err
 }
@@ -860,24 +861,24 @@ func (cl *ClusterVerification) ForEach(podFunc func(v1.Pod)) error {
 if len(pods) == 0 {
 Failf("No pods matched the filter.")
 }
-Logf("ForEach: Found %v pods from the filter. Now looping through them.", len(pods))
+e2elog.Logf("ForEach: Found %v pods from the filter. Now looping through them.", len(pods))
 for _, p := range pods {
 podFunc(p)
 }
 } else {
-Logf("ForEach: Something went wrong when filtering pods to execute against: %v", err)
+e2elog.Logf("ForEach: Something went wrong when filtering pods to execute against: %v", err)
 }
 return err
 }
 // GetLogToFileFunc is a convenience function that returns a function that have the same interface as
-// Logf, but writes to a specified file.
+// e2elog.Logf, but writes to a specified file.
 func GetLogToFileFunc(file *os.File) func(format string, args ...interface{}) {
 return func(format string, args ...interface{}) {
 writer := bufio.NewWriter(file)
 if _, err := fmt.Fprintf(writer, format, args...); err != nil {
-Logf("Failed to write file %v with test performance data: %v", file.Name(), err)
+e2elog.Logf("Failed to write file %v with test performance data: %v", file.Name(), err)
 }
 writer.Flush()
 }

View File

@@ -21,6 +21,7 @@ import (
 "fmt"
 "strings"
+e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
 )
@@ -46,7 +47,7 @@ func GetKubemarkMasterComponentsResourceUsage() map[string]*KubemarkResourceUsag
 // Get kubernetes component resource usage
 sshResult, err := getMasterUsageByPrefix("kube")
 if err != nil {
-Logf("Error when trying to SSH to master machine. Skipping probe. %v", err)
+e2elog.Logf("Error when trying to SSH to master machine. Skipping probe. %v", err)
 return nil
 }
 scanner := bufio.NewScanner(strings.NewReader(sshResult))
@@ -64,7 +65,7 @@ func GetKubemarkMasterComponentsResourceUsage() map[string]*KubemarkResourceUsag
 // Get etcd resource usage
 sshResult, err = getMasterUsageByPrefix("bin/etcd")
 if err != nil {
-Logf("Error when trying to SSH to master machine. Skipping probe")
+e2elog.Logf("Error when trying to SSH to master machine. Skipping probe")
 return nil
 }
 scanner = bufio.NewScanner(strings.NewReader(sshResult))

View File

@@ -23,6 +23,8 @@ import (
 "os/exec"
 "path/filepath"
 "strings"
+e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 )
 // TODO: These should really just use the GCE API client library or at least use
@@ -46,9 +48,9 @@ func lookupClusterImageSources() (string, string, error) {
 str = strings.Replace(str, ";", "\n", -1)
 lines := strings.Split(str, "\n")
 if err != nil {
-Logf("lookupDiskImageSources: gcloud error with [%#v]; err:%v", argv, err)
+e2elog.Logf("lookupDiskImageSources: gcloud error with [%#v]; err:%v", argv, err)
 for _, l := range lines {
-Logf(" > %s", l)
+e2elog.Logf(" > %s", l)
 }
 }
 return lines, err
@@ -112,11 +114,11 @@ func lookupClusterImageSources() (string, string, error) {
 func LogClusterImageSources() {
 masterImg, nodeImg, err := lookupClusterImageSources()
 if err != nil {
-Logf("Cluster image sources lookup failed: %v\n", err)
+e2elog.Logf("Cluster image sources lookup failed: %v\n", err)
 return
 }
-Logf("cluster-master-image: %s", masterImg)
-Logf("cluster-node-image: %s", nodeImg)
+e2elog.Logf("cluster-master-image: %s", masterImg)
+e2elog.Logf("cluster-node-image: %s", nodeImg)
 images := map[string]string{
 "master_os_image": masterImg,
@@ -126,7 +128,7 @@ func LogClusterImageSources() {
 outputBytes, _ := json.MarshalIndent(images, "", " ")
 filePath := filepath.Join(TestContext.ReportDir, "images.json")
 if err := ioutil.WriteFile(filePath, outputBytes, 0644); err != nil {
-Logf("cluster images sources, could not write to %q: %v", filePath, err)
+e2elog.Logf("cluster images sources, could not write to %q: %v", filePath, err)
 }
 }

View File

@@ -18,6 +18,7 @@ go_library(
 "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
 "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
 "//test/e2e/framework:go_default_library",
+"//test/e2e/framework/log:go_default_library",
 "//test/e2e/framework/testfiles:go_default_library",
 "//test/e2e/manifest:go_default_library",
 "//test/utils:go_default_library",

View File

@@ -49,6 +49,7 @@ import (
 "k8s.io/apimachinery/pkg/util/wait"
 clientset "k8s.io/client-go/kubernetes"
 "k8s.io/kubernetes/test/e2e/framework"
+e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 "k8s.io/kubernetes/test/e2e/framework/testfiles"
 "k8s.io/kubernetes/test/e2e/manifest"
 testutils "k8s.io/kubernetes/test/utils"
@@ -134,12 +135,12 @@ type E2ELogger struct{}
 // Infof outputs log.
 func (l *E2ELogger) Infof(format string, args ...interface{}) {
-framework.Logf(format, args...)
+e2elog.Logf(format, args...)
 }
 // Errorf outputs log.
 func (l *E2ELogger) Errorf(format string, args ...interface{}) {
-framework.Logf(format, args...)
+e2elog.Logf(format, args...)
 }
 // ConformanceTests contains a closure with an entry and exit log line.
@@ -334,7 +335,7 @@ func BuildInsecureClient(timeout time.Duration) *http.Client {
 // Ingress, it's updated.
 func createTLSSecret(kubeClient clientset.Interface, namespace, secretName string, hosts ...string) (host string, rootCA, privKey []byte, err error) {
 host = strings.Join(hosts, ",")
-framework.Logf("Generating RSA cert for host %v", host)
+e2elog.Logf("Generating RSA cert for host %v", host)
 cert, key, err := GenerateRSACerts(host, true)
 if err != nil {
 return
@@ -351,11 +352,11 @@ func createTLSSecret(kubeClient clientset.Interface, namespace, secretName strin
 var s *v1.Secret
 if s, err = kubeClient.CoreV1().Secrets(namespace).Get(secretName, metav1.GetOptions{}); err == nil {
 // TODO: Retry the update. We don't really expect anything to conflict though.
-framework.Logf("Updating secret %v in ns %v with hosts %v", secret.Name, namespace, host)
+e2elog.Logf("Updating secret %v in ns %v with hosts %v", secret.Name, namespace, host)
 s.Data = secret.Data
 _, err = kubeClient.CoreV1().Secrets(namespace).Update(s)
 } else {
-framework.Logf("Creating secret %v in ns %v with hosts %v", secret.Name, namespace, host)
+e2elog.Logf("Creating secret %v in ns %v with hosts %v", secret.Name, namespace, host)
 _, err = kubeClient.CoreV1().Secrets(namespace).Create(secret)
 }
 return host, cert, key, err
@@ -732,7 +733,7 @@ func (j *TestJig) VerifyURL(route, host string, iterations int, interval time.Du
 for i := 0; i < iterations; i++ {
 b, err := framework.SimpleGET(httpClient, route, host)
 if err != nil {
-framework.Logf(b)
+e2elog.Logf(b)
 return err
 }
 j.Logger.Infof("Verified %v with host %v %d times, sleeping for %v", route, host, i, interval)
@@ -843,14 +844,14 @@ func (cont *NginxIngressController) Init() {
 read := func(file string) string {
 return string(testfiles.ReadOrDie(filepath.Join(IngressManifestPath, "nginx", file), ginkgo.Fail))
 }
-framework.Logf("initializing nginx ingress controller")
+e2elog.Logf("initializing nginx ingress controller")
 framework.RunKubectlOrDieInput(read("rc.yaml"), "create", "-f", "-", fmt.Sprintf("--namespace=%v", cont.Ns))
 rc, err := cont.Client.CoreV1().ReplicationControllers(cont.Ns).Get("nginx-ingress-controller", metav1.GetOptions{})
 framework.ExpectNoError(err)
 cont.rc = rc
-framework.Logf("waiting for pods with label %v", rc.Spec.Selector)
+e2elog.Logf("waiting for pods with label %v", rc.Spec.Selector)
 sel := labels.SelectorFromSet(labels.Set(rc.Spec.Selector))
 framework.ExpectNoError(testutils.WaitForPodsWithLabelRunning(cont.Client, cont.Ns, sel))
 pods, err := cont.Client.CoreV1().Pods(cont.Ns).List(metav1.ListOptions{LabelSelector: sel.String()})
@@ -861,7 +862,7 @@ func (cont *NginxIngressController) Init() {
 cont.pod = &pods.Items[0]
 cont.externalIP, err = framework.GetHostExternalAddress(cont.Client, cont.pod)
 framework.ExpectNoError(err)
-framework.Logf("ingress controller running in pod %v on ip %v", cont.pod.Name, cont.externalIP)
+e2elog.Logf("ingress controller running in pod %v on ip %v", cont.pod.Name, cont.externalIP)
 }
 func generateBacksideHTTPSIngressSpec(ns string) *networkingv1beta1.Ingress {

View File

@@ -38,6 +38,7 @@ import (
 dockermetrics "k8s.io/kubernetes/pkg/kubelet/dockershim/metrics"
 kubeletmetrics "k8s.io/kubernetes/pkg/kubelet/metrics"
 "k8s.io/kubernetes/pkg/master/ports"
+e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 "k8s.io/kubernetes/test/e2e/framework/metrics"
 "github.com/prometheus/common/model"
@@ -183,7 +184,7 @@ func (m *RuntimeOperationMonitor) GetRuntimeOperationErrorRate() map[string]Node
 for node := range m.nodesRuntimeOps {
 nodeResult, err := getNodeRuntimeOperationErrorRate(m.client, node)
 if err != nil {
-Logf("GetRuntimeOperationErrorRate: unable to get kubelet metrics from node %q: %v", node, err)
+e2elog.Logf("GetRuntimeOperationErrorRate: unable to get kubelet metrics from node %q: %v", node, err)
 continue
 }
 m.nodesRuntimeOps[node] = nodeResult
@@ -199,7 +200,7 @@ func (m *RuntimeOperationMonitor) GetLatestRuntimeOperationErrorRate() map[strin
 oldNodeResult := m.nodesRuntimeOps[node]
 curNodeResult, err := getNodeRuntimeOperationErrorRate(m.client, node)
 if err != nil {
-Logf("GetLatestRuntimeOperationErrorRate: unable to get kubelet metrics from node %q: %v", node, err)
+e2elog.Logf("GetLatestRuntimeOperationErrorRate: unable to get kubelet metrics from node %q: %v", node, err)
 continue
 }
 for op, cur := range curNodeResult {
@@ -276,7 +277,7 @@ func HighLatencyKubeletOperations(c clientset.Interface, threshold time.Duration
 for _, m := range latencyMetrics {
 if m.Latency > threshold {
 badMetrics = append(badMetrics, m)
-Logf("%+v", m)
+e2elog.Logf("%+v", m)
 }
 }
 return badMetrics, nil
@@ -517,13 +518,13 @@ func GetKubeletHeapStats(c clientset.Interface, nodeName string) (string, error)
 func PrintAllKubeletPods(c clientset.Interface, nodeName string) {
 podList, err := GetKubeletPods(c, nodeName)
 if err != nil {
-Logf("Unable to retrieve kubelet pods for node %v: %v", nodeName, err)
+e2elog.Logf("Unable to retrieve kubelet pods for node %v: %v", nodeName, err)
 return
 }
 for _, p := range podList.Items {
-Logf("%v from %v started at %v (%d container statuses recorded)", p.Name, p.Namespace, p.Status.StartTime, len(p.Status.ContainerStatuses))
+e2elog.Logf("%v from %v started at %v (%d container statuses recorded)", p.Name, p.Namespace, p.Status.StartTime, len(p.Status.ContainerStatuses))
 for _, c := range p.Status.ContainerStatuses {
-Logf("\tContainer %v ready: %v, restart count %v",
+e2elog.Logf("\tContainer %v ready: %v, restart count %v",
 c.Name, c.Ready, c.RestartCount)
 }
 }
@@ -583,7 +584,7 @@ func (r *resourceCollector) Stop() {
 func (r *resourceCollector) collectStats(oldStatsMap map[string]*stats.ContainerStats) {
 summary, err := getNodeStatsSummary(r.client, r.node)
 if err != nil {
-Logf("Error getting node stats summary on %q, err: %v", r.node, err)
+e2elog.Logf("Error getting node stats summary on %q, err: %v", r.node, err)
 return
 }
 cStatsMap := getSystemContainerStats(summary)
@@ -592,7 +593,7 @@ func (r *resourceCollector) collectStats(oldStatsMap map[string]*stats.Container
 for _, name := range r.containers {
 cStats, ok := cStatsMap[name]
 if !ok {
-Logf("Missing info/stats for container %q on node %q", name, r.node)
+e2elog.Logf("Missing info/stats for container %q on node %q", name, r.node)
 return
 }
@@ -710,9 +711,9 @@ func (r *ResourceMonitor) Reset() {
 func (r *ResourceMonitor) LogLatest() {
 summary, err := r.GetLatest()
 if err != nil {
-Logf("%v", err)
+e2elog.Logf("%v", err)
 }
-Logf("%s", r.FormatResourceUsage(summary))
+e2elog.Logf("%s", r.FormatResourceUsage(summary))
 }
 // FormatResourceUsage returns the formatted string for LogLatest().
@@ -824,7 +825,7 @@ func (r *ResourceMonitor) FormatCPUSummary(summary NodesCPUSummary) string {
 // LogCPUSummary outputs summary of CPU into log.
 func (r *ResourceMonitor) LogCPUSummary() {
 summary := r.GetCPUSummary()
-Logf("%s", r.FormatCPUSummary(summary))
+e2elog.Logf("%s", r.FormatCPUSummary(summary))
 }
 // GetCPUSummary returns summary of CPU.

View File

@@ -26,6 +26,7 @@ import (
 "time"
 clientset "k8s.io/client-go/kubernetes"
+e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
 )
@@ -257,10 +258,10 @@ func (g *LogSizeGatherer) Work() bool {
 TestContext.Provider,
 )
 if err != nil {
-Logf("Error while trying to SSH to %v, skipping probe. Error: %v", workItem.ip, err)
+e2elog.Logf("Error while trying to SSH to %v, skipping probe. Error: %v", workItem.ip, err)
 // In case of repeated error give up.
 if workItem.backoffMultiplier >= 128 {
-Logf("Failed to ssh to a node %v multiple times in a row. Giving up.", workItem.ip)
+e2elog.Logf("Failed to ssh to a node %v multiple times in a row. Giving up.", workItem.ip)
 g.wg.Done()
 return false
 }
@@ -276,7 +277,7 @@ func (g *LogSizeGatherer) Work() bool {
 path := results[i]
 size, err := strconv.Atoi(results[i+1])
 if err != nil {
-Logf("Error during conversion to int: %v, skipping data. Error: %v", results[i+1], err)
+e2elog.Logf("Error during conversion to int: %v, skipping data. Error: %v", results[i+1], err)
 continue
 }
 g.data.addNewData(workItem.ip, path, now, size)

View File

@@ -36,6 +36,7 @@ import (
 "k8s.io/kubernetes/pkg/master/ports"
 schedulermetric "k8s.io/kubernetes/pkg/scheduler/metrics"
 "k8s.io/kubernetes/pkg/util/system"
+e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 "k8s.io/kubernetes/test/e2e/framework/metrics"
 e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
@@ -325,7 +326,7 @@ func NewEtcdMetricsCollector() *EtcdMetricsCollector {
 func getEtcdMetrics() ([]*model.Sample, error) {
 // Etcd is only exposed on localhost level. We are using ssh method
 if TestContext.Provider == "gke" || TestContext.Provider == "eks" {
-Logf("Not grabbing etcd metrics through master SSH: unsupported for %s", TestContext.Provider)
+e2elog.Logf("Not grabbing etcd metrics through master SSH: unsupported for %s", TestContext.Provider)
 return nil, nil
 }
@@ -363,7 +364,7 @@ func (mc *EtcdMetricsCollector) StartCollecting(interval time.Duration) {
 case <-time.After(interval):
 dbSize, err := getEtcdDatabaseSize()
 if err != nil {
-Logf("Failed to collect etcd database size")
+e2elog.Logf("Failed to collect etcd database size")
 continue
 }
 mc.metrics.MaxDatabaseSize = math.Max(mc.metrics.MaxDatabaseSize, dbSize)
@@ -573,7 +574,7 @@ func HighLatencyRequests(c clientset.Interface, nodeCount int) (int, *APIRespons
 if isBad {
 prefix = "WARNING "
 }
-Logf("%vTop latency metric: %+v", prefix, metrics.APICalls[i])
+e2elog.Logf("%vTop latency metric: %+v", prefix, metrics.APICalls[i])
 }
 }
 return badMetrics, metrics, nil
@@ -596,7 +597,7 @@ func VerifyLatencyWithinThreshold(threshold, actual LatencyMetric, metricName st
 // ResetMetrics resets latency metrics in apiserver.
 func ResetMetrics(c clientset.Interface) error {
-Logf("Resetting latency metrics in apiserver...")
+e2elog.Logf("Resetting latency metrics in apiserver...")
 body, err := c.CoreV1().RESTClient().Delete().AbsPath("/metrics").DoRaw()
 if err != nil {
 return err
@@ -652,7 +653,7 @@ func sendRestRequestToScheduler(c clientset.Interface, op string) (string, error
 } else {
 // If master is not registered fall back to old method of using SSH.
 if TestContext.Provider == "gke" || TestContext.Provider == "eks" {
-Logf("Not grabbing scheduler metrics through master SSH: unsupported for %s", TestContext.Provider)
+e2elog.Logf("Not grabbing scheduler metrics through master SSH: unsupported for %s", TestContext.Provider)
 return "", nil
 }
@@ -751,12 +752,12 @@ func convertSampleToBucket(sample *model.Sample, h *HistogramVec) {
 func PrettyPrintJSON(metrics interface{}) string {
 output := &bytes.Buffer{}
 if err := json.NewEncoder(output).Encode(metrics); err != nil {
-Logf("Error building encoder: %v", err)
+e2elog.Logf("Error building encoder: %v", err)
 return ""
 }
 formatted := &bytes.Buffer{}
 if err := json.Indent(formatted, output.Bytes(), "", " "); err != nil {
-Logf("Error indenting: %v", err)
+e2elog.Logf("Error indenting: %v", err)
 return ""
 }
 return string(formatted.Bytes())
@@ -819,18 +820,18 @@ func LogSuspiciousLatency(latencyData []PodLatencyData, latencyDataLag []PodLate
 }
 for _, l := range latencyData {
 if l.Latency > NodeStartupThreshold {
-HighLatencyKubeletOperations(c, 1*time.Second, l.Node, Logf)
+HighLatencyKubeletOperations(c, 1*time.Second, l.Node, e2elog.Logf)
 }
 }
-Logf("Approx throughput: %v pods/min",
+e2elog.Logf("Approx throughput: %v pods/min",
 float64(nodeCount)/(latencyDataLag[len(latencyDataLag)-1].Latency.Minutes()))
 }
 // PrintLatencies outputs latencies to log with readable format.
 func PrintLatencies(latencies []PodLatencyData, header string) {
 metrics := ExtractLatencyMetrics(latencies)
-Logf("10%% %s: %v", header, latencies[(len(latencies)*9)/10:])
-Logf("perc50: %v, perc90: %v, perc99: %v", metrics.Perc50, metrics.Perc90, metrics.Perc99)
+e2elog.Logf("10%% %s: %v", header, latencies[(len(latencies)*9)/10:])
+e2elog.Logf("perc50: %v, perc90: %v, perc99: %v", metrics.Perc50, metrics.Perc90, metrics.Perc99)
 }
 func (m *MetricsForE2E) computeClusterAutoscalerMetricsDelta(before metrics.Collection) {

View File

@ -38,6 +38,7 @@ import (
"k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
coreclientset "k8s.io/client-go/kubernetes/typed/core/v1" coreclientset "k8s.io/client-go/kubernetes/typed/core/v1"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
imageutils "k8s.io/kubernetes/test/utils/image" imageutils "k8s.io/kubernetes/test/utils/image"
) )
@ -157,10 +158,10 @@ func (config *NetworkingTestConfig) diagnoseMissingEndpoints(foundEndpoints sets
if foundEndpoints.Has(e.Name) { if foundEndpoints.Has(e.Name) {
continue continue
} }
Logf("\nOutput of kubectl describe pod %v/%v:\n", e.Namespace, e.Name) e2elog.Logf("\nOutput of kubectl describe pod %v/%v:\n", e.Namespace, e.Name)
desc, _ := RunKubectl( desc, _ := RunKubectl(
"describe", "pod", e.Name, fmt.Sprintf("--namespace=%v", e.Namespace)) "describe", "pod", e.Name, fmt.Sprintf("--namespace=%v", e.Namespace))
Logf(desc) e2elog.Logf(desc)
} }
} }
@ -205,11 +206,11 @@ func (config *NetworkingTestConfig) DialFromContainer(protocol, containerIP, tar
// A failure to kubectl exec counts as a try, not a hard fail. // A failure to kubectl exec counts as a try, not a hard fail.
// Also note that we will keep failing for maxTries in tests where // Also note that we will keep failing for maxTries in tests where
// we confirm unreachability. // we confirm unreachability.
Logf("Failed to execute %q: %v, stdout: %q, stderr %q", cmd, err, stdout, stderr) e2elog.Logf("Failed to execute %q: %v, stdout: %q, stderr %q", cmd, err, stdout, stderr)
} else { } else {
var output map[string][]string var output map[string][]string
if err := json.Unmarshal([]byte(stdout), &output); err != nil { if err := json.Unmarshal([]byte(stdout), &output); err != nil {
Logf("WARNING: Failed to unmarshal curl response. Cmd %v run in %v, output: %s, err: %v", e2elog.Logf("WARNING: Failed to unmarshal curl response. Cmd %v run in %v, output: %s, err: %v",
cmd, config.HostTestContainerPod.Name, stdout, err) cmd, config.HostTestContainerPod.Name, stdout, err)
continue continue
} }
@ -221,7 +222,7 @@ func (config *NetworkingTestConfig) DialFromContainer(protocol, containerIP, tar
} }
} }
} }
Logf("Waiting for endpoints: %v", expectedEps.Difference(eps)) e2elog.Logf("Waiting for endpoints: %v", expectedEps.Difference(eps))
// Check against i+1 so we exit if minTries == maxTries. // Check against i+1 so we exit if minTries == maxTries.
if (eps.Equal(expectedEps) || eps.Len() == 0 && expectedEps.Len() == 0) && i+1 >= minTries { if (eps.Equal(expectedEps) || eps.Len() == 0 && expectedEps.Len() == 0) && i+1 >= minTries {
@ -264,12 +265,12 @@ func (config *NetworkingTestConfig) GetEndpointsFromContainer(protocol, containe
// A failure to kubectl exec counts as a try, not a hard fail. // A failure to kubectl exec counts as a try, not a hard fail.
// Also note that we will keep failing for maxTries in tests where // Also note that we will keep failing for maxTries in tests where
// we confirm unreachability. // we confirm unreachability.
Logf("Failed to execute %q: %v, stdout: %q, stderr: %q", cmd, err, stdout, stderr) e2elog.Logf("Failed to execute %q: %v, stdout: %q, stderr: %q", cmd, err, stdout, stderr)
} else { } else {
Logf("Tries: %d, in try: %d, stdout: %v, stderr: %v, command run in: %#v", tries, i, stdout, stderr, config.HostTestContainerPod) e2elog.Logf("Tries: %d, in try: %d, stdout: %v, stderr: %v, command run in: %#v", tries, i, stdout, stderr, config.HostTestContainerPod)
var output map[string][]string var output map[string][]string
if err := json.Unmarshal([]byte(stdout), &output); err != nil { if err := json.Unmarshal([]byte(stdout), &output); err != nil {
Logf("WARNING: Failed to unmarshal curl response. Cmd %v run in %v, output: %s, err: %v", e2elog.Logf("WARNING: Failed to unmarshal curl response. Cmd %v run in %v, output: %s, err: %v",
cmd, config.HostTestContainerPod.Name, stdout, err) cmd, config.HostTestContainerPod.Name, stdout, err)
continue continue
} }
@ -323,7 +324,7 @@ func (config *NetworkingTestConfig) DialFromNode(protocol, targetIP string, targ
// A failure to exec command counts as a try, not a hard fail. // A failure to exec command counts as a try, not a hard fail.
// Also note that we will keep failing for maxTries in tests where // Also note that we will keep failing for maxTries in tests where
// we confirm unreachability. // we confirm unreachability.
Logf("Failed to execute %q: %v, stdout: %q, stderr: %q", filterCmd, err, stdout, stderr) e2elog.Logf("Failed to execute %q: %v, stdout: %q, stderr: %q", filterCmd, err, stdout, stderr)
} else { } else {
trimmed := strings.TrimSpace(stdout) trimmed := strings.TrimSpace(stdout)
if trimmed != "" { if trimmed != "" {
@ -333,11 +334,11 @@ func (config *NetworkingTestConfig) DialFromNode(protocol, targetIP string, targ
// Check against i+1 so we exit if minTries == maxTries. // Check against i+1 so we exit if minTries == maxTries.
if eps.Equal(expectedEps) && i+1 >= minTries { if eps.Equal(expectedEps) && i+1 >= minTries {
Logf("Found all expected endpoints: %+v", eps.List()) e2elog.Logf("Found all expected endpoints: %+v", eps.List())
return return
} }
Logf("Waiting for %+v endpoints (expected=%+v, actual=%+v)", expectedEps.Difference(eps).List(), expectedEps.List(), eps.List()) e2elog.Logf("Waiting for %+v endpoints (expected=%+v, actual=%+v)", expectedEps.Difference(eps).List(), expectedEps.List(), eps.List())
// TODO: get rid of this delay #36281 // TODO: get rid of this delay #36281
time.Sleep(hitEndpointRetryDelay) time.Sleep(hitEndpointRetryDelay)
@ -377,20 +378,20 @@ func (config *NetworkingTestConfig) executeCurlCmd(cmd string, expected string)
stdout, err := RunHostCmd(config.Namespace, podName, cmd) stdout, err := RunHostCmd(config.Namespace, podName, cmd)
if err != nil { if err != nil {
msg = fmt.Sprintf("failed executing cmd %v in %v/%v: %v", cmd, config.Namespace, podName, err) msg = fmt.Sprintf("failed executing cmd %v in %v/%v: %v", cmd, config.Namespace, podName, err)
Logf(msg) e2elog.Logf(msg)
return false, nil return false, nil
} }
if !strings.Contains(stdout, expected) { if !strings.Contains(stdout, expected) {
msg = fmt.Sprintf("successfully executed %v in %v/%v, but output '%v' doesn't contain expected string '%v'", cmd, config.Namespace, podName, stdout, expected) msg = fmt.Sprintf("successfully executed %v in %v/%v, but output '%v' doesn't contain expected string '%v'", cmd, config.Namespace, podName, stdout, expected)
Logf(msg) e2elog.Logf(msg)
return false, nil return false, nil
} }
return true, nil return true, nil
}); pollErr != nil { }); pollErr != nil {
Logf("\nOutput of kubectl describe pod %v/%v:\n", config.Namespace, podName) e2elog.Logf("\nOutput of kubectl describe pod %v/%v:\n", config.Namespace, podName)
desc, _ := RunKubectl( desc, _ := RunKubectl(
"describe", "pod", podName, fmt.Sprintf("--namespace=%v", config.Namespace)) "describe", "pod", podName, fmt.Sprintf("--namespace=%v", config.Namespace))
Logf("%s", desc) e2elog.Logf("%s", desc)
Failf("Timed out in %v: %v", retryTimeout, msg) Failf("Timed out in %v: %v", retryTimeout, msg)
} }
} }
@ -708,12 +709,12 @@ func CheckReachabilityFromPod(expectToBeReachable bool, timeout time.Duration, n
err := wait.PollImmediate(Poll, timeout, func() (bool, error) { err := wait.PollImmediate(Poll, timeout, func() (bool, error) {
_, err := RunHostCmd(namespace, pod, cmd) _, err := RunHostCmd(namespace, pod, cmd)
if expectToBeReachable && err != nil { if expectToBeReachable && err != nil {
Logf("Expect target to be reachable. But got err: %v. Retry until timeout", err) e2elog.Logf("Expect target to be reachable. But got err: %v. Retry until timeout", err)
return false, nil return false, nil
} }
if !expectToBeReachable && err == nil { if !expectToBeReachable && err == nil {
Logf("Expect target NOT to be reachable. But it is reachable. Retry until timeout") e2elog.Logf("Expect target NOT to be reachable. But it is reachable. Retry until timeout")
return false, nil return false, nil
} }
return true, nil return true, nil
@ -797,7 +798,7 @@ func PokeHTTP(host string, port int, path string, params *HTTPPokeParams) HTTPPo
params.ExpectCode = http.StatusOK params.ExpectCode = http.StatusOK
} }
Logf("Poking %q", url) e2elog.Logf("Poking %q", url)
resp, err := httpGetNoConnectionPoolTimeout(url, params.Timeout) resp, err := httpGetNoConnectionPoolTimeout(url, params.Timeout)
if err != nil { if err != nil {
@ -810,7 +811,7 @@ func PokeHTTP(host string, port int, path string, params *HTTPPokeParams) HTTPPo
} else { } else {
ret.Status = HTTPError ret.Status = HTTPError
} }
Logf("Poke(%q): %v", url, err) e2elog.Logf("Poke(%q): %v", url, err)
return ret return ret
} }
@ -821,7 +822,7 @@ func PokeHTTP(host string, port int, path string, params *HTTPPokeParams) HTTPPo
if err != nil { if err != nil {
ret.Status = HTTPError ret.Status = HTTPError
ret.Error = fmt.Errorf("error reading HTTP body: %v", err) ret.Error = fmt.Errorf("error reading HTTP body: %v", err)
Logf("Poke(%q): %v", url, ret.Error) e2elog.Logf("Poke(%q): %v", url, ret.Error)
return ret return ret
} }
ret.Body = make([]byte, len(body)) ret.Body = make([]byte, len(body))
@ -832,25 +833,25 @@ func PokeHTTP(host string, port int, path string, params *HTTPPokeParams) HTTPPo
if resp.StatusCode == code { if resp.StatusCode == code {
ret.Error = fmt.Errorf("retriable status code: %d", resp.StatusCode) ret.Error = fmt.Errorf("retriable status code: %d", resp.StatusCode)
ret.Status = HTTPRetryCode ret.Status = HTTPRetryCode
Logf("Poke(%q): %v", url, ret.Error) e2elog.Logf("Poke(%q): %v", url, ret.Error)
return ret return ret
} }
} }
ret.Status = HTTPWrongCode ret.Status = HTTPWrongCode
ret.Error = fmt.Errorf("bad status code: %d", resp.StatusCode) ret.Error = fmt.Errorf("bad status code: %d", resp.StatusCode)
Logf("Poke(%q): %v", url, ret.Error) e2elog.Logf("Poke(%q): %v", url, ret.Error)
return ret return ret
} }
if params.BodyContains != "" && !strings.Contains(string(body), params.BodyContains) { if params.BodyContains != "" && !strings.Contains(string(body), params.BodyContains) {
ret.Status = HTTPBadResponse ret.Status = HTTPBadResponse
ret.Error = fmt.Errorf("response does not contain expected substring: %q", string(body)) ret.Error = fmt.Errorf("response does not contain expected substring: %q", string(body))
Logf("Poke(%q): %v", url, ret.Error) e2elog.Logf("Poke(%q): %v", url, ret.Error)
return ret return ret
} }
ret.Status = HTTPSuccess ret.Status = HTTPSuccess
Logf("Poke(%q): success", url) e2elog.Logf("Poke(%q): success", url)
return ret return ret
} }
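A hedged usage sketch for the PokeHTTP helper whose logging is rewritten above. The host and path are placeholders; only the parameter and result fields visible in this diff (Timeout, ExpectCode, BodyContains, Status, Error) are used, unqualified as from within the framework package.

// Assumed imports: "net/http", "time",
// e2elog "k8s.io/kubernetes/test/e2e/framework/log".
params := &HTTPPokeParams{
	Timeout:      5 * time.Second,
	ExpectCode:   http.StatusOK, // the default applied above when no code is set
	BodyContains: "ok",          // optional substring check, as in the hunk above
}
result := PokeHTTP("203.0.113.10", 80, "/healthz", params)
if result.Status != HTTPSuccess {
	e2elog.Logf("poke failed: %v", result.Error)
}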
@ -930,13 +931,13 @@ func PokeUDP(host string, port int, request string, params *UDPPokeParams) UDPPo
params = &UDPPokeParams{} params = &UDPPokeParams{}
} }
Logf("Poking %v", url) e2elog.Logf("Poking %v", url)
con, err := net.Dial("udp", hostPort) con, err := net.Dial("udp", hostPort)
if err != nil { if err != nil {
ret.Status = UDPError ret.Status = UDPError
ret.Error = err ret.Error = err
Logf("Poke(%q): %v", url, err) e2elog.Logf("Poke(%q): %v", url, err)
return ret return ret
} }
@ -951,7 +952,7 @@ func PokeUDP(host string, port int, request string, params *UDPPokeParams) UDPPo
} else { } else {
ret.Status = UDPError ret.Status = UDPError
} }
Logf("Poke(%q): %v", url, err) e2elog.Logf("Poke(%q): %v", url, err)
return ret return ret
} }
@ -960,7 +961,7 @@ func PokeUDP(host string, port int, request string, params *UDPPokeParams) UDPPo
if err != nil { if err != nil {
ret.Status = UDPError ret.Status = UDPError
ret.Error = err ret.Error = err
Logf("Poke(%q): %v", url, err) e2elog.Logf("Poke(%q): %v", url, err)
return ret return ret
} }
} }
@ -981,7 +982,7 @@ func PokeUDP(host string, port int, request string, params *UDPPokeParams) UDPPo
} else { } else {
ret.Status = UDPError ret.Status = UDPError
} }
Logf("Poke(%q): %v", url, err) e2elog.Logf("Poke(%q): %v", url, err)
return ret return ret
} }
ret.Response = buf[0:n] ret.Response = buf[0:n]
@ -989,12 +990,12 @@ func PokeUDP(host string, port int, request string, params *UDPPokeParams) UDPPo
if params.Response != "" && string(ret.Response) != params.Response { if params.Response != "" && string(ret.Response) != params.Response {
ret.Status = UDPBadResponse ret.Status = UDPBadResponse
ret.Error = fmt.Errorf("response does not match expected string: %q", string(ret.Response)) ret.Error = fmt.Errorf("response does not match expected string: %q", string(ret.Response))
Logf("Poke(%q): %v", url, ret.Error) e2elog.Logf("Poke(%q): %v", url, ret.Error)
return ret return ret
} }
ret.Status = UDPSuccess ret.Status = UDPSuccess
Logf("Poke(%q): success", url) e2elog.Logf("Poke(%q): success", url)
return ret return ret
} }
@ -1006,7 +1007,7 @@ func TestHitNodesFromOutside(externalIP string, httpPort int32, timeout time.Dur
// TestHitNodesFromOutsideWithCount checks HTTP connectivity from outside with count. // TestHitNodesFromOutsideWithCount checks HTTP connectivity from outside with count.
func TestHitNodesFromOutsideWithCount(externalIP string, httpPort int32, timeout time.Duration, expectedHosts sets.String, func TestHitNodesFromOutsideWithCount(externalIP string, httpPort int32, timeout time.Duration, expectedHosts sets.String,
countToSucceed int) error { countToSucceed int) error {
Logf("Waiting up to %v for satisfying expectedHosts for %v times", timeout, countToSucceed) e2elog.Logf("Waiting up to %v for satisfying expectedHosts for %v times", timeout, countToSucceed)
hittedHosts := sets.NewString() hittedHosts := sets.NewString()
count := 0 count := 0
condition := func() (bool, error) { condition := func() (bool, error) {
@ -1017,13 +1018,13 @@ func TestHitNodesFromOutsideWithCount(externalIP string, httpPort int32, timeout
hittedHost := strings.TrimSpace(string(result.Body)) hittedHost := strings.TrimSpace(string(result.Body))
if !expectedHosts.Has(hittedHost) { if !expectedHosts.Has(hittedHost) {
Logf("Error hitting unexpected host: %v, reset counter: %v", hittedHost, count) e2elog.Logf("Error hitting unexpected host: %v, reset counter: %v", hittedHost, count)
count = 0 count = 0
return false, nil return false, nil
} }
if !hittedHosts.Has(hittedHost) { if !hittedHosts.Has(hittedHost) {
hittedHosts.Insert(hittedHost) hittedHosts.Insert(hittedHost)
Logf("Missing %+v, got %+v", expectedHosts.Difference(hittedHosts), hittedHosts) e2elog.Logf("Missing %+v, got %+v", expectedHosts.Difference(hittedHosts), hittedHosts)
} }
if hittedHosts.Equal(expectedHosts) { if hittedHosts.Equal(expectedHosts) {
count++ count++
@ -1063,7 +1064,7 @@ func TestUnderTemporaryNetworkFailure(c clientset.Interface, ns string, node *v1
} }
}() }()
Logf("Waiting %v to ensure node %s is ready before beginning test...", resizeNodeReadyTimeout, node.Name) e2elog.Logf("Waiting %v to ensure node %s is ready before beginning test...", resizeNodeReadyTimeout, node.Name)
if !WaitForNodeToBe(c, node.Name, v1.NodeReady, true, resizeNodeReadyTimeout) { if !WaitForNodeToBe(c, node.Name, v1.NodeReady, true, resizeNodeReadyTimeout) {
Failf("Node %s did not become ready within %v", node.Name, resizeNodeReadyTimeout) Failf("Node %s did not become ready within %v", node.Name, resizeNodeReadyTimeout)
} }
@ -1071,7 +1072,7 @@ func TestUnderTemporaryNetworkFailure(c clientset.Interface, ns string, node *v1
BlockNetwork(host, masterAddress) BlockNetwork(host, masterAddress)
} }
Logf("Waiting %v for node %s to be not ready after simulated network failure", resizeNodeNotReadyTimeout, node.Name) e2elog.Logf("Waiting %v for node %s to be not ready after simulated network failure", resizeNodeNotReadyTimeout, node.Name)
if !WaitForNodeToBe(c, node.Name, v1.NodeReady, false, resizeNodeNotReadyTimeout) { if !WaitForNodeToBe(c, node.Name, v1.NodeReady, false, resizeNodeNotReadyTimeout) {
Failf("Node %s did not become not-ready within %v", node.Name, resizeNodeNotReadyTimeout) Failf("Node %s did not become not-ready within %v", node.Name, resizeNodeNotReadyTimeout)
} }
@ -28,6 +28,7 @@ import (
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2essh "k8s.io/kubernetes/test/e2e/framework/ssh" e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
) )
@ -109,7 +110,7 @@ func appendContainerCommandGroupIfNeeded(args []string) []string {
} }
func masterUpgradeGKE(v string) error { func masterUpgradeGKE(v string) error {
Logf("Upgrading master to %q", v) e2elog.Logf("Upgrading master to %q", v)
args := []string{ args := []string{
"container", "container",
"clusters", "clusters",
@ -132,7 +133,7 @@ func masterUpgradeGKE(v string) error {
} }
func masterUpgradeKubernetesAnywhere(v string) error { func masterUpgradeKubernetesAnywhere(v string) error {
Logf("Upgrading master to %q", v) e2elog.Logf("Upgrading master to %q", v)
kaPath := TestContext.KubernetesAnywherePath kaPath := TestContext.KubernetesAnywherePath
originalConfigPath := filepath.Join(kaPath, ".config") originalConfigPath := filepath.Join(kaPath, ".config")
@ -150,7 +151,7 @@ func masterUpgradeKubernetesAnywhere(v string) error {
defer func() { defer func() {
// revert .config.bak to .config // revert .config.bak to .config
if err := os.Rename(backupConfigPath, originalConfigPath); err != nil { if err := os.Rename(backupConfigPath, originalConfigPath); err != nil {
Logf("Could not rename %s back to %s", backupConfigPath, originalConfigPath) e2elog.Logf("Could not rename %s back to %s", backupConfigPath, originalConfigPath)
} }
}() }()
@ -205,7 +206,7 @@ func waitForNodesReadyAfterUpgrade(f *Framework) error {
if err != nil { if err != nil {
return fmt.Errorf("couldn't detect number of nodes") return fmt.Errorf("couldn't detect number of nodes")
} }
Logf("Waiting up to %v for all %d nodes to be ready after the upgrade", RestartNodeReadyAgainTimeout, numNodes) e2elog.Logf("Waiting up to %v for all %d nodes to be ready after the upgrade", RestartNodeReadyAgainTimeout, numNodes)
if _, err := CheckNodesReady(f.ClientSet, numNodes, RestartNodeReadyAgainTimeout); err != nil { if _, err := CheckNodesReady(f.ClientSet, numNodes, RestartNodeReadyAgainTimeout); err != nil {
return err return err
} }
@ -226,7 +227,7 @@ func nodeUpgradeGCE(rawV, img string, enableKubeProxyDaemonSet bool) error {
} }
func nodeUpgradeGKE(v string, img string) error { func nodeUpgradeGKE(v string, img string) error {
Logf("Upgrading nodes to version %q and image %q", v, img) e2elog.Logf("Upgrading nodes to version %q and image %q", v, img)
args := []string{ args := []string{
"container", "container",
"clusters", "clusters",
@ -277,7 +278,7 @@ func MigTemplate() (string, error) {
if val := ParseKVLines(output, key); len(val) > 0 { if val := ParseKVLines(output, key); len(val) > 0 {
url := strings.Split(val, "/") url := strings.Split(val, "/")
templ = url[len(url)-1] templ = url[len(url)-1]
Logf("MIG group %s using template: %s", TestContext.CloudConfig.NodeInstanceGroup, templ) e2elog.Logf("MIG group %s using template: %s", TestContext.CloudConfig.NodeInstanceGroup, templ)
return true, nil return true, nil
} }
errLast = fmt.Errorf("couldn't find %s in output to get MIG template. Output: %s", key, output) errLast = fmt.Errorf("couldn't find %s in output to get MIG template. Output: %s", key, output)
@ -296,7 +297,7 @@ func gceUpgradeScript() string {
} }
func waitForSSHTunnels() { func waitForSSHTunnels() {
Logf("Waiting for SSH tunnels to establish") e2elog.Logf("Waiting for SSH tunnels to establish")
RunKubectl("run", "ssh-tunnel-test", RunKubectl("run", "ssh-tunnel-test",
"--image=busybox", "--image=busybox",
"--restart=Never", "--restart=Never",
@ -351,19 +352,19 @@ func (k *NodeKiller) kill(nodes []v1.Node) {
go func() { go func() {
defer wg.Done() defer wg.Done()
Logf("Stopping docker and kubelet on %q to simulate failure", node.Name) e2elog.Logf("Stopping docker and kubelet on %q to simulate failure", node.Name)
err := e2essh.IssueSSHCommand("sudo systemctl stop docker kubelet", k.provider, &node) err := e2essh.IssueSSHCommand("sudo systemctl stop docker kubelet", k.provider, &node)
if err != nil { if err != nil {
Logf("ERROR while stopping node %q: %v", node.Name, err) e2elog.Logf("ERROR while stopping node %q: %v", node.Name, err)
return return
} }
time.Sleep(k.config.SimulatedDowntime) time.Sleep(k.config.SimulatedDowntime)
Logf("Rebooting %q to repair the node", node.Name) e2elog.Logf("Rebooting %q to repair the node", node.Name)
err = e2essh.IssueSSHCommand("sudo reboot", k.provider, &node) err = e2essh.IssueSSHCommand("sudo reboot", k.provider, &node)
if err != nil { if err != nil {
Logf("ERROR while rebooting node %q: %v", node.Name, err) e2elog.Logf("ERROR while rebooting node %q: %v", node.Name, err)
return return
} }
}() }()
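A condensed sketch of the kill loop these NodeKiller hunks log from, assuming the nodes slice, the provider string, and the downtime duration are supplied by the caller (the real method reads them from k.config); the closing wg.Wait() is implied rather than shown in the diff.

// Assumed imports: "sync", "time",
// e2elog "k8s.io/kubernetes/test/e2e/framework/log",
// e2essh "k8s.io/kubernetes/test/e2e/framework/ssh".
var wg sync.WaitGroup
for _, node := range nodes {
	node := node // capture the range variable for the goroutine
	wg.Add(1)
	go func() {
		defer wg.Done()
		e2elog.Logf("Stopping docker and kubelet on %q to simulate failure", node.Name)
		if err := e2essh.IssueSSHCommand("sudo systemctl stop docker kubelet", provider, &node); err != nil {
			e2elog.Logf("ERROR while stopping node %q: %v", node.Name, err)
			return
		}
		time.Sleep(downtime)
		e2elog.Logf("Rebooting %q to repair the node", node.Name)
		if err := e2essh.IssueSSHCommand("sudo reboot", provider, &node); err != nil {
			e2elog.Logf("ERROR while rebooting node %q: %v", node.Name, err)
		}
	}()
}
wg.Wait()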
@ -19,6 +19,7 @@ package framework
import ( import (
"fmt" "fmt"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
"k8s.io/kubernetes/test/e2e/perftype" "k8s.io/kubernetes/test/e2e/perftype"
) )
@ -100,7 +101,7 @@ func CPUUsageToPerfData(usagePerNode NodesCPUSummary) *perftype.PerfData {
func PrintPerfData(p *perftype.PerfData) { func PrintPerfData(p *perftype.PerfData) {
// Notice that we must make sure the perftype.PerfResultEnd is in a new line. // Notice that we must make sure the perftype.PerfResultEnd is in a new line.
if str := PrettyPrintJSON(p); str != "" { if str := PrettyPrintJSON(p); str != "" {
Logf("%s %s\n%s", perftype.PerfResultTag, str, perftype.PerfResultEnd) e2elog.Logf("%s %s\n%s", perftype.PerfResultTag, str, perftype.PerfResultEnd)
} }
} }
@ -33,6 +33,7 @@ import (
podutil "k8s.io/kubernetes/pkg/api/v1/pod" podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/kubelet/events" "k8s.io/kubernetes/pkg/kubelet/events"
"k8s.io/kubernetes/pkg/kubelet/sysctl" "k8s.io/kubernetes/pkg/kubelet/sysctl"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
"github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
"github.com/onsi/gomega" "github.com/onsi/gomega"
@ -149,11 +150,11 @@ func (c *PodClient) Update(name string, updateFn func(pod *v1.Pod)) {
updateFn(pod) updateFn(pod)
_, err = c.PodInterface.Update(pod) _, err = c.PodInterface.Update(pod)
if err == nil { if err == nil {
Logf("Successfully updated pod %q", name) e2elog.Logf("Successfully updated pod %q", name)
return true, nil return true, nil
} }
if errors.IsConflict(err) { if errors.IsConflict(err) {
Logf("Conflicting update to pod %q, re-get and re-update: %v", name, err) e2elog.Logf("Conflicting update to pod %q, re-get and re-update: %v", name, err)
return false, nil return false, nil
} }
return false, fmt.Errorf("failed to update pod %q: %v", name, err) return false, fmt.Errorf("failed to update pod %q: %v", name, err)
@ -26,6 +26,7 @@ import (
"sync" "sync"
"time" "time"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2essh "k8s.io/kubernetes/test/e2e/framework/ssh" e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
) )
@ -181,7 +182,7 @@ func GatherCPUProfileForSeconds(componentName string, profileBaseName string, se
defer wg.Done() defer wg.Done()
} }
if err := gatherProfile(componentName, profileBaseName, fmt.Sprintf("profile?seconds=%v", seconds)); err != nil { if err := gatherProfile(componentName, profileBaseName, fmt.Sprintf("profile?seconds=%v", seconds)); err != nil {
Logf("Failed to gather %v CPU profile: %v", componentName, err) e2elog.Logf("Failed to gather %v CPU profile: %v", componentName, err)
} }
} }
@ -191,7 +192,7 @@ func GatherMemoryProfile(componentName string, profileBaseName string, wg *sync.
defer wg.Done() defer wg.Done()
} }
if err := gatherProfile(componentName, profileBaseName, "heap"); err != nil { if err := gatherProfile(componentName, profileBaseName, "heap"); err != nil {
Logf("Failed to gather %v memory profile: %v", componentName, err) e2elog.Logf("Failed to gather %v memory profile: %v", componentName, err)
} }
} }
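The two profile hunks above take an optional *sync.WaitGroup so callers can run them either inline or as fire-and-forget goroutines. A hedged caller sketch; the component and file-name arguments are placeholders, and the exact parameter types are inferred from the truncated signatures above.

// Assumed import: "sync".
var wg sync.WaitGroup
wg.Add(2)
// Each gatherer defers wg.Done() itself when a WaitGroup is passed (visible
// above), so the caller only Adds and Waits; failures are logged, not returned.
go GatherCPUProfileForSeconds("kube-apiserver", "profile-prefix", 30, &wg)
go GatherMemoryProfile("kube-apiserver", "profile-prefix", &wg)
wg.Wait()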
@ -9,6 +9,7 @@ go_library(
"//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/legacy-cloud-providers/aws:go_default_library", "//staging/src/k8s.io/legacy-cloud-providers/aws:go_default_library",
"//test/e2e/framework:go_default_library", "//test/e2e/framework:go_default_library",
"//test/e2e/framework/log:go_default_library",
"//vendor/github.com/aws/aws-sdk-go/aws:go_default_library", "//vendor/github.com/aws/aws-sdk-go/aws:go_default_library",
"//vendor/github.com/aws/aws-sdk-go/aws/awserr:go_default_library", "//vendor/github.com/aws/aws-sdk-go/aws/awserr:go_default_library",
"//vendor/github.com/aws/aws-sdk-go/aws/session:go_default_library", "//vendor/github.com/aws/aws-sdk-go/aws/session:go_default_library",
@ -28,6 +28,7 @@ import (
"k8s.io/api/core/v1" "k8s.io/api/core/v1"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
awscloud "k8s.io/legacy-cloud-providers/aws" awscloud "k8s.io/legacy-cloud-providers/aws"
) )
@ -114,7 +115,7 @@ func (p *Provider) DeletePD(pdName string) error {
_, err := client.DeleteVolume(request) _, err := client.DeleteVolume(request)
if err != nil { if err != nil {
if awsError, ok := err.(awserr.Error); ok && awsError.Code() == "InvalidVolume.NotFound" { if awsError, ok := err.(awserr.Error); ok && awsError.Code() == "InvalidVolume.NotFound" {
framework.Logf("volume deletion implicitly succeeded because volume %q does not exist.", pdName) e2elog.Logf("volume deletion implicitly succeeded because volume %q does not exist.", pdName)
} else { } else {
return fmt.Errorf("error deleting EBS volumes: %v", err) return fmt.Errorf("error deleting EBS volumes: %v", err)
} }
@ -144,7 +145,7 @@ func newAWSClient(zone string) *ec2.EC2 {
zone = framework.TestContext.CloudConfig.Zone zone = framework.TestContext.CloudConfig.Zone
} }
if zone == "" { if zone == "" {
framework.Logf("Warning: No AWS zone configured!") e2elog.Logf("Warning: No AWS zone configured!")
cfg = nil cfg = nil
} else { } else {
region := zone[:len(zone)-1] region := zone[:len(zone)-1]
@ -10,6 +10,7 @@ go_library(
"//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//staging/src/k8s.io/legacy-cloud-providers/azure:go_default_library", "//staging/src/k8s.io/legacy-cloud-providers/azure:go_default_library",
"//test/e2e/framework:go_default_library", "//test/e2e/framework:go_default_library",
"//test/e2e/framework/log:go_default_library",
], ],
) )
@ -24,6 +24,7 @@ import (
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/uuid" "k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
"k8s.io/legacy-cloud-providers/azure" "k8s.io/legacy-cloud-providers/azure"
) )
@ -37,7 +38,7 @@ func newProvider() (framework.ProviderInterface, error) {
} }
config, err := os.Open(framework.TestContext.CloudConfig.ConfigFile) config, err := os.Open(framework.TestContext.CloudConfig.ConfigFile)
if err != nil { if err != nil {
framework.Logf("Couldn't open cloud provider configuration %s: %#v", e2elog.Logf("Couldn't open cloud provider configuration %s: %#v",
framework.TestContext.CloudConfig.ConfigFile, err) framework.TestContext.CloudConfig.ConfigFile, err)
} }
defer config.Close() defer config.Close()
@ -72,7 +73,7 @@ func (p *Provider) CreatePD(zone string) (string, error) {
// DeletePD deletes a persistent volume // DeletePD deletes a persistent volume
func (p *Provider) DeletePD(pdName string) error { func (p *Provider) DeletePD(pdName string) error {
if err := p.azureCloud.DeleteVolume(pdName); err != nil { if err := p.azureCloud.DeleteVolume(pdName); err != nil {
framework.Logf("failed to delete Azure volume %q: %v", pdName, err) e2elog.Logf("failed to delete Azure volume %q: %v", pdName, err)
return err return err
} }
return nil return nil
@ -23,6 +23,7 @@ go_library(
"//staging/src/k8s.io/cloud-provider:go_default_library", "//staging/src/k8s.io/cloud-provider:go_default_library",
"//staging/src/k8s.io/legacy-cloud-providers/gce:go_default_library", "//staging/src/k8s.io/legacy-cloud-providers/gce:go_default_library",
"//test/e2e/framework:go_default_library", "//test/e2e/framework:go_default_library",
"//test/e2e/framework/log:go_default_library",
"//test/utils:go_default_library", "//test/utils:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library", "//vendor/github.com/onsi/ginkgo:go_default_library",
"//vendor/google.golang.org/api/compute/v1:go_default_library", "//vendor/google.golang.org/api/compute/v1:go_default_library",
@ -30,6 +30,7 @@ import (
"k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/util/wait"
cloudprovider "k8s.io/cloud-provider" cloudprovider "k8s.io/cloud-provider"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
gcecloud "k8s.io/legacy-cloud-providers/gce" gcecloud "k8s.io/legacy-cloud-providers/gce"
) )
@ -395,7 +396,7 @@ func VerifyFirewallRule(res, exp *compute.Firewall, network string, portsSubset
// WaitForFirewallRule waits for the specified firewall existence // WaitForFirewallRule waits for the specified firewall existence
func WaitForFirewallRule(gceCloud *gcecloud.Cloud, fwName string, exist bool, timeout time.Duration) (*compute.Firewall, error) { func WaitForFirewallRule(gceCloud *gcecloud.Cloud, fwName string, exist bool, timeout time.Duration) (*compute.Firewall, error) {
framework.Logf("Waiting up to %v for firewall %v exist=%v", timeout, fwName, exist) e2elog.Logf("Waiting up to %v for firewall %v exist=%v", timeout, fwName, exist)
var fw *compute.Firewall var fw *compute.Firewall
var err error var err error
@ -32,6 +32,7 @@ import (
"k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
gcecloud "k8s.io/legacy-cloud-providers/gce" gcecloud "k8s.io/legacy-cloud-providers/gce"
) )
@ -41,7 +42,7 @@ func init() {
} }
func factory() (framework.ProviderInterface, error) { func factory() (framework.ProviderInterface, error) {
framework.Logf("Fetching cloud provider for %q\r", framework.TestContext.Provider) e2elog.Logf("Fetching cloud provider for %q\r", framework.TestContext.Provider)
zone := framework.TestContext.CloudConfig.Zone zone := framework.TestContext.CloudConfig.Zone
region := framework.TestContext.CloudConfig.Region region := framework.TestContext.CloudConfig.Region
@ -175,7 +176,7 @@ func (p *Provider) EnsureLoadBalancerResourcesDeleted(ip, portRange string) erro
} }
for _, item := range list.Items { for _, item := range list.Items {
if item.PortRange == portRange && item.IPAddress == ip { if item.PortRange == portRange && item.IPAddress == ip {
framework.Logf("found a load balancer: %v", item) e2elog.Logf("found a load balancer: %v", item)
return false, nil return false, nil
} }
} }
@ -229,7 +230,7 @@ func (p *Provider) DeletePD(pdName string) error {
return nil return nil
} }
framework.Logf("error deleting PD %q: %v", pdName, err) e2elog.Logf("error deleting PD %q: %v", pdName, err)
} }
return err return err
} }
@ -256,7 +257,7 @@ func (p *Provider) DeletePVSource(pvSource *v1.PersistentVolumeSource) error {
func (p *Provider) CleanupServiceResources(c clientset.Interface, loadBalancerName, region, zone string) { func (p *Provider) CleanupServiceResources(c clientset.Interface, loadBalancerName, region, zone string) {
if pollErr := wait.Poll(5*time.Second, framework.LoadBalancerCleanupTimeout, func() (bool, error) { if pollErr := wait.Poll(5*time.Second, framework.LoadBalancerCleanupTimeout, func() (bool, error) {
if err := p.cleanupGCEResources(c, loadBalancerName, region, zone); err != nil { if err := p.cleanupGCEResources(c, loadBalancerName, region, zone); err != nil {
framework.Logf("Still waiting for glbc to cleanup: %v", err) e2elog.Logf("Still waiting for glbc to cleanup: %v", err)
return false, nil return false, nil
} }
return true, nil return true, nil
@ -347,7 +348,7 @@ func SetInstanceTags(cloudConfig framework.CloudConfig, instanceName, zone strin
if err != nil { if err != nil {
framework.Failf("failed to set instance tags: %v", err) framework.Failf("failed to set instance tags: %v", err)
} }
framework.Logf("Sent request to set tags %v on instance: %v", tags, instanceName) e2elog.Logf("Sent request to set tags %v on instance: %v", tags, instanceName)
return resTags.Items return resTags.Items
} }
@ -355,7 +356,7 @@ func SetInstanceTags(cloudConfig framework.CloudConfig, instanceName, zone strin
func GetNodeTags(c clientset.Interface, cloudConfig framework.CloudConfig) []string { func GetNodeTags(c clientset.Interface, cloudConfig framework.CloudConfig) []string {
nodes := framework.GetReadySchedulableNodesOrDie(c) nodes := framework.GetReadySchedulableNodesOrDie(c)
if len(nodes.Items) == 0 { if len(nodes.Items) == 0 {
framework.Logf("GetNodeTags: Found 0 node.") e2elog.Logf("GetNodeTags: Found 0 node.")
return []string{} return []string{}
} }
return GetInstanceTags(cloudConfig, nodes.Items[0].Name).Items return GetInstanceTags(cloudConfig, nodes.Items[0].Name).Items
@ -34,6 +34,7 @@ import (
"k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
utilexec "k8s.io/utils/exec" utilexec "k8s.io/utils/exec"
) )
@ -87,7 +88,7 @@ func (cont *IngressController) CleanupIngressController() error {
func (cont *IngressController) CleanupIngressControllerWithTimeout(timeout time.Duration) error { func (cont *IngressController) CleanupIngressControllerWithTimeout(timeout time.Duration) error {
pollErr := wait.Poll(5*time.Second, timeout, func() (bool, error) { pollErr := wait.Poll(5*time.Second, timeout, func() (bool, error) {
if err := cont.Cleanup(false); err != nil { if err := cont.Cleanup(false); err != nil {
framework.Logf("Monitoring glbc's cleanup of gce resources:\n%v", err) e2elog.Logf("Monitoring glbc's cleanup of gce resources:\n%v", err)
return false, nil return false, nil
} }
return true, nil return true, nil
@ -108,7 +109,7 @@ func (cont *IngressController) CleanupIngressControllerWithTimeout(timeout time.
// throw out confusing events. // throw out confusing events.
if ipErr := wait.Poll(5*time.Second, 1*time.Minute, func() (bool, error) { if ipErr := wait.Poll(5*time.Second, 1*time.Minute, func() (bool, error) {
if err := cont.deleteStaticIPs(); err != nil { if err := cont.deleteStaticIPs(); err != nil {
framework.Logf("Failed to delete static-ip: %v\n", err) e2elog.Logf("Failed to delete static-ip: %v\n", err)
return false, nil return false, nil
} }
return true, nil return true, nil
@ -127,7 +128,7 @@ func (cont *IngressController) CleanupIngressControllerWithTimeout(timeout time.
} }
func (cont *IngressController) getL7AddonUID() (string, error) { func (cont *IngressController) getL7AddonUID() (string, error) {
framework.Logf("Retrieving UID from config map: %v/%v", metav1.NamespaceSystem, uidConfigMap) e2elog.Logf("Retrieving UID from config map: %v/%v", metav1.NamespaceSystem, uidConfigMap)
cm, err := cont.Client.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(uidConfigMap, metav1.GetOptions{}) cm, err := cont.Client.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(uidConfigMap, metav1.GetOptions{})
if err != nil { if err != nil {
return "", err return "", err
@ -296,7 +297,7 @@ func (cont *IngressController) deleteURLMap(del bool) (msg string) {
continue continue
} }
if del { if del {
framework.Logf("Deleting url-map: %s", um.Name) e2elog.Logf("Deleting url-map: %s", um.Name)
if err := gceCloud.DeleteURLMap(um.Name); err != nil && if err := gceCloud.DeleteURLMap(um.Name); err != nil &&
!cont.isHTTPErrorCode(err, http.StatusNotFound) { !cont.isHTTPErrorCode(err, http.StatusNotFound) {
msg += fmt.Sprintf("Failed to delete url map %v\n", um.Name) msg += fmt.Sprintf("Failed to delete url map %v\n", um.Name)
@ -332,7 +333,7 @@ func (cont *IngressController) deleteBackendService(del bool) (msg string) {
return fmt.Sprintf("Failed to list backend services: %v", err) return fmt.Sprintf("Failed to list backend services: %v", err)
} }
if len(beList) == 0 { if len(beList) == 0 {
framework.Logf("No backend services found") e2elog.Logf("No backend services found")
return msg return msg
} }
for _, be := range beList { for _, be := range beList {
@ -340,7 +341,7 @@ func (cont *IngressController) deleteBackendService(del bool) (msg string) {
continue continue
} }
if del { if del {
framework.Logf("Deleting backed-service: %s", be.Name) e2elog.Logf("Deleting backed-service: %s", be.Name)
if err := gceCloud.DeleteGlobalBackendService(be.Name); err != nil && if err := gceCloud.DeleteGlobalBackendService(be.Name); err != nil &&
!cont.isHTTPErrorCode(err, http.StatusNotFound) { !cont.isHTTPErrorCode(err, http.StatusNotFound) {
msg += fmt.Sprintf("Failed to delete backend service %v: %v\n", be.Name, err) msg += fmt.Sprintf("Failed to delete backend service %v: %v\n", be.Name, err)
@ -369,7 +370,7 @@ func (cont *IngressController) deleteHTTPHealthCheck(del bool) (msg string) {
continue continue
} }
if del { if del {
framework.Logf("Deleting http-health-check: %s", hc.Name) e2elog.Logf("Deleting http-health-check: %s", hc.Name)
if err := gceCloud.DeleteHTTPHealthCheck(hc.Name); err != nil && if err := gceCloud.DeleteHTTPHealthCheck(hc.Name); err != nil &&
!cont.isHTTPErrorCode(err, http.StatusNotFound) { !cont.isHTTPErrorCode(err, http.StatusNotFound) {
msg += fmt.Sprintf("Failed to delete HTTP health check %v\n", hc.Name) msg += fmt.Sprintf("Failed to delete HTTP health check %v\n", hc.Name)
@ -410,7 +411,7 @@ func (cont *IngressController) deleteSSLCertificate(del bool) (msg string) {
continue continue
} }
if del { if del {
framework.Logf("Deleting ssl-certificate: %s", s.Name) e2elog.Logf("Deleting ssl-certificate: %s", s.Name)
if err := gceCloud.DeleteSslCertificate(s.Name); err != nil && if err := gceCloud.DeleteSslCertificate(s.Name); err != nil &&
!cont.isHTTPErrorCode(err, http.StatusNotFound) { !cont.isHTTPErrorCode(err, http.StatusNotFound) {
msg += fmt.Sprintf("Failed to delete ssl certificates: %v\n", s.Name) msg += fmt.Sprintf("Failed to delete ssl certificates: %v\n", s.Name)
@ -456,7 +457,7 @@ func (cont *IngressController) deleteInstanceGroup(del bool) (msg string) {
continue continue
} }
if del { if del {
framework.Logf("Deleting instance-group: %s", ig.Name) e2elog.Logf("Deleting instance-group: %s", ig.Name)
if err := gceCloud.DeleteInstanceGroup(ig.Name, cont.Cloud.Zone); err != nil && if err := gceCloud.DeleteInstanceGroup(ig.Name, cont.Cloud.Zone); err != nil &&
!cont.isHTTPErrorCode(err, http.StatusNotFound) { !cont.isHTTPErrorCode(err, http.StatusNotFound) {
msg += fmt.Sprintf("Failed to delete instance group %v\n", ig.Name) msg += fmt.Sprintf("Failed to delete instance group %v\n", ig.Name)
@ -478,7 +479,7 @@ func (cont *IngressController) deleteNetworkEndpointGroup(del bool) (msg string)
return msg return msg
} }
// Do not return error as NEG is still alpha. // Do not return error as NEG is still alpha.
framework.Logf("Failed to list network endpoint group: %v", err) e2elog.Logf("Failed to list network endpoint group: %v", err)
return msg return msg
} }
if len(negList) == 0 { if len(negList) == 0 {
@ -489,7 +490,7 @@ func (cont *IngressController) deleteNetworkEndpointGroup(del bool) (msg string)
continue continue
} }
if del { if del {
framework.Logf("Deleting network-endpoint-group: %s", neg.Name) e2elog.Logf("Deleting network-endpoint-group: %s", neg.Name)
if err := gceCloud.DeleteNetworkEndpointGroup(neg.Name, cont.Cloud.Zone); err != nil && if err := gceCloud.DeleteNetworkEndpointGroup(neg.Name, cont.Cloud.Zone); err != nil &&
!cont.isHTTPErrorCode(err, http.StatusNotFound) { !cont.isHTTPErrorCode(err, http.StatusNotFound) {
msg += fmt.Sprintf("Failed to delete network endpoint group %v\n", neg.Name) msg += fmt.Sprintf("Failed to delete network endpoint group %v\n", neg.Name)
@ -557,11 +558,11 @@ func (cont *IngressController) canDeleteNEG(resourceName, creationTimestamp stri
func canDeleteWithTimestamp(resourceName, creationTimestamp string) bool { func canDeleteWithTimestamp(resourceName, creationTimestamp string) bool {
createdTime, err := time.Parse(time.RFC3339, creationTimestamp) createdTime, err := time.Parse(time.RFC3339, creationTimestamp)
if err != nil { if err != nil {
framework.Logf("WARNING: Failed to parse creation timestamp %v for %v: %v", creationTimestamp, resourceName, err) e2elog.Logf("WARNING: Failed to parse creation timestamp %v for %v: %v", creationTimestamp, resourceName, err)
return false return false
} }
if time.Since(createdTime) > maxAge { if time.Since(createdTime) > maxAge {
framework.Logf("%v created on %v IS too old", resourceName, creationTimestamp) e2elog.Logf("%v created on %v IS too old", resourceName, creationTimestamp)
return true return true
} }
return false return false
@ -620,7 +621,7 @@ func (cont *IngressController) WaitForNegBackendService(svcPorts map[string]v1.S
return wait.Poll(5*time.Second, 1*time.Minute, func() (bool, error) { return wait.Poll(5*time.Second, 1*time.Minute, func() (bool, error) {
err := cont.verifyBackendMode(svcPorts, negBackend) err := cont.verifyBackendMode(svcPorts, negBackend)
if err != nil { if err != nil {
framework.Logf("Err while checking if backend service is using NEG: %v", err) e2elog.Logf("Err while checking if backend service is using NEG: %v", err)
return false, nil return false, nil
} }
return true, nil return true, nil
@ -632,7 +633,7 @@ func (cont *IngressController) WaitForIgBackendService(svcPorts map[string]v1.Se
return wait.Poll(5*time.Second, 1*time.Minute, func() (bool, error) { return wait.Poll(5*time.Second, 1*time.Minute, func() (bool, error) {
err := cont.verifyBackendMode(svcPorts, igBackend) err := cont.verifyBackendMode(svcPorts, igBackend)
if err != nil { if err != nil {
framework.Logf("Err while checking if backend service is using IG: %v", err) e2elog.Logf("Err while checking if backend service is using IG: %v", err)
return false, nil return false, nil
} }
return true, nil return true, nil
@ -766,9 +767,9 @@ func (cont *IngressController) Init() error {
// There's a name limit imposed by GCE. The controller will truncate. // There's a name limit imposed by GCE. The controller will truncate.
testName := fmt.Sprintf("k8s-fw-foo-app-X-%v--%v", cont.Ns, cont.UID) testName := fmt.Sprintf("k8s-fw-foo-app-X-%v--%v", cont.Ns, cont.UID)
if len(testName) > nameLenLimit { if len(testName) > nameLenLimit {
framework.Logf("WARNING: test name including cluster UID: %v is over the GCE limit of %v", testName, nameLenLimit) e2elog.Logf("WARNING: test name including cluster UID: %v is over the GCE limit of %v", testName, nameLenLimit)
} else { } else {
framework.Logf("Detected cluster UID %v", cont.UID) e2elog.Logf("Detected cluster UID %v", cont.UID)
} }
return nil return nil
} }
@ -782,9 +783,9 @@ func (cont *IngressController) CreateStaticIP(name string) string {
if err := gceCloud.ReserveGlobalAddress(addr); err != nil { if err := gceCloud.ReserveGlobalAddress(addr); err != nil {
if delErr := gceCloud.DeleteGlobalAddress(name); delErr != nil { if delErr := gceCloud.DeleteGlobalAddress(name); delErr != nil {
if cont.isHTTPErrorCode(delErr, http.StatusNotFound) { if cont.isHTTPErrorCode(delErr, http.StatusNotFound) {
framework.Logf("Static ip with name %v was not allocated, nothing to delete", name) e2elog.Logf("Static ip with name %v was not allocated, nothing to delete", name)
} else { } else {
framework.Logf("Failed to delete static ip %v: %v", name, delErr) e2elog.Logf("Failed to delete static ip %v: %v", name, delErr)
} }
} }
framework.Failf("Failed to allocate static ip %v: %v", name, err) framework.Failf("Failed to allocate static ip %v: %v", name, err)
@ -796,7 +797,7 @@ func (cont *IngressController) CreateStaticIP(name string) string {
} }
cont.staticIPName = ip.Name cont.staticIPName = ip.Name
framework.Logf("Reserved static ip %v: %v", cont.staticIPName, ip.Address) e2elog.Logf("Reserved static ip %v: %v", cont.staticIPName, ip.Address)
return ip.Address return ip.Address
} }
@ -816,7 +817,7 @@ func (cont *IngressController) deleteStaticIPs() error {
for _, ip := range e2eIPs { for _, ip := range e2eIPs {
ips = append(ips, ip.Name) ips = append(ips, ip.Name)
} }
framework.Logf("None of the remaining %d static-ips were created by this e2e: %v", len(ips), strings.Join(ips, ", ")) e2elog.Logf("None of the remaining %d static-ips were created by this e2e: %v", len(ips), strings.Join(ips, ", "))
} }
return nil return nil
} }
@ -842,32 +843,32 @@ func gcloudComputeResourceList(resource, regex, project string, out interface{})
errMsg = fmt.Sprintf("%v, stderr %v", errMsg, string(osExitErr.Stderr)) errMsg = fmt.Sprintf("%v, stderr %v", errMsg, string(osExitErr.Stderr))
} }
} }
framework.Logf("Error running gcloud command 'gcloud %s': err: %v, output: %v, status: %d, msg: %v", strings.Join(command, " "), err, string(output), errCode, errMsg) e2elog.Logf("Error running gcloud command 'gcloud %s': err: %v, output: %v, status: %d, msg: %v", strings.Join(command, " "), err, string(output), errCode, errMsg)
} }
if err := json.Unmarshal([]byte(output), out); err != nil { if err := json.Unmarshal([]byte(output), out); err != nil {
framework.Logf("Error unmarshalling gcloud output for %v: %v, output: %v", resource, err, string(output)) e2elog.Logf("Error unmarshalling gcloud output for %v: %v, output: %v", resource, err, string(output))
} }
} }
// GcloudComputeResourceDelete deletes the specified compute resource by name and project. // GcloudComputeResourceDelete deletes the specified compute resource by name and project.
func GcloudComputeResourceDelete(resource, name, project string, args ...string) error { func GcloudComputeResourceDelete(resource, name, project string, args ...string) error {
framework.Logf("Deleting %v: %v", resource, name) e2elog.Logf("Deleting %v: %v", resource, name)
argList := append([]string{"compute", resource, "delete", name, fmt.Sprintf("--project=%v", project), "-q"}, args...) argList := append([]string{"compute", resource, "delete", name, fmt.Sprintf("--project=%v", project), "-q"}, args...)
output, err := exec.Command("gcloud", argList...).CombinedOutput() output, err := exec.Command("gcloud", argList...).CombinedOutput()
if err != nil { if err != nil {
framework.Logf("Error deleting %v, output: %v\nerror: %+v", resource, string(output), err) e2elog.Logf("Error deleting %v, output: %v\nerror: %+v", resource, string(output), err)
} }
return err return err
} }
// GcloudComputeResourceCreate creates a compute resource with a name and arguments. // GcloudComputeResourceCreate creates a compute resource with a name and arguments.
func GcloudComputeResourceCreate(resource, name, project string, args ...string) error { func GcloudComputeResourceCreate(resource, name, project string, args ...string) error {
framework.Logf("Creating %v in project %v: %v", resource, project, name) e2elog.Logf("Creating %v in project %v: %v", resource, project, name)
argsList := append([]string{"compute", resource, "create", name, fmt.Sprintf("--project=%v", project)}, args...) argsList := append([]string{"compute", resource, "create", name, fmt.Sprintf("--project=%v", project)}, args...)
framework.Logf("Running command: gcloud %+v", strings.Join(argsList, " ")) e2elog.Logf("Running command: gcloud %+v", strings.Join(argsList, " "))
output, err := exec.Command("gcloud", argsList...).CombinedOutput() output, err := exec.Command("gcloud", argsList...).CombinedOutput()
if err != nil { if err != nil {
framework.Logf("Error creating %v, output: %v\nerror: %+v", resource, string(output), err) e2elog.Logf("Error creating %v, output: %v\nerror: %+v", resource, string(output), err)
} }
return err return err
} }
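GcloudComputeResourceCreate and GcloudComputeResourceDelete above share one shape: log the command, shell out, log the combined output on failure, return the error. A minimal sketch of that shape; the wrapper name runGcloud is hypothetical.

// Assumed imports: "os/exec", "strings",
// e2elog "k8s.io/kubernetes/test/e2e/framework/log".
func runGcloud(args ...string) error {
	e2elog.Logf("Running command: gcloud %s", strings.Join(args, " "))
	// CombinedOutput captures stdout and stderr together, which is what the
	// helpers above log when the command fails.
	output, err := exec.Command("gcloud", args...).CombinedOutput()
	if err != nil {
		e2elog.Logf("gcloud failed, output: %s\nerror: %+v", string(output), err)
	}
	return err
}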
@ -27,6 +27,7 @@ import (
"k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/labels"
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
testutils "k8s.io/kubernetes/test/utils" testutils "k8s.io/kubernetes/test/utils"
) )
@ -52,7 +53,7 @@ var _ = ginkgo.Describe("Recreate [Feature:Recreate]", func() {
originalNodes, err = framework.CheckNodesReady(f.ClientSet, numNodes, framework.NodeReadyInitialTimeout) originalNodes, err = framework.CheckNodesReady(f.ClientSet, numNodes, framework.NodeReadyInitialTimeout)
framework.ExpectNoError(err) framework.ExpectNoError(err)
framework.Logf("Got the following nodes before recreate %v", nodeNames(originalNodes)) e2elog.Logf("Got the following nodes before recreate %v", nodeNames(originalNodes))
ps, err = testutils.NewPodStore(f.ClientSet, systemNamespace, labels.Everything(), fields.Everything()) ps, err = testutils.NewPodStore(f.ClientSet, systemNamespace, labels.Everything(), fields.Everything())
allPods := ps.List() allPods := ps.List()
@ -77,7 +78,7 @@ var _ = ginkgo.Describe("Recreate [Feature:Recreate]", func() {
framework.ExpectNoError(err) framework.ExpectNoError(err)
for _, e := range events.Items { for _, e := range events.Items {
framework.Logf("event for %v: %v %v: %v", e.InvolvedObject.Name, e.Source, e.Reason, e.Message) e2elog.Logf("event for %v: %v %v: %v", e.InvolvedObject.Name, e.Source, e.Reason, e.Message)
} }
} }
if ps != nil { if ps != nil {
@ -104,7 +105,7 @@ func testRecreate(c clientset.Interface, ps *testutils.PodStore, systemNamespace
nodesAfter, err := framework.CheckNodesReady(c, len(nodes), framework.RestartNodeReadyAgainTimeout) nodesAfter, err := framework.CheckNodesReady(c, len(nodes), framework.RestartNodeReadyAgainTimeout)
framework.ExpectNoError(err) framework.ExpectNoError(err)
framework.Logf("Got the following nodes after recreate: %v", nodeNames(nodesAfter)) e2elog.Logf("Got the following nodes after recreate: %v", nodeNames(nodesAfter))
if len(nodes) != len(nodesAfter) { if len(nodes) != len(nodesAfter) {
framework.Failf("Had %d nodes before nodes were recreated, but now only have %d", framework.Failf("Had %d nodes before nodes were recreated, but now only have %d",
@ -26,6 +26,7 @@ import (
"k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
) )
// RecreateNodes recreates the given nodes in a managed instance group. // RecreateNodes recreates the given nodes in a managed instance group.
@ -61,7 +62,7 @@ func RecreateNodes(c clientset.Interface, nodes []v1.Node) error {
args = append(args, fmt.Sprintf("--instances=%s", strings.Join(nodeNames, ","))) args = append(args, fmt.Sprintf("--instances=%s", strings.Join(nodeNames, ",")))
args = append(args, fmt.Sprintf("--zone=%s", zone)) args = append(args, fmt.Sprintf("--zone=%s", zone))
framework.Logf("Recreating instance group %s.", instanceGroup) e2elog.Logf("Recreating instance group %s.", instanceGroup)
stdout, stderr, err := framework.RunCmd("gcloud", args...) stdout, stderr, err := framework.RunCmd("gcloud", args...)
if err != nil { if err != nil {
return fmt.Errorf("error recreating nodes: %s\nstdout: %s\nstderr: %s", err, stdout, stderr) return fmt.Errorf("error recreating nodes: %s\nstdout: %s\nstderr: %s", err, stdout, stderr)
@ -78,7 +79,7 @@ func WaitForNodeBootIdsToChange(c clientset.Interface, nodes []v1.Node, timeout
if err := wait.Poll(30*time.Second, timeout, func() (bool, error) { if err := wait.Poll(30*time.Second, timeout, func() (bool, error) {
newNode, err := c.CoreV1().Nodes().Get(node.Name, metav1.GetOptions{}) newNode, err := c.CoreV1().Nodes().Get(node.Name, metav1.GetOptions{})
if err != nil { if err != nil {
framework.Logf("Could not get node info: %s. Retrying in %v.", err, 30*time.Second) e2elog.Logf("Could not get node info: %s. Retrying in %v.", err, 30*time.Second)
return false, nil return false, nil
} }
return node.Status.NodeInfo.BootID != newNode.Status.NodeInfo.BootID, nil return node.Status.NodeInfo.BootID != newNode.Status.NodeInfo.BootID, nil
@ -29,6 +29,7 @@ import (
"k8s.io/apiserver/pkg/authentication/serviceaccount" "k8s.io/apiserver/pkg/authentication/serviceaccount"
"k8s.io/kubernetes/pkg/security/podsecuritypolicy/seccomp" "k8s.io/kubernetes/pkg/security/podsecuritypolicy/seccomp"
"k8s.io/kubernetes/test/e2e/framework/auth" "k8s.io/kubernetes/test/e2e/framework/auth"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
"github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
) )
@ -82,13 +83,13 @@ func IsPodSecurityPolicyEnabled(f *Framework) bool {
isPSPEnabledOnce.Do(func() { isPSPEnabledOnce.Do(func() {
psps, err := f.ClientSet.PolicyV1beta1().PodSecurityPolicies().List(metav1.ListOptions{}) psps, err := f.ClientSet.PolicyV1beta1().PodSecurityPolicies().List(metav1.ListOptions{})
if err != nil { if err != nil {
Logf("Error listing PodSecurityPolicies; assuming PodSecurityPolicy is disabled: %v", err) e2elog.Logf("Error listing PodSecurityPolicies; assuming PodSecurityPolicy is disabled: %v", err)
isPSPEnabled = false isPSPEnabled = false
} else if psps == nil || len(psps.Items) == 0 { } else if psps == nil || len(psps.Items) == 0 {
Logf("No PodSecurityPolicies found; assuming PodSecurityPolicy is disabled.") e2elog.Logf("No PodSecurityPolicies found; assuming PodSecurityPolicy is disabled.")
isPSPEnabled = false isPSPEnabled = false
} else { } else {
Logf("Found PodSecurityPolicies; assuming PodSecurityPolicy is enabled.") e2elog.Logf("Found PodSecurityPolicies; assuming PodSecurityPolicy is enabled.")
isPSPEnabled = true isPSPEnabled = true
} }
}) })
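IsPodSecurityPolicyEnabled above is a detect-once-and-cache check: the probe runs a single time via sync.Once and every later call returns the cached answer. A generic sketch of that shape with the probe as a placeholder.

// Assumed imports: "sync",
// e2elog "k8s.io/kubernetes/test/e2e/framework/log".
var (
	detectOnce sync.Once
	featureOn  bool
)

// isFeatureEnabled runs probe exactly once and caches the result, logging the
// outcome the same way the PodSecurityPolicy check above does.
func isFeatureEnabled(probe func() (bool, error)) bool {
	detectOnce.Do(func() {
		enabled, err := probe()
		if err != nil {
			e2elog.Logf("probe failed; assuming the feature is disabled: %v", err)
			return
		}
		featureOn = enabled
		e2elog.Logf("feature enabled: %v", featureOn)
	})
	return featureOn
}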
@ -31,6 +31,7 @@ import (
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
storageutil "k8s.io/kubernetes/pkg/apis/storage/v1/util" storageutil "k8s.io/kubernetes/pkg/apis/storage/v1/util"
"k8s.io/kubernetes/pkg/volume/util" "k8s.io/kubernetes/pkg/volume/util"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
imageutils "k8s.io/kubernetes/test/utils/image" imageutils "k8s.io/kubernetes/test/utils/image"
) )
@ -116,7 +117,7 @@ func PVPVCCleanup(c clientset.Interface, ns string, pv *v1.PersistentVolume, pvc
errs = append(errs, fmt.Errorf("failed to delete PVC %q: %v", pvc.Name, err)) errs = append(errs, fmt.Errorf("failed to delete PVC %q: %v", pvc.Name, err))
} }
} else { } else {
Logf("pvc is nil") e2elog.Logf("pvc is nil")
} }
if pv != nil { if pv != nil {
err := DeletePersistentVolume(c, pv.Name) err := DeletePersistentVolume(c, pv.Name)
@ -124,7 +125,7 @@ func PVPVCCleanup(c clientset.Interface, ns string, pv *v1.PersistentVolume, pvc
errs = append(errs, fmt.Errorf("failed to delete PV %q: %v", pv.Name, err)) errs = append(errs, fmt.Errorf("failed to delete PV %q: %v", pv.Name, err))
} }
} else { } else {
Logf("pv is nil") e2elog.Logf("pv is nil")
} }
return errs return errs
} }
@ -158,7 +159,7 @@ func PVPVCMapCleanup(c clientset.Interface, ns string, pvols PVMap, claims PVCMa
// DeletePersistentVolume deletes the PV. // DeletePersistentVolume deletes the PV.
func DeletePersistentVolume(c clientset.Interface, pvName string) error { func DeletePersistentVolume(c clientset.Interface, pvName string) error {
if c != nil && len(pvName) > 0 { if c != nil && len(pvName) > 0 {
Logf("Deleting PersistentVolume %q", pvName) e2elog.Logf("Deleting PersistentVolume %q", pvName)
err := c.CoreV1().PersistentVolumes().Delete(pvName, nil) err := c.CoreV1().PersistentVolumes().Delete(pvName, nil)
if err != nil && !apierrs.IsNotFound(err) { if err != nil && !apierrs.IsNotFound(err) {
return fmt.Errorf("PV Delete API error: %v", err) return fmt.Errorf("PV Delete API error: %v", err)
@ -170,7 +171,7 @@ func DeletePersistentVolume(c clientset.Interface, pvName string) error {
// DeletePersistentVolumeClaim deletes the Claim. // DeletePersistentVolumeClaim deletes the Claim.
func DeletePersistentVolumeClaim(c clientset.Interface, pvcName string, ns string) error { func DeletePersistentVolumeClaim(c clientset.Interface, pvcName string, ns string) error {
if c != nil && len(pvcName) > 0 { if c != nil && len(pvcName) > 0 {
Logf("Deleting PersistentVolumeClaim %q", pvcName) e2elog.Logf("Deleting PersistentVolumeClaim %q", pvcName)
err := c.CoreV1().PersistentVolumeClaims(ns).Delete(pvcName, nil) err := c.CoreV1().PersistentVolumeClaims(ns).Delete(pvcName, nil)
if err != nil && !apierrs.IsNotFound(err) { if err != nil && !apierrs.IsNotFound(err) {
return fmt.Errorf("PVC Delete API error: %v", err) return fmt.Errorf("PVC Delete API error: %v", err)
@ -184,14 +185,14 @@ func DeletePersistentVolumeClaim(c clientset.Interface, pvcName string, ns strin
// phase value to expect for the pv bound to the to-be-deleted claim. // phase value to expect for the pv bound to the to-be-deleted claim.
func DeletePVCandValidatePV(c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume, expectPVPhase v1.PersistentVolumePhase) error { func DeletePVCandValidatePV(c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume, expectPVPhase v1.PersistentVolumePhase) error {
pvname := pvc.Spec.VolumeName pvname := pvc.Spec.VolumeName
Logf("Deleting PVC %v to trigger reclamation of PV %v", pvc.Name, pvname) e2elog.Logf("Deleting PVC %v to trigger reclamation of PV %v", pvc.Name, pvname)
err := DeletePersistentVolumeClaim(c, pvc.Name, ns) err := DeletePersistentVolumeClaim(c, pvc.Name, ns)
if err != nil { if err != nil {
return err return err
} }
// Wait for the PV's phase to return to be `expectPVPhase` // Wait for the PV's phase to return to be `expectPVPhase`
Logf("Waiting for reclaim process to complete.") e2elog.Logf("Waiting for reclaim process to complete.")
err = WaitForPersistentVolumePhase(expectPVPhase, c, pv.Name, Poll, PVReclaimingTimeout) err = WaitForPersistentVolumePhase(expectPVPhase, c, pv.Name, Poll, PVReclaimingTimeout)
if err != nil { if err != nil {
return fmt.Errorf("pv %q phase did not become %v: %v", pv.Name, expectPVPhase, err) return fmt.Errorf("pv %q phase did not become %v: %v", pv.Name, expectPVPhase, err)
@ -216,7 +217,7 @@ func DeletePVCandValidatePV(c clientset.Interface, ns string, pvc *v1.Persistent
} }
} }
Logf("PV %v now in %q phase", pv.Name, expectPVPhase) e2elog.Logf("PV %v now in %q phase", pv.Name, expectPVPhase)
return nil return nil
} }
@ -333,7 +334,7 @@ func CreatePVPVC(c clientset.Interface, pvConfig PersistentVolumeConfig, pvcConf
if preBind { if preBind {
preBindMsg = " pre-bound" preBindMsg = " pre-bound"
} }
Logf("Creating a PV followed by a%s PVC", preBindMsg) e2elog.Logf("Creating a PV followed by a%s PVC", preBindMsg)
// make the pv and pvc definitions // make the pv and pvc definitions
pv := MakePersistentVolume(pvConfig) pv := MakePersistentVolume(pvConfig)
@ -406,7 +407,7 @@ func CreatePVsPVCs(numpvs, numpvcs int, c clientset.Interface, ns string, pvConf
// WaitOnPVandPVC waits for the pv and pvc to bind to each other. // WaitOnPVandPVC waits for the pv and pvc to bind to each other.
func WaitOnPVandPVC(c clientset.Interface, ns string, pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim) error { func WaitOnPVandPVC(c clientset.Interface, ns string, pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim) error {
// Wait for newly created PVC to bind to the PV // Wait for newly created PVC to bind to the PV
Logf("Waiting for PV %v to bind to PVC %v", pv.Name, pvc.Name) e2elog.Logf("Waiting for PV %v to bind to PVC %v", pv.Name, pvc.Name)
err := WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, pvc.Name, Poll, ClaimBindingTimeout) err := WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, pvc.Name, Poll, ClaimBindingTimeout)
if err != nil { if err != nil {
return fmt.Errorf("PVC %q did not become Bound: %v", pvc.Name, err) return fmt.Errorf("PVC %q did not become Bound: %v", pvc.Name, err)
@ -462,8 +463,8 @@ func WaitAndVerifyBinds(c clientset.Interface, ns string, pvols PVMap, claims PV
for pvName := range pvols { for pvName := range pvols {
err := WaitForPersistentVolumePhase(v1.VolumeBound, c, pvName, Poll, PVBindingTimeout) err := WaitForPersistentVolumePhase(v1.VolumeBound, c, pvName, Poll, PVBindingTimeout)
if err != nil && len(pvols) > len(claims) { if err != nil && len(pvols) > len(claims) {
Logf("WARN: pv %v is not bound after max wait", pvName) e2elog.Logf("WARN: pv %v is not bound after max wait", pvName)
Logf(" This may be ok since there are more pvs than pvcs") e2elog.Logf(" This may be ok since there are more pvs than pvcs")
continue continue
} }
if err != nil { if err != nil {
@ -503,7 +504,7 @@ func testPodSuccessOrFail(c clientset.Interface, ns string, pod *v1.Pod) error {
if err := WaitForPodSuccessInNamespace(c, pod.Name, ns); err != nil { if err := WaitForPodSuccessInNamespace(c, pod.Name, ns); err != nil {
return fmt.Errorf("pod %q failed to reach Success: %v", pod.Name, err) return fmt.Errorf("pod %q failed to reach Success: %v", pod.Name, err)
} }
Logf("Pod %v succeeded ", pod.Name) e2elog.Logf("Pod %v succeeded ", pod.Name)
return nil return nil
} }
@ -519,7 +520,7 @@ func DeletePodWithWait(f *Framework, c clientset.Interface, pod *v1.Pod) error {
// DeletePodWithWaitByName deletes the named and namespaced pod and waits for the pod to be terminated. Resilient to the pod // DeletePodWithWaitByName deletes the named and namespaced pod and waits for the pod to be terminated. Resilient to the pod
// not existing. // not existing.
func DeletePodWithWaitByName(f *Framework, c clientset.Interface, podName, podNamespace string) error { func DeletePodWithWaitByName(f *Framework, c clientset.Interface, podName, podNamespace string) error {
Logf("Deleting pod %q in namespace %q", podName, podNamespace) e2elog.Logf("Deleting pod %q in namespace %q", podName, podNamespace)
err := c.CoreV1().Pods(podNamespace).Delete(podName, nil) err := c.CoreV1().Pods(podNamespace).Delete(podName, nil)
if err != nil { if err != nil {
if apierrs.IsNotFound(err) { if apierrs.IsNotFound(err) {
@ -527,7 +528,7 @@ func DeletePodWithWaitByName(f *Framework, c clientset.Interface, podName, podNa
} }
return fmt.Errorf("pod Delete API error: %v", err) return fmt.Errorf("pod Delete API error: %v", err)
} }
Logf("Wait up to %v for pod %q to be fully deleted", PodDeleteTimeout, podName) e2elog.Logf("Wait up to %v for pod %q to be fully deleted", PodDeleteTimeout, podName)
err = f.WaitForPodNotFound(podName, PodDeleteTimeout) err = f.WaitForPodNotFound(podName, PodDeleteTimeout)
if err != nil { if err != nil {
return fmt.Errorf("pod %q was not deleted: %v", podName, err) return fmt.Errorf("pod %q was not deleted: %v", podName, err)
@ -539,7 +540,7 @@ func DeletePodWithWaitByName(f *Framework, c clientset.Interface, podName, podNa
// Note: need named return value so that the err assignment in the defer sets the returned error. // Note: need named return value so that the err assignment in the defer sets the returned error.
// Has been shown to be necessary using Go 1.7. // Has been shown to be necessary using Go 1.7.
func CreateWaitAndDeletePod(f *Framework, c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim) (err error) { func CreateWaitAndDeletePod(f *Framework, c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim) (err error) {
Logf("Creating nfs test pod") e2elog.Logf("Creating nfs test pod")
pod := MakeWritePod(ns, pvc) pod := MakeWritePod(ns, pvc)
runPod, err := c.CoreV1().Pods(ns).Create(pod) runPod, err := c.CoreV1().Pods(ns).Create(pod)
if err != nil { if err != nil {
@ -575,7 +576,7 @@ func MakePersistentVolume(pvConfig PersistentVolumeConfig) *v1.PersistentVolume
var claimRef *v1.ObjectReference var claimRef *v1.ObjectReference
// If the reclaimPolicy is not provided, assume Retain // If the reclaimPolicy is not provided, assume Retain
if pvConfig.ReclaimPolicy == "" { if pvConfig.ReclaimPolicy == "" {
Logf("PV ReclaimPolicy unspecified, default: Retain") e2elog.Logf("PV ReclaimPolicy unspecified, default: Retain")
pvConfig.ReclaimPolicy = v1.PersistentVolumeReclaimRetain pvConfig.ReclaimPolicy = v1.PersistentVolumeReclaimRetain
} }
if pvConfig.Prebind != nil { if pvConfig.Prebind != nil {
@ -619,7 +620,7 @@ func MakePersistentVolumeClaim(cfg PersistentVolumeClaimConfig, ns string) *v1.P
// Specs are expected to match this test's PersistentVolume // Specs are expected to match this test's PersistentVolume
if len(cfg.AccessModes) == 0 { if len(cfg.AccessModes) == 0 {
Logf("AccessModes unspecified, default: all modes (RWO, RWX, ROX).") e2elog.Logf("AccessModes unspecified, default: all modes (RWO, RWX, ROX).")
cfg.AccessModes = append(cfg.AccessModes, v1.ReadWriteOnce, v1.ReadOnlyMany, v1.ReadOnlyMany) cfg.AccessModes = append(cfg.AccessModes, v1.ReadWriteOnce, v1.ReadOnlyMany, v1.ReadOnlyMany)
} }
@ -648,10 +649,10 @@ func createPDWithRetry(zone string) (string, error) {
for start := time.Now(); time.Since(start) < pdRetryTimeout; time.Sleep(pdRetryPollTime) { for start := time.Now(); time.Since(start) < pdRetryTimeout; time.Sleep(pdRetryPollTime) {
newDiskName, err := createPD(zone) newDiskName, err := createPD(zone)
if err != nil { if err != nil {
Logf("Couldn't create a new PD, sleeping 5 seconds: %v", err) e2elog.Logf("Couldn't create a new PD, sleeping 5 seconds: %v", err)
continue continue
} }
Logf("Successfully created a new PD: %q.", newDiskName) e2elog.Logf("Successfully created a new PD: %q.", newDiskName)
return newDiskName, nil return newDiskName, nil
} }
return "", err return "", err
@ -673,10 +674,10 @@ func DeletePDWithRetry(diskName string) error {
for start := time.Now(); time.Since(start) < pdRetryTimeout; time.Sleep(pdRetryPollTime) { for start := time.Now(); time.Since(start) < pdRetryTimeout; time.Sleep(pdRetryPollTime) {
err = deletePD(diskName) err = deletePD(diskName)
if err != nil { if err != nil {
Logf("Couldn't delete PD %q, sleeping %v: %v", diskName, pdRetryPollTime, err) e2elog.Logf("Couldn't delete PD %q, sleeping %v: %v", diskName, pdRetryPollTime, err)
continue continue
} }
Logf("Successfully deleted PD %q.", diskName) e2elog.Logf("Successfully deleted PD %q.", diskName)
return nil return nil
} }
return fmt.Errorf("unable to delete PD %q: %v", diskName, err) return fmt.Errorf("unable to delete PD %q: %v", diskName, err)
@ -1041,7 +1042,7 @@ func GetDefaultStorageClassName(c clientset.Interface) (string, error) {
if len(scName) == 0 { if len(scName) == 0 {
return "", fmt.Errorf("No default storage class found") return "", fmt.Errorf("No default storage class found")
} }
Logf("Default storage class: %q", scName) e2elog.Logf("Default storage class: %q", scName)
return scName, nil return scName, nil
} }

View File

@ -30,6 +30,7 @@ import (
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
scaleclient "k8s.io/client-go/scale" scaleclient "k8s.io/client-go/scale"
api "k8s.io/kubernetes/pkg/apis/core" api "k8s.io/kubernetes/pkg/apis/core"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
testutils "k8s.io/kubernetes/test/utils" testutils "k8s.io/kubernetes/test/utils"
) )
@ -98,7 +99,7 @@ func UpdateReplicationControllerWithRetries(c clientset.Interface, namespace, na
// Apply the update, then attempt to push it to the apiserver. // Apply the update, then attempt to push it to the apiserver.
applyUpdate(rc) applyUpdate(rc)
if rc, err = c.CoreV1().ReplicationControllers(namespace).Update(rc); err == nil { if rc, err = c.CoreV1().ReplicationControllers(namespace).Update(rc); err == nil {
Logf("Updating replication controller %q", name) e2elog.Logf("Updating replication controller %q", name)
return true, nil return true, nil
} }
updateErr = err updateErr = err
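UpdateReplicationControllerWithRetries re-reads the object, applies the caller-supplied mutation, and retries the push until the apiserver accepts it. A simplified, framework-free sketch of that get-mutate-update retry pattern; the toy store type and conflict error are invented for illustration only:

package main

import (
	"errors"
	"fmt"
)

var errConflict = errors.New("conflict: object changed, retry")

// store is a toy stand-in for the apiserver: the first update attempt
// fails with a conflict, later attempts succeed.
type store struct {
	value    string
	failures int
}

func (s *store) get() string { return s.value }

func (s *store) update(v string) error {
	if s.failures > 0 {
		s.failures--
		return errConflict
	}
	s.value = v
	return nil
}

// updateWithRetries re-reads the object and re-applies the mutation on
// every attempt, so a conflict never loses the caller's change.
func updateWithRetries(s *store, attempts int, mutate func(string) string) error {
	var err error
	for i := 0; i < attempts; i++ {
		candidate := mutate(s.get())
		if err = s.update(candidate); err == nil {
			return nil
		}
	}
	return fmt.Errorf("giving up after %d attempts: %v", attempts, err)
}

func main() {
	s := &store{value: "replicas=1", failures: 1}
	err := updateWithRetries(s, 3, func(old string) string { return "replicas=3" })
	fmt.Println(s.value, err) // replicas=3 <nil>
}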
@ -144,10 +145,10 @@ func WaitForReplicationController(c clientset.Interface, namespace, name string,
err := wait.PollImmediate(interval, timeout, func() (bool, error) { err := wait.PollImmediate(interval, timeout, func() (bool, error) {
_, err := c.CoreV1().ReplicationControllers(namespace).Get(name, metav1.GetOptions{}) _, err := c.CoreV1().ReplicationControllers(namespace).Get(name, metav1.GetOptions{})
if err != nil { if err != nil {
Logf("Get ReplicationController %s in namespace %s failed (%v).", name, namespace, err) e2elog.Logf("Get ReplicationController %s in namespace %s failed (%v).", name, namespace, err)
return !exist, nil return !exist, nil
} }
Logf("ReplicationController %s in namespace %s found.", name, namespace) e2elog.Logf("ReplicationController %s in namespace %s found.", name, namespace)
return exist, nil return exist, nil
}) })
if err != nil { if err != nil {
@ -164,13 +165,13 @@ func WaitForReplicationControllerwithSelector(c clientset.Interface, namespace s
rcs, err := c.CoreV1().ReplicationControllers(namespace).List(metav1.ListOptions{LabelSelector: selector.String()}) rcs, err := c.CoreV1().ReplicationControllers(namespace).List(metav1.ListOptions{LabelSelector: selector.String()})
switch { switch {
case len(rcs.Items) != 0: case len(rcs.Items) != 0:
Logf("ReplicationController with %s in namespace %s found.", selector.String(), namespace) e2elog.Logf("ReplicationController with %s in namespace %s found.", selector.String(), namespace)
return exist, nil return exist, nil
case len(rcs.Items) == 0: case len(rcs.Items) == 0:
Logf("ReplicationController with %s in namespace %s disappeared.", selector.String(), namespace) e2elog.Logf("ReplicationController with %s in namespace %s disappeared.", selector.String(), namespace)
return !exist, nil return !exist, nil
default: default:
Logf("List ReplicationController with %s in namespace %s failed: %v", selector.String(), namespace, err) e2elog.Logf("List ReplicationController with %s in namespace %s failed: %v", selector.String(), namespace, err)
return false, nil return false, nil
} }
}) })
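WaitForReplicationController and WaitForReplicationControllerwithSelector both reduce a lookup to "was it found" and compare that against the caller's exist flag inside a poll. A stdlib-only sketch of that exist/absent polling, with a hypothetical lookup callback standing in for the Get/List calls:

package main

import (
	"fmt"
	"time"
)

// waitForExistence polls lookup until its result matches want (true =
// wait for the object to appear, false = wait for it to disappear).
func waitForExistence(want bool, interval, timeout time.Duration, lookup func() bool) error {
	for start := time.Now(); time.Since(start) < timeout; time.Sleep(interval) {
		if lookup() == want {
			return nil
		}
	}
	return fmt.Errorf("condition exist=%v not met within %v", want, timeout)
}

func main() {
	created := time.Now().Add(120 * time.Millisecond)
	err := waitForExistence(true, 20*time.Millisecond, time.Second, func() bool {
		return time.Now().After(created) // the "object" appears after 120ms
	})
	fmt.Println(err) // <nil>
}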
@ -227,25 +228,25 @@ waitLoop:
for _, podID := range pods { for _, podID := range pods {
running := RunKubectlOrDie("get", "pods", podID, "-o", "template", getContainerStateTemplate, fmt.Sprintf("--namespace=%v", ns)) running := RunKubectlOrDie("get", "pods", podID, "-o", "template", getContainerStateTemplate, fmt.Sprintf("--namespace=%v", ns))
if running != "true" { if running != "true" {
Logf("%s is created but not running", podID) e2elog.Logf("%s is created but not running", podID)
continue waitLoop continue waitLoop
} }
currentImage := RunKubectlOrDie("get", "pods", podID, "-o", "template", getImageTemplate, fmt.Sprintf("--namespace=%v", ns)) currentImage := RunKubectlOrDie("get", "pods", podID, "-o", "template", getImageTemplate, fmt.Sprintf("--namespace=%v", ns))
currentImage = trimDockerRegistry(currentImage) currentImage = trimDockerRegistry(currentImage)
if currentImage != containerImage { if currentImage != containerImage {
Logf("%s is created but running wrong image; expected: %s, actual: %s", podID, containerImage, currentImage) e2elog.Logf("%s is created but running wrong image; expected: %s, actual: %s", podID, containerImage, currentImage)
continue waitLoop continue waitLoop
} }
// Call the generic validator function here. // Call the generic validator function here.
// This might validate for example, that (1) getting a url works and (2) url is serving correct content. // This might validate for example, that (1) getting a url works and (2) url is serving correct content.
if err := validator(c, podID); err != nil { if err := validator(c, podID); err != nil {
Logf("%s is running right image but validator function failed: %v", podID, err) e2elog.Logf("%s is running right image but validator function failed: %v", podID, err)
continue waitLoop continue waitLoop
} }
Logf("%s is verified up and running", podID) e2elog.Logf("%s is verified up and running", podID)
runningPods = append(runningPods, podID) runningPods = append(runningPods, podID)
} }
// If we reach here, then all our checks passed. // If we reach here, then all our checks passed.
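The checks above bail out of the whole per-pod pass with "continue waitLoop", a labeled continue that restarts the outer retry loop rather than the inner range. A small standalone sketch of that control flow, using invented pod names and readiness data:

package main

import "fmt"

func main() {
	pods := []string{"pod-a", "pod-b", "pod-c"}
	readyAfter := map[string]int{"pod-a": 0, "pod-b": 1, "pod-c": 2}

waitLoop:
	for attempt := 0; attempt < 5; attempt++ {
		for _, pod := range pods {
			if attempt < readyAfter[pod] {
				fmt.Printf("attempt %d: %s not ready, restarting outer loop\n", attempt, pod)
				continue waitLoop // skips the remaining pods for this attempt
			}
		}
		fmt.Printf("attempt %d: all pods ready\n", attempt)
		break
	}
}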

View File

@ -18,6 +18,7 @@ go_library(
"//staging/src/k8s.io/client-go/kubernetes:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/typed/apps/v1:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/typed/apps/v1:go_default_library",
"//test/e2e/framework:go_default_library", "//test/e2e/framework:go_default_library",
"//test/e2e/framework/log:go_default_library",
"//test/utils:go_default_library", "//test/utils:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library", "//vendor/github.com/onsi/ginkgo:go_default_library",
], ],

View File

@ -24,12 +24,13 @@ import (
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util" deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
testutils "k8s.io/kubernetes/test/utils" testutils "k8s.io/kubernetes/test/utils"
) )
// UpdateReplicaSetWithRetries updates replicaset template with retries. // UpdateReplicaSetWithRetries updates replicaset template with retries.
func UpdateReplicaSetWithRetries(c clientset.Interface, namespace, name string, applyUpdate testutils.UpdateReplicaSetFunc) (*apps.ReplicaSet, error) { func UpdateReplicaSetWithRetries(c clientset.Interface, namespace, name string, applyUpdate testutils.UpdateReplicaSetFunc) (*apps.ReplicaSet, error) {
return testutils.UpdateReplicaSetWithRetries(c, namespace, name, applyUpdate, framework.Logf, framework.Poll, framework.PollShortTimeout) return testutils.UpdateReplicaSetWithRetries(c, namespace, name, applyUpdate, e2elog.Logf, framework.Poll, framework.PollShortTimeout)
} }
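Here the framework passes e2elog.Logf straight into testutils.UpdateReplicaSetWithRetries, i.e. the logger is just a function parameter. A minimal sketch of accepting a printf-style logger that way; logfFunc and doWorkWithRetries are illustrative names, not framework APIs:

package main

import "log"

// logfFunc matches the printf-style signature that e2elog.Logf and
// log.Printf both satisfy.
type logfFunc func(format string, args ...interface{})

// doWorkWithRetries takes the logger as a plain function value, so
// callers can inject whichever implementation they want.
func doWorkWithRetries(logf logfFunc, attempts int) {
	for i := 1; i <= attempts; i++ {
		logf("attempt %d of %d", i, attempts)
	}
}

func main() {
	doWorkWithRetries(log.Printf, 3) // log.Printf satisfies logfFunc
}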
// CheckNewRSAnnotations check if the new RS's annotation is as expected // CheckNewRSAnnotations check if the new RS's annotation is as expected

View File

@ -32,6 +32,7 @@ import (
utilruntime "k8s.io/apimachinery/pkg/util/runtime" utilruntime "k8s.io/apimachinery/pkg/util/runtime"
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/pkg/util/system" "k8s.io/kubernetes/pkg/util/system"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
) )
// ResourceConstraint is a struct to hold constraints. // ResourceConstraint is a struct to hold constraints.
@ -166,13 +167,13 @@ func (w *resourceGatherWorker) singleProbe() {
} else { } else {
nodeUsage, err := getOneTimeResourceUsageOnNode(w.c, w.nodeName, w.probeDuration, func() []string { return w.containerIDs }) nodeUsage, err := getOneTimeResourceUsageOnNode(w.c, w.nodeName, w.probeDuration, func() []string { return w.containerIDs })
if err != nil { if err != nil {
Logf("Error while reading data from %v: %v", w.nodeName, err) e2elog.Logf("Error while reading data from %v: %v", w.nodeName, err)
return return
} }
for k, v := range nodeUsage { for k, v := range nodeUsage {
data[k] = v data[k] = v
if w.printVerboseLogs { if w.printVerboseLogs {
Logf("Get container %v usage on node %v. CPUUsageInCores: %v, MemoryUsageInBytes: %v, MemoryWorkingSetInBytes: %v", k, w.nodeName, v.CPUUsageInCores, v.MemoryUsageInBytes, v.MemoryWorkingSetInBytes) e2elog.Logf("Get container %v usage on node %v. CPUUsageInCores: %v, MemoryUsageInBytes: %v, MemoryWorkingSetInBytes: %v", k, w.nodeName, v.CPUUsageInCores, v.MemoryUsageInBytes, v.MemoryWorkingSetInBytes)
} }
} }
} }
@ -182,7 +183,7 @@ func (w *resourceGatherWorker) singleProbe() {
func (w *resourceGatherWorker) gather(initialSleep time.Duration) { func (w *resourceGatherWorker) gather(initialSleep time.Duration) {
defer utilruntime.HandleCrash() defer utilruntime.HandleCrash()
defer w.wg.Done() defer w.wg.Done()
defer Logf("Closing worker for %v", w.nodeName) defer e2elog.Logf("Closing worker for %v", w.nodeName)
defer func() { w.finished = true }() defer func() { w.finished = true }()
select { select {
case <-time.After(initialSleep): case <-time.After(initialSleep):
@ -257,7 +258,7 @@ func NewResourceUsageGatherer(c clientset.Interface, options ResourceGathererOpt
if pods == nil { if pods == nil {
pods, err = c.CoreV1().Pods("kube-system").List(metav1.ListOptions{}) pods, err = c.CoreV1().Pods("kube-system").List(metav1.ListOptions{})
if err != nil { if err != nil {
Logf("Error while listing Pods: %v", err) e2elog.Logf("Error while listing Pods: %v", err)
return nil, err return nil, err
} }
} }
@ -281,7 +282,7 @@ func NewResourceUsageGatherer(c clientset.Interface, options ResourceGathererOpt
} }
nodeList, err := c.CoreV1().Nodes().List(metav1.ListOptions{}) nodeList, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
if err != nil { if err != nil {
Logf("Error while listing Nodes: %v", err) e2elog.Logf("Error while listing Nodes: %v", err)
return nil, err return nil, err
} }
@ -330,7 +331,7 @@ func (g *ContainerResourceGatherer) StartGatheringData() {
// specified resource constraints. // specified resource constraints.
func (g *ContainerResourceGatherer) StopAndSummarize(percentiles []int, constraints map[string]ResourceConstraint) (*ResourceUsageSummary, error) { func (g *ContainerResourceGatherer) StopAndSummarize(percentiles []int, constraints map[string]ResourceConstraint) (*ResourceUsageSummary, error) {
close(g.stopCh) close(g.stopCh)
Logf("Closed stop channel. Waiting for %v workers", len(g.workers)) e2elog.Logf("Closed stop channel. Waiting for %v workers", len(g.workers))
finished := make(chan struct{}) finished := make(chan struct{})
go func() { go func() {
g.workerWg.Wait() g.workerWg.Wait()
@ -338,7 +339,7 @@ func (g *ContainerResourceGatherer) StopAndSummarize(percentiles []int, constrai
}() }()
select { select {
case <-finished: case <-finished:
Logf("Waitgroup finished.") e2elog.Logf("Waitgroup finished.")
case <-time.After(2 * time.Minute): case <-time.After(2 * time.Minute):
unfinished := make([]string, 0) unfinished := make([]string, 0)
for i := range g.workers { for i := range g.workers {
@ -346,11 +347,11 @@ func (g *ContainerResourceGatherer) StopAndSummarize(percentiles []int, constrai
unfinished = append(unfinished, g.workers[i].nodeName) unfinished = append(unfinished, g.workers[i].nodeName)
} }
} }
Logf("Timed out while waiting for waitgroup, some workers failed to finish: %v", unfinished) e2elog.Logf("Timed out while waiting for waitgroup, some workers failed to finish: %v", unfinished)
} }
if len(percentiles) == 0 { if len(percentiles) == 0 {
Logf("Warning! Empty percentile list for stopAndPrintData.") e2elog.Logf("Warning! Empty percentile list for stopAndPrintData.")
return &ResourceUsageSummary{}, fmt.Errorf("Failed to get any resource usage data") return &ResourceUsageSummary{}, fmt.Errorf("Failed to get any resource usage data")
} }
data := make(map[int]ResourceUsagePerContainer) data := make(map[int]ResourceUsagePerContainer)
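StopAndSummarize above caps the wait on the worker WaitGroup at two minutes by having a goroutine close a channel when the WaitGroup finishes and selecting between that channel and time.After. A standalone sketch of that wait-with-timeout pattern; the worker durations are invented:

package main

import (
	"fmt"
	"sync"
	"time"
)

// waitTimeout returns true if wg finished before timeout, false otherwise.
func waitTimeout(wg *sync.WaitGroup, timeout time.Duration) bool {
	done := make(chan struct{})
	go func() {
		wg.Wait()
		close(done)
	}()
	select {
	case <-done:
		return true
	case <-time.After(timeout):
		return false
	}
}

func main() {
	var wg sync.WaitGroup
	for _, ms := range []int{10, 20, 30} {
		wg.Add(1)
		go func(d time.Duration) {
			defer wg.Done()
			time.Sleep(d)
		}(time.Duration(ms) * time.Millisecond)
	}
	fmt.Println("finished in time:", waitTimeout(&wg, time.Second)) // true
}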

View File

@ -39,6 +39,7 @@ import (
"k8s.io/client-go/util/retry" "k8s.io/client-go/util/retry"
api "k8s.io/kubernetes/pkg/apis/core" api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/registry/core/service/portallocator" "k8s.io/kubernetes/pkg/registry/core/service/portallocator"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2essh "k8s.io/kubernetes/test/e2e/framework/ssh" e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
testutils "k8s.io/kubernetes/test/utils" testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image" imageutils "k8s.io/kubernetes/test/utils/image"
@ -438,22 +439,22 @@ func (j *ServiceTestJig) WaitForEndpointOnNode(namespace, serviceName, nodeName
err := wait.PollImmediate(Poll, LoadBalancerCreateTimeoutDefault, func() (bool, error) { err := wait.PollImmediate(Poll, LoadBalancerCreateTimeoutDefault, func() (bool, error) {
endpoints, err := j.Client.CoreV1().Endpoints(namespace).Get(serviceName, metav1.GetOptions{}) endpoints, err := j.Client.CoreV1().Endpoints(namespace).Get(serviceName, metav1.GetOptions{})
if err != nil { if err != nil {
Logf("Get endpoints for service %s/%s failed (%s)", namespace, serviceName, err) e2elog.Logf("Get endpoints for service %s/%s failed (%s)", namespace, serviceName, err)
return false, nil return false, nil
} }
if len(endpoints.Subsets) == 0 { if len(endpoints.Subsets) == 0 {
Logf("Expect endpoints with subsets, got none.") e2elog.Logf("Expect endpoints with subsets, got none.")
return false, nil return false, nil
} }
// TODO: Handle multiple endpoints // TODO: Handle multiple endpoints
if len(endpoints.Subsets[0].Addresses) == 0 { if len(endpoints.Subsets[0].Addresses) == 0 {
Logf("Expected Ready endpoints - found none") e2elog.Logf("Expected Ready endpoints - found none")
return false, nil return false, nil
} }
epHostName := *endpoints.Subsets[0].Addresses[0].NodeName epHostName := *endpoints.Subsets[0].Addresses[0].NodeName
Logf("Pod for service %s/%s is on node %s", namespace, serviceName, epHostName) e2elog.Logf("Pod for service %s/%s is on node %s", namespace, serviceName, epHostName)
if epHostName != nodeName { if epHostName != nodeName {
Logf("Found endpoint on wrong node, expected %v, got %v", nodeName, epHostName) e2elog.Logf("Found endpoint on wrong node, expected %v, got %v", nodeName, epHostName)
return false, nil return false, nil
} }
return true, nil return true, nil
@ -546,7 +547,7 @@ func (j *ServiceTestJig) UpdateServiceOrFail(namespace, name string, update func
// WaitForNewIngressIPOrFail waits for the given service to get a new ingress IP, or fails after the given timeout // WaitForNewIngressIPOrFail waits for the given service to get a new ingress IP, or fails after the given timeout
func (j *ServiceTestJig) WaitForNewIngressIPOrFail(namespace, name, existingIP string, timeout time.Duration) *v1.Service { func (j *ServiceTestJig) WaitForNewIngressIPOrFail(namespace, name, existingIP string, timeout time.Duration) *v1.Service {
Logf("Waiting up to %v for service %q to get a new ingress IP", timeout, name) e2elog.Logf("Waiting up to %v for service %q to get a new ingress IP", timeout, name)
service := j.waitForConditionOrFail(namespace, name, timeout, "have a new ingress IP", func(svc *v1.Service) bool { service := j.waitForConditionOrFail(namespace, name, timeout, "have a new ingress IP", func(svc *v1.Service) bool {
if len(svc.Status.LoadBalancer.Ingress) == 0 { if len(svc.Status.LoadBalancer.Ingress) == 0 {
return false return false
@ -572,7 +573,7 @@ func (j *ServiceTestJig) ChangeServiceNodePortOrFail(namespace, name string, ini
s.Spec.Ports[0].NodePort = int32(newPort) s.Spec.Ports[0].NodePort = int32(newPort)
}) })
if err != nil && strings.Contains(err.Error(), portallocator.ErrAllocated.Error()) { if err != nil && strings.Contains(err.Error(), portallocator.ErrAllocated.Error()) {
Logf("tried nodePort %d, but it is in use, will try another", newPort) e2elog.Logf("tried nodePort %d, but it is in use, will try another", newPort)
continue continue
} }
// Otherwise err was nil or err was a real error // Otherwise err was nil or err was a real error
@ -586,7 +587,7 @@ func (j *ServiceTestJig) ChangeServiceNodePortOrFail(namespace, name string, ini
// WaitForLoadBalancerOrFail waits the given service to have a LoadBalancer, or fails after the given timeout // WaitForLoadBalancerOrFail waits the given service to have a LoadBalancer, or fails after the given timeout
func (j *ServiceTestJig) WaitForLoadBalancerOrFail(namespace, name string, timeout time.Duration) *v1.Service { func (j *ServiceTestJig) WaitForLoadBalancerOrFail(namespace, name string, timeout time.Duration) *v1.Service {
Logf("Waiting up to %v for service %q to have a LoadBalancer", timeout, name) e2elog.Logf("Waiting up to %v for service %q to have a LoadBalancer", timeout, name)
service := j.waitForConditionOrFail(namespace, name, timeout, "have a load balancer", func(svc *v1.Service) bool { service := j.waitForConditionOrFail(namespace, name, timeout, "have a load balancer", func(svc *v1.Service) bool {
return len(svc.Status.LoadBalancer.Ingress) > 0 return len(svc.Status.LoadBalancer.Ingress) > 0
}) })
@ -598,11 +599,11 @@ func (j *ServiceTestJig) WaitForLoadBalancerDestroyOrFail(namespace, name string
// TODO: once support ticket 21807001 is resolved, reduce this timeout back to something reasonable // TODO: once support ticket 21807001 is resolved, reduce this timeout back to something reasonable
defer func() { defer func() {
if err := EnsureLoadBalancerResourcesDeleted(ip, strconv.Itoa(port)); err != nil { if err := EnsureLoadBalancerResourcesDeleted(ip, strconv.Itoa(port)); err != nil {
Logf("Failed to delete cloud resources for service: %s %d (%v)", ip, port, err) e2elog.Logf("Failed to delete cloud resources for service: %s %d (%v)", ip, port, err)
} }
}() }()
Logf("Waiting up to %v for service %q to have no LoadBalancer", timeout, name) e2elog.Logf("Waiting up to %v for service %q to have no LoadBalancer", timeout, name)
service := j.waitForConditionOrFail(namespace, name, timeout, "have no load balancer", func(svc *v1.Service) bool { service := j.waitForConditionOrFail(namespace, name, timeout, "have no load balancer", func(svc *v1.Service) bool {
return len(svc.Status.LoadBalancer.Ingress) == 0 return len(svc.Status.LoadBalancer.Ingress) == 0
}) })
@ -791,7 +792,7 @@ func (j *ServiceTestJig) waitForPodsCreated(namespace string, replicas int) ([]s
timeout := 2 * time.Minute timeout := 2 * time.Minute
// List the pods, making sure we observe all the replicas. // List the pods, making sure we observe all the replicas.
label := labels.SelectorFromSet(labels.Set(j.Labels)) label := labels.SelectorFromSet(labels.Set(j.Labels))
Logf("Waiting up to %v for %d pods to be created", timeout, replicas) e2elog.Logf("Waiting up to %v for %d pods to be created", timeout, replicas)
for start := time.Now(); time.Since(start) < timeout; time.Sleep(2 * time.Second) { for start := time.Now(); time.Since(start) < timeout; time.Sleep(2 * time.Second) {
options := metav1.ListOptions{LabelSelector: label.String()} options := metav1.ListOptions{LabelSelector: label.String()}
pods, err := j.Client.CoreV1().Pods(namespace).List(options) pods, err := j.Client.CoreV1().Pods(namespace).List(options)
@ -807,10 +808,10 @@ func (j *ServiceTestJig) waitForPodsCreated(namespace string, replicas int) ([]s
found = append(found, pod.Name) found = append(found, pod.Name)
} }
if len(found) == replicas { if len(found) == replicas {
Logf("Found all %d pods", replicas) e2elog.Logf("Found all %d pods", replicas)
return found, nil return found, nil
} }
Logf("Found %d/%d pods - will retry", len(found), replicas) e2elog.Logf("Found %d/%d pods - will retry", len(found), replicas)
} }
return nil, fmt.Errorf("timeout waiting for %d pods to be created", replicas) return nil, fmt.Errorf("timeout waiting for %d pods to be created", replicas)
} }
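waitForPodsCreated lists pods by label every two seconds until the count matches the replica count. A stdlib sketch of that wait, with a hypothetical lister callback in place of the client-go List call; the appearance schedule is simulated from elapsed time:

package main

import (
	"fmt"
	"time"
)

// waitForCount polls list until it returns exactly want names.
func waitForCount(want int, interval, timeout time.Duration, list func() []string) ([]string, error) {
	for start := time.Now(); time.Since(start) < timeout; time.Sleep(interval) {
		if found := list(); len(found) == want {
			return found, nil
		}
	}
	return nil, fmt.Errorf("timeout waiting for %d items", want)
}

func main() {
	start := time.Now()
	found, err := waitForCount(3, 20*time.Millisecond, time.Second, func() []string {
		// Simulate one pod appearing every 30ms, capped at three pods.
		n := int(time.Since(start) / (30 * time.Millisecond))
		if n > 3 {
			n = 3
		}
		names := make([]string, n)
		for i := range names {
			names[i] = fmt.Sprintf("pod-%d", i)
		}
		return names
	})
	fmt.Println(found, err)
}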
@ -859,7 +860,7 @@ func newNetexecPodSpec(podName string, httpPort, udpPort int32, hostNetwork bool
// LaunchNetexecPodOnNode launches a netexec pod on the given node. // LaunchNetexecPodOnNode launches a netexec pod on the given node.
func (j *ServiceTestJig) LaunchNetexecPodOnNode(f *Framework, nodeName, podName string, httpPort, udpPort int32, hostNetwork bool) { func (j *ServiceTestJig) LaunchNetexecPodOnNode(f *Framework, nodeName, podName string, httpPort, udpPort int32, hostNetwork bool) {
Logf("Creating netexec pod %q on node %v in namespace %q", podName, nodeName, f.Namespace.Name) e2elog.Logf("Creating netexec pod %q on node %v in namespace %q", podName, nodeName, f.Namespace.Name)
pod := newNetexecPodSpec(podName, httpPort, udpPort, hostNetwork) pod := newNetexecPodSpec(podName, httpPort, udpPort, hostNetwork)
pod.Spec.NodeName = nodeName pod.Spec.NodeName = nodeName
pod.ObjectMeta.Labels = j.Labels pod.ObjectMeta.Labels = j.Labels
@ -867,7 +868,7 @@ func (j *ServiceTestJig) LaunchNetexecPodOnNode(f *Framework, nodeName, podName
_, err := podClient.Create(pod) _, err := podClient.Create(pod)
ExpectNoError(err) ExpectNoError(err)
ExpectNoError(f.WaitForPodRunning(podName)) ExpectNoError(f.WaitForPodRunning(podName))
Logf("Netexec pod %q in namespace %q running", pod.Name, f.Namespace.Name) e2elog.Logf("Netexec pod %q in namespace %q running", pod.Name, f.Namespace.Name)
} }
// newEchoServerPodSpec returns the pod spec of echo server pod // newEchoServerPodSpec returns the pod spec of echo server pod
@ -895,7 +896,7 @@ func newEchoServerPodSpec(podName string) *v1.Pod {
// as the target for source IP preservation test. The client's source ip would // as the target for source IP preservation test. The client's source ip would
// be echoed back by the web server. // be echoed back by the web server.
func (j *ServiceTestJig) LaunchEchoserverPodOnNode(f *Framework, nodeName, podName string) { func (j *ServiceTestJig) LaunchEchoserverPodOnNode(f *Framework, nodeName, podName string) {
Logf("Creating echo server pod %q in namespace %q", podName, f.Namespace.Name) e2elog.Logf("Creating echo server pod %q in namespace %q", podName, f.Namespace.Name)
pod := newEchoServerPodSpec(podName) pod := newEchoServerPodSpec(podName)
pod.Spec.NodeName = nodeName pod.Spec.NodeName = nodeName
pod.ObjectMeta.Labels = j.Labels pod.ObjectMeta.Labels = j.Labels
@ -903,7 +904,7 @@ func (j *ServiceTestJig) LaunchEchoserverPodOnNode(f *Framework, nodeName, podNa
_, err := podClient.Create(pod) _, err := podClient.Create(pod)
ExpectNoError(err) ExpectNoError(err)
ExpectNoError(f.WaitForPodRunning(podName)) ExpectNoError(f.WaitForPodRunning(podName))
Logf("Echo server pod %q in namespace %q running", pod.Name, f.Namespace.Name) e2elog.Logf("Echo server pod %q in namespace %q running", pod.Name, f.Namespace.Name)
} }
// TestReachableHTTP tests that the given host serves HTTP on the given port. // TestReachableHTTP tests that the given host serves HTTP on the given port.
@ -1033,15 +1034,15 @@ func testHTTPHealthCheckNodePort(ip string, port int, request string) (bool, err
Failf("Got empty IP for reachability check (%s)", url) Failf("Got empty IP for reachability check (%s)", url)
return false, fmt.Errorf("invalid input ip or port") return false, fmt.Errorf("invalid input ip or port")
} }
Logf("Testing HTTP health check on %v", url) e2elog.Logf("Testing HTTP health check on %v", url)
resp, err := httpGetNoConnectionPoolTimeout(url, 5*time.Second) resp, err := httpGetNoConnectionPoolTimeout(url, 5*time.Second)
if err != nil { if err != nil {
Logf("Got error testing for reachability of %s: %v", url, err) e2elog.Logf("Got error testing for reachability of %s: %v", url, err)
return false, err return false, err
} }
defer resp.Body.Close() defer resp.Body.Close()
if err != nil { if err != nil {
Logf("Got error reading response from %s: %v", url, err) e2elog.Logf("Got error reading response from %s: %v", url, err)
return false, err return false, err
} }
// HealthCheck responder returns 503 for no local endpoints // HealthCheck responder returns 503 for no local endpoints
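testHTTPHealthCheckNodePort above probes the health-check URL with a per-request timeout via httpGetNoConnectionPoolTimeout. One plausible way such a helper could be written, using a Client timeout and keep-alives disabled so every probe opens a fresh connection; this is a sketch, not the framework's actual implementation, and the URL in main is an arbitrary example:

package main

import (
	"fmt"
	"net/http"
	"time"
)

// getNoPool issues a GET with an overall timeout and keep-alives
// disabled, so each probe uses its own connection.
func getNoPool(url string, timeout time.Duration) (*http.Response, error) {
	client := &http.Client{
		Timeout:   timeout,
		Transport: &http.Transport{DisableKeepAlives: true},
	}
	return client.Get(url)
}

func main() {
	resp, err := getNoPool("http://127.0.0.1:10256/healthz", 5*time.Second)
	if err != nil {
		fmt.Println("probe failed:", err)
		return
	}
	defer resp.Body.Close()
	// A health-check responder returns 503 when it has no local
	// endpoints and 200 when at least one endpoint is local.
	fmt.Println("status:", resp.StatusCode)
}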
@ -1318,22 +1319,22 @@ func VerifyServeHostnameServiceUp(c clientset.Interface, ns, host string, expect
// verify service from node // verify service from node
func() string { func() string {
cmd := "set -e; " + buildCommand("wget -q --timeout=0.2 --tries=1 -O -") cmd := "set -e; " + buildCommand("wget -q --timeout=0.2 --tries=1 -O -")
Logf("Executing cmd %q on host %v", cmd, host) e2elog.Logf("Executing cmd %q on host %v", cmd, host)
result, err := e2essh.SSH(cmd, host, TestContext.Provider) result, err := e2essh.SSH(cmd, host, TestContext.Provider)
if err != nil || result.Code != 0 { if err != nil || result.Code != 0 {
e2essh.LogResult(result) e2essh.LogResult(result)
Logf("error while SSH-ing to node: %v", err) e2elog.Logf("error while SSH-ing to node: %v", err)
} }
return result.Stdout return result.Stdout
}, },
// verify service from pod // verify service from pod
func() string { func() string {
cmd := buildCommand("wget -q -T 1 -O -") cmd := buildCommand("wget -q -T 1 -O -")
Logf("Executing cmd %q in pod %v/%v", cmd, ns, execPodName) e2elog.Logf("Executing cmd %q in pod %v/%v", cmd, ns, execPodName)
// TODO: Use exec-over-http via the netexec pod instead of kubectl exec. // TODO: Use exec-over-http via the netexec pod instead of kubectl exec.
output, err := RunHostCmd(ns, execPodName, cmd) output, err := RunHostCmd(ns, execPodName, cmd)
if err != nil { if err != nil {
Logf("error while kubectl execing %q in pod %v/%v: %v\nOutput: %v", cmd, ns, execPodName, err, output) e2elog.Logf("error while kubectl execing %q in pod %v/%v: %v\nOutput: %v", cmd, ns, execPodName, err, output)
} }
return output return output
}, },
@ -1359,12 +1360,12 @@ func VerifyServeHostnameServiceUp(c clientset.Interface, ns, host string, expect
// and we need a better way to track how often it occurs. // and we need a better way to track how often it occurs.
if gotEndpoints.IsSuperset(expectedEndpoints) { if gotEndpoints.IsSuperset(expectedEndpoints) {
if !gotEndpoints.Equal(expectedEndpoints) { if !gotEndpoints.Equal(expectedEndpoints) {
Logf("Ignoring unexpected output wgetting endpoints of service %s: %v", serviceIP, gotEndpoints.Difference(expectedEndpoints)) e2elog.Logf("Ignoring unexpected output wgetting endpoints of service %s: %v", serviceIP, gotEndpoints.Difference(expectedEndpoints))
} }
passed = true passed = true
break break
} }
Logf("Unable to reach the following endpoints of service %s: %v", serviceIP, expectedEndpoints.Difference(gotEndpoints)) e2elog.Logf("Unable to reach the following endpoints of service %s: %v", serviceIP, expectedEndpoints.Difference(gotEndpoints))
} }
if !passed { if !passed {
// Sort the lists so they're easier to visually diff. // Sort the lists so they're easier to visually diff.
@ -1391,12 +1392,12 @@ func VerifyServeHostnameServiceDown(c clientset.Interface, host string, serviceI
result, err := e2essh.SSH(command, host, TestContext.Provider) result, err := e2essh.SSH(command, host, TestContext.Provider)
if err != nil { if err != nil {
e2essh.LogResult(result) e2essh.LogResult(result)
Logf("error while SSH-ing to node: %v", err) e2elog.Logf("error while SSH-ing to node: %v", err)
} }
if result.Code != 99 { if result.Code != 99 {
return nil return nil
} }
Logf("service still alive - still waiting") e2elog.Logf("service still alive - still waiting")
} }
return fmt.Errorf("waiting for service to be down timed out") return fmt.Errorf("waiting for service to be down timed out")
} }
@ -1408,10 +1409,10 @@ func CleanupServiceResources(c clientset.Interface, loadBalancerName, region, zo
// DescribeSvc logs the output of kubectl describe svc for the given namespace // DescribeSvc logs the output of kubectl describe svc for the given namespace
func DescribeSvc(ns string) { func DescribeSvc(ns string) {
Logf("\nOutput of kubectl describe svc:\n") e2elog.Logf("\nOutput of kubectl describe svc:\n")
desc, _ := RunKubectl( desc, _ := RunKubectl(
"describe", "svc", fmt.Sprintf("--namespace=%v", ns)) "describe", "svc", fmt.Sprintf("--namespace=%v", ns))
Logf(desc) e2elog.Logf(desc)
} }
// CreateServiceSpec returns a Service object for testing. // CreateServiceSpec returns a Service object for testing.
@ -1460,7 +1461,7 @@ type affinityTracker struct {
// Record the response going to a given host. // Record the response going to a given host.
func (at *affinityTracker) recordHost(host string) { func (at *affinityTracker) recordHost(host string) {
at.hostTrace = append(at.hostTrace, host) at.hostTrace = append(at.hostTrace, host)
Logf("Received response from host: %s", host) e2elog.Logf("Received response from host: %s", host)
} }
// Check that we got a constant count requests going to the same host. // Check that we got a constant count requests going to the same host.
@ -1483,7 +1484,7 @@ func (at *affinityTracker) checkHostTrace(count int) (fulfilled, affinityHolds b
} }
func checkAffinityFailed(tracker affinityTracker, err string) { func checkAffinityFailed(tracker affinityTracker, err string) {
Logf("%v", tracker.hostTrace) e2elog.Logf("%v", tracker.hostTrace)
Failf(err) Failf(err)
} }
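checkHostTrace (only partially shown above) decides whether session affinity held by inspecting the last count responses recorded in hostTrace. A simplified stdlib version of that check, with invented host names; the exact edge-case behaviour of the framework's function may differ:

package main

import "fmt"

// affinityHolds reports whether the last count entries of trace all came
// from the same host; fulfilled is false when there are not yet enough
// samples to decide.
func affinityHolds(trace []string, count int) (fulfilled, holds bool) {
	if len(trace) < count {
		return false, true
	}
	last := trace[len(trace)-count:]
	for _, host := range last {
		if host != last[0] {
			return true, false
		}
	}
	return true, true
}

func main() {
	trace := []string{"node-a", "node-a", "node-b", "node-b", "node-b"}
	fulfilled, holds := affinityHolds(trace, 3)
	fmt.Println(fulfilled, holds) // true true
}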
@ -1504,7 +1505,7 @@ func CheckAffinity(jig *ServiceTestJig, execPod *v1.Pod, targetIP string, target
if execPod != nil { if execPod != nil {
stdout, err := RunHostCmd(execPod.Namespace, execPod.Name, cmd) stdout, err := RunHostCmd(execPod.Namespace, execPod.Name, cmd)
if err != nil { if err != nil {
Logf("Failed to get response from %s. Retry until timeout", targetIPPort) e2elog.Logf("Failed to get response from %s. Retry until timeout", targetIPPort)
return false, nil return false, nil
} }
tracker.recordHost(stdout) tracker.recordHost(stdout)

View File

@ -19,6 +19,8 @@ package framework
import ( import (
"fmt" "fmt"
"time" "time"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
) )
const ( const (
@ -51,14 +53,14 @@ func WaitForGroupSize(group string, size int32) error {
for start := time.Now(); time.Since(start) < timeout; time.Sleep(20 * time.Second) { for start := time.Now(); time.Since(start) < timeout; time.Sleep(20 * time.Second) {
currentSize, err := GroupSize(group) currentSize, err := GroupSize(group)
if err != nil { if err != nil {
Logf("Failed to get node instance group size: %v", err) e2elog.Logf("Failed to get node instance group size: %v", err)
continue continue
} }
if currentSize != int(size) { if currentSize != int(size) {
Logf("Waiting for node instance group size %d, current size %d", size, currentSize) e2elog.Logf("Waiting for node instance group size %d, current size %d", size, currentSize)
continue continue
} }
Logf("Node instance group has reached the desired size %d", size) e2elog.Logf("Node instance group has reached the desired size %d", size)
return nil return nil
} }
return fmt.Errorf("timeout waiting %v for node instance group size to be %d", timeout, size) return fmt.Errorf("timeout waiting %v for node instance group size to be %d", timeout, size)

View File

@ -38,6 +38,7 @@ import (
"k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
podutil "k8s.io/kubernetes/pkg/api/v1/pod" podutil "k8s.io/kubernetes/pkg/api/v1/pod"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
"k8s.io/kubernetes/test/e2e/manifest" "k8s.io/kubernetes/test/e2e/manifest"
imageutils "k8s.io/kubernetes/test/utils/image" imageutils "k8s.io/kubernetes/test/utils/image"
) )
@ -94,18 +95,18 @@ func (s *StatefulSetTester) CreateStatefulSet(manifestPath, ns string) *apps.Sta
return filepath.Join(manifestPath, file) return filepath.Join(manifestPath, file)
} }
Logf("Parsing statefulset from %v", mkpath("statefulset.yaml")) e2elog.Logf("Parsing statefulset from %v", mkpath("statefulset.yaml"))
ss, err := manifest.StatefulSetFromManifest(mkpath("statefulset.yaml"), ns) ss, err := manifest.StatefulSetFromManifest(mkpath("statefulset.yaml"), ns)
ExpectNoError(err) ExpectNoError(err)
Logf("Parsing service from %v", mkpath("service.yaml")) e2elog.Logf("Parsing service from %v", mkpath("service.yaml"))
svc, err := manifest.SvcFromManifest(mkpath("service.yaml")) svc, err := manifest.SvcFromManifest(mkpath("service.yaml"))
ExpectNoError(err) ExpectNoError(err)
Logf(fmt.Sprintf("creating " + ss.Name + " service")) e2elog.Logf(fmt.Sprintf("creating " + ss.Name + " service"))
_, err = s.c.CoreV1().Services(ns).Create(svc) _, err = s.c.CoreV1().Services(ns).Create(svc)
ExpectNoError(err) ExpectNoError(err)
Logf(fmt.Sprintf("creating statefulset %v/%v with %d replicas and selector %+v", ss.Namespace, ss.Name, *(ss.Spec.Replicas), ss.Spec.Selector)) e2elog.Logf(fmt.Sprintf("creating statefulset %v/%v with %d replicas and selector %+v", ss.Namespace, ss.Name, *(ss.Spec.Replicas), ss.Spec.Selector))
_, err = s.c.AppsV1().StatefulSets(ns).Create(ss) _, err = s.c.AppsV1().StatefulSets(ns).Create(ss)
ExpectNoError(err) ExpectNoError(err)
s.WaitForRunningAndReady(*ss.Spec.Replicas, ss) s.WaitForRunningAndReady(*ss.Spec.Replicas, ss)
@ -134,7 +135,7 @@ func (s *StatefulSetTester) ExecInStatefulPods(ss *apps.StatefulSet, cmd string)
podList := s.GetPodList(ss) podList := s.GetPodList(ss)
for _, statefulPod := range podList.Items { for _, statefulPod := range podList.Items {
stdout, err := RunHostCmdWithRetries(statefulPod.Namespace, statefulPod.Name, cmd, StatefulSetPoll, StatefulPodTimeout) stdout, err := RunHostCmdWithRetries(statefulPod.Namespace, statefulPod.Name, cmd, StatefulSetPoll, StatefulPodTimeout)
Logf("stdout of %v on %v: %v", cmd, statefulPod.Name, stdout) e2elog.Logf("stdout of %v on %v: %v", cmd, statefulPod.Name, stdout)
if err != nil { if err != nil {
return err return err
} }
@ -162,9 +163,9 @@ func (s *StatefulSetTester) CheckHostname(ss *apps.StatefulSet) error {
func (s *StatefulSetTester) Saturate(ss *apps.StatefulSet) { func (s *StatefulSetTester) Saturate(ss *apps.StatefulSet) {
var i int32 var i int32
for i = 0; i < *(ss.Spec.Replicas); i++ { for i = 0; i < *(ss.Spec.Replicas); i++ {
Logf("Waiting for stateful pod at index %v to enter Running", i) e2elog.Logf("Waiting for stateful pod at index %v to enter Running", i)
s.WaitForRunning(i+1, i, ss) s.WaitForRunning(i+1, i, ss)
Logf("Resuming stateful pod at index %v", i) e2elog.Logf("Resuming stateful pod at index %v", i)
s.ResumeNextPod(ss) s.ResumeNextPod(ss)
} }
} }
@ -200,7 +201,7 @@ func (s *StatefulSetTester) Scale(ss *apps.StatefulSet, count int32) (*apps.Stat
name := ss.Name name := ss.Name
ns := ss.Namespace ns := ss.Namespace
Logf("Scaling statefulset %s to %d", name, count) e2elog.Logf("Scaling statefulset %s to %d", name, count)
ss = s.update(ns, name, func(ss *apps.StatefulSet) { *(ss.Spec.Replicas) = count }) ss = s.update(ns, name, func(ss *apps.StatefulSet) { *(ss.Spec.Replicas) = count })
var statefulPodList *v1.PodList var statefulPodList *v1.PodList
@ -282,12 +283,12 @@ func (s *StatefulSetTester) ConfirmStatefulPodCount(count int, ss *apps.Stateful
if hard { if hard {
Failf("StatefulSet %v scaled unexpectedly scaled to %d -> %d replicas", ss.Name, count, len(podList.Items)) Failf("StatefulSet %v scaled unexpectedly scaled to %d -> %d replicas", ss.Name, count, len(podList.Items))
} else { } else {
Logf("StatefulSet %v has not reached scale %d, at %d", ss.Name, count, statefulPodCount) e2elog.Logf("StatefulSet %v has not reached scale %d, at %d", ss.Name, count, statefulPodCount)
} }
time.Sleep(1 * time.Second) time.Sleep(1 * time.Second)
continue continue
} }
Logf("Verifying statefulset %v doesn't scale past %d for another %+v", ss.Name, count, deadline.Sub(t)) e2elog.Logf("Verifying statefulset %v doesn't scale past %d for another %+v", ss.Name, count, deadline.Sub(t))
time.Sleep(1 * time.Second) time.Sleep(1 * time.Second)
} }
} }
@ -300,7 +301,7 @@ func (s *StatefulSetTester) WaitForRunning(numPodsRunning, numPodsReady int32, s
podList := s.GetPodList(ss) podList := s.GetPodList(ss)
s.SortStatefulPods(podList) s.SortStatefulPods(podList)
if int32(len(podList.Items)) < numPodsRunning { if int32(len(podList.Items)) < numPodsRunning {
Logf("Found %d stateful pods, waiting for %d", len(podList.Items), numPodsRunning) e2elog.Logf("Found %d stateful pods, waiting for %d", len(podList.Items), numPodsRunning)
return false, nil return false, nil
} }
if int32(len(podList.Items)) > numPodsRunning { if int32(len(podList.Items)) > numPodsRunning {
@ -310,7 +311,7 @@ func (s *StatefulSetTester) WaitForRunning(numPodsRunning, numPodsReady int32, s
shouldBeReady := getStatefulPodOrdinal(&p) < int(numPodsReady) shouldBeReady := getStatefulPodOrdinal(&p) < int(numPodsReady)
isReady := podutil.IsPodReady(&p) isReady := podutil.IsPodReady(&p)
desiredReadiness := shouldBeReady == isReady desiredReadiness := shouldBeReady == isReady
Logf("Waiting for pod %v to enter %v - Ready=%v, currently %v - Ready=%v", p.Name, v1.PodRunning, shouldBeReady, p.Status.Phase, isReady) e2elog.Logf("Waiting for pod %v to enter %v - Ready=%v, currently %v - Ready=%v", p.Name, v1.PodRunning, shouldBeReady, p.Status.Phase, isReady)
if p.Status.Phase != v1.PodRunning || !desiredReadiness { if p.Status.Phase != v1.PodRunning || !desiredReadiness {
return false, nil return false, nil
} }
@ -407,14 +408,14 @@ func (s *StatefulSetTester) WaitForRollingUpdate(set *apps.StatefulSet) (*apps.S
return false, nil return false, nil
} }
if set.Status.UpdateRevision != set.Status.CurrentRevision { if set.Status.UpdateRevision != set.Status.CurrentRevision {
Logf("Waiting for StatefulSet %s/%s to complete update", e2elog.Logf("Waiting for StatefulSet %s/%s to complete update",
set.Namespace, set.Namespace,
set.Name, set.Name,
) )
s.SortStatefulPods(pods) s.SortStatefulPods(pods)
for i := range pods.Items { for i := range pods.Items {
if pods.Items[i].Labels[apps.StatefulSetRevisionLabel] != set.Status.UpdateRevision { if pods.Items[i].Labels[apps.StatefulSetRevisionLabel] != set.Status.UpdateRevision {
Logf("Waiting for Pod %s/%s to have revision %s update revision %s", e2elog.Logf("Waiting for Pod %s/%s to have revision %s update revision %s",
pods.Items[i].Namespace, pods.Items[i].Namespace,
pods.Items[i].Name, pods.Items[i].Name,
set.Status.UpdateRevision, set.Status.UpdateRevision,
@ -453,14 +454,14 @@ func (s *StatefulSetTester) WaitForPartitionedRollingUpdate(set *apps.StatefulSe
return false, nil return false, nil
} }
if partition <= 0 && set.Status.UpdateRevision != set.Status.CurrentRevision { if partition <= 0 && set.Status.UpdateRevision != set.Status.CurrentRevision {
Logf("Waiting for StatefulSet %s/%s to complete update", e2elog.Logf("Waiting for StatefulSet %s/%s to complete update",
set.Namespace, set.Namespace,
set.Name, set.Name,
) )
s.SortStatefulPods(pods) s.SortStatefulPods(pods)
for i := range pods.Items { for i := range pods.Items {
if pods.Items[i].Labels[apps.StatefulSetRevisionLabel] != set.Status.UpdateRevision { if pods.Items[i].Labels[apps.StatefulSetRevisionLabel] != set.Status.UpdateRevision {
Logf("Waiting for Pod %s/%s to have revision %s update revision %s", e2elog.Logf("Waiting for Pod %s/%s to have revision %s update revision %s",
pods.Items[i].Namespace, pods.Items[i].Namespace,
pods.Items[i].Name, pods.Items[i].Name,
set.Status.UpdateRevision, set.Status.UpdateRevision,
@ -471,7 +472,7 @@ func (s *StatefulSetTester) WaitForPartitionedRollingUpdate(set *apps.StatefulSe
} }
for i := int(*set.Spec.Replicas) - 1; i >= partition; i-- { for i := int(*set.Spec.Replicas) - 1; i >= partition; i-- {
if pods.Items[i].Labels[apps.StatefulSetRevisionLabel] != set.Status.UpdateRevision { if pods.Items[i].Labels[apps.StatefulSetRevisionLabel] != set.Status.UpdateRevision {
Logf("Waiting for Pod %s/%s to have revision %s update revision %s", e2elog.Logf("Waiting for Pod %s/%s to have revision %s update revision %s",
pods.Items[i].Namespace, pods.Items[i].Namespace,
pods.Items[i].Name, pods.Items[i].Name,
set.Status.UpdateRevision, set.Status.UpdateRevision,
@ -528,7 +529,7 @@ func (s *StatefulSetTester) BreakPodHTTPProbe(ss *apps.StatefulSet, pod *v1.Pod)
// Ignore 'mv' errors to make this idempotent. // Ignore 'mv' errors to make this idempotent.
cmd := fmt.Sprintf("mv -v /usr/share/nginx/html%v /tmp/ || true", path) cmd := fmt.Sprintf("mv -v /usr/share/nginx/html%v /tmp/ || true", path)
stdout, err := RunHostCmdWithRetries(pod.Namespace, pod.Name, cmd, StatefulSetPoll, StatefulPodTimeout) stdout, err := RunHostCmdWithRetries(pod.Namespace, pod.Name, cmd, StatefulSetPoll, StatefulPodTimeout)
Logf("stdout of %v on %v: %v", cmd, pod.Name, stdout) e2elog.Logf("stdout of %v on %v: %v", cmd, pod.Name, stdout)
return err return err
} }
@ -552,7 +553,7 @@ func (s *StatefulSetTester) RestorePodHTTPProbe(ss *apps.StatefulSet, pod *v1.Po
// Ignore 'mv' errors to make this idempotent. // Ignore 'mv' errors to make this idempotent.
cmd := fmt.Sprintf("mv -v /tmp%v /usr/share/nginx/html/ || true", path) cmd := fmt.Sprintf("mv -v /tmp%v /usr/share/nginx/html/ || true", path)
stdout, err := RunHostCmdWithRetries(pod.Namespace, pod.Name, cmd, StatefulSetPoll, StatefulPodTimeout) stdout, err := RunHostCmdWithRetries(pod.Namespace, pod.Name, cmd, StatefulSetPoll, StatefulPodTimeout)
Logf("stdout of %v on %v: %v", cmd, pod.Name, stdout) e2elog.Logf("stdout of %v on %v: %v", cmd, pod.Name, stdout)
return err return err
} }
@ -598,14 +599,14 @@ func (s *StatefulSetTester) ResumeNextPod(ss *apps.StatefulSet) {
} }
_, err := RunHostCmdWithRetries(pod.Namespace, pod.Name, "dd if=/dev/zero of=/data/statefulset-continue bs=1 count=1 conv=fsync", StatefulSetPoll, StatefulPodTimeout) _, err := RunHostCmdWithRetries(pod.Namespace, pod.Name, "dd if=/dev/zero of=/data/statefulset-continue bs=1 count=1 conv=fsync", StatefulSetPoll, StatefulPodTimeout)
ExpectNoError(err) ExpectNoError(err)
Logf("Resumed pod %v", pod.Name) e2elog.Logf("Resumed pod %v", pod.Name)
resumedPod = pod.Name resumedPod = pod.Name
} }
} }
// WaitForStatusReadyReplicas waits for the ss.Status.ReadyReplicas to be equal to expectedReplicas // WaitForStatusReadyReplicas waits for the ss.Status.ReadyReplicas to be equal to expectedReplicas
func (s *StatefulSetTester) WaitForStatusReadyReplicas(ss *apps.StatefulSet, expectedReplicas int32) { func (s *StatefulSetTester) WaitForStatusReadyReplicas(ss *apps.StatefulSet, expectedReplicas int32) {
Logf("Waiting for statefulset status.replicas updated to %d", expectedReplicas) e2elog.Logf("Waiting for statefulset status.replicas updated to %d", expectedReplicas)
ns, name := ss.Namespace, ss.Name ns, name := ss.Namespace, ss.Name
pollErr := wait.PollImmediate(StatefulSetPoll, StatefulSetTimeout, pollErr := wait.PollImmediate(StatefulSetPoll, StatefulSetTimeout,
@ -618,7 +619,7 @@ func (s *StatefulSetTester) WaitForStatusReadyReplicas(ss *apps.StatefulSet, exp
return false, nil return false, nil
} }
if ssGet.Status.ReadyReplicas != expectedReplicas { if ssGet.Status.ReadyReplicas != expectedReplicas {
Logf("Waiting for stateful set status.readyReplicas to become %d, currently %d", expectedReplicas, ssGet.Status.ReadyReplicas) e2elog.Logf("Waiting for stateful set status.readyReplicas to become %d, currently %d", expectedReplicas, ssGet.Status.ReadyReplicas)
return false, nil return false, nil
} }
return true, nil return true, nil
@ -630,7 +631,7 @@ func (s *StatefulSetTester) WaitForStatusReadyReplicas(ss *apps.StatefulSet, exp
// WaitForStatusReplicas waits for the ss.Status.Replicas to be equal to expectedReplicas // WaitForStatusReplicas waits for the ss.Status.Replicas to be equal to expectedReplicas
func (s *StatefulSetTester) WaitForStatusReplicas(ss *apps.StatefulSet, expectedReplicas int32) { func (s *StatefulSetTester) WaitForStatusReplicas(ss *apps.StatefulSet, expectedReplicas int32) {
Logf("Waiting for statefulset status.replicas updated to %d", expectedReplicas) e2elog.Logf("Waiting for statefulset status.replicas updated to %d", expectedReplicas)
ns, name := ss.Namespace, ss.Name ns, name := ss.Namespace, ss.Name
pollErr := wait.PollImmediate(StatefulSetPoll, StatefulSetTimeout, pollErr := wait.PollImmediate(StatefulSetPoll, StatefulSetTimeout,
@ -643,7 +644,7 @@ func (s *StatefulSetTester) WaitForStatusReplicas(ss *apps.StatefulSet, expected
return false, nil return false, nil
} }
if ssGet.Status.Replicas != expectedReplicas { if ssGet.Status.Replicas != expectedReplicas {
Logf("Waiting for stateful set status.replicas to become %d, currently %d", expectedReplicas, ssGet.Status.Replicas) e2elog.Logf("Waiting for stateful set status.replicas to become %d, currently %d", expectedReplicas, ssGet.Status.Replicas)
return false, nil return false, nil
} }
return true, nil return true, nil
@ -655,7 +656,7 @@ func (s *StatefulSetTester) WaitForStatusReplicas(ss *apps.StatefulSet, expected
// CheckServiceName asserts that the ServiceName for ss is equivalent to expectedServiceName. // CheckServiceName asserts that the ServiceName for ss is equivalent to expectedServiceName.
func (s *StatefulSetTester) CheckServiceName(ss *apps.StatefulSet, expectedServiceName string) error { func (s *StatefulSetTester) CheckServiceName(ss *apps.StatefulSet, expectedServiceName string) error {
Logf("Checking if statefulset spec.serviceName is %s", expectedServiceName) e2elog.Logf("Checking if statefulset spec.serviceName is %s", expectedServiceName)
if expectedServiceName != ss.Spec.ServiceName { if expectedServiceName != ss.Spec.ServiceName {
return fmt.Errorf("Wrong service name governing statefulset. Expected %s got %s", return fmt.Errorf("Wrong service name governing statefulset. Expected %s got %s",
@ -686,7 +687,7 @@ func DeleteAllStatefulSets(c clientset.Interface, ns string) {
errList = append(errList, fmt.Sprintf("%v", err)) errList = append(errList, fmt.Sprintf("%v", err))
} }
sst.WaitForStatusReplicas(ss, 0) sst.WaitForStatusReplicas(ss, 0)
Logf("Deleting statefulset %v", ss.Name) e2elog.Logf("Deleting statefulset %v", ss.Name)
// Use OrphanDependents=false so it's deleted synchronously. // Use OrphanDependents=false so it's deleted synchronously.
// We already made sure the Pods are gone inside Scale(). // We already made sure the Pods are gone inside Scale().
if err := c.AppsV1().StatefulSets(ss.Namespace).Delete(ss.Name, &metav1.DeleteOptions{OrphanDependents: new(bool)}); err != nil { if err := c.AppsV1().StatefulSets(ss.Namespace).Delete(ss.Name, &metav1.DeleteOptions{OrphanDependents: new(bool)}); err != nil {
@ -700,13 +701,13 @@ func DeleteAllStatefulSets(c clientset.Interface, ns string) {
pvcPollErr := wait.PollImmediate(StatefulSetPoll, StatefulSetTimeout, func() (bool, error) { pvcPollErr := wait.PollImmediate(StatefulSetPoll, StatefulSetTimeout, func() (bool, error) {
pvcList, err := c.CoreV1().PersistentVolumeClaims(ns).List(metav1.ListOptions{LabelSelector: labels.Everything().String()}) pvcList, err := c.CoreV1().PersistentVolumeClaims(ns).List(metav1.ListOptions{LabelSelector: labels.Everything().String()})
if err != nil { if err != nil {
Logf("WARNING: Failed to list pvcs, retrying %v", err) e2elog.Logf("WARNING: Failed to list pvcs, retrying %v", err)
return false, nil return false, nil
} }
for _, pvc := range pvcList.Items { for _, pvc := range pvcList.Items {
pvNames.Insert(pvc.Spec.VolumeName) pvNames.Insert(pvc.Spec.VolumeName)
// TODO: Double check that there are no pods referencing the pvc // TODO: Double check that there are no pods referencing the pvc
Logf("Deleting pvc: %v with volume %v", pvc.Name, pvc.Spec.VolumeName) e2elog.Logf("Deleting pvc: %v with volume %v", pvc.Name, pvc.Spec.VolumeName)
if err := c.CoreV1().PersistentVolumeClaims(ns).Delete(pvc.Name, nil); err != nil { if err := c.CoreV1().PersistentVolumeClaims(ns).Delete(pvc.Name, nil); err != nil {
return false, nil return false, nil
} }
@ -720,7 +721,7 @@ func DeleteAllStatefulSets(c clientset.Interface, ns string) {
pollErr := wait.PollImmediate(StatefulSetPoll, StatefulSetTimeout, func() (bool, error) { pollErr := wait.PollImmediate(StatefulSetPoll, StatefulSetTimeout, func() (bool, error) {
pvList, err := c.CoreV1().PersistentVolumes().List(metav1.ListOptions{LabelSelector: labels.Everything().String()}) pvList, err := c.CoreV1().PersistentVolumes().List(metav1.ListOptions{LabelSelector: labels.Everything().String()})
if err != nil { if err != nil {
Logf("WARNING: Failed to list pvs, retrying %v", err) e2elog.Logf("WARNING: Failed to list pvs, retrying %v", err)
return false, nil return false, nil
} }
waitingFor := []string{} waitingFor := []string{}
@ -732,7 +733,7 @@ func DeleteAllStatefulSets(c clientset.Interface, ns string) {
if len(waitingFor) == 0 { if len(waitingFor) == 0 {
return true, nil return true, nil
} }
Logf("Still waiting for pvs of statefulset to disappear:\n%v", strings.Join(waitingFor, "\n")) e2elog.Logf("Still waiting for pvs of statefulset to disappear:\n%v", strings.Join(waitingFor, "\n"))
return false, nil return false, nil
}) })
if pollErr != nil { if pollErr != nil {
@ -879,7 +880,7 @@ func UpdateStatefulSetWithRetries(c clientset.Interface, namespace, name string,
// Apply the update, then attempt to push it to the apiserver. // Apply the update, then attempt to push it to the apiserver.
applyUpdate(statefulSet) applyUpdate(statefulSet)
if statefulSet, err = statefulSets.Update(statefulSet); err == nil { if statefulSet, err = statefulSets.Update(statefulSet); err == nil {
Logf("Updating stateful set %s", name) e2elog.Logf("Updating stateful set %s", name)
return true, nil return true, nil
} }
updateErr = err updateErr = err

View File

@ -33,6 +33,7 @@ import (
cliflag "k8s.io/component-base/cli/flag" cliflag "k8s.io/component-base/cli/flag"
"k8s.io/klog" "k8s.io/klog"
kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config" kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
) )
const ( const (
@ -421,7 +422,7 @@ func AfterReadingAllFlags(t *TestContextType) {
if TestContext.Provider == "" { if TestContext.Provider == "" {
// Some users of the e2e.test binary pass --provider=. // Some users of the e2e.test binary pass --provider=.
// We need to support that, changing it would break those usages. // We need to support that, changing it would break those usages.
Logf("The --provider flag is not set. Continuing as if --provider=skeleton had been used.") e2elog.Logf("The --provider flag is not set. Continuing as if --provider=skeleton had been used.")
TestContext.Provider = "skeleton" TestContext.Provider = "skeleton"
} }

File diff suppressed because it is too large

View File

@ -13,6 +13,7 @@ go_library(
"//staging/src/k8s.io/apimachinery/pkg/util/rand:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/rand:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//test/e2e/framework:go_default_library", "//test/e2e/framework:go_default_library",
"//test/e2e/framework/log:go_default_library",
"//test/utils/image:go_default_library", "//test/utils/image:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library", "//vendor/github.com/onsi/ginkgo:go_default_library",
"//vendor/github.com/onsi/gomega:go_default_library", "//vendor/github.com/onsi/gomega:go_default_library",

View File

@ -52,6 +52,7 @@ import (
"k8s.io/apimachinery/pkg/util/rand" "k8s.io/apimachinery/pkg/util/rand"
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
imageutils "k8s.io/kubernetes/test/utils/image" imageutils "k8s.io/kubernetes/test/utils/image"
"github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
@ -259,7 +260,7 @@ func CreateStorageServer(cs clientset.Interface, config TestConfig) (pod *v1.Pod
gomega.Expect(pod).NotTo(gomega.BeNil(), "storage server pod should not be nil") gomega.Expect(pod).NotTo(gomega.BeNil(), "storage server pod should not be nil")
ip = pod.Status.PodIP ip = pod.Status.PodIP
gomega.Expect(len(ip)).NotTo(gomega.BeZero(), fmt.Sprintf("pod %s's IP should not be empty", pod.Name)) gomega.Expect(len(ip)).NotTo(gomega.BeZero(), fmt.Sprintf("pod %s's IP should not be empty", pod.Name))
framework.Logf("%s server pod IP address: %s", config.Prefix, ip) e2elog.Logf("%s server pod IP address: %s", config.Prefix, ip)
return pod, ip return pod, ip
} }
@ -353,7 +354,7 @@ func StartVolumeServer(client clientset.Interface, config TestConfig) *v1.Pod {
// ok if the server pod already exists. TODO: make this controllable by callers // ok if the server pod already exists. TODO: make this controllable by callers
if err != nil { if err != nil {
if apierrs.IsAlreadyExists(err) { if apierrs.IsAlreadyExists(err) {
framework.Logf("Ignore \"already-exists\" error, re-get pod...") e2elog.Logf("Ignore \"already-exists\" error, re-get pod...")
ginkgo.By(fmt.Sprintf("re-getting the %q server pod", serverPodName)) ginkgo.By(fmt.Sprintf("re-getting the %q server pod", serverPodName))
serverPod, err = podClient.Get(serverPodName, metav1.GetOptions{}) serverPod, err = podClient.Get(serverPodName, metav1.GetOptions{})
framework.ExpectNoError(err, "Cannot re-get the server pod %q: %v", serverPodName, err) framework.ExpectNoError(err, "Cannot re-get the server pod %q: %v", serverPodName, err)
@ -391,17 +392,17 @@ func CleanUpVolumeServerWithSecret(f *framework.Framework, serverPod *v1.Pod, se
ns := f.Namespace ns := f.Namespace
if secret != nil { if secret != nil {
framework.Logf("Deleting server secret %q...", secret.Name) e2elog.Logf("Deleting server secret %q...", secret.Name)
err := cs.CoreV1().Secrets(ns.Name).Delete(secret.Name, &metav1.DeleteOptions{}) err := cs.CoreV1().Secrets(ns.Name).Delete(secret.Name, &metav1.DeleteOptions{})
if err != nil { if err != nil {
framework.Logf("Delete secret failed: %v", err) e2elog.Logf("Delete secret failed: %v", err)
} }
} }
framework.Logf("Deleting server pod %q...", serverPod.Name) e2elog.Logf("Deleting server pod %q...", serverPod.Name)
err := framework.DeletePodWithWait(f, cs, serverPod) err := framework.DeletePodWithWait(f, cs, serverPod)
if err != nil { if err != nil {
framework.Logf("Server pod delete failed: %v", err) e2elog.Logf("Server pod delete failed: %v", err)
} }
} }
@ -630,7 +631,7 @@ func GenerateWriteandExecuteScriptFileCmd(content, fileName, filePath string) []
fullPath := filepath.Join(filePath, scriptName) fullPath := filepath.Join(filePath, scriptName)
cmd := "echo \"" + content + "\" > " + fullPath + "; .\\" + fullPath cmd := "echo \"" + content + "\" > " + fullPath + "; .\\" + fullPath
framework.Logf("generated pod command %s", cmd) e2elog.Logf("generated pod command %s", cmd)
return []string{"powershell", "/c", cmd} return []string{"powershell", "/c", cmd}
} }
scriptName := fmt.Sprintf("%s.sh", fileName) scriptName := fmt.Sprintf("%s.sh", fileName)

View File

@ -376,9 +376,9 @@ var _ = SIGDescribe("DNS", func() {
} }
testAgnhostPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(testAgnhostPod) testAgnhostPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(testAgnhostPod)
framework.ExpectNoError(err, "failed to create pod: %s", testAgnhostPod.Name) framework.ExpectNoError(err, "failed to create pod: %s", testAgnhostPod.Name)
framework.Logf("Created pod %v", testAgnhostPod) e2elog.Logf("Created pod %v", testAgnhostPod)
defer func() { defer func() {
framework.Logf("Deleting pod %s...", testAgnhostPod.Name) e2elog.Logf("Deleting pod %s...", testAgnhostPod.Name)
if err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(testAgnhostPod.Name, metav1.NewDeleteOptions(0)); err != nil { if err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(testAgnhostPod.Name, metav1.NewDeleteOptions(0)); err != nil {
framework.Failf("ginkgo.Failed to delete pod %s: %v", testAgnhostPod.Name, err) framework.Failf("ginkgo.Failed to delete pod %s: %v", testAgnhostPod.Name, err)
} }


@ -250,7 +250,7 @@ func StartJob(f *framework.Framework, completions int32) {
ns := f.Namespace.Name ns := f.Namespace.Name
_, err := jobutil.CreateJob(f.ClientSet, ns, testJob) _, err := jobutil.CreateJob(f.ClientSet, ns, testJob)
framework.ExpectNoError(err) framework.ExpectNoError(err)
framework.Logf("Created job %v", testJob) e2elog.Logf("Created job %v", testJob)
} }
// VerifyJobNCompletions verifies that the job has completions number of successful pods // VerifyJobNCompletions verifies that the job has completions number of successful pods
@ -260,7 +260,7 @@ func VerifyJobNCompletions(f *framework.Framework, completions int32) {
framework.ExpectNoError(err) framework.ExpectNoError(err)
createdPods := pods.Items createdPods := pods.Items
createdPodNames := podNames(createdPods) createdPodNames := podNames(createdPods)
framework.Logf("Got the following pods for job cuda-add: %v", createdPodNames) e2elog.Logf("Got the following pods for job cuda-add: %v", createdPodNames)
successes := int32(0) successes := int32(0)
for _, podName := range createdPodNames { for _, podName := range createdPodNames {


@ -48,6 +48,7 @@ go_library(
"//test/e2e/common:go_default_library", "//test/e2e/common:go_default_library",
"//test/e2e/framework:go_default_library", "//test/e2e/framework:go_default_library",
"//test/e2e/framework/gpu:go_default_library", "//test/e2e/framework/gpu:go_default_library",
"//test/e2e/framework/log:go_default_library",
"//test/e2e/framework/metrics:go_default_library", "//test/e2e/framework/metrics:go_default_library",
"//test/utils/image:go_default_library", "//test/utils/image:go_default_library",
"//vendor/github.com/blang/semver:go_default_library", "//vendor/github.com/blang/semver:go_default_library",
@ -157,6 +158,7 @@ go_test(
"//staging/src/k8s.io/cri-api/pkg/apis/runtime/v1alpha2:go_default_library", "//staging/src/k8s.io/cri-api/pkg/apis/runtime/v1alpha2:go_default_library",
"//test/e2e/common:go_default_library", "//test/e2e/common:go_default_library",
"//test/e2e/framework:go_default_library", "//test/e2e/framework:go_default_library",
"//test/e2e/framework/log:go_default_library",
"//test/e2e/framework/metrics:go_default_library", "//test/e2e/framework/metrics:go_default_library",
"//test/e2e/framework/volume:go_default_library", "//test/e2e/framework/volume:go_default_library",
"//test/e2e_node/perf/workloads:go_default_library", "//test/e2e_node/perf/workloads:go_default_library",


@ -29,6 +29,7 @@ import (
"k8s.io/api/core/v1" "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
"k8s.io/kubernetes/test/e2e/perftype" "k8s.io/kubernetes/test/e2e/perftype"
nodeperftype "k8s.io/kubernetes/test/e2e_node/perftype" nodeperftype "k8s.io/kubernetes/test/e2e_node/perftype"
@ -46,9 +47,9 @@ func dumpDataToFile(data interface{}, labels map[string]string, prefix string) {
testName := labels["test"] testName := labels["test"]
fileName := path.Join(framework.TestContext.ReportDir, fmt.Sprintf("%s-%s-%s.json", prefix, framework.TestContext.ReportPrefix, testName)) fileName := path.Join(framework.TestContext.ReportDir, fmt.Sprintf("%s-%s-%s.json", prefix, framework.TestContext.ReportPrefix, testName))
labels["timestamp"] = strconv.FormatInt(time.Now().UTC().Unix(), 10) labels["timestamp"] = strconv.FormatInt(time.Now().UTC().Unix(), 10)
framework.Logf("Dumping perf data for test %q to %q.", testName, fileName) e2elog.Logf("Dumping perf data for test %q to %q.", testName, fileName)
if err := ioutil.WriteFile(fileName, []byte(framework.PrettyPrintJSON(data)), 0644); err != nil { if err := ioutil.WriteFile(fileName, []byte(framework.PrettyPrintJSON(data)), 0644); err != nil {
framework.Logf("Failed to write perf data for test %q to %q: %v", testName, fileName, err) e2elog.Logf("Failed to write perf data for test %q to %q: %v", testName, fileName, err)
} }
} }
@ -82,7 +83,7 @@ func logDensityTimeSeries(rc *ResourceCollector, create, watch map[string]metav1
timeSeries.ResourceData = rc.GetResourceTimeSeries() timeSeries.ResourceData = rc.GetResourceTimeSeries()
if framework.TestContext.ReportDir == "" { if framework.TestContext.ReportDir == "" {
framework.Logf("%s %s\n%s", TimeSeriesTag, framework.PrettyPrintJSON(timeSeries), TimeSeriesEnd) e2elog.Logf("%s %s\n%s", TimeSeriesTag, framework.PrettyPrintJSON(timeSeries), TimeSeriesEnd)
return return
} }
dumpDataToFile(timeSeries, timeSeries.Labels, "time_series") dumpDataToFile(timeSeries, timeSeries.Labels, "time_series")
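The hunks above show the pattern this commit repeats for every file it touches: declare the //test/e2e/framework/log dependency in the package's BUILD rule, import the package under the e2elog alias, and replace framework.Logf call sites with e2elog.Logf while leaving the Printf-style format strings and arguments untouched. A minimal sketch of the resulting code, with a hypothetical helper name chosen only for illustration:

package example

import (
	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
)

// logServerPodIP is a hypothetical helper that mirrors the call sites above:
// the format string and arguments stay the same, only the package that
// provides Logf changes from framework to e2elog.
func logServerPodIP(prefix, ip string) {
	e2elog.Logf("%s server pod IP address: %s", prefix, ip)
}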


@ -33,6 +33,7 @@ import (
"k8s.io/apimachinery/pkg/util/uuid" "k8s.io/apimachinery/pkg/util/uuid"
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
. "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" . "github.com/onsi/gomega"
@ -161,9 +162,9 @@ var _ = framework.KubeDescribe("Container Manager Misc [Serial]", func() {
}, },
}) })
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
framework.Logf("Running containers:") e2elog.Logf("Running containers:")
for _, c := range containers { for _, c := range containers {
framework.Logf("%+v", c) e2elog.Logf("%+v", c)
} }
} }
}) })


@ -35,6 +35,7 @@ import (
stats "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1" stats "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1"
kubemetrics "k8s.io/kubernetes/pkg/kubelet/metrics" kubemetrics "k8s.io/kubernetes/pkg/kubelet/metrics"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
"k8s.io/kubernetes/test/e2e/framework/metrics" "k8s.io/kubernetes/test/e2e/framework/metrics"
imageutils "k8s.io/kubernetes/test/utils/image" imageutils "k8s.io/kubernetes/test/utils/image"
@ -198,7 +199,7 @@ var _ = framework.KubeDescribe("Density [Serial] [Slow]", func() {
// Here we set API QPS limit from default 5 to 60 in order to test real Kubelet performance. // Here we set API QPS limit from default 5 to 60 in order to test real Kubelet performance.
// Note that it will cause higher resource usage. // Note that it will cause higher resource usage.
tempSetCurrentKubeletConfig(f, func(cfg *kubeletconfig.KubeletConfiguration) { tempSetCurrentKubeletConfig(f, func(cfg *kubeletconfig.KubeletConfiguration) {
framework.Logf("Old QPS limit is: %d", cfg.KubeAPIQPS) e2elog.Logf("Old QPS limit is: %d", cfg.KubeAPIQPS)
// Set new API QPS limit // Set new API QPS limit
cfg.KubeAPIQPS = int32(itArg.APIQPSLimit) cfg.KubeAPIQPS = int32(itArg.APIQPSLimit)
}) })
@ -539,7 +540,7 @@ func logAndVerifyLatency(batchLag time.Duration, e2eLags []framework.PodLatencyD
// TODO(coufon): do not trust 'kubelet' metrics since they are not reset! // TODO(coufon): do not trust 'kubelet' metrics since they are not reset!
latencyMetrics, _ := getPodStartLatency(kubeletAddr) latencyMetrics, _ := getPodStartLatency(kubeletAddr)
framework.Logf("Kubelet Prometheus metrics (not reset):\n%s", framework.PrettyPrintJSON(latencyMetrics)) e2elog.Logf("Kubelet Prometheus metrics (not reset):\n%s", framework.PrettyPrintJSON(latencyMetrics))
podStartupLatency := framework.ExtractLatencyMetrics(e2eLags) podStartupLatency := framework.ExtractLatencyMetrics(e2eLags)


@ -31,6 +31,7 @@ import (
"k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/features"
kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config" kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
pluginapi "k8s.io/kubernetes/pkg/kubelet/apis/deviceplugin/v1beta1" pluginapi "k8s.io/kubernetes/pkg/kubelet/apis/deviceplugin/v1beta1"
dm "k8s.io/kubernetes/pkg/kubelet/cm/devicemanager" dm "k8s.io/kubernetes/pkg/kubelet/cm/devicemanager"
@ -69,7 +70,7 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
} }
socketPath := pluginSockDir + "dp." + fmt.Sprintf("%d", time.Now().Unix()) socketPath := pluginSockDir + "dp." + fmt.Sprintf("%d", time.Now().Unix())
framework.Logf("socketPath %v", socketPath) e2elog.Logf("socketPath %v", socketPath)
dp1 := dm.NewDevicePluginStub(devs, socketPath, resourceName, false) dp1 := dm.NewDevicePluginStub(devs, socketPath, resourceName, false)
dp1.SetAllocFunc(stubAllocFunc) dp1.SetAllocFunc(stubAllocFunc)
@ -257,7 +258,7 @@ func ensurePodContainerRestart(f *framework.Framework, podName string, contName
return false return false
} }
currentCount = p.Status.ContainerStatuses[0].RestartCount currentCount = p.Status.ContainerStatuses[0].RestartCount
framework.Logf("initial %v, current %v", initialCount, currentCount) e2elog.Logf("initial %v, current %v", initialCount, currentCount)
return currentCount > initialCount return currentCount > initialCount
}, 5*time.Minute, framework.Poll).Should(BeTrue()) }, 5*time.Minute, framework.Poll).Should(BeTrue())
} }
@ -269,7 +270,7 @@ func parseLog(f *framework.Framework, podName string, contName string, re string
framework.Failf("GetPodLogs for pod %q failed: %v", podName, err) framework.Failf("GetPodLogs for pod %q failed: %v", podName, err)
} }
framework.Logf("got pod logs: %v", logs) e2elog.Logf("got pod logs: %v", logs)
regex := regexp.MustCompile(re) regex := regexp.MustCompile(re)
matches := regex.FindStringSubmatch(logs) matches := regex.FindStringSubmatch(logs)
if len(matches) < 2 { if len(matches) < 2 {


@ -33,6 +33,7 @@ import (
"k8s.io/apimachinery/pkg/util/uuid" "k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
imageutils "k8s.io/kubernetes/test/utils/image" imageutils "k8s.io/kubernetes/test/utils/image"
) )
@ -91,7 +92,7 @@ var _ = SIGDescribe("Dockershim [Serial] [Disruptive] [Feature:Docker][Legacy:Do
if len(filename) == 0 { if len(filename) == 0 {
continue continue
} }
framework.Logf("Removing checkpoint %q", filename) e2elog.Logf("Removing checkpoint %q", filename)
_, err := exec.Command("sudo", "rm", filename).CombinedOutput() _, err := exec.Command("sudo", "rm", filename).CombinedOutput()
framework.ExpectNoError(err, "Failed to remove checkpoint file %q: %v", string(filename), err) framework.ExpectNoError(err, "Failed to remove checkpoint file %q: %v", string(filename), err)
} }
@ -175,7 +176,7 @@ func runPodCheckpointTest(f *framework.Framework, podName string, twist func())
if len(checkpoints) == 0 { if len(checkpoints) == 0 {
return true, nil return true, nil
} }
framework.Logf("Checkpoint of %q still exists: %v", podName, checkpoints) e2elog.Logf("Checkpoint of %q still exists: %v", podName, checkpoints)
return false, nil return false, nil
}); err != nil { }); err != nil {
framework.Failf("Failed to observe checkpoint being removed within timeout: %v", err) framework.Failf("Failed to observe checkpoint being removed within timeout: %v", err)
@ -212,7 +213,7 @@ func findCheckpoints(match string) []string {
checkpoints := []string{} checkpoints := []string{}
stdout, err := exec.Command("sudo", "grep", "-rl", match, framework.TestContext.DockershimCheckpointDir).CombinedOutput() stdout, err := exec.Command("sudo", "grep", "-rl", match, framework.TestContext.DockershimCheckpointDir).CombinedOutput()
if err != nil { if err != nil {
framework.Logf("grep from dockershim checkpoint directory returns error: %v", err) e2elog.Logf("grep from dockershim checkpoint directory returns error: %v", err)
} }
if stdout == nil { if stdout == nil {
return checkpoints return checkpoints


@ -36,6 +36,7 @@ import (
kubeletmetrics "k8s.io/kubernetes/pkg/kubelet/metrics" kubeletmetrics "k8s.io/kubernetes/pkg/kubelet/metrics"
kubetypes "k8s.io/kubernetes/pkg/kubelet/types" kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
testutils "k8s.io/kubernetes/test/utils" testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image" imageutils "k8s.io/kubernetes/test/utils/image"
@ -479,9 +480,9 @@ func runEvictionTest(f *framework.Framework, pressureTimeout time.Duration, expe
Eventually(func() error { Eventually(func() error {
if expectedNodeCondition != noPressure { if expectedNodeCondition != noPressure {
if hasNodeCondition(f, expectedNodeCondition) { if hasNodeCondition(f, expectedNodeCondition) {
framework.Logf("Node has %s", expectedNodeCondition) e2elog.Logf("Node has %s", expectedNodeCondition)
} else { } else {
framework.Logf("Node does NOT have %s", expectedNodeCondition) e2elog.Logf("Node does NOT have %s", expectedNodeCondition)
} }
} }
logKubeletLatencyMetrics(kubeletmetrics.EvictionStatsAgeKey) logKubeletLatencyMetrics(kubeletmetrics.EvictionStatsAgeKey)
@ -568,7 +569,7 @@ func verifyEvictionOrdering(f *framework.Framework, testSpecs []podEvictSpec) er
} }
updatedPods := updatedPodList.Items updatedPods := updatedPodList.Items
for _, p := range updatedPods { for _, p := range updatedPods {
framework.Logf("fetching pod %s; phase= %v", p.Name, p.Status.Phase) e2elog.Logf("fetching pod %s; phase= %v", p.Name, p.Status.Phase)
} }
By("checking eviction ordering and ensuring important pods dont fail") By("checking eviction ordering and ensuring important pods dont fail")
@ -689,25 +690,25 @@ func hasNodeCondition(f *framework.Framework, expectedNodeCondition v1.NodeCondi
func logInodeMetrics() { func logInodeMetrics() {
summary, err := getNodeSummary() summary, err := getNodeSummary()
if err != nil { if err != nil {
framework.Logf("Error getting summary: %v", err) e2elog.Logf("Error getting summary: %v", err)
return return
} }
if summary.Node.Runtime != nil && summary.Node.Runtime.ImageFs != nil && summary.Node.Runtime.ImageFs.Inodes != nil && summary.Node.Runtime.ImageFs.InodesFree != nil { if summary.Node.Runtime != nil && summary.Node.Runtime.ImageFs != nil && summary.Node.Runtime.ImageFs.Inodes != nil && summary.Node.Runtime.ImageFs.InodesFree != nil {
framework.Logf("imageFsInfo.Inodes: %d, imageFsInfo.InodesFree: %d", *summary.Node.Runtime.ImageFs.Inodes, *summary.Node.Runtime.ImageFs.InodesFree) e2elog.Logf("imageFsInfo.Inodes: %d, imageFsInfo.InodesFree: %d", *summary.Node.Runtime.ImageFs.Inodes, *summary.Node.Runtime.ImageFs.InodesFree)
} }
if summary.Node.Fs != nil && summary.Node.Fs.Inodes != nil && summary.Node.Fs.InodesFree != nil { if summary.Node.Fs != nil && summary.Node.Fs.Inodes != nil && summary.Node.Fs.InodesFree != nil {
framework.Logf("rootFsInfo.Inodes: %d, rootFsInfo.InodesFree: %d", *summary.Node.Fs.Inodes, *summary.Node.Fs.InodesFree) e2elog.Logf("rootFsInfo.Inodes: %d, rootFsInfo.InodesFree: %d", *summary.Node.Fs.Inodes, *summary.Node.Fs.InodesFree)
} }
for _, pod := range summary.Pods { for _, pod := range summary.Pods {
framework.Logf("Pod: %s", pod.PodRef.Name) e2elog.Logf("Pod: %s", pod.PodRef.Name)
for _, container := range pod.Containers { for _, container := range pod.Containers {
if container.Rootfs != nil && container.Rootfs.InodesUsed != nil { if container.Rootfs != nil && container.Rootfs.InodesUsed != nil {
framework.Logf("--- summary Container: %s inodeUsage: %d", container.Name, *container.Rootfs.InodesUsed) e2elog.Logf("--- summary Container: %s inodeUsage: %d", container.Name, *container.Rootfs.InodesUsed)
} }
} }
for _, volume := range pod.VolumeStats { for _, volume := range pod.VolumeStats {
if volume.FsStats.InodesUsed != nil { if volume.FsStats.InodesUsed != nil {
framework.Logf("--- summary Volume: %s inodeUsage: %d", volume.Name, *volume.FsStats.InodesUsed) e2elog.Logf("--- summary Volume: %s inodeUsage: %d", volume.Name, *volume.FsStats.InodesUsed)
} }
} }
} }
@ -716,25 +717,25 @@ func logInodeMetrics() {
func logDiskMetrics() { func logDiskMetrics() {
summary, err := getNodeSummary() summary, err := getNodeSummary()
if err != nil { if err != nil {
framework.Logf("Error getting summary: %v", err) e2elog.Logf("Error getting summary: %v", err)
return return
} }
if summary.Node.Runtime != nil && summary.Node.Runtime.ImageFs != nil && summary.Node.Runtime.ImageFs.CapacityBytes != nil && summary.Node.Runtime.ImageFs.AvailableBytes != nil { if summary.Node.Runtime != nil && summary.Node.Runtime.ImageFs != nil && summary.Node.Runtime.ImageFs.CapacityBytes != nil && summary.Node.Runtime.ImageFs.AvailableBytes != nil {
framework.Logf("imageFsInfo.CapacityBytes: %d, imageFsInfo.AvailableBytes: %d", *summary.Node.Runtime.ImageFs.CapacityBytes, *summary.Node.Runtime.ImageFs.AvailableBytes) e2elog.Logf("imageFsInfo.CapacityBytes: %d, imageFsInfo.AvailableBytes: %d", *summary.Node.Runtime.ImageFs.CapacityBytes, *summary.Node.Runtime.ImageFs.AvailableBytes)
} }
if summary.Node.Fs != nil && summary.Node.Fs.CapacityBytes != nil && summary.Node.Fs.AvailableBytes != nil { if summary.Node.Fs != nil && summary.Node.Fs.CapacityBytes != nil && summary.Node.Fs.AvailableBytes != nil {
framework.Logf("rootFsInfo.CapacityBytes: %d, rootFsInfo.AvailableBytes: %d", *summary.Node.Fs.CapacityBytes, *summary.Node.Fs.AvailableBytes) e2elog.Logf("rootFsInfo.CapacityBytes: %d, rootFsInfo.AvailableBytes: %d", *summary.Node.Fs.CapacityBytes, *summary.Node.Fs.AvailableBytes)
} }
for _, pod := range summary.Pods { for _, pod := range summary.Pods {
framework.Logf("Pod: %s", pod.PodRef.Name) e2elog.Logf("Pod: %s", pod.PodRef.Name)
for _, container := range pod.Containers { for _, container := range pod.Containers {
if container.Rootfs != nil && container.Rootfs.UsedBytes != nil { if container.Rootfs != nil && container.Rootfs.UsedBytes != nil {
framework.Logf("--- summary Container: %s UsedBytes: %d", container.Name, *container.Rootfs.UsedBytes) e2elog.Logf("--- summary Container: %s UsedBytes: %d", container.Name, *container.Rootfs.UsedBytes)
} }
} }
for _, volume := range pod.VolumeStats { for _, volume := range pod.VolumeStats {
if volume.FsStats.UsedBytes != nil { if volume.FsStats.UsedBytes != nil {
framework.Logf("--- summary Volume: %s UsedBytes: %d", volume.Name, *volume.FsStats.UsedBytes) e2elog.Logf("--- summary Volume: %s UsedBytes: %d", volume.Name, *volume.FsStats.UsedBytes)
} }
} }
} }
@ -743,22 +744,22 @@ func logDiskMetrics() {
func logMemoryMetrics() { func logMemoryMetrics() {
summary, err := getNodeSummary() summary, err := getNodeSummary()
if err != nil { if err != nil {
framework.Logf("Error getting summary: %v", err) e2elog.Logf("Error getting summary: %v", err)
return return
} }
if summary.Node.Memory != nil && summary.Node.Memory.WorkingSetBytes != nil && summary.Node.Memory.AvailableBytes != nil { if summary.Node.Memory != nil && summary.Node.Memory.WorkingSetBytes != nil && summary.Node.Memory.AvailableBytes != nil {
framework.Logf("Node.Memory.WorkingSetBytes: %d, Node.Memory.AvailableBytes: %d", *summary.Node.Memory.WorkingSetBytes, *summary.Node.Memory.AvailableBytes) e2elog.Logf("Node.Memory.WorkingSetBytes: %d, Node.Memory.AvailableBytes: %d", *summary.Node.Memory.WorkingSetBytes, *summary.Node.Memory.AvailableBytes)
} }
for _, sysContainer := range summary.Node.SystemContainers { for _, sysContainer := range summary.Node.SystemContainers {
if sysContainer.Name == stats.SystemContainerPods && sysContainer.Memory != nil && sysContainer.Memory.WorkingSetBytes != nil && sysContainer.Memory.AvailableBytes != nil { if sysContainer.Name == stats.SystemContainerPods && sysContainer.Memory != nil && sysContainer.Memory.WorkingSetBytes != nil && sysContainer.Memory.AvailableBytes != nil {
framework.Logf("Allocatable.Memory.WorkingSetBytes: %d, Allocatable.Memory.AvailableBytes: %d", *sysContainer.Memory.WorkingSetBytes, *sysContainer.Memory.AvailableBytes) e2elog.Logf("Allocatable.Memory.WorkingSetBytes: %d, Allocatable.Memory.AvailableBytes: %d", *sysContainer.Memory.WorkingSetBytes, *sysContainer.Memory.AvailableBytes)
} }
} }
for _, pod := range summary.Pods { for _, pod := range summary.Pods {
framework.Logf("Pod: %s", pod.PodRef.Name) e2elog.Logf("Pod: %s", pod.PodRef.Name)
for _, container := range pod.Containers { for _, container := range pod.Containers {
if container.Memory != nil && container.Memory.WorkingSetBytes != nil { if container.Memory != nil && container.Memory.WorkingSetBytes != nil {
framework.Logf("--- summary Container: %s WorkingSetBytes: %d", container.Name, *container.Memory.WorkingSetBytes) e2elog.Logf("--- summary Container: %s WorkingSetBytes: %d", container.Name, *container.Memory.WorkingSetBytes)
} }
} }
} }
@ -767,11 +768,11 @@ func logMemoryMetrics() {
func logPidMetrics() { func logPidMetrics() {
summary, err := getNodeSummary() summary, err := getNodeSummary()
if err != nil { if err != nil {
framework.Logf("Error getting summary: %v", err) e2elog.Logf("Error getting summary: %v", err)
return return
} }
if summary.Node.Rlimit != nil && summary.Node.Rlimit.MaxPID != nil && summary.Node.Rlimit.NumOfRunningProcesses != nil { if summary.Node.Rlimit != nil && summary.Node.Rlimit.MaxPID != nil && summary.Node.Rlimit.NumOfRunningProcesses != nil {
framework.Logf("Node.Rlimit.MaxPID: %d, Node.Rlimit.RunningProcesses: %d", *summary.Node.Rlimit.MaxPID, *summary.Node.Rlimit.NumOfRunningProcesses) e2elog.Logf("Node.Rlimit.MaxPID: %d, Node.Rlimit.RunningProcesses: %d", *summary.Node.Rlimit.MaxPID, *summary.Node.Rlimit.NumOfRunningProcesses)
} }
} }


@ -26,6 +26,7 @@ import (
kubeletmetrics "k8s.io/kubernetes/pkg/kubelet/metrics" kubeletmetrics "k8s.io/kubernetes/pkg/kubelet/metrics"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/framework/gpu" "k8s.io/kubernetes/test/e2e/framework/gpu"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
"k8s.io/kubernetes/test/e2e/framework/metrics" "k8s.io/kubernetes/test/e2e/framework/metrics"
. "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo"
@ -142,7 +143,7 @@ func checkIfNvidiaGPUsExistOnNode() bool {
// Cannot use `lspci` because it is not installed on all distros by default. // Cannot use `lspci` because it is not installed on all distros by default.
err := exec.Command("/bin/sh", "-c", "find /sys/devices/pci* -type f | grep vendor | xargs cat | grep 0x10de").Run() err := exec.Command("/bin/sh", "-c", "find /sys/devices/pci* -type f | grep vendor | xargs cat | grep 0x10de").Run()
if err != nil { if err != nil {
framework.Logf("check for nvidia GPUs failed. Got Error: %v", err) e2elog.Logf("check for nvidia GPUs failed. Got Error: %v", err)
return false return false
} }
return true return true
@ -163,14 +164,14 @@ func logDevicePluginMetrics() {
if quantile, err = strconv.ParseFloat(string(val), 64); err != nil { if quantile, err = strconv.ParseFloat(string(val), 64); err != nil {
continue continue
} }
framework.Logf("Metric: %v ResourceName: %v Quantile: %v Latency: %v", msKey, resource, quantile, latency) e2elog.Logf("Metric: %v ResourceName: %v Quantile: %v Latency: %v", msKey, resource, quantile, latency)
} }
} }
case kubeletmetrics.KubeletSubsystem + "_" + kubeletmetrics.DevicePluginRegistrationCountKey: case kubeletmetrics.KubeletSubsystem + "_" + kubeletmetrics.DevicePluginRegistrationCountKey:
for _, sample := range samples { for _, sample := range samples {
resource := string(sample.Metric["resource_name"]) resource := string(sample.Metric["resource_name"])
count := sample.Value count := sample.Value
framework.Logf("Metric: %v ResourceName: %v Count: %v", msKey, resource, count) e2elog.Logf("Metric: %v ResourceName: %v Count: %v", msKey, resource, count)
} }
} }
} }

View File

@ -31,6 +31,7 @@ import (
kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config" kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
"k8s.io/kubernetes/pkg/kubelet/cm" "k8s.io/kubernetes/pkg/kubelet/cm"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
imageutils "k8s.io/kubernetes/test/utils/image" imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo"
@ -50,7 +51,7 @@ func makePodToVerifyHugePages(baseName string, hugePagesLimit resource.Quantity)
// this command takes the expected value and compares it against the actual value for the pod cgroup hugetlb.2MB.limit_in_bytes // this command takes the expected value and compares it against the actual value for the pod cgroup hugetlb.2MB.limit_in_bytes
command := fmt.Sprintf("expected=%v; actual=$(cat /tmp/hugetlb/%v/hugetlb.2MB.limit_in_bytes); if [ \"$expected\" -ne \"$actual\" ]; then exit 1; fi; ", hugePagesLimit.Value(), cgroupFsName) command := fmt.Sprintf("expected=%v; actual=$(cat /tmp/hugetlb/%v/hugetlb.2MB.limit_in_bytes); if [ \"$expected\" -ne \"$actual\" ]; then exit 1; fi; ", hugePagesLimit.Value(), cgroupFsName)
framework.Logf("Pod to run command: %v", command) e2elog.Logf("Pod to run command: %v", command)
pod := &apiv1.Pod{ pod := &apiv1.Pod{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: "pod" + string(uuid.NewUUID()), Name: "pod" + string(uuid.NewUUID()),
@ -119,7 +120,7 @@ func configureHugePages() error {
if err != nil { if err != nil {
return err return err
} }
framework.Logf("HugePages_Total is set to %v", numHugePages) e2elog.Logf("HugePages_Total is set to %v", numHugePages)
if numHugePages == 50 { if numHugePages == 50 {
return nil return nil
} }
@ -145,7 +146,7 @@ func pollResourceAsString(f *framework.Framework, resourceName string) string {
node, err := f.ClientSet.CoreV1().Nodes().Get(framework.TestContext.NodeName, metav1.GetOptions{}) node, err := f.ClientSet.CoreV1().Nodes().Get(framework.TestContext.NodeName, metav1.GetOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
amount := amountOfResourceAsString(node, resourceName) amount := amountOfResourceAsString(node, resourceName)
framework.Logf("amount of %v: %v", resourceName, amount) e2elog.Logf("amount of %v: %v", resourceName, amount)
return amount return amount
} }

View File

@ -24,6 +24,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config" kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
"k8s.io/kubernetes/test/e2e_node/perf/workloads" "k8s.io/kubernetes/test/e2e_node/perf/workloads"
. "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo"
@ -96,7 +97,7 @@ var _ = SIGDescribe("Node Performance Testing [Serial] [Slow]", func() {
framework.ExpectNoError(err) framework.ExpectNoError(err)
perf, err := wl.ExtractPerformanceFromLogs(podLogs) perf, err := wl.ExtractPerformanceFromLogs(podLogs)
framework.ExpectNoError(err) framework.ExpectNoError(err)
framework.Logf("Time to complete workload %s: %v", wl.Name(), perf) e2elog.Logf("Time to complete workload %s: %v", wl.Name(), perf)
} }
Context("Run node performance testing with pre-defined workloads", func() { Context("Run node performance testing with pre-defined workloads", func() {


@ -34,6 +34,7 @@ import (
coreclientset "k8s.io/client-go/kubernetes/typed/core/v1" coreclientset "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/kubernetes/pkg/kubelet/util" "k8s.io/kubernetes/pkg/kubelet/util"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
testutils "k8s.io/kubernetes/test/utils" testutils "k8s.io/kubernetes/test/utils"
. "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo"
@ -370,7 +371,7 @@ var _ = framework.KubeDescribe("NodeProblemDetector [NodeFeature:NodeProblemDete
By("Get node problem detector log") By("Get node problem detector log")
log, err := framework.GetPodLogs(c, ns, name, name) log, err := framework.GetPodLogs(c, ns, name, name)
Expect(err).ShouldNot(HaveOccurred()) Expect(err).ShouldNot(HaveOccurred())
framework.Logf("Node Problem Detector logs:\n %s", log) e2elog.Logf("Node Problem Detector logs:\n %s", log)
} }
By("Delete the node problem detector") By("Delete the node problem detector")
f.PodClient().Delete(name, metav1.NewDeleteOptions(0)) f.PodClient().Delete(name, metav1.NewDeleteOptions(0))


@ -28,6 +28,7 @@ import (
kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config" kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
"k8s.io/kubernetes/pkg/kubelet/cm" "k8s.io/kubernetes/pkg/kubelet/cm"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
imageutils "k8s.io/kubernetes/test/utils/image" imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo"
@ -47,7 +48,7 @@ func makePodToVerifyPids(baseName string, pidsLimit resource.Quantity) *apiv1.Po
// this command takes the expected value and compares it against the actual value for the pod cgroup pids.max // this command takes the expected value and compares it against the actual value for the pod cgroup pids.max
command := fmt.Sprintf("expected=%v; actual=$(cat /tmp/pids/%v/pids.max); if [ \"$expected\" -ne \"$actual\" ]; then exit 1; fi; ", pidsLimit.Value(), cgroupFsName) command := fmt.Sprintf("expected=%v; actual=$(cat /tmp/pids/%v/pids.max); if [ \"$expected\" -ne \"$actual\" ]; then exit 1; fi; ", pidsLimit.Value(), cgroupFsName)
framework.Logf("Pod to run command: %v", command) e2elog.Logf("Pod to run command: %v", command)
pod := &apiv1.Pod{ pod := &apiv1.Pod{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: "pod" + string(uuid.NewUUID()), Name: "pod" + string(uuid.NewUUID()),


@ -43,6 +43,7 @@ import (
stats "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1" stats "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1"
"k8s.io/kubernetes/pkg/util/procfs" "k8s.io/kubernetes/pkg/util/procfs"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
"k8s.io/kubernetes/test/e2e_node/perftype" "k8s.io/kubernetes/test/e2e_node/perftype"
. "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo"
@ -143,9 +144,9 @@ func (r *ResourceCollector) GetCPUSummary() framework.ContainersCPUSummary {
func (r *ResourceCollector) LogLatest() { func (r *ResourceCollector) LogLatest() {
summary, err := r.GetLatest() summary, err := r.GetLatest()
if err != nil { if err != nil {
framework.Logf("%v", err) e2elog.Logf("%v", err)
} }
framework.Logf("%s", formatResourceUsageStats(summary)) e2elog.Logf("%s", formatResourceUsageStats(summary))
} }
// collectStats collects resource usage from Cadvisor. // collectStats collects resource usage from Cadvisor.
@ -153,12 +154,12 @@ func (r *ResourceCollector) collectStats(oldStatsMap map[string]*cadvisorapiv2.C
for _, name := range systemContainers { for _, name := range systemContainers {
ret, err := r.client.Stats(name, r.request) ret, err := r.client.Stats(name, r.request)
if err != nil { if err != nil {
framework.Logf("Error getting container stats, err: %v", err) e2elog.Logf("Error getting container stats, err: %v", err)
return return
} }
cStats, ok := ret[name] cStats, ok := ret[name]
if !ok { if !ok {
framework.Logf("Missing info/stats for container %q", name) e2elog.Logf("Missing info/stats for container %q", name)
return return
} }


@ -23,6 +23,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/kubelet/apis/resourcemetrics/v1alpha1" "k8s.io/kubernetes/pkg/kubelet/apis/resourcemetrics/v1alpha1"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
"k8s.io/kubernetes/test/e2e/framework/metrics" "k8s.io/kubernetes/test/e2e/framework/metrics"
"k8s.io/kubernetes/test/e2e/framework/volume" "k8s.io/kubernetes/test/e2e/framework/volume"
@ -101,7 +102,7 @@ var _ = framework.KubeDescribe("ResourceMetricsAPI", func() {
return return
} }
if framework.TestContext.DumpLogsOnFailure { if framework.TestContext.DumpLogsOnFailure {
framework.LogFailedContainers(f.ClientSet, f.Namespace.Name, framework.Logf) framework.LogFailedContainers(f.ClientSet, f.Namespace.Name, e2elog.Logf)
} }
By("Recording processes in system cgroups") By("Recording processes in system cgroups")
recordSystemCgroupProcesses() recordSystemCgroupProcesses()

View File

@ -26,6 +26,7 @@ import (
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
stats "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1" stats "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
imageutils "k8s.io/kubernetes/test/utils/image" imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo"
@ -57,7 +58,7 @@ var _ = SIGDescribe("Resource-usage [Serial] [Slow]", func() {
AfterEach(func() { AfterEach(func() {
result := om.GetLatestRuntimeOperationErrorRate() result := om.GetLatestRuntimeOperationErrorRate()
framework.Logf("runtime operation error metrics:\n%s", framework.FormatRuntimeOperationErrorRate(result)) e2elog.Logf("runtime operation error metrics:\n%s", framework.FormatRuntimeOperationErrorRate(result))
}) })
// This test measures and verifies the steady resource usage of node is within limit // This test measures and verifies the steady resource usage of node is within limit
@ -169,7 +170,7 @@ func runResourceUsageTest(f *framework.Framework, rc *ResourceCollector, testArg
deadline := time.Now().Add(monitoringTime) deadline := time.Now().Add(monitoringTime)
for time.Now().Before(deadline) { for time.Now().Before(deadline) {
timeLeft := deadline.Sub(time.Now()) timeLeft := deadline.Sub(time.Now())
framework.Logf("Still running...%v left", timeLeft) e2elog.Logf("Still running...%v left", timeLeft)
if timeLeft < reportingPeriod { if timeLeft < reportingPeriod {
time.Sleep(timeLeft) time.Sleep(timeLeft)
} else { } else {
@ -190,14 +191,14 @@ func logAndVerifyResource(f *framework.Framework, rc *ResourceCollector, cpuLimi
// Obtain memory PerfData // Obtain memory PerfData
usagePerContainer, err := rc.GetLatest() usagePerContainer, err := rc.GetLatest()
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
framework.Logf("%s", formatResourceUsageStats(usagePerContainer)) e2elog.Logf("%s", formatResourceUsageStats(usagePerContainer))
usagePerNode := make(framework.ResourceUsagePerNode) usagePerNode := make(framework.ResourceUsagePerNode)
usagePerNode[nodeName] = usagePerContainer usagePerNode[nodeName] = usagePerContainer
// Obtain CPU PerfData // Obtain CPU PerfData
cpuSummary := rc.GetCPUSummary() cpuSummary := rc.GetCPUSummary()
framework.Logf("%s", formatCPUSummary(cpuSummary)) e2elog.Logf("%s", formatCPUSummary(cpuSummary))
cpuSummaryPerNode := make(framework.NodesCPUSummary) cpuSummaryPerNode := make(framework.NodesCPUSummary)
cpuSummaryPerNode[nodeName] = cpuSummary cpuSummaryPerNode[nodeName] = cpuSummary
@ -238,9 +239,9 @@ func verifyMemoryLimits(c clientset.Interface, expected framework.ResourceUsageP
errList = append(errList, fmt.Sprintf("node %v:\n %s", nodeName, strings.Join(nodeErrs, ", "))) errList = append(errList, fmt.Sprintf("node %v:\n %s", nodeName, strings.Join(nodeErrs, ", ")))
heapStats, err := framework.GetKubeletHeapStats(c, nodeName) heapStats, err := framework.GetKubeletHeapStats(c, nodeName)
if err != nil { if err != nil {
framework.Logf("Unable to get heap stats from %q", nodeName) e2elog.Logf("Unable to get heap stats from %q", nodeName)
} else { } else {
framework.Logf("Heap stats on %q\n:%v", nodeName, heapStats) e2elog.Logf("Heap stats on %q\n:%v", nodeName, heapStats)
} }
} }
} }
@ -287,7 +288,7 @@ func logPods(c clientset.Interface) {
nodeName := framework.TestContext.NodeName nodeName := framework.TestContext.NodeName
podList, err := framework.GetKubeletRunningPods(c, nodeName) podList, err := framework.GetKubeletRunningPods(c, nodeName)
if err != nil { if err != nil {
framework.Logf("Unable to retrieve kubelet pods for node %v", nodeName) e2elog.Logf("Unable to retrieve kubelet pods for node %v", nodeName)
} }
framework.Logf("%d pods are running on node %v", len(podList.Items), nodeName) e2elog.Logf("%d pods are running on node %v", len(podList.Items), nodeName)
} }


@ -23,6 +23,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
"fmt" "fmt"
"os/exec" "os/exec"
@ -40,7 +41,7 @@ func waitForPods(f *framework.Framework, pod_count int, timeout time.Duration) (
for start := time.Now(); time.Since(start) < timeout; time.Sleep(10 * time.Second) { for start := time.Now(); time.Since(start) < timeout; time.Sleep(10 * time.Second) {
podList, err := f.PodClient().List(metav1.ListOptions{}) podList, err := f.PodClient().List(metav1.ListOptions{})
if err != nil { if err != nil {
framework.Logf("Failed to list pods on node: %v", err) e2elog.Logf("Failed to list pods on node: %v", err)
continue continue
} }
@ -51,7 +52,7 @@ func waitForPods(f *framework.Framework, pod_count int, timeout time.Duration) (
} }
runningPods = append(runningPods, &pod) runningPods = append(runningPods, &pod)
} }
framework.Logf("Running pod count %d", len(runningPods)) e2elog.Logf("Running pod count %d", len(runningPods))
if len(runningPods) >= pod_count { if len(runningPods) >= pod_count {
break break
} }


@ -27,6 +27,7 @@ import (
"k8s.io/kubernetes/pkg/kubelet/images" "k8s.io/kubernetes/pkg/kubelet/images"
"k8s.io/kubernetes/test/e2e/common" "k8s.io/kubernetes/test/e2e/common"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
"k8s.io/kubernetes/test/e2e_node/services" "k8s.io/kubernetes/test/e2e_node/services"
. "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo"
@ -142,7 +143,7 @@ var _ = framework.KubeDescribe("Container Runtime Conformance Test", func() {
break break
} }
if i < flakeRetry { if i < flakeRetry {
framework.Logf("No.%d attempt failed: %v, retrying...", i, err) e2elog.Logf("No.%d attempt failed: %v, retrying...", i, err)
} else { } else {
framework.Failf("All %d attempts failed: %v", flakeRetry, err) framework.Failf("All %d attempts failed: %v", flakeRetry, err)
} }


@ -29,6 +29,7 @@ import (
utilfeature "k8s.io/apiserver/pkg/util/feature" utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
. "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo"
imageutils "k8s.io/kubernetes/test/utils/image" imageutils "k8s.io/kubernetes/test/utils/image"
@ -165,7 +166,7 @@ var _ = framework.KubeDescribe("Security Context", func() {
} }
pids := strings.TrimSpace(logs) pids := strings.TrimSpace(logs)
framework.Logf("Got nginx's pid %q from pod %q", pids, busyboxPodName) e2elog.Logf("Got nginx's pid %q from pod %q", pids, busyboxPodName)
if pids == "" { if pids == "" {
framework.Failf("nginx's pid should be seen by hostpid containers") framework.Failf("nginx's pid should be seen by hostpid containers")
} }
@ -185,7 +186,7 @@ var _ = framework.KubeDescribe("Security Context", func() {
} }
pids := strings.TrimSpace(logs) pids := strings.TrimSpace(logs)
framework.Logf("Got nginx's pid %q from pod %q", pids, busyboxPodName) e2elog.Logf("Got nginx's pid %q from pod %q", pids, busyboxPodName)
pidSets := sets.NewString(strings.Split(pids, " ")...) pidSets := sets.NewString(strings.Split(pids, " ")...)
if pidSets.Has(nginxPid) { if pidSets.Has(nginxPid) {
framework.Failf("nginx's pid should not be seen by non-hostpid containers") framework.Failf("nginx's pid should not be seen by non-hostpid containers")
@ -229,7 +230,7 @@ var _ = framework.KubeDescribe("Security Context", func() {
framework.Failf("Failed to create the shared memory on the host: %v", err) framework.Failf("Failed to create the shared memory on the host: %v", err)
} }
hostSharedMemoryID = strings.TrimSpace(string(output)) hostSharedMemoryID = strings.TrimSpace(string(output))
framework.Logf("Got host shared memory ID %q", hostSharedMemoryID) e2elog.Logf("Got host shared memory ID %q", hostSharedMemoryID)
}) })
It("should show the shared memory ID in the host IPC containers [NodeFeature:HostAccess]", func() { It("should show the shared memory ID in the host IPC containers [NodeFeature:HostAccess]", func() {
@ -241,7 +242,7 @@ var _ = framework.KubeDescribe("Security Context", func() {
} }
podSharedMemoryIDs := strings.TrimSpace(logs) podSharedMemoryIDs := strings.TrimSpace(logs)
framework.Logf("Got shared memory IDs %q from pod %q", podSharedMemoryIDs, ipcutilsPodName) e2elog.Logf("Got shared memory IDs %q from pod %q", podSharedMemoryIDs, ipcutilsPodName)
if !strings.Contains(podSharedMemoryIDs, hostSharedMemoryID) { if !strings.Contains(podSharedMemoryIDs, hostSharedMemoryID) {
framework.Failf("hostIPC container should show shared memory IDs on host") framework.Failf("hostIPC container should show shared memory IDs on host")
} }
@ -256,7 +257,7 @@ var _ = framework.KubeDescribe("Security Context", func() {
} }
podSharedMemoryIDs := strings.TrimSpace(logs) podSharedMemoryIDs := strings.TrimSpace(logs)
framework.Logf("Got shared memory IDs %q from pod %q", podSharedMemoryIDs, ipcutilsPodName) e2elog.Logf("Got shared memory IDs %q from pod %q", podSharedMemoryIDs, ipcutilsPodName)
if strings.Contains(podSharedMemoryIDs, hostSharedMemoryID) { if strings.Contains(podSharedMemoryIDs, hostSharedMemoryID) {
framework.Failf("non-hostIPC container should not show shared memory IDs on host") framework.Failf("non-hostIPC container should not show shared memory IDs on host")
} }
@ -312,7 +313,7 @@ var _ = framework.KubeDescribe("Security Context", func() {
} }
addr := strings.Split(l.Addr().String(), ":") addr := strings.Split(l.Addr().String(), ":")
listeningPort = addr[len(addr)-1] listeningPort = addr[len(addr)-1]
framework.Logf("Opened a new tcp port %q", listeningPort) e2elog.Logf("Opened a new tcp port %q", listeningPort)
}) })
It("should listen on same port in the host network containers [NodeFeature:HostAccess]", func() { It("should listen on same port in the host network containers [NodeFeature:HostAccess]", func() {
@ -323,7 +324,7 @@ var _ = framework.KubeDescribe("Security Context", func() {
framework.Failf("GetPodLogs for pod %q failed: %v", busyboxPodName, err) framework.Failf("GetPodLogs for pod %q failed: %v", busyboxPodName, err)
} }
framework.Logf("Got logs for pod %q: %q", busyboxPodName, logs) e2elog.Logf("Got logs for pod %q: %q", busyboxPodName, logs)
if !strings.Contains(logs, listeningPort) { if !strings.Contains(logs, listeningPort) {
framework.Failf("host-networked container should listening on same port as host") framework.Failf("host-networked container should listening on same port as host")
} }
@ -337,7 +338,7 @@ var _ = framework.KubeDescribe("Security Context", func() {
framework.Failf("GetPodLogs for pod %q failed: %v", busyboxPodName, err) framework.Failf("GetPodLogs for pod %q failed: %v", busyboxPodName, err)
} }
framework.Logf("Got logs for pod %q: %q", busyboxPodName, logs) e2elog.Logf("Got logs for pod %q: %q", busyboxPodName, logs)
if strings.Contains(logs, listeningPort) { if strings.Contains(logs, listeningPort) {
framework.Failf("non-hostnetworked container shouldn't show the same port as host") framework.Failf("non-hostnetworked container shouldn't show the same port as host")
} }
@ -389,7 +390,7 @@ var _ = framework.KubeDescribe("Security Context", func() {
framework.Failf("GetPodLogs for pod %q failed: %v", podName, err) framework.Failf("GetPodLogs for pod %q failed: %v", podName, err)
} }
framework.Logf("Got logs for pod %q: %q", podName, logs) e2elog.Logf("Got logs for pod %q: %q", podName, logs)
if strings.Contains(logs, "Operation not permitted") { if strings.Contains(logs, "Operation not permitted") {
framework.Failf("privileged container should be able to create dummy device") framework.Failf("privileged container should be able to create dummy device")
} }


@ -28,6 +28,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
stats "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1" stats "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
"k8s.io/kubernetes/test/e2e/framework/volume" "k8s.io/kubernetes/test/e2e/framework/volume"
systemdutil "github.com/coreos/go-systemd/util" systemdutil "github.com/coreos/go-systemd/util"
@ -45,7 +46,7 @@ var _ = framework.KubeDescribe("Summary API [NodeConformance]", func() {
return return
} }
if framework.TestContext.DumpLogsOnFailure { if framework.TestContext.DumpLogsOnFailure {
framework.LogFailedContainers(f.ClientSet, f.Namespace.Name, framework.Logf) framework.LogFailedContainers(f.ClientSet, f.Namespace.Name, e2elog.Logf)
} }
By("Recording processes in system cgroups") By("Recording processes in system cgroups")
recordSystemCgroupProcesses() recordSystemCgroupProcesses()
@ -151,7 +152,7 @@ var _ = framework.KubeDescribe("Summary API [NodeConformance]", func() {
} }
// The Kubelet only manages the 'misc' system container if the host is not running systemd. // The Kubelet only manages the 'misc' system container if the host is not running systemd.
if !systemdutil.IsRunningSystemd() { if !systemdutil.IsRunningSystemd() {
framework.Logf("Host not running systemd; expecting 'misc' system container.") e2elog.Logf("Host not running systemd; expecting 'misc' system container.")
miscContExpectations := sysContExpectations().(*gstruct.FieldsMatcher) miscContExpectations := sysContExpectations().(*gstruct.FieldsMatcher)
// Misc processes are system-dependent, so relax the memory constraints. // Misc processes are system-dependent, so relax the memory constraints.
miscContExpectations.Fields["Memory"] = ptrMatchAllFields(gstruct.Fields{ miscContExpectations.Fields["Memory"] = ptrMatchAllFields(gstruct.Fields{
@ -418,7 +419,7 @@ func recent(d time.Duration) types.GomegaMatcher {
func recordSystemCgroupProcesses() { func recordSystemCgroupProcesses() {
cfg, err := getCurrentKubeletConfig() cfg, err := getCurrentKubeletConfig()
if err != nil { if err != nil {
framework.Logf("Failed to read kubelet config: %v", err) e2elog.Logf("Failed to read kubelet config: %v", err)
return return
} }
cgroups := map[string]string{ cgroups := map[string]string{
@ -427,24 +428,24 @@ func recordSystemCgroupProcesses() {
} }
for name, cgroup := range cgroups { for name, cgroup := range cgroups {
if cgroup == "" { if cgroup == "" {
framework.Logf("Skipping unconfigured cgroup %s", name) e2elog.Logf("Skipping unconfigured cgroup %s", name)
continue continue
} }
pids, err := ioutil.ReadFile(fmt.Sprintf("/sys/fs/cgroup/cpu/%s/cgroup.procs", cgroup)) pids, err := ioutil.ReadFile(fmt.Sprintf("/sys/fs/cgroup/cpu/%s/cgroup.procs", cgroup))
if err != nil { if err != nil {
framework.Logf("Failed to read processes in cgroup %s: %v", name, err) e2elog.Logf("Failed to read processes in cgroup %s: %v", name, err)
continue continue
} }
framework.Logf("Processes in %s cgroup (%s):", name, cgroup) e2elog.Logf("Processes in %s cgroup (%s):", name, cgroup)
for _, pid := range strings.Fields(string(pids)) { for _, pid := range strings.Fields(string(pids)) {
path := fmt.Sprintf("/proc/%s/cmdline", pid) path := fmt.Sprintf("/proc/%s/cmdline", pid)
cmd, err := ioutil.ReadFile(path) cmd, err := ioutil.ReadFile(path)
if err != nil { if err != nil {
framework.Logf(" Failed to read %s: %v", path, err) e2elog.Logf(" Failed to read %s: %v", path, err)
} else { } else {
framework.Logf(" %s", cmd) e2elog.Logf(" %s", cmd)
} }
} }
} }


@ -28,6 +28,7 @@ import (
kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config" kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
evictionapi "k8s.io/kubernetes/pkg/kubelet/eviction/api" evictionapi "k8s.io/kubernetes/pkg/kubelet/eviction/api"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
. "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" . "github.com/onsi/gomega"
@ -78,7 +79,7 @@ var _ = framework.KubeDescribe("SystemNodeCriticalPod [Slow] [Serial] [Disruptiv
return nil return nil
} }
msg := fmt.Sprintf("NodeCondition: %s not encountered yet", v1.NodeDiskPressure) msg := fmt.Sprintf("NodeCondition: %s not encountered yet", v1.NodeDiskPressure)
framework.Logf(msg) e2elog.Logf(msg)
return fmt.Errorf(msg) return fmt.Errorf(msg)
}, time.Minute*2, time.Second*4).Should(BeNil()) }, time.Minute*2, time.Second*4).Should(BeNil())
@ -86,9 +87,9 @@ var _ = framework.KubeDescribe("SystemNodeCriticalPod [Slow] [Serial] [Disruptiv
Consistently(func() error { Consistently(func() error {
err := checkMirrorPodRunning(f.ClientSet, mirrorPodName, ns) err := checkMirrorPodRunning(f.ClientSet, mirrorPodName, ns)
if err == nil { if err == nil {
framework.Logf("mirror pod %q is running", mirrorPodName) e2elog.Logf("mirror pod %q is running", mirrorPodName)
} else { } else {
framework.Logf(err.Error()) e2elog.Logf(err.Error())
} }
return err return err
}, time.Minute*8, time.Second*4).ShouldNot(HaveOccurred()) }, time.Minute*8, time.Second*4).ShouldNot(HaveOccurred())


@ -48,6 +48,7 @@ import (
"k8s.io/kubernetes/pkg/kubelet/remote" "k8s.io/kubernetes/pkg/kubelet/remote"
"k8s.io/kubernetes/pkg/kubelet/util" "k8s.io/kubernetes/pkg/kubelet/util"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
frameworkmetrics "k8s.io/kubernetes/test/e2e/framework/metrics" frameworkmetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
imageutils "k8s.io/kubernetes/test/utils/image" imageutils "k8s.io/kubernetes/test/utils/image"
@ -327,13 +328,13 @@ func newKubeletConfigMap(name string, internalKC *kubeletconfig.KubeletConfigura
} }
func logPodEvents(f *framework.Framework) { func logPodEvents(f *framework.Framework) {
framework.Logf("Summary of pod events during the test:") e2elog.Logf("Summary of pod events during the test:")
err := framework.ListNamespaceEvents(f.ClientSet, f.Namespace.Name) err := framework.ListNamespaceEvents(f.ClientSet, f.Namespace.Name)
framework.ExpectNoError(err) framework.ExpectNoError(err)
} }
func logNodeEvents(f *framework.Framework) { func logNodeEvents(f *framework.Framework) {
framework.Logf("Summary of node events during the test:") e2elog.Logf("Summary of node events during the test:")
err := framework.ListNamespaceEvents(f.ClientSet, "") err := framework.ListNamespaceEvents(f.ClientSet, "")
framework.ExpectNoError(err) framework.ExpectNoError(err)
} }
@ -354,9 +355,9 @@ func logKubeletLatencyMetrics(metricNames ...string) {
} }
metric, err := frameworkmetrics.GrabKubeletMetricsWithoutProxy(framework.TestContext.NodeName+":10255", "/metrics") metric, err := frameworkmetrics.GrabKubeletMetricsWithoutProxy(framework.TestContext.NodeName+":10255", "/metrics")
if err != nil { if err != nil {
framework.Logf("Error getting kubelet metrics: %v", err) e2elog.Logf("Error getting kubelet metrics: %v", err)
} else { } else {
framework.Logf("Kubelet Metrics: %+v", framework.GetKubeletLatencyMetrics(metric, metricSet)) e2elog.Logf("Kubelet Metrics: %+v", framework.GetKubeletLatencyMetrics(metric, metricSet))
} }
} }
@ -418,7 +419,7 @@ func restartKubelet() {
matches := regex.FindStringSubmatch(string(stdout)) matches := regex.FindStringSubmatch(string(stdout))
Expect(len(matches)).NotTo(BeZero()) Expect(len(matches)).NotTo(BeZero())
kube := matches[0] kube := matches[0]
framework.Logf("Get running kubelet with systemctl: %v, %v", string(stdout), kube) e2elog.Logf("Get running kubelet with systemctl: %v, %v", string(stdout), kube)
stdout, err = exec.Command("sudo", "systemctl", "restart", kube).CombinedOutput() stdout, err = exec.Command("sudo", "systemctl", "restart", kube).CombinedOutput()
framework.ExpectNoError(err, "Failed to restart kubelet with systemctl: %v, %v", err, stdout) framework.ExpectNoError(err, "Failed to restart kubelet with systemctl: %v, %v", err, stdout)
} }