Use log functions of core framework on [c-n]
This makes sub-packages of the e2e test framework use the log functions of the core framework instead, to avoid circular dependencies.
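
For illustration, a minimal hypothetical sketch of the call-site change; the Logf stub below only stands in for the core framework's real helper and is not its actual implementation:

// Sketch only: a package-local logging helper of the same shape as the core
// framework's Logf, so code in this package no longer needs to import the
// framework/log sub-package (the e2elog alias removed in this diff).
package framework

import (
	"fmt"
	"time"
)

// Logf stands in for the core framework's printf-style test logger.
func Logf(format string, args ...interface{}) {
	fmt.Printf(time.Now().Format(time.RFC3339)+": INFO: "+format+"\n", args...)
}

func exampleCallSite() {
	// Before: e2elog.Logf("Skipping waiting for service account")
	// After:
	Logf("Skipping waiting for service account")
}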
@@ -46,7 +46,6 @@ import (
 	"k8s.io/client-go/rest"
 	"k8s.io/client-go/restmapper"
 	scaleclient "k8s.io/client-go/scale"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	e2epsp "k8s.io/kubernetes/test/e2e/framework/psp"
@@ -225,7 +224,7 @@ func (f *Framework) BeforeEach() {
 			err = WaitForDefaultServiceAccountInNamespace(f.ClientSet, namespace.Name)
 			ExpectNoError(err)
 		} else {
-			e2elog.Logf("Skipping waiting for service account")
+			Logf("Skipping waiting for service account")
 		}
 		f.UniqueName = f.Namespace.GetName()
 	} else {
@@ -253,7 +252,7 @@ func (f *Framework) BeforeEach() {
 			PrintVerboseLogs: false,
 		}, nil)
 		if err != nil {
-			e2elog.Logf("Error while creating NewResourceUsageGatherer: %v", err)
+			Logf("Error while creating NewResourceUsageGatherer: %v", err)
 		} else {
 			go f.gatherer.StartGatheringData()
 		}
@@ -274,13 +273,13 @@ func (f *Framework) BeforeEach() {
 	if gatherMetricsAfterTest && TestContext.IncludeClusterAutoscalerMetrics {
 		grabber, err := e2emetrics.NewMetricsGrabber(f.ClientSet, f.KubemarkExternalClusterClientSet, !ProviderIs("kubemark"), false, false, false, TestContext.IncludeClusterAutoscalerMetrics)
 		if err != nil {
-			e2elog.Logf("Failed to create MetricsGrabber (skipping ClusterAutoscaler metrics gathering before test): %v", err)
+			Logf("Failed to create MetricsGrabber (skipping ClusterAutoscaler metrics gathering before test): %v", err)
 		} else {
 			f.clusterAutoscalerMetricsBeforeTest, err = grabber.Grab()
 			if err != nil {
-				e2elog.Logf("MetricsGrabber failed to grab CA metrics before test (skipping metrics gathering): %v", err)
+				Logf("MetricsGrabber failed to grab CA metrics before test (skipping metrics gathering): %v", err)
 			} else {
-				e2elog.Logf("Gathered ClusterAutoscaler metrics before test")
+				Logf("Gathered ClusterAutoscaler metrics before test")
 			}
 		}
 
@@ -311,15 +310,15 @@ func (f *Framework) AfterEach() {
 					if !apierrors.IsNotFound(err) {
 						nsDeletionErrors[ns.Name] = err
 					} else {
-						e2elog.Logf("Namespace %v was already deleted", ns.Name)
+						Logf("Namespace %v was already deleted", ns.Name)
 					}
 				}
 			}
 		} else {
 			if !TestContext.DeleteNamespace {
-				e2elog.Logf("Found DeleteNamespace=false, skipping namespace deletion!")
+				Logf("Found DeleteNamespace=false, skipping namespace deletion!")
 			} else {
-				e2elog.Logf("Found DeleteNamespaceOnFailure=false and current test failed, skipping namespace deletion!")
+				Logf("Found DeleteNamespaceOnFailure=false and current test failed, skipping namespace deletion!")
 			}
 		}
 
@@ -334,7 +333,7 @@ func (f *Framework) AfterEach() {
 			for namespaceKey, namespaceErr := range nsDeletionErrors {
 				messages = append(messages, fmt.Sprintf("Couldn't delete ns: %q: %s (%#v)", namespaceKey, namespaceErr, namespaceErr))
 			}
-			e2elog.Failf(strings.Join(messages, ","))
+			Failf(strings.Join(messages, ","))
 		}
 	}()
 
@@ -366,11 +365,11 @@ func (f *Framework) AfterEach() {
 		grabMetricsFromKubelets := TestContext.GatherMetricsAfterTest != "master" && !ProviderIs("kubemark")
 		grabber, err := e2emetrics.NewMetricsGrabber(f.ClientSet, f.KubemarkExternalClusterClientSet, grabMetricsFromKubelets, true, true, true, TestContext.IncludeClusterAutoscalerMetrics)
 		if err != nil {
-			e2elog.Logf("Failed to create MetricsGrabber (skipping metrics gathering): %v", err)
+			Logf("Failed to create MetricsGrabber (skipping metrics gathering): %v", err)
 		} else {
 			received, err := grabber.Grab()
 			if err != nil {
-				e2elog.Logf("MetricsGrabber failed to grab some of the metrics: %v", err)
+				Logf("MetricsGrabber failed to grab some of the metrics: %v", err)
 			}
 			(*e2emetrics.ComponentCollection)(&received).ComputeClusterAutoscalerMetricsDelta(f.clusterAutoscalerMetricsBeforeTest)
 			f.TestSummaries = append(f.TestSummaries, (*e2emetrics.ComponentCollection)(&received))
@@ -391,7 +390,7 @@ func (f *Framework) AfterEach() {
 	// This is explicitly done at the very end of the test, to avoid
 	// e.g. not removing namespace in case of this failure.
 	if err := AllNodesReady(f.ClientSet, 3*time.Minute); err != nil {
-		e2elog.Failf("All nodes should be ready after test, %v", err)
+		Failf("All nodes should be ready after test, %v", err)
 	}
 }
 
@@ -490,7 +489,7 @@ func (f *Framework) WriteFileViaContainer(podName, containerName string, path st
 	command := fmt.Sprintf("echo '%s' > '%s'", contents, path)
 	stdout, stderr, err := kubectlExecWithRetry(f.Namespace.Name, podName, containerName, "--", "/bin/sh", "-c", command)
 	if err != nil {
-		e2elog.Logf("error running kubectl exec to write file: %v\nstdout=%v\nstderr=%v)", err, string(stdout), string(stderr))
+		Logf("error running kubectl exec to write file: %v\nstdout=%v\nstderr=%v)", err, string(stdout), string(stderr))
 	}
 	return err
 }
@@ -501,7 +500,7 @@ func (f *Framework) ReadFileViaContainer(podName, containerName string, path str
 
 	stdout, stderr, err := kubectlExecWithRetry(f.Namespace.Name, podName, containerName, "--", "cat", path)
 	if err != nil {
-		e2elog.Logf("error running kubectl exec to read file: %v\nstdout=%v\nstderr=%v)", err, string(stdout), string(stderr))
+		Logf("error running kubectl exec to read file: %v\nstdout=%v\nstderr=%v)", err, string(stdout), string(stderr))
 	}
 	return string(stdout), err
 }
@@ -512,7 +511,7 @@ func (f *Framework) CheckFileSizeViaContainer(podName, containerName, path strin
 
 	stdout, stderr, err := kubectlExecWithRetry(f.Namespace.Name, podName, containerName, "--", "ls", "-l", path)
 	if err != nil {
-		e2elog.Logf("error running kubectl exec to read file: %v\nstdout=%v\nstderr=%v)", err, string(stdout), string(stderr))
+		Logf("error running kubectl exec to read file: %v\nstdout=%v\nstderr=%v)", err, string(stdout), string(stderr))
 	}
 	return string(stdout), err
 }
@@ -549,7 +548,7 @@ func (f *Framework) CreateServiceForSimpleApp(contPort, svcPort int, appName str
 			TargetPort: intstr.FromInt(contPort),
 		}}
 	}
-	e2elog.Logf("Creating a service-for-%v for selecting app=%v-pod", appName, appName)
+	Logf("Creating a service-for-%v for selecting app=%v-pod", appName, appName)
 	service, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(&v1.Service{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "service-for-" + appName,
@@ -575,7 +574,7 @@ func (f *Framework) CreatePodsPerNodeForSimpleApp(appName string, podSpec func(n
 	for i, node := range nodes.Items {
 		// one per node, but no more than maxCount.
 		if i <= maxCount {
-			e2elog.Logf("%v/%v : Creating container with label app=%v-pod", i, maxCount, appName)
+			Logf("%v/%v : Creating container with label app=%v-pod", i, maxCount, appName)
 			_, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(&v1.Pod{
 				ObjectMeta: metav1.ObjectMeta{
 					Name: fmt.Sprintf(appName+"-pod-%v", i),
@@ -646,19 +645,19 @@ func (kc *KubeConfig) FindCluster(name string) *KubeCluster {
 func kubectlExecWithRetry(namespace string, podName, containerName string, args ...string) ([]byte, []byte, error) {
 	for numRetries := 0; numRetries < maxKubectlExecRetries; numRetries++ {
 		if numRetries > 0 {
-			e2elog.Logf("Retrying kubectl exec (retry count=%v/%v)", numRetries+1, maxKubectlExecRetries)
+			Logf("Retrying kubectl exec (retry count=%v/%v)", numRetries+1, maxKubectlExecRetries)
 		}
 
 		stdOutBytes, stdErrBytes, err := kubectlExec(namespace, podName, containerName, args...)
 		if err != nil {
 			if strings.Contains(strings.ToLower(string(stdErrBytes)), "i/o timeout") {
 				// Retry on "i/o timeout" errors
-				e2elog.Logf("Warning: kubectl exec encountered i/o timeout.\nerr=%v\nstdout=%v\nstderr=%v)", err, string(stdOutBytes), string(stdErrBytes))
+				Logf("Warning: kubectl exec encountered i/o timeout.\nerr=%v\nstdout=%v\nstderr=%v)", err, string(stdOutBytes), string(stdErrBytes))
 				continue
 			}
 			if strings.Contains(strings.ToLower(string(stdErrBytes)), "container not found") {
 				// Retry on "container not found" errors
-				e2elog.Logf("Warning: kubectl exec encountered container not found.\nerr=%v\nstdout=%v\nstderr=%v)", err, string(stdOutBytes), string(stdErrBytes))
+				Logf("Warning: kubectl exec encountered container not found.\nerr=%v\nstdout=%v\nstderr=%v)", err, string(stdOutBytes), string(stdErrBytes))
 				time.Sleep(2 * time.Second)
 				continue
 			}
@@ -683,7 +682,7 @@ func kubectlExec(namespace string, podName, containerName string, args ...string
 	cmd := KubectlCmd(cmdArgs...)
 	cmd.Stdout, cmd.Stderr = &stdout, &stderr
 
-	e2elog.Logf("Running '%s %s'", cmd.Path, strings.Join(cmdArgs, " "))
+	Logf("Running '%s %s'", cmd.Path, strings.Join(cmdArgs, " "))
 	err := cmd.Run()
 	return stdout.Bytes(), stderr.Bytes(), err
 }
@@ -790,7 +789,7 @@ func (p *PodStateVerification) filter(c clientset.Interface, namespace *v1.Names
 
 	ns := namespace.Name
 	pl, err := filterLabels(p.Selectors, c, ns) // Build an v1.PodList to operate against.
-	e2elog.Logf("Selector matched %v pods for %v", len(pl.Items), p.Selectors)
+	Logf("Selector matched %v pods for %v", len(pl.Items), p.Selectors)
 	if len(pl.Items) == 0 || err != nil {
 		return pl.Items, err
 	}
@@ -805,7 +804,7 @@ ReturnPodsSoFar:
 		}
 		passesVerify, err := passesVerifyFilter(pod, p.Verify)
 		if err != nil {
-			e2elog.Logf("Error detected on %v : %v !", pod.Name, err)
+			Logf("Error detected on %v : %v !", pod.Name, err)
 			break ReturnPodsSoFar
 		}
 		if passesVerify {
@@ -826,12 +825,12 @@ func (cl *ClusterVerification) WaitFor(atLeast int, timeout time.Duration) ([]v1
 
 		// Failure
 		if returnedErr != nil {
-			e2elog.Logf("Cutting polling short: We got an error from the pod filtering layer.")
+			Logf("Cutting polling short: We got an error from the pod filtering layer.")
 			// stop polling if the pod filtering returns an error. that should never happen.
 			// it indicates, for example, that the client is broken or something non-pod related.
 			return false, returnedErr
 		}
-		e2elog.Logf("Found %v / %v", len(pods), atLeast)
+		Logf("Found %v / %v", len(pods), atLeast)

 		// Success
 		if len(pods) >= atLeast {
@@ -840,7 +839,7 @@ func (cl *ClusterVerification) WaitFor(atLeast int, timeout time.Duration) ([]v1
 		// Keep trying...
 		return false, nil
 	})
-	e2elog.Logf("WaitFor completed with timeout %v. Pods found = %v out of %v", timeout, len(pods), atLeast)
+	Logf("WaitFor completed with timeout %v. Pods found = %v out of %v", timeout, len(pods), atLeast)
 	return pods, err
 }
 
@@ -848,7 +847,7 @@ func (cl *ClusterVerification) WaitFor(atLeast int, timeout time.Duration) ([]v1
 func (cl *ClusterVerification) WaitForOrFail(atLeast int, timeout time.Duration) {
 	pods, err := cl.WaitFor(atLeast, timeout)
 	if err != nil || len(pods) < atLeast {
-		e2elog.Failf("Verified %v of %v pods , error : %v", len(pods), atLeast, err)
+		Failf("Verified %v of %v pods , error : %v", len(pods), atLeast, err)
 	}
 }
 
@@ -861,14 +860,14 @@ func (cl *ClusterVerification) ForEach(podFunc func(v1.Pod)) error {
 	pods, err := cl.podState.filter(cl.client, cl.namespace)
 	if err == nil {
 		if len(pods) == 0 {
-			e2elog.Failf("No pods matched the filter.")
+			Failf("No pods matched the filter.")
 		}
-		e2elog.Logf("ForEach: Found %v pods from the filter. Now looping through them.", len(pods))
+		Logf("ForEach: Found %v pods from the filter. Now looping through them.", len(pods))
 		for _, p := range pods {
 			podFunc(p)
 		}
 	} else {
-		e2elog.Logf("ForEach: Something went wrong when filtering pods to execute against: %v", err)
+		Logf("ForEach: Something went wrong when filtering pods to execute against: %v", err)
 	}
 
 	return err
@@ -880,7 +879,7 @@ func GetLogToFileFunc(file *os.File) func(format string, args ...interface{}) {
 	return func(format string, args ...interface{}) {
 		writer := bufio.NewWriter(file)
 		if _, err := fmt.Fprintf(writer, format, args...); err != nil {
-			e2elog.Logf("Failed to write file %v with test performance data: %v", file.Name(), err)
+			Logf("Failed to write file %v with test performance data: %v", file.Name(), err)
 		}
 		writer.Flush()
 	}