refactor: replace framework.Failf with e2elog.Failf

SataQiu 2019-06-19 17:52:35 +08:00
parent 9162d932cf
commit 332be4b1e3
144 changed files with 767 additions and 723 deletions
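The change is mechanical at every call site: the e2e log helpers are imported under the e2elog alias (in files that did not already have them) and the failure call is switched from the framework package to e2elog, leaving the format string and arguments untouched. A minimal sketch of the pattern follows; the package name and wrapper functions are illustrative only and are not part of this commit:

package e2eexample // hypothetical package, for illustration only

import (
	"k8s.io/kubernetes/test/e2e/framework"
	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
)

// Before: test failures were reported through the monolithic framework package.
func failViaFramework(err error) {
	framework.Failf("unexpected error: %v", err)
}

// After: the same failure goes through the dedicated e2elog helper; only the
// receiving package changes, the message and arguments stay the same.
func failViaE2elog(err error) {
	e2elog.Failf("unexpected error: %v", err)
}

Files that already used the log package only need the call-site rename; the hunks below that touch import blocks add the e2elog import where it was missing.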

View File

@@ -274,7 +274,7 @@ var _ = SIGDescribe("DaemonRestart [Disruptive]", func() {
 }
 if len(newKeys.List()) != len(existingKeys.List()) ||
 !newKeys.IsSuperset(existingKeys) {
-framework.Failf("RcManager created/deleted pods after restart \n\n %+v", tracker)
+e2elog.Failf("RcManager created/deleted pods after restart \n\n %+v", tracker)
 }
 })
@@ -312,7 +312,7 @@ var _ = SIGDescribe("DaemonRestart [Disruptive]", func() {
 postRestarts, badNodes := getContainerRestarts(f.ClientSet, ns, labelSelector)
 if postRestarts != preRestarts {
 framework.DumpNodeDebugInfo(f.ClientSet, badNodes, e2elog.Logf)
-framework.Failf("Net container restart count went from %v -> %v after kubelet restart on nodes %v \n\n %+v", preRestarts, postRestarts, badNodes, tracker)
+e2elog.Failf("Net container restart count went from %v -> %v after kubelet restart on nodes %v \n\n %+v", preRestarts, postRestarts, badNodes, tracker)
 }
 })
 })

View File

@@ -418,7 +418,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
 case newDS.Spec.Template.Spec.Containers[0].Image:
 newPods = append(newPods, &pod)
 default:
-framework.Failf("unexpected pod found, image = %s", image)
+e2elog.Failf("unexpected pod found, image = %s", image)
 }
 }
 schedulableNodes = framework.GetReadySchedulableNodesOrDie(c)
@@ -655,7 +655,7 @@ func canScheduleOnNode(node v1.Node, ds *apps.DaemonSet) bool {
 nodeInfo.SetNode(&node)
 fit, _, err := daemon.Predicates(newPod, nodeInfo)
 if err != nil {
-framework.Failf("Can't test DaemonSet predicates for node %s: %v", node.Name, err)
+e2elog.Failf("Can't test DaemonSet predicates for node %s: %v", node.Name, err)
 return false
 }
 return fit

View File

@@ -222,7 +222,7 @@ func stopDeployment(c clientset.Interface, ns, deploymentName string) {
 }
 return false, nil
 }); err != nil {
-framework.Failf("Err : %s\n. Failed to remove deployment %s pods : %+v", err, deploymentName, pods)
+e2elog.Failf("Err : %s\n. Failed to remove deployment %s pods : %+v", err, deploymentName, pods)
 }
 }
@@ -382,14 +382,14 @@ func testDeploymentCleanUpPolicy(f *framework.Framework) {
 }
 numPodCreation--
 if numPodCreation < 0 {
-framework.Failf("Expect only one pod creation, the second creation event: %#v\n", event)
+e2elog.Failf("Expect only one pod creation, the second creation event: %#v\n", event)
 }
 pod, ok := event.Object.(*v1.Pod)
 if !ok {
-framework.Failf("Expect event Object to be a pod")
+e2elog.Failf("Expect event Object to be a pod")
 }
 if pod.Spec.Containers[0].Name != RedisImageName {
-framework.Failf("Expect the created pod to have container name %s, got pod %#v\n", RedisImageName, pod)
+e2elog.Failf("Expect the created pod to have container name %s, got pod %#v\n", RedisImageName, pod)
 }
 case <-stopCh:
 return

View File

@@ -26,6 +26,7 @@ import (
 batchinternal "k8s.io/kubernetes/pkg/apis/batch"
 "k8s.io/kubernetes/test/e2e/framework"
 jobutil "k8s.io/kubernetes/test/e2e/framework/job"
+e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 "github.com/onsi/ginkgo"
@@ -206,7 +207,7 @@ var _ = SIGDescribe("Job", func() {
 // updates we need to allow more than backoff+1
 // TODO revert this back to above when https://github.com/kubernetes/kubernetes/issues/64787 gets fixed
 if len(pods.Items) < backoff+1 {
-framework.Failf("Not enough pod created expected at least %d, got %#v", backoff+1, pods.Items)
+e2elog.Failf("Not enough pod created expected at least %d, got %#v", backoff+1, pods.Items)
 }
 for _, pod := range pods.Items {
 gomega.Expect(pod.Status.Phase).To(gomega.Equal(v1.PodFailed))

View File

@@ -70,7 +70,7 @@ func expectNodeReadiness(isReady bool, newNode chan *v1.Node) {
 }
 }
 if !expected {
-framework.Failf("Failed to observe node ready status change to %v", isReady)
+e2elog.Failf("Failed to observe node ready status change to %v", isReady)
 }
 }
@@ -120,7 +120,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
 // TODO(foxish): Re-enable testing on gce after kubernetes#56787 is fixed.
 framework.SkipUnlessProviderIs("gke", "aws")
 if strings.Index(framework.TestContext.CloudConfig.NodeInstanceGroup, ",") >= 0 {
-framework.Failf("Test dose not support cluster setup with more than one MIG: %s", framework.TestContext.CloudConfig.NodeInstanceGroup)
+e2elog.Failf("Test dose not support cluster setup with more than one MIG: %s", framework.TestContext.CloudConfig.NodeInstanceGroup)
 }
 })
@@ -155,12 +155,12 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
 return true
 })
 if len(nodes.Items) <= 0 {
-framework.Failf("No eligible node were found: %d", len(nodes.Items))
+e2elog.Failf("No eligible node were found: %d", len(nodes.Items))
 }
 node := nodes.Items[0]
 podOpts = metav1.ListOptions{FieldSelector: fields.OneTermEqualSelector(api.PodHostField, node.Name).String()}
 if err = e2epod.WaitForMatchPodsCondition(c, podOpts, "Running and Ready", podReadyTimeout, testutils.PodRunningReady); err != nil {
-framework.Failf("Pods on node %s are not ready and running within %v: %v", node.Name, podReadyTimeout, err)
+e2elog.Failf("Pods on node %s are not ready and running within %v: %v", node.Name, podReadyTimeout, err)
 }
 ginkgo.By("Set up watch on node status")
@@ -216,7 +216,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
 ginkgo.By("Expect to observe node and pod status change from NotReady to Ready after network connectivity recovers")
 expectNodeReadiness(true, newNode)
 if err = e2epod.WaitForMatchPodsCondition(c, podOpts, "Running and Ready", podReadyTimeout, testutils.PodRunningReady); err != nil {
-framework.Failf("Pods on node %s did not become ready and running within %v: %v", node.Name, podReadyTimeout, err)
+e2elog.Failf("Pods on node %s did not become ready and running within %v: %v", node.Name, podReadyTimeout, err)
 }
 }()
@@ -227,7 +227,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
 ginkgo.By("Expect to observe node and pod status change from Ready to NotReady after network partition")
 expectNodeReadiness(false, newNode)
 if err = e2epod.WaitForMatchPodsCondition(c, podOpts, "NotReady", podNotReadyTimeout, testutils.PodNotReady); err != nil {
-framework.Failf("Pods on node %s did not become NotReady within %v: %v", node.Name, podNotReadyTimeout, err)
+e2elog.Failf("Pods on node %s did not become NotReady within %v: %v", node.Name, podNotReadyTimeout, err)
 }
 })
 })
@@ -276,7 +276,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
 e2elog.Logf("Waiting %v for node %s to be ready once temporary network failure ends", resizeNodeReadyTimeout, node.Name)
 if !e2enode.WaitForNodeToBeReady(c, node.Name, resizeNodeReadyTimeout) {
-framework.Failf("Node %s did not become ready within %v", node.Name, resizeNodeReadyTimeout)
+e2elog.Failf("Node %s did not become ready within %v", node.Name, resizeNodeReadyTimeout)
 }
 // sleep a bit, to allow Watch in NodeController to catch up.
@@ -343,7 +343,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
 e2elog.Logf("Waiting %v for node %s to be ready once temporary network failure ends", resizeNodeReadyTimeout, node.Name)
 if !e2enode.WaitForNodeToBeReady(c, node.Name, resizeNodeReadyTimeout) {
-framework.Failf("Node %s did not become ready within %v", node.Name, resizeNodeReadyTimeout)
+e2elog.Failf("Node %s did not become ready within %v", node.Name, resizeNodeReadyTimeout)
 }
 })
 })
@@ -416,7 +416,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
 e2elog.Logf("Waiting %v for node %s to be ready once temporary network failure ends", resizeNodeReadyTimeout, node.Name)
 if !e2enode.WaitForNodeToBeReady(c, node.Name, resizeNodeReadyTimeout) {
-framework.Failf("Node %s did not become ready within %v", node.Name, resizeNodeReadyTimeout)
+e2elog.Failf("Node %s did not become ready within %v", node.Name, resizeNodeReadyTimeout)
 }
 ginkgo.By("waiting for pods to be running again")
@@ -464,7 +464,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
 e2elog.Logf("Waiting %v for node %s to be ready once temporary network failure ends", resizeNodeReadyTimeout, node.Name)
 if !e2enode.WaitForNodeToBeReady(c, node.Name, resizeNodeReadyTimeout) {
-framework.Failf("Node %s did not become ready within %v", node.Name, resizeNodeReadyTimeout)
+e2elog.Failf("Node %s did not become ready within %v", node.Name, resizeNodeReadyTimeout)
 }
 })
 })
@@ -498,12 +498,12 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
 return true
 })
 if len(nodes.Items) <= 0 {
-framework.Failf("No eligible node were found: %d", len(nodes.Items))
+e2elog.Failf("No eligible node were found: %d", len(nodes.Items))
 }
 node := nodes.Items[0]
 podOpts = metav1.ListOptions{FieldSelector: fields.OneTermEqualSelector(api.PodHostField, node.Name).String()}
 if err := e2epod.WaitForMatchPodsCondition(c, podOpts, "Running and Ready", podReadyTimeout, testutils.PodRunningReadyOrSucceeded); err != nil {
-framework.Failf("Pods on node %s are not ready and running within %v: %v", node.Name, podReadyTimeout, err)
+e2elog.Failf("Pods on node %s are not ready and running within %v: %v", node.Name, podReadyTimeout, err)
 }
 pods, err := c.CoreV1().Pods(metav1.NamespaceAll).List(podOpts)
 framework.ExpectNoError(err)
@@ -609,7 +609,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
 return framework.NodeHasTaint(c, node.Name, nodepkg.UnreachableTaintTemplate)
 }))
 if err = e2epod.WaitForMatchPodsCondition(c, podOpts, "NotReady", podNotReadyTimeout, testutils.PodNotReady); err != nil {
-framework.Failf("Pods on node %s did not become NotReady within %v: %v", node.Name, podNotReadyTimeout, err)
+e2elog.Failf("Pods on node %s did not become NotReady within %v: %v", node.Name, podNotReadyTimeout, err)
 }
 sleepTime := maxTolerationTime + 20*time.Second
@@ -629,7 +629,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
 if pod.DeletionTimestamp == nil {
 seenRunning = append(seenRunning, namespacedName)
 if shouldBeTerminating {
-framework.Failf("Pod %v should have been deleted but was seen running", namespacedName)
+e2elog.Failf("Pod %v should have been deleted but was seen running", namespacedName)
 }
 }
 }
@@ -643,7 +643,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
 }
 }
 if !running {
-framework.Failf("Pod %v was evicted even though it shouldn't", neverEvictedPod)
+e2elog.Failf("Pod %v was evicted even though it shouldn't", neverEvictedPod)
 }
 }
 })

View File

@@ -167,7 +167,7 @@ func TestReplicationControllerServeImageOrFail(f *framework.Framework, test stri
 label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
 err = wait.Poll(retryInterval, retryTimeout, e2epod.NewProxyResponseChecker(f.ClientSet, f.Namespace.Name, label, name, true, pods).CheckAllResponses)
 if err != nil {
-framework.Failf("Did not get expected responses within the timeout period of %.2f seconds.", retryTimeout.Seconds())
+e2elog.Failf("Did not get expected responses within the timeout period of %.2f seconds.", retryTimeout.Seconds())
 }
 }

View File

@@ -169,7 +169,7 @@ func testReplicaSetServeImageOrFail(f *framework.Framework, test string, image s
 label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
 err = wait.Poll(retryInterval, retryTimeout, e2epod.NewProxyResponseChecker(f.ClientSet, f.Namespace.Name, label, name, true, pods).CheckAllResponses)
 if err != nil {
-framework.Failf("Did not get expected responses within the timeout period of %.2f seconds.", retryTimeout.Seconds())
+e2elog.Failf("Did not get expected responses within the timeout period of %.2f seconds.", retryTimeout.Seconds())
 }
 }

View File

@@ -741,7 +741,7 @@ var _ = SIGDescribe("StatefulSet", func() {
 ginkgo.By("Waiting until pod " + podName + " will start running in namespace " + f.Namespace.Name)
 if err := f.WaitForPodRunning(podName); err != nil {
-framework.Failf("Pod %v did not start running: %v", podName, err)
+e2elog.Failf("Pod %v did not start running: %v", podName, err)
 }
 var initialStatefulPodUID types.UID
@@ -767,7 +767,7 @@ var _ = SIGDescribe("StatefulSet", func() {
 return false, nil
 })
 if err != nil {
-framework.Failf("Pod %v expected to be re-created at least once", statefulPodName)
+e2elog.Failf("Pod %v expected to be re-created at least once", statefulPodName)
 }
 ginkgo.By("Removing pod with conflicting port in namespace " + f.Namespace.Name)
@@ -803,7 +803,7 @@ var _ = SIGDescribe("StatefulSet", func() {
 ginkgo.By("getting scale subresource")
 scale, err := c.AppsV1().StatefulSets(ns).GetScale(ssName, metav1.GetOptions{})
 if err != nil {
-framework.Failf("Failed to get scale subresource: %v", err)
+e2elog.Failf("Failed to get scale subresource: %v", err)
 }
 gomega.Expect(scale.Spec.Replicas).To(gomega.Equal(int32(1)))
 gomega.Expect(scale.Status.Replicas).To(gomega.Equal(int32(1)))
@@ -812,14 +812,14 @@ var _ = SIGDescribe("StatefulSet", func() {
 scale.Spec.Replicas = 2
 scaleResult, err := c.AppsV1().StatefulSets(ns).UpdateScale(ssName, scale)
 if err != nil {
-framework.Failf("Failed to put scale subresource: %v", err)
+e2elog.Failf("Failed to put scale subresource: %v", err)
 }
 gomega.Expect(scaleResult.Spec.Replicas).To(gomega.Equal(int32(2)))
 ginkgo.By("verifying the statefulset Spec.Replicas was modified")
 ss, err = c.AppsV1().StatefulSets(ns).Get(ssName, metav1.GetOptions{})
 if err != nil {
-framework.Failf("Failed to get statefulset resource: %v", err)
+e2elog.Failf("Failed to get statefulset resource: %v", err)
 }
 gomega.Expect(*(ss.Spec.Replicas)).To(gomega.Equal(int32(2)))
 })
@@ -880,7 +880,7 @@ func kubectlExecWithRetries(args ...string) (out string) {
 }
 e2elog.Logf("Retrying %v:\nerror %v\nstdout %v", args, err, out)
 }
-framework.Failf("Failed to execute \"%v\" with retries: %v", args, err)
+e2elog.Failf("Failed to execute \"%v\" with retries: %v", args, err)
 return
 }
@@ -917,7 +917,7 @@ func (c *clusterAppTester) run() {
 ginkgo.By("Reading value under foo from member with index 2")
 if err := pollReadWithTimeout(c.statefulPod, 2, "foo", "bar"); err != nil {
-framework.Failf("%v", err)
+e2elog.Failf("%v", err)
 }
 }

View File

@@ -408,7 +408,7 @@ var _ = SIGDescribe("ServiceAccounts", func() {
 }
 if hasServiceAccountTokenVolume != tc.ExpectTokenVolume {
-framework.Failf("%s: expected volume=%v, got %v (%#v)", tc.PodName, tc.ExpectTokenVolume, hasServiceAccountTokenVolume, createdPod)
+e2elog.Failf("%s: expected volume=%v, got %v (%#v)", tc.PodName, tc.ExpectTokenVolume, hasServiceAccountTokenVolume, createdPod)
 } else {
 e2elog.Logf("pod %s service account token volume mount: %v", tc.PodName, hasServiceAccountTokenVolume)
 }
@@ -427,7 +427,7 @@ var _ = SIGDescribe("ServiceAccounts", func() {
 "ca.crt": string(cfg.TLSClientConfig.CAData),
 },
 }); err != nil && !apierrors.IsAlreadyExists(err) {
-framework.Failf("Unexpected err creating kube-ca-crt: %v", err)
+e2elog.Failf("Unexpected err creating kube-ca-crt: %v", err)
 }
 tenMin := int64(10 * 60)
@@ -493,7 +493,7 @@ var _ = SIGDescribe("ServiceAccounts", func() {
 e2elog.Logf("created pod")
 if !e2epod.CheckPodsRunningReady(f.ClientSet, f.Namespace.Name, []string{pod.Name}, time.Minute) {
-framework.Failf("pod %q in ns %q never became ready", pod.Name, f.Namespace.Name)
+e2elog.Failf("pod %q in ns %q never became ready", pod.Name, f.Namespace.Name)
 }
 e2elog.Logf("pod is ready")
@@ -516,7 +516,7 @@ var _ = SIGDescribe("ServiceAccounts", func() {
 }
 return true, nil
 }); err != nil {
-framework.Failf("Unexpected error: %v\n%s", err, logs)
+e2elog.Failf("Unexpected error: %v\n%s", err, logs)
 }
 })
 })

View File

@@ -210,7 +210,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 ginkgo.It(fmt.Sprintf("Should scale up GPU pool from 0 [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func() {
 framework.SkipUnlessProviderIs("gke")
 if gpuType == "" {
-framework.Failf("TEST_GPU_TYPE not defined")
+e2elog.Failf("TEST_GPU_TYPE not defined")
 return
 }
@@ -237,7 +237,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 ginkgo.It(fmt.Sprintf("Should scale up GPU pool from 1 [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func() {
 framework.SkipUnlessProviderIs("gke")
 if gpuType == "" {
-framework.Failf("TEST_GPU_TYPE not defined")
+e2elog.Failf("TEST_GPU_TYPE not defined")
 return
 }
@@ -267,7 +267,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 ginkgo.It(fmt.Sprintf("Should not scale GPU pool up if pod does not require GPUs [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func() {
 framework.SkipUnlessProviderIs("gke")
 if gpuType == "" {
-framework.Failf("TEST_GPU_TYPE not defined")
+e2elog.Failf("TEST_GPU_TYPE not defined")
 return
 }
@@ -296,7 +296,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 ginkgo.It(fmt.Sprintf("Should scale down GPU pool from 1 [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func() {
 framework.SkipUnlessProviderIs("gke")
 if gpuType == "" {
-framework.Failf("TEST_GPU_TYPE not defined")
+e2elog.Failf("TEST_GPU_TYPE not defined")
 return
 }
@@ -498,7 +498,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 defer func() {
 errs := framework.PVPVCCleanup(c, f.Namespace.Name, pv, pvc)
 if len(errs) > 0 {
-framework.Failf("failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs))
+e2elog.Failf("failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs))
 }
 pv, pvc = nil, nil
 if diskName != "" {
@@ -1300,7 +1300,7 @@ func reserveMemory(f *framework.Framework, id string, replicas, megabytes int, e
 return framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, id)
 }
 }
-framework.Failf("Failed to reserve memory within timeout")
+e2elog.Failf("Failed to reserve memory within timeout")
 return nil
 }
@@ -1871,7 +1871,7 @@ func addKubeSystemPdbs(f *framework.Framework) (func(), error) {
 }
 }
 if finalErr != nil {
-framework.Failf("Error during PodDisruptionBudget cleanup: %v", finalErr)
+e2elog.Failf("Error during PodDisruptionBudget cleanup: %v", finalErr)
 }
 }

View File

@@ -242,33 +242,33 @@ func (tc *CustomMetricTestCase) Run() {
 ts, err := google.DefaultTokenSource(oauth2.NoContext)
 e2elog.Logf("Couldn't get application default credentials, %v", err)
 if err != nil {
-framework.Failf("Error accessing application default credentials, %v", err)
+e2elog.Failf("Error accessing application default credentials, %v", err)
 }
 client := oauth2.NewClient(oauth2.NoContext, ts)
 */
 gcmService, err := gcm.New(client)
 if err != nil {
-framework.Failf("Failed to create gcm service, %v", err)
+e2elog.Failf("Failed to create gcm service, %v", err)
 }
 // Set up a cluster: create a custom metric and set up k8s-sd adapter
 err = monitoring.CreateDescriptors(gcmService, projectID)
 if err != nil {
-framework.Failf("Failed to create metric descriptor: %v", err)
+e2elog.Failf("Failed to create metric descriptor: %v", err)
 }
 defer monitoring.CleanupDescriptors(gcmService, projectID)
 err = monitoring.CreateAdapter(monitoring.AdapterDefault)
 if err != nil {
-framework.Failf("Failed to set up: %v", err)
+e2elog.Failf("Failed to set up: %v", err)
 }
 defer monitoring.CleanupAdapter(monitoring.AdapterDefault)
 // Run application that exports the metric
 err = createDeploymentToScale(tc.framework, tc.kubeClient, tc.deployment, tc.pod)
 if err != nil {
-framework.Failf("Failed to create stackdriver-exporter pod: %v", err)
+e2elog.Failf("Failed to create stackdriver-exporter pod: %v", err)
 }
 defer cleanupDeploymentsToScale(tc.framework, tc.kubeClient, tc.deployment, tc.pod)
@@ -278,7 +278,7 @@ func (tc *CustomMetricTestCase) Run() {
 // Autoscale the deployment
 _, err = tc.kubeClient.AutoscalingV2beta1().HorizontalPodAutoscalers(tc.framework.Namespace.ObjectMeta.Name).Create(tc.hpa)
 if err != nil {
-framework.Failf("Failed to create HPA: %v", err)
+e2elog.Failf("Failed to create HPA: %v", err)
 }
 defer tc.kubeClient.AutoscalingV2beta1().HorizontalPodAutoscalers(tc.framework.Namespace.ObjectMeta.Name).Delete(tc.hpa.ObjectMeta.Name, &metav1.DeleteOptions{})
@@ -442,13 +442,13 @@ func waitForReplicas(deploymentName, namespace string, cs clientset.Interface, t
 err := wait.PollImmediate(interval, timeout, func() (bool, error) {
 deployment, err := cs.AppsV1().Deployments(namespace).Get(deploymentName, metav1.GetOptions{})
 if err != nil {
-framework.Failf("Failed to get replication controller %s: %v", deployment, err)
+e2elog.Failf("Failed to get replication controller %s: %v", deployment, err)
 }
 replicas := int(deployment.Status.ReadyReplicas)
 e2elog.Logf("waiting for %d replicas (current: %d)", desiredReplicas, replicas)
 return replicas == desiredReplicas, nil // Expected number of replicas found. Exit.
 })
 if err != nil {
-framework.Failf("Timeout waiting %v for %v replicas", timeout, desiredReplicas)
+e2elog.Failf("Timeout waiting %v for %v replicas", timeout, desiredReplicas)
 }
 }

View File

@@ -52,7 +52,7 @@ var _ = SIGDescribe("[Feature:CloudProvider][Disruptive] Nodes", func() {
 err := framework.DeleteNodeOnCloudProvider(&nodeToDelete)
 if err != nil {
-framework.Failf("failed to delete node %q, err: %q", nodeToDelete.Name, err)
+e2elog.Failf("failed to delete node %q, err: %q", nodeToDelete.Name, err)
 }
 newNodes, err := e2enode.CheckReady(c, len(origNodes.Items)-1, 5*time.Minute)
@@ -61,9 +61,9 @@ var _ = SIGDescribe("[Feature:CloudProvider][Disruptive] Nodes", func() {
 _, err = c.CoreV1().Nodes().Get(nodeToDelete.Name, metav1.GetOptions{})
 if err == nil {
-framework.Failf("node %q still exists when it should be deleted", nodeToDelete.Name)
+e2elog.Failf("node %q still exists when it should be deleted", nodeToDelete.Name)
 } else if !apierrs.IsNotFound(err) {
-framework.Failf("failed to get node %q err: %q", nodeToDelete.Name, err)
+e2elog.Failf("failed to get node %q err: %q", nodeToDelete.Name, err)
 }
 })

View File

@@ -336,25 +336,25 @@ func (rc *ResourceConsumer) GetReplicas() int {
 replicationController, err := rc.clientSet.CoreV1().ReplicationControllers(rc.nsName).Get(rc.name, metav1.GetOptions{})
 framework.ExpectNoError(err)
 if replicationController == nil {
-framework.Failf(rcIsNil)
+e2elog.Failf(rcIsNil)
 }
 return int(replicationController.Status.ReadyReplicas)
 case KindDeployment:
 deployment, err := rc.clientSet.AppsV1().Deployments(rc.nsName).Get(rc.name, metav1.GetOptions{})
 framework.ExpectNoError(err)
 if deployment == nil {
-framework.Failf(deploymentIsNil)
+e2elog.Failf(deploymentIsNil)
 }
 return int(deployment.Status.ReadyReplicas)
 case KindReplicaSet:
 rs, err := rc.clientSet.AppsV1().ReplicaSets(rc.nsName).Get(rc.name, metav1.GetOptions{})
 framework.ExpectNoError(err)
 if rs == nil {
-framework.Failf(rsIsNil)
+e2elog.Failf(rsIsNil)
 }
 return int(rs.Status.ReadyReplicas)
 default:
-framework.Failf(invalidKind)
+e2elog.Failf(invalidKind)
 }
 return 0
 }
@@ -488,7 +488,7 @@ func runServiceAndWorkloadForResourceConsumer(c clientset.Interface, ns, name st
 framework.ExpectNoError(replicaset.RunReplicaSet(rsConfig))
 break
 default:
-framework.Failf(invalidKind)
+e2elog.Failf(invalidKind)
 }
 ginkgo.By(fmt.Sprintf("Running controller"))

View File

@@ -23,6 +23,7 @@ import (
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/util/uuid"
 "k8s.io/kubernetes/test/e2e/framework"
+e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 imageutils "k8s.io/kubernetes/test/utils/image"
 "github.com/onsi/ginkgo"
@@ -42,7 +43,7 @@ var _ = ginkgo.Describe("[sig-node] ConfigMap", func() {
 ginkgo.By(fmt.Sprintf("Creating configMap %v/%v", f.Namespace.Name, configMap.Name))
 var err error
 if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
-framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
+e2elog.Failf("unable to create test configMap %s: %v", configMap.Name, err)
 }
 pod := &v1.Pod{
@@ -90,7 +91,7 @@ var _ = ginkgo.Describe("[sig-node] ConfigMap", func() {
 ginkgo.By(fmt.Sprintf("Creating configMap %v/%v", f.Namespace.Name, configMap.Name))
 var err error
 if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
-framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
+e2elog.Failf("unable to create test configMap %s: %v", configMap.Name, err)
 }
 pod := &v1.Pod{

View File

@@ -26,6 +26,7 @@ import (
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/util/uuid"
 "k8s.io/kubernetes/test/e2e/framework"
+e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 imageutils "k8s.io/kubernetes/test/utils/image"
 )
@@ -133,7 +134,7 @@ var _ = ginkgo.Describe("[sig-storage] ConfigMap", func() {
 ginkgo.By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))
 var err error
 if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
-framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
+e2elog.Failf("unable to create test configMap %s: %v", configMap.Name, err)
 }
 pod := &v1.Pod{
@@ -220,7 +221,7 @@ var _ = ginkgo.Describe("[sig-storage] ConfigMap", func() {
 ginkgo.By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))
 var err error
 if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
-framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
+e2elog.Failf("unable to create test configMap %s: %v", configMap.Name, err)
 }
 pod := &v1.Pod{
@@ -338,12 +339,12 @@ var _ = ginkgo.Describe("[sig-storage] ConfigMap", func() {
 ginkgo.By(fmt.Sprintf("Creating configMap with name %s", deleteConfigMap.Name))
 var err error
 if deleteConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(deleteConfigMap); err != nil {
-framework.Failf("unable to create test configMap %s: %v", deleteConfigMap.Name, err)
+e2elog.Failf("unable to create test configMap %s: %v", deleteConfigMap.Name, err)
 }
 ginkgo.By(fmt.Sprintf("Creating configMap with name %s", updateConfigMap.Name))
 if updateConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(updateConfigMap); err != nil {
-framework.Failf("unable to create test configMap %s: %v", updateConfigMap.Name, err)
+e2elog.Failf("unable to create test configMap %s: %v", updateConfigMap.Name, err)
 }
 pod := &v1.Pod{
@@ -458,7 +459,7 @@ var _ = ginkgo.Describe("[sig-storage] ConfigMap", func() {
 ginkgo.By(fmt.Sprintf("Creating configMap with name %s", createConfigMap.Name))
 if createConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(createConfigMap); err != nil {
-framework.Failf("unable to create test configMap %s: %v", createConfigMap.Name, err)
+e2elog.Failf("unable to create test configMap %s: %v", createConfigMap.Name, err)
 }
 ginkgo.By("waiting to observe update in volume")
@@ -486,7 +487,7 @@ var _ = ginkgo.Describe("[sig-storage] ConfigMap", func() {
 ginkgo.By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))
 var err error
 if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
-framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
+e2elog.Failf("unable to create test configMap %s: %v", configMap.Name, err)
 }
 pod := &v1.Pod{
@@ -594,7 +595,7 @@ func doConfigMapE2EWithoutMappings(f *framework.Framework, uid, fsGroup int64, d
 ginkgo.By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))
 var err error
 if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
-framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
+e2elog.Failf("unable to create test configMap %s: %v", configMap.Name, err)
 }
 one := int64(1)
@@ -671,7 +672,7 @@ func doConfigMapE2EWithMappings(f *framework.Framework, uid, fsGroup int64, item
 var err error
 if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
-framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
+e2elog.Failf("unable to create test configMap %s: %v", configMap.Name, err)
 }
 one := int64(1)
@@ -806,7 +807,7 @@ func createNonOptionalConfigMapPodWithConfig(f *framework.Framework, volumeMount
 ginkgo.By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))
 var err error
 if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
-framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
+e2elog.Failf("unable to create test configMap %s: %v", configMap.Name, err)
 }
 //creating a pod with configMap object, but with different key which is not present in configMap object.
 pod := &v1.Pod{

View File

@@ -79,7 +79,7 @@ var _ = framework.KubeDescribe("Probing container", func() {
 e2elog.Logf("Container started at %v, pod became ready at %v", startedTime, readyTime)
 initialDelay := probeTestInitialDelaySeconds * time.Second
 if readyTime.Sub(startedTime) < initialDelay {
-framework.Failf("Pod became ready before it's %v initial delay", initialDelay)
+e2elog.Failf("Pod became ready before it's %v initial delay", initialDelay)
 }
 restartCount := getRestartCount(p)
@@ -443,7 +443,7 @@ func runLivenessTest(f *framework.Framework, pod *v1.Pod, expectNumRestarts int,
 e2elog.Logf("Restart count of pod %s/%s is now %d (%v elapsed)",
 ns, pod.Name, restartCount, time.Since(start))
 if restartCount < lastRestartCount {
-framework.Failf("Restart count should increment monotonically: restart cont of pod %s/%s changed from %d to %d",
+e2elog.Failf("Restart count should increment monotonically: restart cont of pod %s/%s changed from %d to %d",
 ns, pod.Name, lastRestartCount, restartCount)
 }
 }
@@ -459,7 +459,7 @@ func runLivenessTest(f *framework.Framework, pod *v1.Pod, expectNumRestarts int,
 // If we expected n restarts (n > 0), fail if we observed < n restarts.
 if (expectNumRestarts == 0 && observedRestarts > 0) || (expectNumRestarts > 0 &&
 int(observedRestarts) < expectNumRestarts) {
-framework.Failf("pod %s/%s - expected number of restarts: %d, found restarts: %d",
+e2elog.Failf("pod %s/%s - expected number of restarts: %d, found restarts: %d",
 ns, pod.Name, expectNumRestarts, observedRestarts)
 }
 }

View File

@@ -482,14 +482,14 @@ var _ = framework.KubeDescribe("Variable Expansion", func() {
 cmd := "touch /volume_mount/mypath/foo/test.log"
 _, _, err = f.ExecShellInPodWithFullOutput(pod.Name, cmd)
 if err != nil {
-framework.Failf("expected to be able to write to subpath")
+e2elog.Failf("expected to be able to write to subpath")
 }
 ginkgo.By("test for file in mounted path")
 cmd = "test -f /subpath_mount/test.log"
 _, _, err = f.ExecShellInPodWithFullOutput(pod.Name, cmd)
 if err != nil {
-framework.Failf("expected to be able to verify file")
+e2elog.Failf("expected to be able to verify file")
 }
 ginkgo.By("updating the annotation value")
@@ -629,13 +629,13 @@ var _ = framework.KubeDescribe("Variable Expansion", func() {
 cmd := "test -f /volume_mount/foo/test.log"
 _, _, err = f.ExecShellInPodWithFullOutput(pod.Name, cmd)
 if err != nil {
-framework.Failf("expected to be able to verify old file exists")
+e2elog.Failf("expected to be able to verify old file exists")
 }
 cmd = "test ! -f /volume_mount/newsubpath/test.log"
 _, _, err = f.ExecShellInPodWithFullOutput(pod.Name, cmd)
 if err != nil {
-framework.Failf("expected to be able to verify new file does not exist")
+e2elog.Failf("expected to be able to verify new file does not exist")
 }
 })
 })

View File

@@ -25,6 +25,7 @@ import (
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/klog"
 "k8s.io/kubernetes/test/e2e/framework"
+e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 imageutils "k8s.io/kubernetes/test/utils/image"
 )
@@ -136,11 +137,11 @@ func assertManagedStatus(
 }
 if expectedIsManaged {
-framework.Failf(
+e2elog.Failf(
 "/etc/hosts file should be kubelet managed (name: %s, retries: %d). /etc/hosts contains %q",
 name, retryCount, etcHostsContent)
 } else {
-framework.Failf(
+e2elog.Failf(
 "/etc/hosts file should no be kubelet managed (name: %s, retries: %d). /etc/hosts contains %q",
 name, retryCount, etcHostsContent)
 }

View File

@@ -73,7 +73,7 @@ func testHostIP(podClient *framework.PodClient, pod *v1.Pod) {
 break
 }
 if time.Since(t) >= hostIPTimeout {
-framework.Failf("Gave up waiting for hostIP of pod %s after %v seconds",
+e2elog.Failf("Gave up waiting for hostIP of pod %s after %v seconds",
 p.Name, time.Since(t).Seconds())
 }
 e2elog.Logf("Retrying to get the hostIP of pod %s", p.Name)
@@ -91,19 +91,19 @@ func startPodAndGetBackOffs(podClient *framework.PodClient, pod *v1.Pod, sleepAm
 ginkgo.By("getting restart delay-0")
 _, err := getRestartDelay(podClient, podName, containerName)
 if err != nil {
-framework.Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName)
+e2elog.Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName)
 }
 ginkgo.By("getting restart delay-1")
 delay1, err := getRestartDelay(podClient, podName, containerName)
 if err != nil {
-framework.Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName)
+e2elog.Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName)
 }
 ginkgo.By("getting restart delay-2")
 delay2, err := getRestartDelay(podClient, podName, containerName)
 if err != nil {
-framework.Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName)
+e2elog.Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName)
 }
 return delay1, delay2
 }
@@ -265,13 +265,13 @@ var _ = framework.KubeDescribe("Pods", func() {
 select {
 case event, _ := <-w.ResultChan():
 if event.Type != watch.Added {
-framework.Failf("Failed to observe pod creation: %v", event)
+e2elog.Failf("Failed to observe pod creation: %v", event)
 }
 case <-time.After(framework.PodStartTimeout):
-framework.Failf("Timeout while waiting for pod creation")
+e2elog.Failf("Timeout while waiting for pod creation")
 }
 case <-time.After(10 * time.Second):
-framework.Failf("Timeout while waiting to observe pod list")
+e2elog.Failf("Timeout while waiting to observe pod list")
 }
 // We need to wait for the pod to be running, otherwise the deletion
@@ -319,14 +319,14 @@ var _ = framework.KubeDescribe("Pods", func() {
 deleted = true
 case watch.Error:
 e2elog.Logf("received a watch error: %v", event.Object)
-framework.Failf("watch closed with error")
+e2elog.Failf("watch closed with error")
 }
 case <-timer:
-framework.Failf("timed out waiting for pod deletion")
+e2elog.Failf("timed out waiting for pod deletion")
 }
 }
 if !deleted {
-framework.Failf("Failed to observe pod deletion")
+e2elog.Failf("Failed to observe pod deletion")
 }
 gomega.Expect(lastPod.DeletionTimestamp).ToNot(gomega.BeNil())
@@ -574,7 +574,7 @@ var _ = framework.KubeDescribe("Pods", func() {
 url := req.URL()
 ws, err := framework.OpenWebSocketForURL(url, config, []string{"channel.k8s.io"})
 if err != nil {
-framework.Failf("Failed to open websocket to %s: %v", url.String(), err)
+e2elog.Failf("Failed to open websocket to %s: %v", url.String(), err)
 }
 defer ws.Close()
@@ -586,7 +586,7 @@ var _ = framework.KubeDescribe("Pods", func() {
 if err == io.EOF {
 break
 }
-framework.Failf("Failed to read completely from websocket %s: %v", url.String(), err)
+e2elog.Failf("Failed to read completely from websocket %s: %v", url.String(), err)
 }
 if len(msg) == 0 {
 continue
@@ -596,7 +596,7 @@ var _ = framework.KubeDescribe("Pods", func() {
 // skip an empty message on stream other than stdout
 continue
 } else {
-framework.Failf("Got message from server that didn't start with channel 1 (STDOUT): %v", msg)
+e2elog.Failf("Got message from server that didn't start with channel 1 (STDOUT): %v", msg)
 }
 }
@@ -653,7 +653,7 @@ var _ = framework.KubeDescribe("Pods", func() {
 ws, err := framework.OpenWebSocketForURL(url, config, []string{"binary.k8s.io"})
 if err != nil {
-framework.Failf("Failed to open websocket to %s: %v", url.String(), err)
+e2elog.Failf("Failed to open websocket to %s: %v", url.String(), err)
 }
 defer ws.Close()
 buf := &bytes.Buffer{}
@@ -663,7 +663,7 @@ var _ = framework.KubeDescribe("Pods", func() {
 if err == io.EOF {
 break
 }
-framework.Failf("Failed to read completely from websocket %s: %v", url.String(), err)
+e2elog.Failf("Failed to read completely from websocket %s: %v", url.String(), err)
 }
 if len(strings.TrimSpace(string(msg))) == 0 {
 continue
@@ -671,7 +671,7 @@ var _ = framework.KubeDescribe("Pods", func() {
 buf.Write(msg)
 }
 if buf.String() != "container is alive\n" {
-framework.Failf("Unexpected websocket logs:\n%s", buf.String())
+e2elog.Failf("Unexpected websocket logs:\n%s", buf.String())
 }
 })
@@ -708,11 +708,11 @@ var _ = framework.KubeDescribe("Pods", func() {
 ginkgo.By("get restart delay after image update")
 delayAfterUpdate, err := getRestartDelay(podClient, podName, containerName)
 if err != nil {
-framework.Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName)
+e2elog.Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName)
 }
 if delayAfterUpdate > 2*delay2 || delayAfterUpdate > 2*delay1 {
-framework.Failf("updating image did not reset the back-off value in pod=%s/%s d3=%s d2=%s d1=%s", podName, containerName, delayAfterUpdate, delay1, delay2)
+e2elog.Failf("updating image did not reset the back-off value in pod=%s/%s d3=%s d2=%s d1=%s", podName, containerName, delayAfterUpdate, delay1, delay2)
 }
 })
@@ -748,7 +748,7 @@ var _ = framework.KubeDescribe("Pods", func() {
 for i := 0; i < 3; i++ {
 delay1, err = getRestartDelay(podClient, podName, containerName)
 if err != nil {
-framework.Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName)
+e2elog.Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName)
 }
 if delay1 < kubelet.MaxContainerBackOff {
@@ -757,17 +757,17 @@ var _ = framework.KubeDescribe("Pods", func() {
 }
 if (delay1 < kubelet.MaxContainerBackOff) || (delay1 > maxBackOffTolerance) {
-framework.Failf("expected %s back-off got=%s in delay1", kubelet.MaxContainerBackOff, delay1)
+e2elog.Failf("expected %s back-off got=%s in delay1", kubelet.MaxContainerBackOff, delay1)
 }
 ginkgo.By("getting restart delay after a capped delay")
 delay2, err := getRestartDelay(podClient, podName, containerName)
 if err != nil {
-framework.Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName)
+e2elog.Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName)
 }
 if delay2 < kubelet.MaxContainerBackOff || delay2 > maxBackOffTolerance { // syncloop cumulative drift
-framework.Failf("expected %s back-off got=%s on delay2", kubelet.MaxContainerBackOff, delay2)
+e2elog.Failf("expected %s back-off got=%s on delay2", kubelet.MaxContainerBackOff, delay2)
 }
 })

View File

@@ -23,6 +23,7 @@ import (
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/util/uuid"
 "k8s.io/kubernetes/test/e2e/framework"
+e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 imageutils "k8s.io/kubernetes/test/utils/image"
 "github.com/onsi/ginkgo"
@@ -63,11 +64,11 @@ var _ = ginkgo.Describe("[sig-storage] Projected combined", func() {
 ginkgo.By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))
 if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
-framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
+e2elog.Failf("unable to create test configMap %s: %v", configMap.Name, err)
 }
 ginkgo.By(fmt.Sprintf("Creating secret with name %s", secret.Name))
 if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret); err != nil {
-framework.Failf("unable to create test secret %s: %v", secret.Name, err)
+e2elog.Failf("unable to create test secret %s: %v", secret.Name, err)
 }
 pod := projectedAllVolumeBasePod(podName, secretName, configMapName, nil, nil)

@@ -24,6 +24,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
+e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
imageutils "k8s.io/kubernetes/test/utils/image"
@@ -133,7 +134,7 @@ var _ = ginkgo.Describe("[sig-storage] Projected configMap", func() {
ginkgo.By(fmt.Sprintf("Creating projection with configMap that has name %s", configMap.Name))
var err error
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
-framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
+e2elog.Failf("unable to create test configMap %s: %v", configMap.Name, err)
}
pod := &v1.Pod{
@@ -248,12 +249,12 @@ var _ = ginkgo.Describe("[sig-storage] Projected configMap", func() {
ginkgo.By(fmt.Sprintf("Creating configMap with name %s", deleteConfigMap.Name))
var err error
if deleteConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(deleteConfigMap); err != nil {
-framework.Failf("unable to create test configMap %s: %v", deleteConfigMap.Name, err)
+e2elog.Failf("unable to create test configMap %s: %v", deleteConfigMap.Name, err)
}
ginkgo.By(fmt.Sprintf("Creating configMap with name %s", updateConfigMap.Name))
if updateConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(updateConfigMap); err != nil {
-framework.Failf("unable to create test configMap %s: %v", updateConfigMap.Name, err)
+e2elog.Failf("unable to create test configMap %s: %v", updateConfigMap.Name, err)
}
pod := &v1.Pod{
@@ -386,7 +387,7 @@ var _ = ginkgo.Describe("[sig-storage] Projected configMap", func() {
ginkgo.By(fmt.Sprintf("Creating configMap with name %s", createConfigMap.Name))
if createConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(createConfigMap); err != nil {
-framework.Failf("unable to create test configMap %s: %v", createConfigMap.Name, err)
+e2elog.Failf("unable to create test configMap %s: %v", createConfigMap.Name, err)
}
ginkgo.By("waiting to observe update in volume")
@@ -414,7 +415,7 @@ var _ = ginkgo.Describe("[sig-storage] Projected configMap", func() {
ginkgo.By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))
var err error
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
-framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
+e2elog.Failf("unable to create test configMap %s: %v", configMap.Name, err)
}
pod := &v1.Pod{
@@ -521,7 +522,7 @@ func doProjectedConfigMapE2EWithoutMappings(f *framework.Framework, uid, fsGroup
ginkgo.By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))
var err error
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
-framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
+e2elog.Failf("unable to create test configMap %s: %v", configMap.Name, err)
}
pod := &v1.Pod{
@@ -603,7 +604,7 @@ func doProjectedConfigMapE2EWithMappings(f *framework.Framework, uid, fsGroup in
var err error
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
-framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
+e2elog.Failf("unable to create test configMap %s: %v", configMap.Name, err)
}
pod := &v1.Pod{
@@ -24,6 +24,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
+e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
imageutils "k8s.io/kubernetes/test/utils/image"
@@ -95,7 +96,7 @@ var _ = ginkgo.Describe("[sig-storage] Projected secret", func() {
)
if namespace2, err = f.CreateNamespace("secret-namespace", nil); err != nil {
-framework.Failf("unable to create new namespace %s: %v", namespace2.Name, err)
+e2elog.Failf("unable to create new namespace %s: %v", namespace2.Name, err)
}
secret2 := secretForTest(namespace2.Name, secret2Name)
@@ -103,7 +104,7 @@ var _ = ginkgo.Describe("[sig-storage] Projected secret", func() {
"this_should_not_match_content_of_other_secret": []byte("similarly_this_should_not_match_content_of_other_secret\n"),
}
if secret2, err = f.ClientSet.CoreV1().Secrets(namespace2.Name).Create(secret2); err != nil {
-framework.Failf("unable to create test secret %s: %v", secret2.Name, err)
+e2elog.Failf("unable to create test secret %s: %v", secret2.Name, err)
}
doProjectedSecretE2EWithoutMapping(f, nil /* default mode */, secret2.Name, nil, nil)
})
@@ -129,7 +130,7 @@ var _ = ginkgo.Describe("[sig-storage] Projected secret", func() {
ginkgo.By(fmt.Sprintf("Creating secret with name %s", secret.Name))
var err error
if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret); err != nil {
-framework.Failf("unable to create test secret %s: %v", secret.Name, err)
+e2elog.Failf("unable to create test secret %s: %v", secret.Name, err)
}
pod := &v1.Pod{
@@ -256,12 +257,12 @@ var _ = ginkgo.Describe("[sig-storage] Projected secret", func() {
ginkgo.By(fmt.Sprintf("Creating secret with name %s", deleteSecret.Name))
var err error
if deleteSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(deleteSecret); err != nil {
-framework.Failf("unable to create test secret %s: %v", deleteSecret.Name, err)
+e2elog.Failf("unable to create test secret %s: %v", deleteSecret.Name, err)
}
ginkgo.By(fmt.Sprintf("Creating secret with name %s", updateSecret.Name))
if updateSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(updateSecret); err != nil {
-framework.Failf("unable to create test secret %s: %v", updateSecret.Name, err)
+e2elog.Failf("unable to create test secret %s: %v", updateSecret.Name, err)
}
pod := &v1.Pod{
@@ -394,7 +395,7 @@ var _ = ginkgo.Describe("[sig-storage] Projected secret", func() {
ginkgo.By(fmt.Sprintf("Creating secret with name %s", createSecret.Name))
if createSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(createSecret); err != nil {
-framework.Failf("unable to create test secret %s: %v", createSecret.Name, err)
+e2elog.Failf("unable to create test secret %s: %v", createSecret.Name, err)
}
ginkgo.By("waiting to observe update in volume")
@@ -436,7 +437,7 @@ func doProjectedSecretE2EWithoutMapping(f *framework.Framework, defaultMode *int
ginkgo.By(fmt.Sprintf("Creating projection with secret that has name %s", secret.Name))
var err error
if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret); err != nil {
-framework.Failf("unable to create test secret %s: %v", secret.Name, err)
+e2elog.Failf("unable to create test secret %s: %v", secret.Name, err)
}
pod := &v1.Pod{
@@ -514,7 +515,7 @@ func doProjectedSecretE2EWithMapping(f *framework.Framework, mode *int32) {
ginkgo.By(fmt.Sprintf("Creating projection with secret that has name %s", secret.Name))
var err error
if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret); err != nil {
-framework.Failf("unable to create test secret %s: %v", secret.Name, err)
+e2elog.Failf("unable to create test secret %s: %v", secret.Name, err)
}
pod := &v1.Pod{
@@ -348,7 +348,7 @@ while true; do sleep 1; done
if i < flakeRetry {
e2elog.Logf("No.%d attempt failed: %v, retrying...", i, err)
} else {
-framework.Failf("All %d attempts failed: %v", flakeRetry, err)
+e2elog.Failf("All %d attempts failed: %v", flakeRetry, err)
}
}
}
@@ -23,6 +23,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
+e2elog "k8s.io/kubernetes/test/e2e/framework/log"
imageutils "k8s.io/kubernetes/test/utils/image"
"github.com/onsi/ginkgo"
@@ -43,7 +44,7 @@ var _ = ginkgo.Describe("[sig-api-machinery] Secrets", func() {
ginkgo.By(fmt.Sprintf("Creating secret with name %s", secret.Name))
var err error
if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret); err != nil {
-framework.Failf("unable to create test secret %s: %v", secret.Name, err)
+e2elog.Failf("unable to create test secret %s: %v", secret.Name, err)
}
pod := &v1.Pod{
@@ -91,7 +92,7 @@ var _ = ginkgo.Describe("[sig-api-machinery] Secrets", func() {
ginkgo.By(fmt.Sprintf("creating secret %v/%v", f.Namespace.Name, secret.Name))
var err error
if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret); err != nil {
-framework.Failf("unable to create test secret %s: %v", secret.Name, err)
+e2elog.Failf("unable to create test secret %s: %v", secret.Name, err)
}
pod := &v1.Pod{
@@ -24,6 +24,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
+e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
imageutils "k8s.io/kubernetes/test/utils/image"
@@ -100,7 +101,7 @@ var _ = ginkgo.Describe("[sig-storage] Secrets", func() {
)
if namespace2, err = f.CreateNamespace("secret-namespace", nil); err != nil {
-framework.Failf("unable to create new namespace %s: %v", namespace2.Name, err)
+e2elog.Failf("unable to create new namespace %s: %v", namespace2.Name, err)
}
secret2 := secretForTest(namespace2.Name, secret2Name)
@@ -108,7 +109,7 @@ var _ = ginkgo.Describe("[sig-storage] Secrets", func() {
"this_should_not_match_content_of_other_secret": []byte("similarly_this_should_not_match_content_of_other_secret\n"),
}
if secret2, err = f.ClientSet.CoreV1().Secrets(namespace2.Name).Create(secret2); err != nil {
-framework.Failf("unable to create test secret %s: %v", secret2.Name, err)
+e2elog.Failf("unable to create test secret %s: %v", secret2.Name, err)
}
doSecretE2EWithoutMapping(f, nil /* default mode */, secret2.Name, nil, nil)
})
@@ -134,7 +135,7 @@ var _ = ginkgo.Describe("[sig-storage] Secrets", func() {
ginkgo.By(fmt.Sprintf("Creating secret with name %s", secret.Name))
var err error
if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret); err != nil {
-framework.Failf("unable to create test secret %s: %v", secret.Name, err)
+e2elog.Failf("unable to create test secret %s: %v", secret.Name, err)
}
pod := &v1.Pod{
@@ -245,12 +246,12 @@ var _ = ginkgo.Describe("[sig-storage] Secrets", func() {
ginkgo.By(fmt.Sprintf("Creating secret with name %s", deleteSecret.Name))
var err error
if deleteSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(deleteSecret); err != nil {
-framework.Failf("unable to create test secret %s: %v", deleteSecret.Name, err)
+e2elog.Failf("unable to create test secret %s: %v", deleteSecret.Name, err)
}
ginkgo.By(fmt.Sprintf("Creating secret with name %s", updateSecret.Name))
if updateSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(updateSecret); err != nil {
-framework.Failf("unable to create test secret %s: %v", updateSecret.Name, err)
+e2elog.Failf("unable to create test secret %s: %v", updateSecret.Name, err)
}
pod := &v1.Pod{
@@ -359,7 +360,7 @@ var _ = ginkgo.Describe("[sig-storage] Secrets", func() {
ginkgo.By(fmt.Sprintf("Creating secret with name %s", createSecret.Name))
if createSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(createSecret); err != nil {
-framework.Failf("unable to create test secret %s: %v", createSecret.Name, err)
+e2elog.Failf("unable to create test secret %s: %v", createSecret.Name, err)
}
ginkgo.By("waiting to observe update in volume")
@@ -415,7 +416,7 @@ func doSecretE2EWithoutMapping(f *framework.Framework, defaultMode *int32, secre
ginkgo.By(fmt.Sprintf("Creating secret with name %s", secret.Name))
var err error
if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret); err != nil {
-framework.Failf("unable to create test secret %s: %v", secret.Name, err)
+e2elog.Failf("unable to create test secret %s: %v", secret.Name, err)
}
pod := &v1.Pod{
@@ -484,7 +485,7 @@ func doSecretE2EWithMapping(f *framework.Framework, mode *int32) {
ginkgo.By(fmt.Sprintf("Creating secret with name %s", secret.Name))
var err error
if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret); err != nil {
-framework.Failf("unable to create test secret %s: %v", secret.Name, err)
+e2elog.Failf("unable to create test secret %s: %v", secret.Name, err)
}
pod := &v1.Pod{
@@ -602,7 +603,7 @@ func createNonOptionalSecretPodWithSecret(f *framework.Framework, volumeMountPat
ginkgo.By(fmt.Sprintf("Creating secret with name %s", secret.Name))
var err error
if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret); err != nil {
-framework.Failf("unable to create test secret %s: %v", secret.Name, err)
+e2elog.Failf("unable to create test secret %s: %v", secret.Name, err)
}
//creating a pod with secret object, with the key which is not present in secret object.
pod := &v1.Pod{
@ -260,12 +260,12 @@ var _ = framework.KubeDescribe("Security Context", func() {
podName := createAndWaitUserPod(false) podName := createAndWaitUserPod(false)
logs, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, podName, podName) logs, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, podName, podName)
if err != nil { if err != nil {
framework.Failf("GetPodLogs for pod %q failed: %v", podName, err) e2elog.Failf("GetPodLogs for pod %q failed: %v", podName, err)
} }
e2elog.Logf("Got logs for pod %q: %q", podName, logs) e2elog.Logf("Got logs for pod %q: %q", podName, logs)
if !strings.Contains(logs, "Operation not permitted") { if !strings.Contains(logs, "Operation not permitted") {
framework.Failf("unprivileged container shouldn't be able to create dummy device") e2elog.Failf("unprivileged container shouldn't be able to create dummy device")
} }
}) })
}) })
@ -312,7 +312,7 @@ var _ = framework.KubeDescribe("Security Context", func() {
ginkgo.It("should allow privilege escalation when not explicitly set and uid != 0 [LinuxOnly] [NodeConformance]", func() { ginkgo.It("should allow privilege escalation when not explicitly set and uid != 0 [LinuxOnly] [NodeConformance]", func() {
podName := "alpine-nnp-nil-" + string(uuid.NewUUID()) podName := "alpine-nnp-nil-" + string(uuid.NewUUID())
if err := createAndMatchOutput(podName, "Effective uid: 0", nil, 1000); err != nil { if err := createAndMatchOutput(podName, "Effective uid: 0", nil, 1000); err != nil {
framework.Failf("Match output for pod %q failed: %v", podName, err) e2elog.Failf("Match output for pod %q failed: %v", podName, err)
} }
}) })
@ -328,7 +328,7 @@ var _ = framework.KubeDescribe("Security Context", func() {
podName := "alpine-nnp-false-" + string(uuid.NewUUID()) podName := "alpine-nnp-false-" + string(uuid.NewUUID())
apeFalse := false apeFalse := false
if err := createAndMatchOutput(podName, "Effective uid: 1000", &apeFalse, 1000); err != nil { if err := createAndMatchOutput(podName, "Effective uid: 1000", &apeFalse, 1000); err != nil {
framework.Failf("Match output for pod %q failed: %v", podName, err) e2elog.Failf("Match output for pod %q failed: %v", podName, err)
} }
}) })
@ -345,7 +345,7 @@ var _ = framework.KubeDescribe("Security Context", func() {
podName := "alpine-nnp-true-" + string(uuid.NewUUID()) podName := "alpine-nnp-true-" + string(uuid.NewUUID())
apeTrue := true apeTrue := true
if err := createAndMatchOutput(podName, "Effective uid: 0", &apeTrue, 1000); err != nil { if err := createAndMatchOutput(podName, "Effective uid: 0", &apeTrue, 1000); err != nil {
framework.Failf("Match output for pod %q failed: %v", podName, err) e2elog.Failf("Match output for pod %q failed: %v", podName, err)
} }
}) })
}) })

@@ -29,6 +29,7 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
+e2elog "k8s.io/kubernetes/test/e2e/framework/log"
imageutils "k8s.io/kubernetes/test/utils/image"
"github.com/onsi/ginkgo"
@@ -100,11 +101,11 @@ func SubstituteImageName(content string) string {
contentWithImageName := new(bytes.Buffer)
tmpl, err := template.New("imagemanifest").Parse(content)
if err != nil {
-framework.Failf("Failed Parse the template: %v", err)
+e2elog.Failf("Failed Parse the template: %v", err)
}
err = tmpl.Execute(contentWithImageName, testImages)
if err != nil {
-framework.Failf("Failed executing template: %v", err)
+e2elog.Failf("Failed executing template: %v", err)
}
return contentWithImageName.String()
}
@@ -92,11 +92,11 @@ var _ = ginkgo.SynchronizedBeforeSuite(func() []byte {
metav1.NamespacePublic,
})
if err != nil {
-framework.Failf("Error deleting orphaned namespaces: %v", err)
+e2elog.Failf("Error deleting orphaned namespaces: %v", err)
}
klog.Infof("Waiting for deletion of the following namespaces: %v", deleted)
if err := framework.WaitForNamespacesDeleted(c, deleted, framework.NamespaceCleanupTimeout); err != nil {
-framework.Failf("Failed to delete orphaned namespaces %v: %v", deleted, err)
+e2elog.Failf("Failed to delete orphaned namespaces %v: %v", deleted, err)
}
}
@@ -123,7 +123,7 @@ var _ = ginkgo.SynchronizedBeforeSuite(func() []byte {
framework.DumpAllNamespaceInfo(c, metav1.NamespaceSystem)
framework.LogFailedContainers(c, metav1.NamespaceSystem, e2elog.Logf)
runKubernetesServiceTestContainer(c, metav1.NamespaceDefault)
-framework.Failf("Error waiting for all pods to be running and ready: %v", err)
+e2elog.Failf("Error waiting for all pods to be running and ready: %v", err)
}
if err := framework.WaitForDaemonSets(c, metav1.NamespaceSystem, int32(framework.TestContext.AllowedNotReadyNodes), framework.TestContext.SystemDaemonsetStartupTimeout); err != nil {
@ -106,7 +106,7 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() {
} }
wg.Wait() wg.Wait()
if !passed { if !passed {
framework.Failf("At least one liveness example failed. See the logs above.") e2elog.Failf("At least one liveness example failed. See the logs above.")
} }
}) })
}) })

@@ -473,7 +473,7 @@ func (j *TestJig) Update(update func(ing *networkingv1beta1.Ingress)) {
for i := 0; i < 3; i++ {
j.Ingress, err = j.Client.NetworkingV1beta1().Ingresses(ns).Get(name, metav1.GetOptions{})
if err != nil {
-framework.Failf("failed to get ingress %s/%s: %v", ns, name, err)
+e2elog.Failf("failed to get ingress %s/%s: %v", ns, name, err)
}
update(j.Ingress)
j.Ingress, err = j.runUpdate(j.Ingress)
@@ -482,10 +482,10 @@ func (j *TestJig) Update(update func(ing *networkingv1beta1.Ingress)) {
return
}
if !apierrs.IsConflict(err) && !apierrs.IsServerTimeout(err) {
-framework.Failf("failed to update ingress %s/%s: %v", ns, name, err)
+e2elog.Failf("failed to update ingress %s/%s: %v", ns, name, err)
}
}
-framework.Failf("too many retries updating ingress %s/%s", ns, name)
+e2elog.Failf("too many retries updating ingress %s/%s", ns, name)
}
// AddHTTPS updates the ingress to add this secret for these hosts.
@@ -543,7 +543,7 @@ func (j *TestJig) GetRootCA(secretName string) (rootCA []byte) {
var ok bool
rootCA, ok = j.RootCAs[secretName]
if !ok {
-framework.Failf("Failed to retrieve rootCAs, no recorded secret by name %v", secretName)
+e2elog.Failf("Failed to retrieve rootCAs, no recorded secret by name %v", secretName)
}
return
}
@@ -675,7 +675,7 @@ func (j *TestJig) pollIngressWithCert(ing *networkingv1beta1.Ingress, address st
// WaitForIngress returns when it gets the first 200 response
func (j *TestJig) WaitForIngress(waitForNodePort bool) {
if err := j.WaitForGivenIngressWithTimeout(j.Ingress, waitForNodePort, framework.LoadBalancerPollTimeout); err != nil {
-framework.Failf("error in waiting for ingress to get an address: %s", err)
+e2elog.Failf("error in waiting for ingress to get an address: %s", err)
}
}
@@ -688,7 +688,7 @@ func (j *TestJig) WaitForIngressToStable() {
}
return true, nil
}); err != nil {
-framework.Failf("error in waiting for ingress to stablize: %v", err)
+e2elog.Failf("error in waiting for ingress to stablize: %v", err)
}
}
@@ -814,7 +814,7 @@ func (j *TestJig) GetDistinctResponseFromIngress() (sets.String, error) {
// Wait for the loadbalancer IP.
address, err := j.WaitForIngressAddress(j.Client, j.Ingress.Namespace, j.Ingress.Name, framework.LoadBalancerPollTimeout)
if err != nil {
-framework.Failf("Ingress failed to acquire an IP address within %v", framework.LoadBalancerPollTimeout)
+e2elog.Failf("Ingress failed to acquire an IP address within %v", framework.LoadBalancerPollTimeout)
}
responses := sets.NewString()
timeoutClient := &http.Client{Timeout: IngressReqTimeout}
@@ -858,7 +858,7 @@ func (cont *NginxIngressController) Init() {
pods, err := cont.Client.CoreV1().Pods(cont.Ns).List(metav1.ListOptions{LabelSelector: sel.String()})
framework.ExpectNoError(err)
if len(pods.Items) == 0 {
-framework.Failf("Failed to find nginx ingress controller pods with selector %v", sel)
+e2elog.Failf("Failed to find nginx ingress controller pods with selector %v", sel)
}
cont.pod = &pods.Items[0]
cont.externalIP, err = framework.GetHostExternalAddress(cont.Client, cont.pod)
@@ -43,7 +43,7 @@ func MakeFirewallNameForLBService(name string) string {
// ConstructFirewallForLBService returns the expected GCE firewall rule for a loadbalancer type service
func ConstructFirewallForLBService(svc *v1.Service, nodeTag string) *compute.Firewall {
if svc.Spec.Type != v1.ServiceTypeLoadBalancer {
-framework.Failf("can not construct firewall rule for non-loadbalancer type service")
+e2elog.Failf("can not construct firewall rule for non-loadbalancer type service")
}
fw := compute.Firewall{}
fw.Name = MakeFirewallNameForLBService(cloudprovider.DefaultLoadBalancerName(svc))
@@ -71,7 +71,7 @@ func MakeHealthCheckFirewallNameForLBService(clusterID, name string, isNodesHeal
// ConstructHealthCheckFirewallForLBService returns the expected GCE firewall rule for a loadbalancer type service
func ConstructHealthCheckFirewallForLBService(clusterID string, svc *v1.Service, nodeTag string, isNodesHealthCheck bool) *compute.Firewall {
if svc.Spec.Type != v1.ServiceTypeLoadBalancer {
-framework.Failf("can not construct firewall rule for non-loadbalancer type service")
+e2elog.Failf("can not construct firewall rule for non-loadbalancer type service")
}
fw := compute.Firewall{}
fw.Name = MakeHealthCheckFirewallNameForLBService(clusterID, cloudprovider.DefaultLoadBalancerName(svc), isNodesHealthCheck)
@@ -262,7 +262,7 @@ func (p *Provider) CleanupServiceResources(c clientset.Interface, loadBalancerNa
}
return true, nil
}); pollErr != nil {
-framework.Failf("Failed to cleanup service GCE resources.")
+e2elog.Failf("Failed to cleanup service GCE resources.")
}
}
@@ -332,7 +332,7 @@ func GetInstanceTags(cloudConfig framework.CloudConfig, instanceName string) *co
res, err := gceCloud.ComputeServices().GA.Instances.Get(cloudConfig.ProjectID, cloudConfig.Zone,
instanceName).Do()
if err != nil {
-framework.Failf("Failed to get instance tags for %v: %v", instanceName, err)
+e2elog.Failf("Failed to get instance tags for %v: %v", instanceName, err)
}
return res.Tags
}
@@ -346,7 +346,7 @@ func SetInstanceTags(cloudConfig framework.CloudConfig, instanceName, zone strin
cloudConfig.ProjectID, zone, instanceName,
&compute.Tags{Fingerprint: resTags.Fingerprint, Items: tags}).Do()
if err != nil {
-framework.Failf("failed to set instance tags: %v", err)
+e2elog.Failf("failed to set instance tags: %v", err)
}
e2elog.Logf("Sent request to set tags %v on instance: %v", tags, instanceName)
return resTags.Items
@@ -788,12 +788,12 @@ func (cont *IngressController) CreateStaticIP(name string) string {
e2elog.Logf("Failed to delete static ip %v: %v", name, delErr)
}
}
-framework.Failf("Failed to allocate static ip %v: %v", name, err)
+e2elog.Failf("Failed to allocate static ip %v: %v", name, err)
}
ip, err := gceCloud.GetGlobalAddress(name)
if err != nil {
-framework.Failf("Failed to get newly created static ip %v: %v", name, err)
+e2elog.Failf("Failed to get newly created static ip %v: %v", name, err)
}
cont.staticIPName = ip.Name
@ -66,7 +66,7 @@ var _ = ginkgo.Describe("Recreate [Feature:Recreate]", func() {
} }
if !e2epod.CheckPodsRunningReadyOrSucceeded(f.ClientSet, systemNamespace, originalPodNames, framework.PodReadyBeforeTimeout) { if !e2epod.CheckPodsRunningReadyOrSucceeded(f.ClientSet, systemNamespace, originalPodNames, framework.PodReadyBeforeTimeout) {
framework.Failf("At least one pod wasn't running and ready or succeeded at test start.") e2elog.Failf("At least one pod wasn't running and ready or succeeded at test start.")
} }
}) })
@ -97,12 +97,12 @@ var _ = ginkgo.Describe("Recreate [Feature:Recreate]", func() {
func testRecreate(c clientset.Interface, ps *testutils.PodStore, systemNamespace string, nodes []v1.Node, podNames []string) { func testRecreate(c clientset.Interface, ps *testutils.PodStore, systemNamespace string, nodes []v1.Node, podNames []string) {
err := RecreateNodes(c, nodes) err := RecreateNodes(c, nodes)
if err != nil { if err != nil {
framework.Failf("Test failed; failed to start the restart instance group command.") e2elog.Failf("Test failed; failed to start the restart instance group command.")
} }
err = WaitForNodeBootIdsToChange(c, nodes, framework.RecreateNodeReadyAgainTimeout) err = WaitForNodeBootIdsToChange(c, nodes, framework.RecreateNodeReadyAgainTimeout)
if err != nil { if err != nil {
framework.Failf("Test failed; failed to recreate at least one node in %v.", framework.RecreateNodeReadyAgainTimeout) e2elog.Failf("Test failed; failed to recreate at least one node in %v.", framework.RecreateNodeReadyAgainTimeout)
} }
nodesAfter, err := e2enode.CheckReady(c, len(nodes), framework.RestartNodeReadyAgainTimeout) nodesAfter, err := e2enode.CheckReady(c, len(nodes), framework.RestartNodeReadyAgainTimeout)
@ -110,7 +110,7 @@ func testRecreate(c clientset.Interface, ps *testutils.PodStore, systemNamespace
e2elog.Logf("Got the following nodes after recreate: %v", nodeNames(nodesAfter)) e2elog.Logf("Got the following nodes after recreate: %v", nodeNames(nodesAfter))
if len(nodes) != len(nodesAfter) { if len(nodes) != len(nodesAfter) {
framework.Failf("Had %d nodes before nodes were recreated, but now only have %d", e2elog.Failf("Had %d nodes before nodes were recreated, but now only have %d",
len(nodes), len(nodesAfter)) len(nodes), len(nodesAfter))
} }
@ -120,6 +120,6 @@ func testRecreate(c clientset.Interface, ps *testutils.PodStore, systemNamespace
framework.ExpectNoError(err) framework.ExpectNoError(err)
remaining := framework.RestartPodReadyAgainTimeout - time.Since(podCheckStart) remaining := framework.RestartPodReadyAgainTimeout - time.Since(podCheckStart)
if !e2epod.CheckPodsRunningReadyOrSucceeded(c, systemNamespace, podNamesAfter, remaining) { if !e2epod.CheckPodsRunningReadyOrSucceeded(c, systemNamespace, podNamesAfter, remaining) {
framework.Failf("At least one pod wasn't running and ready after the restart.") e2elog.Failf("At least one pod wasn't running and ready after the restart.")
} }
} }

@@ -247,7 +247,7 @@ func NewRBDServer(cs clientset.Interface, namespace string) (config TestConfig,
secret, err := cs.CoreV1().Secrets(config.Namespace).Create(secret)
if err != nil {
-framework.Failf("Failed to create secrets for Ceph RBD: %v", err)
+e2elog.Failf("Failed to create secrets for Ceph RBD: %v", err)
}
return config, pod, secret, ip
@@ -485,7 +485,7 @@ func TestVolumeClient(client clientset.Interface, config TestConfig, fsGroup *in
}
clientPod, err := podsNamespacer.Create(clientPod)
if err != nil {
-framework.Failf("Failed to create %s pod: %v", clientPod.Name, err)
+e2elog.Failf("Failed to create %s pod: %v", clientPod.Name, err)
}
framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(client, clientPod))
@@ -52,7 +52,7 @@ func createNodePoolWithLocalSsds(nodePoolName string) {
fmt.Sprintf("--cluster=%s", framework.TestContext.CloudConfig.Cluster),
"--local-ssd-count=1").CombinedOutput()
if err != nil {
-framework.Failf("Failed to create node pool %s: Err: %v\n%v", nodePoolName, err, string(out))
+e2elog.Failf("Failed to create node pool %s: Err: %v\n%v", nodePoolName, err, string(out))
}
e2elog.Logf("Successfully created node pool %s:\n%v", nodePoolName, string(out))
}
@@ -51,21 +51,21 @@ func testCreateDeleteNodePool(f *framework.Framework, poolName string) {
"--num-nodes=2").CombinedOutput()
e2elog.Logf("\n%s", string(out))
if err != nil {
-framework.Failf("Failed to create node pool %q. Err: %v\n%v", poolName, err, string(out))
+e2elog.Failf("Failed to create node pool %q. Err: %v\n%v", poolName, err, string(out))
}
e2elog.Logf("Successfully created node pool %q.", poolName)
out, err = exec.Command("gcloud", "container", "node-pools", "list",
clusterStr).CombinedOutput()
if err != nil {
-framework.Failf("Failed to list node pools from cluster %q. Err: %v\n%v", framework.TestContext.CloudConfig.Cluster, err, string(out))
+e2elog.Failf("Failed to list node pools from cluster %q. Err: %v\n%v", framework.TestContext.CloudConfig.Cluster, err, string(out))
}
e2elog.Logf("Node pools:\n%s", string(out))
e2elog.Logf("Checking that 2 nodes have the correct node pool label.")
nodeCount := nodesWithPoolLabel(f, poolName)
if nodeCount != 2 {
-framework.Failf("Wanted 2 nodes with node pool label, got: %v", nodeCount)
+e2elog.Failf("Wanted 2 nodes with node pool label, got: %v", nodeCount)
}
e2elog.Logf("Success, found 2 nodes with correct node pool labels.")
@@ -76,21 +76,21 @@ func testCreateDeleteNodePool(f *framework.Framework, poolName string) {
"-q").CombinedOutput()
e2elog.Logf("\n%s", string(out))
if err != nil {
-framework.Failf("Failed to delete node pool %q. Err: %v\n%v", poolName, err, string(out))
+e2elog.Failf("Failed to delete node pool %q. Err: %v\n%v", poolName, err, string(out))
}
e2elog.Logf("Successfully deleted node pool %q.", poolName)
out, err = exec.Command("gcloud", "container", "node-pools", "list",
clusterStr).CombinedOutput()
if err != nil {
-framework.Failf("\nFailed to list node pools from cluster %q. Err: %v\n%v", framework.TestContext.CloudConfig.Cluster, err, string(out))
+e2elog.Failf("\nFailed to list node pools from cluster %q. Err: %v\n%v", framework.TestContext.CloudConfig.Cluster, err, string(out))
}
e2elog.Logf("\nNode pools:\n%s", string(out))
e2elog.Logf("Checking that no nodes have the deleted node pool's label.")
nodeCount = nodesWithPoolLabel(f, poolName)
if nodeCount != 0 {
-framework.Failf("Wanted 0 nodes with node pool label, got: %v", nodeCount)
+e2elog.Failf("Wanted 0 nodes with node pool label, got: %v", nodeCount)
}
e2elog.Logf("Success, found no nodes with the deleted node pool's label.")
}
@@ -118,7 +118,7 @@ func (p *esLogProvider) Init() error {
return err
}
if int(statusCode) != 200 {
-framework.Failf("Elasticsearch cluster has a bad status: %v", statusCode)
+e2elog.Failf("Elasticsearch cluster has a bad status: %v", statusCode)
}
// Now assume we really are talking to an Elasticsearch instance.
@@ -123,8 +123,8 @@ func RunLogPodsWithSleepOf(f *framework.Framework, sleep time.Duration, podname
pods, err := logSoakVerification.WaitFor(totalPods, timeout+largeClusterForgiveness)
if err != nil {
-framework.Failf("Error in wait... %v", err)
+e2elog.Failf("Error in wait... %v", err)
} else if len(pods) < totalPods {
-framework.Failf("Only got %v out of %v", len(pods), totalPods)
+e2elog.Failf("Only got %v out of %v", len(pods), totalPods)
}
}
@@ -75,5 +75,5 @@ func CheckCadvisorHealthOnAllNodes(c clientset.Interface, timeout time.Duration)
e2elog.Logf("failed to retrieve kubelet stats -\n %v", errors)
time.Sleep(cadvisor.SleepDuration)
}
-framework.Failf("Failed after retrying %d times for cadvisor to be healthy on all nodes. Errors:\n%v", maxRetries, errors)
+e2elog.Failf("Failed after retrying %d times for cadvisor to be healthy on all nodes. Errors:\n%v", maxRetries, errors)
}
@ -58,7 +58,7 @@ var _ = instrumentation.SIGDescribe("Stackdriver Monitoring", func() {
kubeClient := f.ClientSet kubeClient := f.ClientSet
config, err := framework.LoadConfig() config, err := framework.LoadConfig()
if err != nil { if err != nil {
framework.Failf("Failed to load config: %s", err) e2elog.Failf("Failed to load config: %s", err)
} }
discoveryClient := discovery.NewDiscoveryClientForConfigOrDie(config) discoveryClient := discovery.NewDiscoveryClientForConfigOrDie(config)
cachedDiscoClient := cacheddiscovery.NewMemCacheClient(discoveryClient) cachedDiscoClient := cacheddiscovery.NewMemCacheClient(discoveryClient)
@ -73,7 +73,7 @@ var _ = instrumentation.SIGDescribe("Stackdriver Monitoring", func() {
kubeClient := f.ClientSet kubeClient := f.ClientSet
config, err := framework.LoadConfig() config, err := framework.LoadConfig()
if err != nil { if err != nil {
framework.Failf("Failed to load config: %s", err) e2elog.Failf("Failed to load config: %s", err)
} }
discoveryClient := discovery.NewDiscoveryClientForConfigOrDie(config) discoveryClient := discovery.NewDiscoveryClientForConfigOrDie(config)
cachedDiscoClient := cacheddiscovery.NewMemCacheClient(discoveryClient) cachedDiscoClient := cacheddiscovery.NewMemCacheClient(discoveryClient)
@ -88,7 +88,7 @@ var _ = instrumentation.SIGDescribe("Stackdriver Monitoring", func() {
kubeClient := f.ClientSet kubeClient := f.ClientSet
config, err := framework.LoadConfig() config, err := framework.LoadConfig()
if err != nil { if err != nil {
framework.Failf("Failed to load config: %s", err) e2elog.Failf("Failed to load config: %s", err)
} }
externalMetricsClient := externalclient.NewForConfigOrDie(config) externalMetricsClient := externalclient.NewForConfigOrDie(config)
testExternalMetrics(f, kubeClient, externalMetricsClient) testExternalMetrics(f, kubeClient, externalMetricsClient)
@ -103,32 +103,32 @@ func testCustomMetrics(f *framework.Framework, kubeClient clientset.Interface, c
gcmService, err := gcm.New(client) gcmService, err := gcm.New(client)
if err != nil { if err != nil {
framework.Failf("Failed to create gcm service, %v", err) e2elog.Failf("Failed to create gcm service, %v", err)
} }
// Set up a cluster: create a custom metric and set up k8s-sd adapter // Set up a cluster: create a custom metric and set up k8s-sd adapter
err = CreateDescriptors(gcmService, projectID) err = CreateDescriptors(gcmService, projectID)
if err != nil { if err != nil {
framework.Failf("Failed to create metric descriptor: %s", err) e2elog.Failf("Failed to create metric descriptor: %s", err)
} }
defer CleanupDescriptors(gcmService, projectID) defer CleanupDescriptors(gcmService, projectID)
err = CreateAdapter(adapterDeployment) err = CreateAdapter(adapterDeployment)
if err != nil { if err != nil {
framework.Failf("Failed to set up: %s", err) e2elog.Failf("Failed to set up: %s", err)
} }
defer CleanupAdapter(adapterDeployment) defer CleanupAdapter(adapterDeployment)
_, err = kubeClient.RbacV1().ClusterRoleBindings().Create(HPAPermissions) _, err = kubeClient.RbacV1().ClusterRoleBindings().Create(HPAPermissions)
if err != nil { if err != nil {
framework.Failf("Failed to create ClusterRoleBindings: %v", err) e2elog.Failf("Failed to create ClusterRoleBindings: %v", err)
} }
defer kubeClient.RbacV1().ClusterRoleBindings().Delete(HPAPermissions.Name, &metav1.DeleteOptions{}) defer kubeClient.RbacV1().ClusterRoleBindings().Delete(HPAPermissions.Name, &metav1.DeleteOptions{})
// Run application that exports the metric // Run application that exports the metric
_, err = createSDExporterPods(f, kubeClient) _, err = createSDExporterPods(f, kubeClient)
if err != nil { if err != nil {
framework.Failf("Failed to create stackdriver-exporter pod: %s", err) e2elog.Failf("Failed to create stackdriver-exporter pod: %s", err)
} }
defer cleanupSDExporterPod(f, kubeClient) defer cleanupSDExporterPod(f, kubeClient)
@ -149,33 +149,33 @@ func testExternalMetrics(f *framework.Framework, kubeClient clientset.Interface,
gcmService, err := gcm.New(client) gcmService, err := gcm.New(client)
if err != nil { if err != nil {
framework.Failf("Failed to create gcm service, %v", err) e2elog.Failf("Failed to create gcm service, %v", err)
} }
// Set up a cluster: create a custom metric and set up k8s-sd adapter // Set up a cluster: create a custom metric and set up k8s-sd adapter
err = CreateDescriptors(gcmService, projectID) err = CreateDescriptors(gcmService, projectID)
if err != nil { if err != nil {
framework.Failf("Failed to create metric descriptor: %s", err) e2elog.Failf("Failed to create metric descriptor: %s", err)
} }
defer CleanupDescriptors(gcmService, projectID) defer CleanupDescriptors(gcmService, projectID)
// Both deployments - for old and new resource model - expose External Metrics API. // Both deployments - for old and new resource model - expose External Metrics API.
err = CreateAdapter(AdapterForOldResourceModel) err = CreateAdapter(AdapterForOldResourceModel)
if err != nil { if err != nil {
framework.Failf("Failed to set up: %s", err) e2elog.Failf("Failed to set up: %s", err)
} }
defer CleanupAdapter(AdapterForOldResourceModel) defer CleanupAdapter(AdapterForOldResourceModel)
_, err = kubeClient.RbacV1().ClusterRoleBindings().Create(HPAPermissions) _, err = kubeClient.RbacV1().ClusterRoleBindings().Create(HPAPermissions)
if err != nil { if err != nil {
framework.Failf("Failed to create ClusterRoleBindings: %v", err) e2elog.Failf("Failed to create ClusterRoleBindings: %v", err)
} }
defer kubeClient.RbacV1().ClusterRoleBindings().Delete(HPAPermissions.Name, &metav1.DeleteOptions{}) defer kubeClient.RbacV1().ClusterRoleBindings().Delete(HPAPermissions.Name, &metav1.DeleteOptions{})
// Run application that exports the metric // Run application that exports the metric
pod, err := createSDExporterPods(f, kubeClient) pod, err := createSDExporterPods(f, kubeClient)
if err != nil { if err != nil {
framework.Failf("Failed to create stackdriver-exporter pod: %s", err) e2elog.Failf("Failed to create stackdriver-exporter pod: %s", err)
} }
defer cleanupSDExporterPod(f, kubeClient) defer cleanupSDExporterPod(f, kubeClient)
@ -190,34 +190,34 @@ func testExternalMetrics(f *framework.Framework, kubeClient clientset.Interface,
func verifyResponsesFromCustomMetricsAPI(f *framework.Framework, customMetricsClient customclient.CustomMetricsClient, discoveryClient *discovery.DiscoveryClient) { func verifyResponsesFromCustomMetricsAPI(f *framework.Framework, customMetricsClient customclient.CustomMetricsClient, discoveryClient *discovery.DiscoveryClient) {
resources, err := discoveryClient.ServerResourcesForGroupVersion("custom.metrics.k8s.io/v1beta1") resources, err := discoveryClient.ServerResourcesForGroupVersion("custom.metrics.k8s.io/v1beta1")
if err != nil { if err != nil {
framework.Failf("Failed to retrieve a list of supported metrics: %s", err) e2elog.Failf("Failed to retrieve a list of supported metrics: %s", err)
} }
if !containsResource(resources.APIResources, "*/custom.googleapis.com|"+CustomMetricName) { if !containsResource(resources.APIResources, "*/custom.googleapis.com|"+CustomMetricName) {
framework.Failf("Metric '%s' expected but not received", CustomMetricName) e2elog.Failf("Metric '%s' expected but not received", CustomMetricName)
} }
if !containsResource(resources.APIResources, "*/custom.googleapis.com|"+UnusedMetricName) { if !containsResource(resources.APIResources, "*/custom.googleapis.com|"+UnusedMetricName) {
framework.Failf("Metric '%s' expected but not received", UnusedMetricName) e2elog.Failf("Metric '%s' expected but not received", UnusedMetricName)
} }
value, err := customMetricsClient.NamespacedMetrics(f.Namespace.Name).GetForObject(schema.GroupKind{Group: "", Kind: "Pod"}, stackdriverExporterPod1, CustomMetricName, labels.NewSelector()) value, err := customMetricsClient.NamespacedMetrics(f.Namespace.Name).GetForObject(schema.GroupKind{Group: "", Kind: "Pod"}, stackdriverExporterPod1, CustomMetricName, labels.NewSelector())
if err != nil { if err != nil {
framework.Failf("Failed query: %s", err) e2elog.Failf("Failed query: %s", err)
} }
if value.Value.Value() != CustomMetricValue { if value.Value.Value() != CustomMetricValue {
framework.Failf("Unexpected metric value for metric %s: expected %v but received %v", CustomMetricName, CustomMetricValue, value.Value) e2elog.Failf("Unexpected metric value for metric %s: expected %v but received %v", CustomMetricName, CustomMetricValue, value.Value)
} }
filter, err := labels.NewRequirement("name", selection.Equals, []string{stackdriverExporterLabel}) filter, err := labels.NewRequirement("name", selection.Equals, []string{stackdriverExporterLabel})
if err != nil { if err != nil {
framework.Failf("Couldn't create a label filter") e2elog.Failf("Couldn't create a label filter")
} }
values, err := customMetricsClient.NamespacedMetrics(f.Namespace.Name).GetForObjects(schema.GroupKind{Group: "", Kind: "Pod"}, labels.NewSelector().Add(*filter), CustomMetricName, labels.NewSelector()) values, err := customMetricsClient.NamespacedMetrics(f.Namespace.Name).GetForObjects(schema.GroupKind{Group: "", Kind: "Pod"}, labels.NewSelector().Add(*filter), CustomMetricName, labels.NewSelector())
if err != nil { if err != nil {
framework.Failf("Failed query: %s", err) e2elog.Failf("Failed query: %s", err)
} }
if len(values.Items) != 1 { if len(values.Items) != 1 {
framework.Failf("Expected results for exactly 1 pod, but %v results received", len(values.Items)) e2elog.Failf("Expected results for exactly 1 pod, but %v results received", len(values.Items))
} }
if values.Items[0].DescribedObject.Name != stackdriverExporterPod1 || values.Items[0].Value.Value() != CustomMetricValue { if values.Items[0].DescribedObject.Name != stackdriverExporterPod1 || values.Items[0].Value.Value() != CustomMetricValue {
framework.Failf("Unexpected metric value for metric %s and pod %s: %v", CustomMetricName, values.Items[0].DescribedObject.Name, values.Items[0].Value.Value()) e2elog.Failf("Unexpected metric value for metric %s and pod %s: %v", CustomMetricName, values.Items[0].DescribedObject.Name, values.Items[0].Value.Value())
} }
} }
@ -242,16 +242,16 @@ func verifyResponseFromExternalMetricsAPI(f *framework.Framework, externalMetric
NamespacedMetrics("dummy"). NamespacedMetrics("dummy").
List("custom.googleapis.com|"+CustomMetricName, labels.NewSelector().Add(*req1, *req2, *req3, *req4, *req5)) List("custom.googleapis.com|"+CustomMetricName, labels.NewSelector().Add(*req1, *req2, *req3, *req4, *req5))
if err != nil { if err != nil {
framework.Failf("Failed query: %s", err) e2elog.Failf("Failed query: %s", err)
} }
if len(values.Items) != 1 { if len(values.Items) != 1 {
framework.Failf("Expected exactly one external metric value, but % values received", len(values.Items)) e2elog.Failf("Expected exactly one external metric value, but % values received", len(values.Items))
} }
if values.Items[0].MetricName != "custom.googleapis.com|"+CustomMetricName || if values.Items[0].MetricName != "custom.googleapis.com|"+CustomMetricName ||
values.Items[0].Value.Value() != CustomMetricValue || values.Items[0].Value.Value() != CustomMetricValue ||
// Check one label just to make sure labels are included // Check one label just to make sure labels are included
values.Items[0].MetricLabels["resource.labels.pod_id"] != string(pod.UID) { values.Items[0].MetricLabels["resource.labels.pod_id"] != string(pod.UID) {
framework.Failf("Unexpected result for metric %s: %v", CustomMetricName, values.Items[0]) e2elog.Failf("Unexpected result for metric %s: %v", CustomMetricName, values.Items[0])
} }
} }

@@ -212,7 +212,7 @@ func getInstanceLabelsAvailableForMetric(c clientset.Interface, duration time.Du
instanceLabels := make([]string, 0)
m, ok := result.(model.Matrix)
if !ok {
-framework.Failf("Expected matrix response for query '%v', got: %T", query, result)
+e2elog.Failf("Expected matrix response for query '%v', got: %T", query, result)
return instanceLabels, nil
}
for _, stream := range m {
@@ -373,7 +373,7 @@ func retryUntilSucceeds(validator func() error, timeout time.Duration) {
e2elog.Logf(err.Error())
time.Sleep(prometheusSleepBetweenAttempts)
}
-framework.Failf(err.Error())
+e2elog.Failf(err.Error())
}
func getAllNodes(c clientset.Interface) ([]string, error) {
@ -86,7 +86,7 @@ func testStackdriverMonitoring(f *framework.Framework, pods, allPodsCPU int, per
ts, err := google.DefaultTokenSource(oauth2.NoContext) ts, err := google.DefaultTokenSource(oauth2.NoContext)
e2elog.Logf("Couldn't get application default credentials, %v", err) e2elog.Logf("Couldn't get application default credentials, %v", err)
if err != nil { if err != nil {
framework.Failf("Error accessing application default credentials, %v", err) e2elog.Failf("Error accessing application default credentials, %v", err)
} }
client := oauth2.NewClient(oauth2.NoContext, ts) client := oauth2.NewClient(oauth2.NoContext, ts)
*/ */
@ -30,6 +30,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
instrumentation "k8s.io/kubernetes/test/e2e/instrumentation/common" instrumentation "k8s.io/kubernetes/test/e2e/instrumentation/common"
) )
@ -68,7 +69,7 @@ func testAgent(f *framework.Framework, kubeClient clientset.Interface) {
oauthClient, err := google.DefaultClient(context.Background(), MonitoringScope) oauthClient, err := google.DefaultClient(context.Background(), MonitoringScope)
if err != nil { if err != nil {
framework.Failf("Failed to create oauth client: %s", err) e2elog.Failf("Failed to create oauth client: %s", err)
} }
// Create test pod with unique name. // Create test pod with unique name.
@ -82,22 +83,22 @@ func testAgent(f *framework.Framework, kubeClient clientset.Interface) {
resp, err := oauthClient.Get(endpoint) resp, err := oauthClient.Get(endpoint)
if err != nil { if err != nil {
framework.Failf("Failed to call Stackdriver Metadata API %s", err) e2elog.Failf("Failed to call Stackdriver Metadata API %s", err)
} }
if resp.StatusCode != 200 { if resp.StatusCode != 200 {
framework.Failf("Stackdriver Metadata API returned error status: %s", resp.Status) e2elog.Failf("Stackdriver Metadata API returned error status: %s", resp.Status)
} }
metadataAPIResponse, err := ioutil.ReadAll(resp.Body) metadataAPIResponse, err := ioutil.ReadAll(resp.Body)
if err != nil { if err != nil {
framework.Failf("Failed to read response from Stackdriver Metadata API: %s", err) e2elog.Failf("Failed to read response from Stackdriver Metadata API: %s", err)
} }
exists, err := verifyPodExists(metadataAPIResponse, uniqueContainerName) exists, err := verifyPodExists(metadataAPIResponse, uniqueContainerName)
if err != nil { if err != nil {
framework.Failf("Failed to process response from Stackdriver Metadata API: %s", err) e2elog.Failf("Failed to process response from Stackdriver Metadata API: %s", err)
} }
if !exists { if !exists {
framework.Failf("Missing Metadata for container %q", uniqueContainerName) e2elog.Failf("Missing Metadata for container %q", uniqueContainerName)
} }
} }
@ -185,17 +185,17 @@ var _ = SIGDescribe("Kubectl alpha client", func() {
ginkgo.By("verifying the CronJob " + cjName + " was created") ginkgo.By("verifying the CronJob " + cjName + " was created")
sj, err := c.BatchV1beta1().CronJobs(ns).Get(cjName, metav1.GetOptions{}) sj, err := c.BatchV1beta1().CronJobs(ns).Get(cjName, metav1.GetOptions{})
if err != nil { if err != nil {
framework.Failf("Failed getting CronJob %s: %v", cjName, err) e2elog.Failf("Failed getting CronJob %s: %v", cjName, err)
} }
if sj.Spec.Schedule != schedule { if sj.Spec.Schedule != schedule {
framework.Failf("Failed creating a CronJob with correct schedule %s", schedule) e2elog.Failf("Failed creating a CronJob with correct schedule %s", schedule)
} }
containers := sj.Spec.JobTemplate.Spec.Template.Spec.Containers containers := sj.Spec.JobTemplate.Spec.Template.Spec.Containers
if checkContainersImage(containers, busyboxImage) { if checkContainersImage(containers, busyboxImage) {
framework.Failf("Failed creating CronJob %s for 1 pod with expected image %s: %#v", cjName, busyboxImage, containers) e2elog.Failf("Failed creating CronJob %s for 1 pod with expected image %s: %#v", cjName, busyboxImage, containers)
} }
if sj.Spec.JobTemplate.Spec.Template.Spec.RestartPolicy != v1.RestartPolicyOnFailure { if sj.Spec.JobTemplate.Spec.Template.Spec.RestartPolicy != v1.RestartPolicyOnFailure {
framework.Failf("Failed creating a CronJob with correct restart policy for --restart=OnFailure") e2elog.Failf("Failed creating a CronJob with correct restart policy for --restart=OnFailure")
} }
}) })
}) })
@ -233,7 +233,7 @@ var _ = SIGDescribe("Kubectl client", func() {
if err != nil || len(pods) < atLeast { if err != nil || len(pods) < atLeast {
// TODO: Generalize integrating debug info into these tests so we always get debug info when we need it // TODO: Generalize integrating debug info into these tests so we always get debug info when we need it
framework.DumpAllNamespaceInfo(f.ClientSet, ns) framework.DumpAllNamespaceInfo(f.ClientSet, ns)
framework.Failf("Verified %d of %d pods , error: %v", len(pods), atLeast, err) e2elog.Failf("Verified %d of %d pods , error: %v", len(pods), atLeast, err)
} }
} }
@ -379,7 +379,7 @@ var _ = SIGDescribe("Kubectl client", func() {
ginkgo.By("executing a command in the container") ginkgo.By("executing a command in the container")
execOutput := framework.RunKubectlOrDie("exec", fmt.Sprintf("--namespace=%v", ns), simplePodName, "echo", "running", "in", "container") execOutput := framework.RunKubectlOrDie("exec", fmt.Sprintf("--namespace=%v", ns), simplePodName, "echo", "running", "in", "container")
if e, a := "running in container", strings.TrimSpace(execOutput); e != a { if e, a := "running in container", strings.TrimSpace(execOutput); e != a {
framework.Failf("Unexpected kubectl exec output. Wanted %q, got %q", e, a) e2elog.Failf("Unexpected kubectl exec output. Wanted %q, got %q", e, a)
} }
ginkgo.By("executing a very long command in the container") ginkgo.By("executing a very long command in the container")
@ -395,13 +395,13 @@ var _ = SIGDescribe("Kubectl client", func() {
WithStdinData("abcd1234"). WithStdinData("abcd1234").
ExecOrDie() ExecOrDie()
if e, a := "abcd1234", execOutput; e != a { if e, a := "abcd1234", execOutput; e != a {
framework.Failf("Unexpected kubectl exec output. Wanted %q, got %q", e, a) e2elog.Failf("Unexpected kubectl exec output. Wanted %q, got %q", e, a)
} }
// pretend that we're a user in an interactive shell // pretend that we're a user in an interactive shell
r, closer, err := newBlockingReader("echo hi\nexit\n") r, closer, err := newBlockingReader("echo hi\nexit\n")
if err != nil { if err != nil {
framework.Failf("Error creating blocking reader: %v", err) e2elog.Failf("Error creating blocking reader: %v", err)
} }
// NOTE this is solely for test cleanup! // NOTE this is solely for test cleanup!
defer closer.Close() defer closer.Close()
@ -411,7 +411,7 @@ var _ = SIGDescribe("Kubectl client", func() {
WithStdinReader(r). WithStdinReader(r).
ExecOrDie() ExecOrDie()
if e, a := "hi", strings.TrimSpace(execOutput); e != a { if e, a := "hi", strings.TrimSpace(execOutput); e != a {
framework.Failf("Unexpected kubectl exec output. Wanted %q, got %q", e, a) e2elog.Failf("Unexpected kubectl exec output. Wanted %q, got %q", e, a)
} }
}) })
@ -419,14 +419,14 @@ var _ = SIGDescribe("Kubectl client", func() {
ginkgo.By("executing a command in the container") ginkgo.By("executing a command in the container")
execOutput := framework.RunKubectlOrDie("exec", fmt.Sprintf("--namespace=%v", ns), simplePodResourceName, "echo", "running", "in", "container") execOutput := framework.RunKubectlOrDie("exec", fmt.Sprintf("--namespace=%v", ns), simplePodResourceName, "echo", "running", "in", "container")
if e, a := "running in container", strings.TrimSpace(execOutput); e != a { if e, a := "running in container", strings.TrimSpace(execOutput); e != a {
framework.Failf("Unexpected kubectl exec output. Wanted %q, got %q", e, a) e2elog.Failf("Unexpected kubectl exec output. Wanted %q, got %q", e, a)
} }
}) })
ginkgo.It("should support exec through an HTTP proxy", func() { ginkgo.It("should support exec through an HTTP proxy", func() {
// Fail if the variable isn't set // Fail if the variable isn't set
if framework.TestContext.Host == "" { if framework.TestContext.Host == "" {
framework.Failf("--host variable must be set to the full URI to the api server on e2e run.") e2elog.Failf("--host variable must be set to the full URI to the api server on e2e run.")
} }
ginkgo.By("Starting goproxy") ginkgo.By("Starting goproxy")
@ -444,7 +444,7 @@ var _ = SIGDescribe("Kubectl client", func() {
// Verify we got the normal output captured by the exec server // Verify we got the normal output captured by the exec server
expectedExecOutput := "running in container\n" expectedExecOutput := "running in container\n"
if output != expectedExecOutput { if output != expectedExecOutput {
framework.Failf("Unexpected kubectl exec output. Wanted %q, got %q", expectedExecOutput, output) e2elog.Failf("Unexpected kubectl exec output. Wanted %q, got %q", expectedExecOutput, output)
} }
// Verify the proxy server logs saw the connection // Verify the proxy server logs saw the connection
@ -452,7 +452,7 @@ var _ = SIGDescribe("Kubectl client", func() {
proxyLog := proxyLogs.String() proxyLog := proxyLogs.String()
if !strings.Contains(proxyLog, expectedProxyLog) { if !strings.Contains(proxyLog, expectedProxyLog) {
framework.Failf("Missing expected log result on proxy server for %s. Expected: %q, got %q", proxyVar, expectedProxyLog, proxyLog) e2elog.Failf("Missing expected log result on proxy server for %s. Expected: %q, got %q", proxyVar, expectedProxyLog, proxyLog)
} }
} }
}) })
@ -460,7 +460,7 @@ var _ = SIGDescribe("Kubectl client", func() {
ginkgo.It("should support exec through kubectl proxy", func() { ginkgo.It("should support exec through kubectl proxy", func() {
// Fail if the variable isn't set // Fail if the variable isn't set
if framework.TestContext.Host == "" { if framework.TestContext.Host == "" {
framework.Failf("--host variable must be set to the full URI to the api server on e2e run.") e2elog.Failf("--host variable must be set to the full URI to the api server on e2e run.")
} }
ginkgo.By("Starting kubectl proxy") ginkgo.By("Starting kubectl proxy")
@ -479,7 +479,7 @@ var _ = SIGDescribe("Kubectl client", func() {
// Verify we got the normal output captured by the exec server // Verify we got the normal output captured by the exec server
expectedExecOutput := "running in container\n" expectedExecOutput := "running in container\n"
if output != expectedExecOutput { if output != expectedExecOutput {
framework.Failf("Unexpected kubectl exec output. Wanted %q, got %q", expectedExecOutput, output) e2elog.Failf("Unexpected kubectl exec output. Wanted %q, got %q", expectedExecOutput, output)
} }
}) })
@ -541,7 +541,7 @@ var _ = SIGDescribe("Kubectl client", func() {
// to loop test. // to loop test.
err = wait.PollImmediate(time.Second, time.Minute, func() (bool, error) { err = wait.PollImmediate(time.Second, time.Minute, func() (bool, error) {
if !e2epod.CheckPodsRunningReady(c, ns, []string{runTestPod.Name}, 1*time.Second) { if !e2epod.CheckPodsRunningReady(c, ns, []string{runTestPod.Name}, 1*time.Second) {
framework.Failf("Pod %q of Job %q should still be running", runTestPod.Name, "run-test") e2elog.Failf("Pod %q of Job %q should still be running", runTestPod.Name, "run-test")
} }
logOutput := framework.RunKubectlOrDie(nsFlag, "logs", runTestPod.Name) logOutput := framework.RunKubectlOrDie(nsFlag, "logs", runTestPod.Name)
gomega.Expect(runOutput).To(gomega.ContainSubstring("abcd1234")) gomega.Expect(runOutput).To(gomega.ContainSubstring("abcd1234"))
@ -569,14 +569,14 @@ var _ = SIGDescribe("Kubectl client", func() {
runTestPod, _, err = polymorphichelpers.GetFirstPod(f.ClientSet.CoreV1(), ns, "run=run-test-3", 1*time.Minute, g) runTestPod, _, err = polymorphichelpers.GetFirstPod(f.ClientSet.CoreV1(), ns, "run=run-test-3", 1*time.Minute, g)
gomega.Expect(err).To(gomega.BeNil()) gomega.Expect(err).To(gomega.BeNil())
if !e2epod.CheckPodsRunningReady(c, ns, []string{runTestPod.Name}, time.Minute) { if !e2epod.CheckPodsRunningReady(c, ns, []string{runTestPod.Name}, time.Minute) {
framework.Failf("Pod %q of Job %q should still be running", runTestPod.Name, "run-test-3") e2elog.Failf("Pod %q of Job %q should still be running", runTestPod.Name, "run-test-3")
} }
// NOTE: we cannot guarantee our output showed up in the container logs before stdin was closed, so we have // NOTE: we cannot guarantee our output showed up in the container logs before stdin was closed, so we have
// to loop test. // to loop test.
err = wait.PollImmediate(time.Second, time.Minute, func() (bool, error) { err = wait.PollImmediate(time.Second, time.Minute, func() (bool, error) {
if !e2epod.CheckPodsRunningReady(c, ns, []string{runTestPod.Name}, 1*time.Second) { if !e2epod.CheckPodsRunningReady(c, ns, []string{runTestPod.Name}, 1*time.Second) {
framework.Failf("Pod %q of Job %q should still be running", runTestPod.Name, "run-test-3") e2elog.Failf("Pod %q of Job %q should still be running", runTestPod.Name, "run-test-3")
} }
logOutput := framework.RunKubectlOrDie(nsFlag, "logs", runTestPod.Name) logOutput := framework.RunKubectlOrDie(nsFlag, "logs", runTestPod.Name)
gomega.Expect(logOutput).ToNot(gomega.ContainSubstring("stdin closed")) gomega.Expect(logOutput).ToNot(gomega.ContainSubstring("stdin closed"))
@ -595,7 +595,7 @@ var _ = SIGDescribe("Kubectl client", func() {
framework.RunKubectlOrDie("run", podName, "--generator=run-pod/v1", "--image="+busyboxImage, "--restart=OnFailure", nsFlag, "--", "sh", "-c", "sleep 10; seq 100 | while read i; do echo $i; sleep 0.01; done; echo EOF") framework.RunKubectlOrDie("run", podName, "--generator=run-pod/v1", "--image="+busyboxImage, "--restart=OnFailure", nsFlag, "--", "sh", "-c", "sleep 10; seq 100 | while read i; do echo $i; sleep 0.01; done; echo EOF")
if !e2epod.CheckPodsRunningReady(c, ns, []string{podName}, framework.PodStartTimeout) { if !e2epod.CheckPodsRunningReady(c, ns, []string{podName}, framework.PodStartTimeout) {
framework.Failf("Pod for run-log-test was not ready") e2elog.Failf("Pod for run-log-test was not ready")
} }
logOutput := framework.RunKubectlOrDie(nsFlag, "logs", "-f", "run-log-test") logOutput := framework.RunKubectlOrDie(nsFlag, "logs", "-f", "run-log-test")
@ -612,10 +612,10 @@ var _ = SIGDescribe("Kubectl client", func() {
body, err := curl(localAddr) body, err := curl(localAddr)
e2elog.Logf("got: %s", body) e2elog.Logf("got: %s", body)
if err != nil { if err != nil {
framework.Failf("Failed http.Get of forwarded port (%s): %v", localAddr, err) e2elog.Failf("Failed http.Get of forwarded port (%s): %v", localAddr, err)
} }
if !strings.Contains(body, nginxDefaultOutput) { if !strings.Contains(body, nginxDefaultOutput) {
framework.Failf("Container port output missing expected value. Wanted:'%s', got: %s", nginxDefaultOutput, body) e2elog.Failf("Container port output missing expected value. Wanted:'%s', got: %s", nginxDefaultOutput, body)
} }
}) })
@ -754,7 +754,7 @@ metadata:
ginkgo.By("validating api versions") ginkgo.By("validating api versions")
output := framework.RunKubectlOrDie("api-versions") output := framework.RunKubectlOrDie("api-versions")
if !strings.Contains(output, "v1") { if !strings.Contains(output, "v1") {
framework.Failf("No v1 in kubectl api-versions") e2elog.Failf("No v1 in kubectl api-versions")
} }
}) })
}) })
@ -805,7 +805,7 @@ metadata:
ginkgo.By("checking the result") ginkgo.By("checking the result")
if originalNodePort != currentNodePort { if originalNodePort != currentNodePort {
framework.Failf("port should keep the same") e2elog.Failf("port should keep the same")
} }
}) })
@ -822,7 +822,7 @@ metadata:
output := framework.RunKubectlOrDieInput(deployment1Yaml, "apply", "view-last-applied", "-f", "-", nsFlag, "-o", "json") output := framework.RunKubectlOrDieInput(deployment1Yaml, "apply", "view-last-applied", "-f", "-", nsFlag, "-o", "json")
requiredString := "\"replicas\": 2" requiredString := "\"replicas\": 2"
if !strings.Contains(output, requiredString) { if !strings.Contains(output, requiredString) {
framework.Failf("Missing %s in kubectl view-last-applied", requiredString) e2elog.Failf("Missing %s in kubectl view-last-applied", requiredString)
} }
ginkgo.By("apply file doesn't have replicas") ginkgo.By("apply file doesn't have replicas")
@ -832,7 +832,7 @@ metadata:
output = framework.RunKubectlOrDieInput(deployment1Yaml, "apply", "view-last-applied", "-f", "-", nsFlag, "-o", "json") output = framework.RunKubectlOrDieInput(deployment1Yaml, "apply", "view-last-applied", "-f", "-", nsFlag, "-o", "json")
requiredString = "\"replicas\": 2" requiredString = "\"replicas\": 2"
if strings.Contains(output, requiredString) { if strings.Contains(output, requiredString) {
framework.Failf("Presenting %s in kubectl view-last-applied", requiredString) e2elog.Failf("Presenting %s in kubectl view-last-applied", requiredString)
} }
ginkgo.By("scale set replicas to 3") ginkgo.By("scale set replicas to 3")
@ -848,7 +848,7 @@ metadata:
requiredItems := []string{"\"replicas\": 3", imageutils.GetE2EImage(imageutils.Nginx)} requiredItems := []string{"\"replicas\": 3", imageutils.GetE2EImage(imageutils.Nginx)}
for _, item := range requiredItems { for _, item := range requiredItems {
if !strings.Contains(output, item) { if !strings.Contains(output, item) {
framework.Failf("Missing %s in kubectl apply", item) e2elog.Failf("Missing %s in kubectl apply", item)
} }
} }
}) })
@ -887,7 +887,7 @@ metadata:
schemaForGVK := func(desiredGVK schema.GroupVersionKind) *openapi_v2.Schema { schemaForGVK := func(desiredGVK schema.GroupVersionKind) *openapi_v2.Schema {
d, err := f.ClientSet.Discovery().OpenAPISchema() d, err := f.ClientSet.Discovery().OpenAPISchema()
if err != nil { if err != nil {
framework.Failf("%v", err) e2elog.Failf("%v", err)
} }
if d == nil || d.Definitions == nil { if d == nil || d.Definitions == nil {
return nil return nil
@ -909,7 +909,7 @@ metadata:
ginkgo.By("create CRD with no validation schema") ginkgo.By("create CRD with no validation schema")
crd, err := crd.CreateTestCRD(f) crd, err := crd.CreateTestCRD(f)
if err != nil { if err != nil {
framework.Failf("failed to create test CRD: %v", err) e2elog.Failf("failed to create test CRD: %v", err)
} }
defer crd.CleanUp() defer crd.CleanUp()
@ -919,7 +919,7 @@ metadata:
meta := fmt.Sprintf(metaPattern, crd.Crd.Spec.Names.Kind, crd.Crd.Spec.Group, crd.Crd.Spec.Versions[0].Name, "test-cr") meta := fmt.Sprintf(metaPattern, crd.Crd.Spec.Names.Kind, crd.Crd.Spec.Group, crd.Crd.Spec.Versions[0].Name, "test-cr")
randomCR := fmt.Sprintf(`{%s,"a":{"b":[{"c":"d"}]}}`, meta) randomCR := fmt.Sprintf(`{%s,"a":{"b":[{"c":"d"}]}}`, meta)
if err := createApplyCustomResource(randomCR, f.Namespace.Name, "test-cr", crd); err != nil { if err := createApplyCustomResource(randomCR, f.Namespace.Name, "test-cr", crd); err != nil {
framework.Failf("%v", err) e2elog.Failf("%v", err)
} }
}) })
@ -928,12 +928,12 @@ metadata:
crd, err := crd.CreateTestCRD(f, func(crd *v1beta1.CustomResourceDefinition) { crd, err := crd.CreateTestCRD(f, func(crd *v1beta1.CustomResourceDefinition) {
props := &v1beta1.JSONSchemaProps{} props := &v1beta1.JSONSchemaProps{}
if err := yaml.Unmarshal(schemaFoo, props); err != nil { if err := yaml.Unmarshal(schemaFoo, props); err != nil {
framework.Failf("failed to unmarshal schema: %v", err) e2elog.Failf("failed to unmarshal schema: %v", err)
} }
crd.Spec.Validation = &v1beta1.CustomResourceValidation{OpenAPIV3Schema: props} crd.Spec.Validation = &v1beta1.CustomResourceValidation{OpenAPIV3Schema: props}
}) })
if err != nil { if err != nil {
framework.Failf("failed to create test CRD: %v", err) e2elog.Failf("failed to create test CRD: %v", err)
} }
defer crd.CleanUp() defer crd.CleanUp()
@ -943,7 +943,7 @@ metadata:
meta := fmt.Sprintf(metaPattern, crd.Crd.Spec.Names.Kind, crd.Crd.Spec.Group, crd.Crd.Spec.Versions[0].Name, "test-cr") meta := fmt.Sprintf(metaPattern, crd.Crd.Spec.Names.Kind, crd.Crd.Spec.Group, crd.Crd.Spec.Versions[0].Name, "test-cr")
validCR := fmt.Sprintf(`{%s,"spec":{"bars":[{"name":"test-bar"}]}}`, meta) validCR := fmt.Sprintf(`{%s,"spec":{"bars":[{"name":"test-bar"}]}}`, meta)
if err := createApplyCustomResource(validCR, f.Namespace.Name, "test-cr", crd); err != nil { if err := createApplyCustomResource(validCR, f.Namespace.Name, "test-cr", crd); err != nil {
framework.Failf("%v", err) e2elog.Failf("%v", err)
} }
}) })
@ -952,12 +952,12 @@ metadata:
crd, err := crd.CreateTestCRD(f, func(crd *v1beta1.CustomResourceDefinition) { crd, err := crd.CreateTestCRD(f, func(crd *v1beta1.CustomResourceDefinition) {
props := &v1beta1.JSONSchemaProps{} props := &v1beta1.JSONSchemaProps{}
if err := yaml.Unmarshal(schemaFoo, props); err != nil { if err := yaml.Unmarshal(schemaFoo, props); err != nil {
framework.Failf("failed to unmarshal schema: %v", err) e2elog.Failf("failed to unmarshal schema: %v", err)
} }
crd.Spec.Validation = &v1beta1.CustomResourceValidation{OpenAPIV3Schema: props} crd.Spec.Validation = &v1beta1.CustomResourceValidation{OpenAPIV3Schema: props}
}) })
if err != nil { if err != nil {
framework.Failf("failed to create test CRD: %v", err) e2elog.Failf("failed to create test CRD: %v", err)
} }
defer crd.CleanUp() defer crd.CleanUp()
@ -980,11 +980,11 @@ metadata:
validArbitraryCR := fmt.Sprintf(`{%s,"spec":{"bars":[{"name":"test-bar"}],"extraProperty":"arbitrary-value"}}`, meta) validArbitraryCR := fmt.Sprintf(`{%s,"spec":{"bars":[{"name":"test-bar"}],"extraProperty":"arbitrary-value"}}`, meta)
if err := createApplyCustomResource(validArbitraryCR, f.Namespace.Name, "test-cr", crd); err != nil { if err := createApplyCustomResource(validArbitraryCR, f.Namespace.Name, "test-cr", crd); err != nil {
if expectSuccess { if expectSuccess {
framework.Failf("%v", err) e2elog.Failf("%v", err)
} }
} else { } else {
if !expectSuccess { if !expectSuccess {
framework.Failf("expected error, got none") e2elog.Failf("expected error, got none")
} }
} }
}) })
@ -1004,7 +1004,7 @@ metadata:
requiredItems := []string{"Kubernetes master", "is running at"} requiredItems := []string{"Kubernetes master", "is running at"}
for _, item := range requiredItems { for _, item := range requiredItems {
if !strings.Contains(output, item) { if !strings.Contains(output, item) {
framework.Failf("Missing %s in kubectl cluster-info", item) e2elog.Failf("Missing %s in kubectl cluster-info", item)
} }
} }
}) })
@ -1168,11 +1168,11 @@ metadata:
return false, nil return false, nil
} }
if len(uidToPort) > 1 { if len(uidToPort) > 1 {
framework.Failf("Too many endpoints found") e2elog.Failf("Too many endpoints found")
} }
for _, port := range uidToPort { for _, port := range uidToPort {
if port[0] != redisPort { if port[0] != redisPort {
framework.Failf("Wrong endpoint port: %d", port[0]) e2elog.Failf("Wrong endpoint port: %d", port[0])
} }
} }
return true, nil return true, nil
@ -1183,14 +1183,14 @@ metadata:
framework.ExpectNoError(err) framework.ExpectNoError(err)
if len(service.Spec.Ports) != 1 { if len(service.Spec.Ports) != 1 {
framework.Failf("1 port is expected") e2elog.Failf("1 port is expected")
} }
port := service.Spec.Ports[0] port := service.Spec.Ports[0]
if port.Port != int32(servicePort) { if port.Port != int32(servicePort) {
framework.Failf("Wrong service port: %d", port.Port) e2elog.Failf("Wrong service port: %d", port.Port)
} }
if port.TargetPort.IntValue() != redisPort { if port.TargetPort.IntValue() != redisPort {
framework.Failf("Wrong target port: %d", port.TargetPort.IntValue()) e2elog.Failf("Wrong target port: %d", port.TargetPort.IntValue())
} }
} }
@ -1234,7 +1234,7 @@ metadata:
ginkgo.By("verifying the pod has the label " + labelName + " with the value " + labelValue) ginkgo.By("verifying the pod has the label " + labelName + " with the value " + labelValue)
output := framework.RunKubectlOrDie("get", "pod", pausePodName, "-L", labelName, nsFlag) output := framework.RunKubectlOrDie("get", "pod", pausePodName, "-L", labelName, nsFlag)
if !strings.Contains(output, labelValue) { if !strings.Contains(output, labelValue) {
framework.Failf("Failed updating label " + labelName + " to the pod " + pausePodName) e2elog.Failf("Failed updating label " + labelName + " to the pod " + pausePodName)
} }
ginkgo.By("removing the label " + labelName + " of a pod") ginkgo.By("removing the label " + labelName + " of a pod")
@ -1242,7 +1242,7 @@ metadata:
ginkgo.By("verifying the pod doesn't have the label " + labelName) ginkgo.By("verifying the pod doesn't have the label " + labelName)
output = framework.RunKubectlOrDie("get", "pod", pausePodName, "-L", labelName, nsFlag) output = framework.RunKubectlOrDie("get", "pod", pausePodName, "-L", labelName, nsFlag)
if strings.Contains(output, labelValue) { if strings.Contains(output, labelValue) {
framework.Failf("Failed removing label " + labelName + " of the pod " + pausePodName) e2elog.Failf("Failed removing label " + labelName + " of the pod " + pausePodName)
} }
}) })
}) })
@ -1271,7 +1271,7 @@ metadata:
podSource := fmt.Sprintf("%s:/root/foo/bar/foo.bar", busyboxPodName) podSource := fmt.Sprintf("%s:/root/foo/bar/foo.bar", busyboxPodName)
tempDestination, err := ioutil.TempFile(os.TempDir(), "copy-foobar") tempDestination, err := ioutil.TempFile(os.TempDir(), "copy-foobar")
if err != nil { if err != nil {
framework.Failf("Failed creating temporary destination file: %v", err) e2elog.Failf("Failed creating temporary destination file: %v", err)
} }
ginkgo.By("specifying a remote filepath " + podSource + " on the pod") ginkgo.By("specifying a remote filepath " + podSource + " on the pod")
@ -1279,10 +1279,10 @@ metadata:
ginkgo.By("verifying that the contents of the remote file " + podSource + " have been copied to a local file " + tempDestination.Name()) ginkgo.By("verifying that the contents of the remote file " + podSource + " have been copied to a local file " + tempDestination.Name())
localData, err := ioutil.ReadAll(tempDestination) localData, err := ioutil.ReadAll(tempDestination)
if err != nil { if err != nil {
framework.Failf("Failed reading temporary local file: %v", err) e2elog.Failf("Failed reading temporary local file: %v", err)
} }
if string(localData) != remoteContents { if string(localData) != remoteContents {
framework.Failf("Failed copying remote file contents. Expected %s but got %s", remoteContents, string(localData)) e2elog.Failf("Failed copying remote file contents. Expected %s but got %s", remoteContents, string(localData))
} }
}) })
}) })
@ -1344,7 +1344,7 @@ metadata:
gomega.Expect(len(words)).To(gomega.BeNumerically(">", 1)) gomega.Expect(len(words)).To(gomega.BeNumerically(">", 1))
if _, err := time.Parse(time.RFC3339Nano, words[0]); err != nil { if _, err := time.Parse(time.RFC3339Nano, words[0]); err != nil {
if _, err := time.Parse(time.RFC3339, words[0]); err != nil { if _, err := time.Parse(time.RFC3339, words[0]); err != nil {
framework.Failf("expected %q to be RFC3339 or RFC3339Nano", words[0]) e2elog.Failf("expected %q to be RFC3339 or RFC3339Nano", words[0])
} }
} }
@ -1390,7 +1390,7 @@ metadata:
} }
} }
if !found { if !found {
framework.Failf("Added annotation not found") e2elog.Failf("Added annotation not found")
} }
}) })
}) })
@ -1407,7 +1407,7 @@ metadata:
requiredItems := []string{"Client Version:", "Server Version:", "Major:", "Minor:", "GitCommit:"} requiredItems := []string{"Client Version:", "Server Version:", "Major:", "Minor:", "GitCommit:"}
for _, item := range requiredItems { for _, item := range requiredItems {
if !strings.Contains(version, item) { if !strings.Contains(version, item) {
framework.Failf("Required item %s not found in %s", item, version) e2elog.Failf("Required item %s not found in %s", item, version)
} }
} }
}) })
@ -1441,12 +1441,12 @@ metadata:
label := labels.SelectorFromSet(labels.Set(map[string]string{"run": name})) label := labels.SelectorFromSet(labels.Set(map[string]string{"run": name}))
podlist, err := e2epod.WaitForPodsWithLabel(c, ns, label) podlist, err := e2epod.WaitForPodsWithLabel(c, ns, label)
if err != nil { if err != nil {
framework.Failf("Failed getting pod controlled by %s: %v", name, err) e2elog.Failf("Failed getting pod controlled by %s: %v", name, err)
} }
pods := podlist.Items pods := podlist.Items
if pods == nil || len(pods) != 1 || len(pods[0].Spec.Containers) != 1 || pods[0].Spec.Containers[0].Image != nginxImage { if pods == nil || len(pods) != 1 || len(pods[0].Spec.Containers) != 1 || pods[0].Spec.Containers[0].Image != nginxImage {
framework.RunKubectlOrDie("get", "pods", "-L", "run", nsFlag) framework.RunKubectlOrDie("get", "pods", "-L", "run", nsFlag)
framework.Failf("Failed creating 1 pod with expected image %s. Number of pods = %v", nginxImage, len(pods)) e2elog.Failf("Failed creating 1 pod with expected image %s. Number of pods = %v", nginxImage, len(pods))
} }
}) })
}) })
@ -1475,23 +1475,23 @@ metadata:
ginkgo.By("verifying the rc " + rcName + " was created") ginkgo.By("verifying the rc " + rcName + " was created")
rc, err := c.CoreV1().ReplicationControllers(ns).Get(rcName, metav1.GetOptions{}) rc, err := c.CoreV1().ReplicationControllers(ns).Get(rcName, metav1.GetOptions{})
if err != nil { if err != nil {
framework.Failf("Failed getting rc %s: %v", rcName, err) e2elog.Failf("Failed getting rc %s: %v", rcName, err)
} }
containers := rc.Spec.Template.Spec.Containers containers := rc.Spec.Template.Spec.Containers
if checkContainersImage(containers, nginxImage) { if checkContainersImage(containers, nginxImage) {
framework.Failf("Failed creating rc %s for 1 pod with expected image %s", rcName, nginxImage) e2elog.Failf("Failed creating rc %s for 1 pod with expected image %s", rcName, nginxImage)
} }
ginkgo.By("verifying the pod controlled by rc " + rcName + " was created") ginkgo.By("verifying the pod controlled by rc " + rcName + " was created")
label := labels.SelectorFromSet(labels.Set(map[string]string{"run": rcName})) label := labels.SelectorFromSet(labels.Set(map[string]string{"run": rcName}))
podlist, err := e2epod.WaitForPodsWithLabel(c, ns, label) podlist, err := e2epod.WaitForPodsWithLabel(c, ns, label)
if err != nil { if err != nil {
framework.Failf("Failed getting pod controlled by rc %s: %v", rcName, err) e2elog.Failf("Failed getting pod controlled by rc %s: %v", rcName, err)
} }
pods := podlist.Items pods := podlist.Items
if pods == nil || len(pods) != 1 || len(pods[0].Spec.Containers) != 1 || pods[0].Spec.Containers[0].Image != nginxImage { if pods == nil || len(pods) != 1 || len(pods[0].Spec.Containers) != 1 || pods[0].Spec.Containers[0].Image != nginxImage {
framework.RunKubectlOrDie("get", "pods", "-L", "run", nsFlag) framework.RunKubectlOrDie("get", "pods", "-L", "run", nsFlag)
framework.Failf("Failed creating 1 pod with expected image %s. Number of pods = %v", nginxImage, len(pods)) e2elog.Failf("Failed creating 1 pod with expected image %s. Number of pods = %v", nginxImage, len(pods))
} }
ginkgo.By("confirm that you can get logs from an rc") ginkgo.By("confirm that you can get logs from an rc")
@ -1500,12 +1500,12 @@ metadata:
podNames = append(podNames, pod.Name) podNames = append(podNames, pod.Name)
} }
if !e2epod.CheckPodsRunningReady(c, ns, podNames, framework.PodStartTimeout) { if !e2epod.CheckPodsRunningReady(c, ns, podNames, framework.PodStartTimeout) {
framework.Failf("Pods for rc %s were not ready", rcName) e2elog.Failf("Pods for rc %s were not ready", rcName)
} }
_, err = framework.RunKubectl("logs", "rc/"+rcName, nsFlag) _, err = framework.RunKubectl("logs", "rc/"+rcName, nsFlag)
// a non-nil error is fine as long as we actually found a pod. // a non-nil error is fine as long as we actually found a pod.
if err != nil && !strings.Contains(err.Error(), " in pod ") { if err != nil && !strings.Contains(err.Error(), " in pod ") {
framework.Failf("Failed getting logs by rc %s: %v", rcName, err) e2elog.Failf("Failed getting logs by rc %s: %v", rcName, err)
} }
}) })
}) })
@ -1536,11 +1536,11 @@ metadata:
ginkgo.By("verifying the rc " + rcName + " was created") ginkgo.By("verifying the rc " + rcName + " was created")
rc, err := c.CoreV1().ReplicationControllers(ns).Get(rcName, metav1.GetOptions{}) rc, err := c.CoreV1().ReplicationControllers(ns).Get(rcName, metav1.GetOptions{})
if err != nil { if err != nil {
framework.Failf("Failed getting rc %s: %v", rcName, err) e2elog.Failf("Failed getting rc %s: %v", rcName, err)
} }
containers := rc.Spec.Template.Spec.Containers containers := rc.Spec.Template.Spec.Containers
if checkContainersImage(containers, nginxImage) { if checkContainersImage(containers, nginxImage) {
framework.Failf("Failed creating rc %s for 1 pod with expected image %s", rcName, nginxImage) e2elog.Failf("Failed creating rc %s for 1 pod with expected image %s", rcName, nginxImage)
} }
framework.WaitForRCToStabilize(c, ns, rcName, framework.PodStartTimeout) framework.WaitForRCToStabilize(c, ns, rcName, framework.PodStartTimeout)
@ -1586,23 +1586,23 @@ metadata:
ginkgo.By("verifying the deployment " + dName + " was created") ginkgo.By("verifying the deployment " + dName + " was created")
d, err := c.AppsV1().Deployments(ns).Get(dName, metav1.GetOptions{}) d, err := c.AppsV1().Deployments(ns).Get(dName, metav1.GetOptions{})
if err != nil { if err != nil {
framework.Failf("Failed getting deployment %s: %v", dName, err) e2elog.Failf("Failed getting deployment %s: %v", dName, err)
} }
containers := d.Spec.Template.Spec.Containers containers := d.Spec.Template.Spec.Containers
if checkContainersImage(containers, nginxImage) { if checkContainersImage(containers, nginxImage) {
framework.Failf("Failed creating deployment %s for 1 pod with expected image %s", dName, nginxImage) e2elog.Failf("Failed creating deployment %s for 1 pod with expected image %s", dName, nginxImage)
} }
ginkgo.By("verifying the pod controlled by deployment " + dName + " was created") ginkgo.By("verifying the pod controlled by deployment " + dName + " was created")
label := labels.SelectorFromSet(labels.Set(map[string]string{"run": dName})) label := labels.SelectorFromSet(labels.Set(map[string]string{"run": dName}))
podlist, err := e2epod.WaitForPodsWithLabel(c, ns, label) podlist, err := e2epod.WaitForPodsWithLabel(c, ns, label)
if err != nil { if err != nil {
framework.Failf("Failed getting pod controlled by deployment %s: %v", dName, err) e2elog.Failf("Failed getting pod controlled by deployment %s: %v", dName, err)
} }
pods := podlist.Items pods := podlist.Items
if pods == nil || len(pods) != 1 || len(pods[0].Spec.Containers) != 1 || pods[0].Spec.Containers[0].Image != nginxImage { if pods == nil || len(pods) != 1 || len(pods[0].Spec.Containers) != 1 || pods[0].Spec.Containers[0].Image != nginxImage {
framework.RunKubectlOrDie("get", "pods", "-L", "run", nsFlag) framework.RunKubectlOrDie("get", "pods", "-L", "run", nsFlag)
framework.Failf("Failed creating 1 pod with expected image %s. Number of pods = %v", nginxImage, len(pods)) e2elog.Failf("Failed creating 1 pod with expected image %s. Number of pods = %v", nginxImage, len(pods))
} }
}) })
}) })
@ -1631,14 +1631,14 @@ metadata:
ginkgo.By("verifying the job " + jobName + " was created") ginkgo.By("verifying the job " + jobName + " was created")
job, err := c.BatchV1().Jobs(ns).Get(jobName, metav1.GetOptions{}) job, err := c.BatchV1().Jobs(ns).Get(jobName, metav1.GetOptions{})
if err != nil { if err != nil {
framework.Failf("Failed getting job %s: %v", jobName, err) e2elog.Failf("Failed getting job %s: %v", jobName, err)
} }
containers := job.Spec.Template.Spec.Containers containers := job.Spec.Template.Spec.Containers
if checkContainersImage(containers, nginxImage) { if checkContainersImage(containers, nginxImage) {
framework.Failf("Failed creating job %s for 1 pod with expected image %s: %#v", jobName, nginxImage, containers) e2elog.Failf("Failed creating job %s for 1 pod with expected image %s: %#v", jobName, nginxImage, containers)
} }
if job.Spec.Template.Spec.RestartPolicy != v1.RestartPolicyOnFailure { if job.Spec.Template.Spec.RestartPolicy != v1.RestartPolicyOnFailure {
framework.Failf("Failed creating a job with correct restart policy for --restart=OnFailure") e2elog.Failf("Failed creating a job with correct restart policy for --restart=OnFailure")
} }
}) })
}) })
@ -1665,17 +1665,17 @@ metadata:
ginkgo.By("verifying the CronJob " + cjName + " was created") ginkgo.By("verifying the CronJob " + cjName + " was created")
cj, err := c.BatchV1beta1().CronJobs(ns).Get(cjName, metav1.GetOptions{}) cj, err := c.BatchV1beta1().CronJobs(ns).Get(cjName, metav1.GetOptions{})
if err != nil { if err != nil {
framework.Failf("Failed getting CronJob %s: %v", cjName, err) e2elog.Failf("Failed getting CronJob %s: %v", cjName, err)
} }
if cj.Spec.Schedule != schedule { if cj.Spec.Schedule != schedule {
framework.Failf("Failed creating a CronJob with correct schedule %s", schedule) e2elog.Failf("Failed creating a CronJob with correct schedule %s", schedule)
} }
containers := cj.Spec.JobTemplate.Spec.Template.Spec.Containers containers := cj.Spec.JobTemplate.Spec.Template.Spec.Containers
if checkContainersImage(containers, busyboxImage) { if checkContainersImage(containers, busyboxImage) {
framework.Failf("Failed creating CronJob %s for 1 pod with expected image %s: %#v", cjName, busyboxImage, containers) e2elog.Failf("Failed creating CronJob %s for 1 pod with expected image %s: %#v", cjName, busyboxImage, containers)
} }
if cj.Spec.JobTemplate.Spec.Template.Spec.RestartPolicy != v1.RestartPolicyOnFailure { if cj.Spec.JobTemplate.Spec.Template.Spec.RestartPolicy != v1.RestartPolicyOnFailure {
framework.Failf("Failed creating a CronJob with correct restart policy for --restart=OnFailure") e2elog.Failf("Failed creating a CronJob with correct restart policy for --restart=OnFailure")
} }
}) })
}) })
@ -1704,14 +1704,14 @@ metadata:
ginkgo.By("verifying the pod " + podName + " was created") ginkgo.By("verifying the pod " + podName + " was created")
pod, err := c.CoreV1().Pods(ns).Get(podName, metav1.GetOptions{}) pod, err := c.CoreV1().Pods(ns).Get(podName, metav1.GetOptions{})
if err != nil { if err != nil {
framework.Failf("Failed getting pod %s: %v", podName, err) e2elog.Failf("Failed getting pod %s: %v", podName, err)
} }
containers := pod.Spec.Containers containers := pod.Spec.Containers
if checkContainersImage(containers, nginxImage) { if checkContainersImage(containers, nginxImage) {
framework.Failf("Failed creating pod %s with expected image %s", podName, nginxImage) e2elog.Failf("Failed creating pod %s with expected image %s", podName, nginxImage)
} }
if pod.Spec.RestartPolicy != v1.RestartPolicyNever { if pod.Spec.RestartPolicy != v1.RestartPolicyNever {
framework.Failf("Failed creating a pod with correct restart policy for --restart=Never") e2elog.Failf("Failed creating a pod with correct restart policy for --restart=Never")
} }
}) })
}) })
@ -1742,13 +1742,13 @@ metadata:
label := labels.SelectorFromSet(labels.Set(map[string]string{"run": podName})) label := labels.SelectorFromSet(labels.Set(map[string]string{"run": podName}))
err := testutils.WaitForPodsWithLabelRunning(c, ns, label) err := testutils.WaitForPodsWithLabelRunning(c, ns, label)
if err != nil { if err != nil {
framework.Failf("Failed getting pod %s: %v", podName, err) e2elog.Failf("Failed getting pod %s: %v", podName, err)
} }
ginkgo.By("verifying the pod " + podName + " was created") ginkgo.By("verifying the pod " + podName + " was created")
podJSON := framework.RunKubectlOrDie("get", "pod", podName, nsFlag, "-o", "json") podJSON := framework.RunKubectlOrDie("get", "pod", podName, nsFlag, "-o", "json")
if !strings.Contains(podJSON, podName) { if !strings.Contains(podJSON, podName) {
framework.Failf("Failed to find pod %s in [%s]", podName, podJSON) e2elog.Failf("Failed to find pod %s in [%s]", podName, podJSON)
} }
ginkgo.By("replace the image in the pod") ginkgo.By("replace the image in the pod")
@ -1758,11 +1758,11 @@ metadata:
ginkgo.By("verifying the pod " + podName + " has the right image " + busyboxImage) ginkgo.By("verifying the pod " + podName + " has the right image " + busyboxImage)
pod, err := c.CoreV1().Pods(ns).Get(podName, metav1.GetOptions{}) pod, err := c.CoreV1().Pods(ns).Get(podName, metav1.GetOptions{})
if err != nil { if err != nil {
framework.Failf("Failed getting deployment %s: %v", podName, err) e2elog.Failf("Failed getting deployment %s: %v", podName, err)
} }
containers := pod.Spec.Containers containers := pod.Spec.Containers
if checkContainersImage(containers, busyboxImage) { if checkContainersImage(containers, busyboxImage) {
framework.Failf("Failed creating pod with expected image %s", busyboxImage) e2elog.Failf("Failed creating pod with expected image %s", busyboxImage)
} }
}) })
}) })
@ -1812,16 +1812,16 @@ metadata:
defer framework.TryKill(cmd) defer framework.TryKill(cmd)
} }
if err != nil { if err != nil {
framework.Failf("Failed to start proxy server: %v", err) e2elog.Failf("Failed to start proxy server: %v", err)
} }
ginkgo.By("curling proxy /api/ output") ginkgo.By("curling proxy /api/ output")
localAddr := fmt.Sprintf("http://localhost:%d/api/", port) localAddr := fmt.Sprintf("http://localhost:%d/api/", port)
apiVersions, err := getAPIVersions(localAddr) apiVersions, err := getAPIVersions(localAddr)
if err != nil { if err != nil {
framework.Failf("Expected at least one supported apiversion, got error %v", err) e2elog.Failf("Expected at least one supported apiversion, got error %v", err)
} }
if len(apiVersions.Versions) < 1 { if len(apiVersions.Versions) < 1 {
framework.Failf("Expected at least one supported apiversion, got %v", apiVersions) e2elog.Failf("Expected at least one supported apiversion, got %v", apiVersions)
} }
}) })
@ -1834,7 +1834,7 @@ metadata:
ginkgo.By("Starting the proxy") ginkgo.By("Starting the proxy")
tmpdir, err := ioutil.TempDir("", "kubectl-proxy-unix") tmpdir, err := ioutil.TempDir("", "kubectl-proxy-unix")
if err != nil { if err != nil {
framework.Failf("Failed to create temporary directory: %v", err) e2elog.Failf("Failed to create temporary directory: %v", err)
} }
path := filepath.Join(tmpdir, "test") path := filepath.Join(tmpdir, "test")
defer os.Remove(path) defer os.Remove(path)
@ -1842,19 +1842,19 @@ metadata:
cmd := framework.KubectlCmd("proxy", fmt.Sprintf("--unix-socket=%s", path)) cmd := framework.KubectlCmd("proxy", fmt.Sprintf("--unix-socket=%s", path))
stdout, stderr, err := framework.StartCmdAndStreamOutput(cmd) stdout, stderr, err := framework.StartCmdAndStreamOutput(cmd)
if err != nil { if err != nil {
framework.Failf("Failed to start kubectl command: %v", err) e2elog.Failf("Failed to start kubectl command: %v", err)
} }
defer stdout.Close() defer stdout.Close()
defer stderr.Close() defer stderr.Close()
defer framework.TryKill(cmd) defer framework.TryKill(cmd)
buf := make([]byte, 128) buf := make([]byte, 128)
if _, err = stdout.Read(buf); err != nil { if _, err = stdout.Read(buf); err != nil {
framework.Failf("Expected output from kubectl proxy: %v", err) e2elog.Failf("Expected output from kubectl proxy: %v", err)
} }
ginkgo.By("retrieving proxy /api/ output") ginkgo.By("retrieving proxy /api/ output")
_, err = curlUnix("http://unused/api", path) _, err = curlUnix("http://unused/api", path)
if err != nil { if err != nil {
framework.Failf("Failed get of /api at %s: %v", path, err) e2elog.Failf("Failed get of /api at %s: %v", path, err)
} }
}) })
}) })
@ -1889,7 +1889,7 @@ metadata:
ginkgo.By("verifying the node doesn't have the taint " + testTaint.Key) ginkgo.By("verifying the node doesn't have the taint " + testTaint.Key)
output = runKubectlRetryOrDie("describe", "node", nodeName) output = runKubectlRetryOrDie("describe", "node", nodeName)
if strings.Contains(output, testTaint.Key) { if strings.Contains(output, testTaint.Key) {
framework.Failf("Failed removing taint " + testTaint.Key + " of the node " + nodeName) e2elog.Failf("Failed removing taint " + testTaint.Key + " of the node " + nodeName)
} }
}) })
@ -1956,7 +1956,7 @@ metadata:
ginkgo.By("verifying the node doesn't have the taints that have the same key " + testTaint.Key) ginkgo.By("verifying the node doesn't have the taints that have the same key " + testTaint.Key)
output = runKubectlRetryOrDie("describe", "node", nodeName) output = runKubectlRetryOrDie("describe", "node", nodeName)
if strings.Contains(output, testTaint.Key) { if strings.Contains(output, testTaint.Key) {
framework.Failf("Failed removing taints " + testTaint.Key + " of the node " + nodeName) e2elog.Failf("Failed removing taints " + testTaint.Key + " of the node " + nodeName)
} }
}) })
}) })
@ -1972,22 +1972,22 @@ metadata:
ginkgo.By("verifying that the quota was created") ginkgo.By("verifying that the quota was created")
quota, err := c.CoreV1().ResourceQuotas(ns).Get(quotaName, metav1.GetOptions{}) quota, err := c.CoreV1().ResourceQuotas(ns).Get(quotaName, metav1.GetOptions{})
if err != nil { if err != nil {
framework.Failf("Failed getting quota %s: %v", quotaName, err) e2elog.Failf("Failed getting quota %s: %v", quotaName, err)
} }
if len(quota.Spec.Scopes) != 0 { if len(quota.Spec.Scopes) != 0 {
framework.Failf("Expected empty scopes, got %v", quota.Spec.Scopes) e2elog.Failf("Expected empty scopes, got %v", quota.Spec.Scopes)
} }
if len(quota.Spec.Hard) != 2 { if len(quota.Spec.Hard) != 2 {
framework.Failf("Expected two resources, got %v", quota.Spec.Hard) e2elog.Failf("Expected two resources, got %v", quota.Spec.Hard)
} }
r, found := quota.Spec.Hard[v1.ResourcePods] r, found := quota.Spec.Hard[v1.ResourcePods]
if expected := resource.MustParse("1000000"); !found || (&r).Cmp(expected) != 0 { if expected := resource.MustParse("1000000"); !found || (&r).Cmp(expected) != 0 {
framework.Failf("Expected pods=1000000, got %v", r) e2elog.Failf("Expected pods=1000000, got %v", r)
} }
r, found = quota.Spec.Hard[v1.ResourceServices] r, found = quota.Spec.Hard[v1.ResourceServices]
if expected := resource.MustParse("1000000"); !found || (&r).Cmp(expected) != 0 { if expected := resource.MustParse("1000000"); !found || (&r).Cmp(expected) != 0 {
framework.Failf("Expected services=1000000, got %v", r) e2elog.Failf("Expected services=1000000, got %v", r)
} }
}) })
@ -2001,21 +2001,21 @@ metadata:
ginkgo.By("verifying that the quota was created") ginkgo.By("verifying that the quota was created")
quota, err := c.CoreV1().ResourceQuotas(ns).Get(quotaName, metav1.GetOptions{}) quota, err := c.CoreV1().ResourceQuotas(ns).Get(quotaName, metav1.GetOptions{})
if err != nil { if err != nil {
framework.Failf("Failed getting quota %s: %v", quotaName, err) e2elog.Failf("Failed getting quota %s: %v", quotaName, err)
} }
if len(quota.Spec.Scopes) != 2 { if len(quota.Spec.Scopes) != 2 {
framework.Failf("Expected two scopes, got %v", quota.Spec.Scopes) e2elog.Failf("Expected two scopes, got %v", quota.Spec.Scopes)
} }
scopes := make(map[v1.ResourceQuotaScope]struct{}) scopes := make(map[v1.ResourceQuotaScope]struct{})
for _, scope := range quota.Spec.Scopes { for _, scope := range quota.Spec.Scopes {
scopes[scope] = struct{}{} scopes[scope] = struct{}{}
} }
if _, found := scopes[v1.ResourceQuotaScopeBestEffort]; !found { if _, found := scopes[v1.ResourceQuotaScopeBestEffort]; !found {
framework.Failf("Expected BestEffort scope, got %v", quota.Spec.Scopes) e2elog.Failf("Expected BestEffort scope, got %v", quota.Spec.Scopes)
} }
if _, found := scopes[v1.ResourceQuotaScopeNotTerminating]; !found { if _, found := scopes[v1.ResourceQuotaScopeNotTerminating]; !found {
framework.Failf("Expected NotTerminating scope, got %v", quota.Spec.Scopes) e2elog.Failf("Expected NotTerminating scope, got %v", quota.Spec.Scopes)
} }
}) })
@ -2026,7 +2026,7 @@ metadata:
ginkgo.By("calling kubectl quota") ginkgo.By("calling kubectl quota")
out, err := framework.RunKubectl("create", "quota", quotaName, "--hard=hard=pods=1000000", "--scopes=Foo", nsFlag) out, err := framework.RunKubectl("create", "quota", quotaName, "--hard=hard=pods=1000000", "--scopes=Foo", nsFlag)
if err == nil { if err == nil {
framework.Failf("Expected kubectl to fail, but it succeeded: %s", out) e2elog.Failf("Expected kubectl to fail, but it succeeded: %s", out)
} }
}) })
}) })
@ -2055,7 +2055,7 @@ func checkOutputReturnError(output string, required [][]string) error {
func checkOutput(output string, required [][]string) { func checkOutput(output string, required [][]string) {
err := checkOutputReturnError(output, required) err := checkOutputReturnError(output, required)
if err != nil { if err != nil {
framework.Failf("%v", err) e2elog.Failf("%v", err)
} }
} }
@ -2072,7 +2072,7 @@ func checkKubectlOutputWithRetry(required [][]string, args ...string) {
return true, nil return true, nil
}) })
if pollErr != nil { if pollErr != nil {
framework.Failf("%v", pollErr) e2elog.Failf("%v", pollErr)
} }
return return
} }
@ -2153,17 +2153,17 @@ func validateGuestbookApp(c clientset.Interface, ns string) {
framework.ExpectNoError(err) framework.ExpectNoError(err)
e2elog.Logf("Waiting for frontend to serve content.") e2elog.Logf("Waiting for frontend to serve content.")
if !waitForGuestbookResponse(c, "get", "", `{"data": ""}`, guestbookStartupTimeout, ns) { if !waitForGuestbookResponse(c, "get", "", `{"data": ""}`, guestbookStartupTimeout, ns) {
framework.Failf("Frontend service did not start serving content in %v seconds.", guestbookStartupTimeout.Seconds()) e2elog.Failf("Frontend service did not start serving content in %v seconds.", guestbookStartupTimeout.Seconds())
} }
e2elog.Logf("Trying to add a new entry to the guestbook.") e2elog.Logf("Trying to add a new entry to the guestbook.")
if !waitForGuestbookResponse(c, "set", "TestEntry", `{"message": "Updated"}`, guestbookResponseTimeout, ns) { if !waitForGuestbookResponse(c, "set", "TestEntry", `{"message": "Updated"}`, guestbookResponseTimeout, ns) {
framework.Failf("Cannot added new entry in %v seconds.", guestbookResponseTimeout.Seconds()) e2elog.Failf("Cannot added new entry in %v seconds.", guestbookResponseTimeout.Seconds())
} }
e2elog.Logf("Verifying that added entry can be retrieved.") e2elog.Logf("Verifying that added entry can be retrieved.")
if !waitForGuestbookResponse(c, "get", "", `{"data": "TestEntry"}`, guestbookResponseTimeout, ns) { if !waitForGuestbookResponse(c, "get", "", `{"data": "TestEntry"}`, guestbookResponseTimeout, ns) {
framework.Failf("Entry to guestbook wasn't correctly added in %v seconds.", guestbookResponseTimeout.Seconds()) e2elog.Failf("Entry to guestbook wasn't correctly added in %v seconds.", guestbookResponseTimeout.Seconds())
} }
} }
@ -2209,7 +2209,7 @@ const applyTestLabel = "kubectl.kubernetes.io/apply-test"
func readReplicationControllerFromString(contents string) *v1.ReplicationController { func readReplicationControllerFromString(contents string) *v1.ReplicationController {
rc := v1.ReplicationController{} rc := v1.ReplicationController{}
if err := yaml.Unmarshal([]byte(contents), &rc); err != nil { if err := yaml.Unmarshal([]byte(contents), &rc); err != nil {
framework.Failf(err.Error()) e2elog.Failf(err.Error())
} }
return &rc return &rc
@ -2222,7 +2222,7 @@ func modifyReplicationControllerConfiguration(contents string) io.Reader {
rc.Spec.Template.Labels[applyTestLabel] = "ADDED" rc.Spec.Template.Labels[applyTestLabel] = "ADDED"
data, err := json.Marshal(rc) data, err := json.Marshal(rc)
if err != nil { if err != nil {
framework.Failf("json marshal failed: %s\n", err) e2elog.Failf("json marshal failed: %s\n", err)
} }
return bytes.NewReader(data) return bytes.NewReader(data)
@ -2242,7 +2242,7 @@ func forEachReplicationController(c clientset.Interface, ns, selectorKey, select
} }
if rcs == nil || len(rcs.Items) == 0 { if rcs == nil || len(rcs.Items) == 0 {
framework.Failf("No replication controllers found") e2elog.Failf("No replication controllers found")
} }
for _, rc := range rcs.Items { for _, rc := range rcs.Items {
@ -2253,11 +2253,11 @@ func forEachReplicationController(c clientset.Interface, ns, selectorKey, select
func validateReplicationControllerConfiguration(rc v1.ReplicationController) { func validateReplicationControllerConfiguration(rc v1.ReplicationController) {
if rc.Name == "redis-master" { if rc.Name == "redis-master" {
if _, ok := rc.Annotations[v1.LastAppliedConfigAnnotation]; !ok { if _, ok := rc.Annotations[v1.LastAppliedConfigAnnotation]; !ok {
framework.Failf("Annotation not found in modified configuration:\n%v\n", rc) e2elog.Failf("Annotation not found in modified configuration:\n%v\n", rc)
} }
if value, ok := rc.Labels[applyTestLabel]; !ok || value != "ADDED" { if value, ok := rc.Labels[applyTestLabel]; !ok || value != "ADDED" {
framework.Failf("Added label %s not found in modified configuration:\n%v\n", applyTestLabel, rc) e2elog.Failf("Added label %s not found in modified configuration:\n%v\n", applyTestLabel, rc)
} }
} }
} }
@ -2285,7 +2285,7 @@ func getUDData(jpgExpected string, ns string) func(clientset.Interface, string)
if err != nil { if err != nil {
if ctx.Err() != nil { if ctx.Err() != nil {
framework.Failf("Failed to retrieve data from container: %v", err) e2elog.Failf("Failed to retrieve data from container: %v", err)
} }
return err return err
} }
@ -175,7 +175,7 @@ func runPortForward(ns, podName string, port int) *portForwardCommand {
e2elog.Logf("starting port-forward command and streaming output") e2elog.Logf("starting port-forward command and streaming output")
portOutput, _, err := framework.StartCmdAndStreamOutput(cmd) portOutput, _, err := framework.StartCmdAndStreamOutput(cmd)
if err != nil { if err != nil {
framework.Failf("Failed to start port-forward command: %v", err) e2elog.Failf("Failed to start port-forward command: %v", err)
} }
buf := make([]byte, 128) buf := make([]byte, 128)
@ -183,17 +183,17 @@ func runPortForward(ns, podName string, port int) *portForwardCommand {
var n int var n int
e2elog.Logf("reading from `kubectl port-forward` command's stdout") e2elog.Logf("reading from `kubectl port-forward` command's stdout")
if n, err = portOutput.Read(buf); err != nil { if n, err = portOutput.Read(buf); err != nil {
framework.Failf("Failed to read from kubectl port-forward stdout: %v", err) e2elog.Failf("Failed to read from kubectl port-forward stdout: %v", err)
} }
portForwardOutput := string(buf[:n]) portForwardOutput := string(buf[:n])
match := portForwardRegexp.FindStringSubmatch(portForwardOutput) match := portForwardRegexp.FindStringSubmatch(portForwardOutput)
if len(match) != 3 { if len(match) != 3 {
framework.Failf("Failed to parse kubectl port-forward output: %s", portForwardOutput) e2elog.Failf("Failed to parse kubectl port-forward output: %s", portForwardOutput)
} }
listenPort, err := strconv.Atoi(match[2]) listenPort, err := strconv.Atoi(match[2])
if err != nil { if err != nil {
framework.Failf("Error converting %s to an int: %v", match[2], err) e2elog.Failf("Error converting %s to an int: %v", match[2], err)
} }
return &portForwardCommand{ return &portForwardCommand{
@ -206,10 +206,10 @@ func doTestConnectSendDisconnect(bindAddress string, f *framework.Framework) {
ginkgo.By("Creating the target pod") ginkgo.By("Creating the target pod")
pod := pfPod("", "10", "10", "100", fmt.Sprintf("%s", bindAddress)) pod := pfPod("", "10", "10", "100", fmt.Sprintf("%s", bindAddress))
if _, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod); err != nil { if _, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod); err != nil {
framework.Failf("Couldn't create pod: %v", err) e2elog.Failf("Couldn't create pod: %v", err)
} }
if err := f.WaitForPodReady(pod.Name); err != nil { if err := f.WaitForPodReady(pod.Name); err != nil {
framework.Failf("Pod did not start running: %v", err) e2elog.Failf("Pod did not start running: %v", err)
} }
ginkgo.By("Running 'kubectl port-forward'") ginkgo.By("Running 'kubectl port-forward'")
@ -219,7 +219,7 @@ func doTestConnectSendDisconnect(bindAddress string, f *framework.Framework) {
ginkgo.By("Dialing the local port") ginkgo.By("Dialing the local port")
conn, err := net.Dial("tcp", fmt.Sprintf("127.0.0.1:%d", cmd.port)) conn, err := net.Dial("tcp", fmt.Sprintf("127.0.0.1:%d", cmd.port))
if err != nil { if err != nil {
framework.Failf("Couldn't connect to port %d: %v", cmd.port, err) e2elog.Failf("Couldn't connect to port %d: %v", cmd.port, err)
} }
defer func() { defer func() {
ginkgo.By("Closing the connection to the local port") ginkgo.By("Closing the connection to the local port")
@ -229,16 +229,16 @@ func doTestConnectSendDisconnect(bindAddress string, f *framework.Framework) {
ginkgo.By("Reading data from the local port") ginkgo.By("Reading data from the local port")
fromServer, err := ioutil.ReadAll(conn) fromServer, err := ioutil.ReadAll(conn)
if err != nil { if err != nil {
framework.Failf("Unexpected error reading data from the server: %v", err) e2elog.Failf("Unexpected error reading data from the server: %v", err)
} }
if e, a := strings.Repeat("x", 100), string(fromServer); e != a { if e, a := strings.Repeat("x", 100), string(fromServer); e != a {
framework.Failf("Expected %q from server, got %q", e, a) e2elog.Failf("Expected %q from server, got %q", e, a)
} }
ginkgo.By("Waiting for the target pod to stop running") ginkgo.By("Waiting for the target pod to stop running")
if err := WaitForTerminatedContainer(f, pod, "portforwardtester"); err != nil { if err := WaitForTerminatedContainer(f, pod, "portforwardtester"); err != nil {
framework.Failf("Container did not terminate: %v", err) e2elog.Failf("Container did not terminate: %v", err)
} }
ginkgo.By("Verifying logs") ginkgo.By("Verifying logs")
@ -254,10 +254,10 @@ func doTestMustConnectSendNothing(bindAddress string, f *framework.Framework) {
ginkgo.By("Creating the target pod") ginkgo.By("Creating the target pod")
pod := pfPod("abc", "1", "1", "1", fmt.Sprintf("%s", bindAddress)) pod := pfPod("abc", "1", "1", "1", fmt.Sprintf("%s", bindAddress))
if _, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod); err != nil { if _, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod); err != nil {
framework.Failf("Couldn't create pod: %v", err) e2elog.Failf("Couldn't create pod: %v", err)
} }
if err := f.WaitForPodReady(pod.Name); err != nil { if err := f.WaitForPodReady(pod.Name); err != nil {
framework.Failf("Pod did not start running: %v", err) e2elog.Failf("Pod did not start running: %v", err)
} }
ginkgo.By("Running 'kubectl port-forward'") ginkgo.By("Running 'kubectl port-forward'")
@ -267,7 +267,7 @@ func doTestMustConnectSendNothing(bindAddress string, f *framework.Framework) {
ginkgo.By("Dialing the local port") ginkgo.By("Dialing the local port")
conn, err := net.Dial("tcp", fmt.Sprintf("127.0.0.1:%d", cmd.port)) conn, err := net.Dial("tcp", fmt.Sprintf("127.0.0.1:%d", cmd.port))
if err != nil { if err != nil {
framework.Failf("Couldn't connect to port %d: %v", cmd.port, err) e2elog.Failf("Couldn't connect to port %d: %v", cmd.port, err)
} }
ginkgo.By("Closing the connection to the local port") ginkgo.By("Closing the connection to the local port")
@ -275,7 +275,7 @@ func doTestMustConnectSendNothing(bindAddress string, f *framework.Framework) {
ginkgo.By("Waiting for the target pod to stop running") ginkgo.By("Waiting for the target pod to stop running")
if err := WaitForTerminatedContainer(f, pod, "portforwardtester"); err != nil { if err := WaitForTerminatedContainer(f, pod, "portforwardtester"); err != nil {
framework.Failf("Container did not terminate: %v", err) e2elog.Failf("Container did not terminate: %v", err)
} }
ginkgo.By("Verifying logs") ginkgo.By("Verifying logs")
@ -291,10 +291,10 @@ func doTestMustConnectSendDisconnect(bindAddress string, f *framework.Framework)
ginkgo.By("Creating the target pod") ginkgo.By("Creating the target pod")
pod := pfPod("abc", "10", "10", "100", fmt.Sprintf("%s", bindAddress)) pod := pfPod("abc", "10", "10", "100", fmt.Sprintf("%s", bindAddress))
if _, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod); err != nil { if _, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod); err != nil {
framework.Failf("Couldn't create pod: %v", err) e2elog.Failf("Couldn't create pod: %v", err)
} }
if err := f.WaitForPodReady(pod.Name); err != nil { if err := f.WaitForPodReady(pod.Name); err != nil {
framework.Failf("Pod did not start running: %v", err) e2elog.Failf("Pod did not start running: %v", err)
} }
ginkgo.By("Running 'kubectl port-forward'") ginkgo.By("Running 'kubectl port-forward'")
@ -304,11 +304,11 @@ func doTestMustConnectSendDisconnect(bindAddress string, f *framework.Framework)
ginkgo.By("Dialing the local port") ginkgo.By("Dialing the local port")
addr, err := net.ResolveTCPAddr("tcp", fmt.Sprintf("127.0.0.1:%d", cmd.port)) addr, err := net.ResolveTCPAddr("tcp", fmt.Sprintf("127.0.0.1:%d", cmd.port))
if err != nil { if err != nil {
framework.Failf("Error resolving tcp addr: %v", err) e2elog.Failf("Error resolving tcp addr: %v", err)
} }
conn, err := net.DialTCP("tcp", nil, addr) conn, err := net.DialTCP("tcp", nil, addr)
if err != nil { if err != nil {
framework.Failf("Couldn't connect to port %d: %v", cmd.port, err) e2elog.Failf("Couldn't connect to port %d: %v", cmd.port, err)
} }
defer func() { defer func() {
ginkgo.By("Closing the connection to the local port") ginkgo.By("Closing the connection to the local port")
@ -324,16 +324,16 @@ func doTestMustConnectSendDisconnect(bindAddress string, f *framework.Framework)
ginkgo.By("Reading data from the local port") ginkgo.By("Reading data from the local port")
fromServer, err := ioutil.ReadAll(conn) fromServer, err := ioutil.ReadAll(conn)
if err != nil { if err != nil {
framework.Failf("Unexpected error reading data from the server: %v", err) e2elog.Failf("Unexpected error reading data from the server: %v", err)
} }
if e, a := strings.Repeat("x", 100), string(fromServer); e != a { if e, a := strings.Repeat("x", 100), string(fromServer); e != a {
framework.Failf("Expected %q from server, got %q", e, a) e2elog.Failf("Expected %q from server, got %q", e, a)
} }
ginkgo.By("Waiting for the target pod to stop running") ginkgo.By("Waiting for the target pod to stop running")
if err := WaitForTerminatedContainer(f, pod, "portforwardtester"); err != nil { if err := WaitForTerminatedContainer(f, pod, "portforwardtester"); err != nil {
framework.Failf("Container did not terminate: %v", err) e2elog.Failf("Container did not terminate: %v", err)
} }
ginkgo.By("Verifying logs") ginkgo.By("Verifying logs")
@ -353,10 +353,10 @@ func doTestOverWebSockets(bindAddress string, f *framework.Framework) {
ginkgo.By("Creating the pod") ginkgo.By("Creating the pod")
pod := pfPod("def", "10", "10", "100", fmt.Sprintf("%s", bindAddress)) pod := pfPod("def", "10", "10", "100", fmt.Sprintf("%s", bindAddress))
if _, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod); err != nil { if _, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod); err != nil {
framework.Failf("Couldn't create pod: %v", err) e2elog.Failf("Couldn't create pod: %v", err)
} }
if err := f.WaitForPodReady(pod.Name); err != nil { if err := f.WaitForPodReady(pod.Name); err != nil {
framework.Failf("Pod did not start running: %v", err) e2elog.Failf("Pod did not start running: %v", err)
} }
req := f.ClientSet.CoreV1().RESTClient().Get(). req := f.ClientSet.CoreV1().RESTClient().Get().
@ -369,7 +369,7 @@ func doTestOverWebSockets(bindAddress string, f *framework.Framework) {
url := req.URL() url := req.URL()
ws, err := framework.OpenWebSocketForURL(url, config, []string{"v4.channel.k8s.io"}) ws, err := framework.OpenWebSocketForURL(url, config, []string{"v4.channel.k8s.io"})
if err != nil { if err != nil {
framework.Failf("Failed to open websocket to %s: %v", url.String(), err) e2elog.Failf("Failed to open websocket to %s: %v", url.String(), err)
} }
defer ws.Close() defer ws.Close()
@ -404,7 +404,7 @@ func doTestOverWebSockets(bindAddress string, f *framework.Framework) {
ginkgo.By("Sending the expected data to the local port") ginkgo.By("Sending the expected data to the local port")
err = wsWrite(ws, 0, []byte("def")) err = wsWrite(ws, 0, []byte("def"))
if err != nil { if err != nil {
framework.Failf("Failed to write to websocket %s: %v", url.String(), err) e2elog.Failf("Failed to write to websocket %s: %v", url.String(), err)
} }
ginkgo.By("Reading data from the local port") ginkgo.By("Reading data from the local port")

View File

@ -21,6 +21,7 @@ go_library(
"//staging/src/k8s.io/client-go/kubernetes:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/cluster-bootstrap/token/api:go_default_library", "//staging/src/k8s.io/cluster-bootstrap/token/api:go_default_library",
"//test/e2e/framework:go_default_library", "//test/e2e/framework:go_default_library",
"//test/e2e/framework/log:go_default_library",
"//test/e2e/lifecycle:go_default_library", "//test/e2e/lifecycle:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library", "//vendor/github.com/onsi/ginkgo:go_default_library",
"//vendor/github.com/onsi/gomega:go_default_library", "//vendor/github.com/onsi/gomega:go_default_library",

View File

@ -29,6 +29,7 @@ import (
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
bootstrapapi "k8s.io/cluster-bootstrap/token/api" bootstrapapi "k8s.io/cluster-bootstrap/token/api"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
) )
func newTokenSecret(tokenID, tokenSecret string) *v1.Secret { func newTokenSecret(tokenID, tokenSecret string) *v1.Secret {
@ -83,7 +84,7 @@ func WaitforSignedClusterInfoByBootStrapToken(c clientset.Interface, tokenID str
return wait.Poll(framework.Poll, 2*time.Minute, func() (bool, error) { return wait.Poll(framework.Poll, 2*time.Minute, func() (bool, error) {
cfgMap, err := c.CoreV1().ConfigMaps(metav1.NamespacePublic).Get(bootstrapapi.ConfigMapClusterInfo, metav1.GetOptions{}) cfgMap, err := c.CoreV1().ConfigMaps(metav1.NamespacePublic).Get(bootstrapapi.ConfigMapClusterInfo, metav1.GetOptions{})
if err != nil { if err != nil {
framework.Failf("Failed to get cluster-info configMap: %v", err) e2elog.Failf("Failed to get cluster-info configMap: %v", err)
return false, err return false, err
} }
_, ok := cfgMap.Data[bootstrapapi.JWSSignatureKeyPrefix+tokenID] _, ok := cfgMap.Data[bootstrapapi.JWSSignatureKeyPrefix+tokenID]
@ -99,7 +100,7 @@ func WaitForSignedClusterInfoGetUpdatedByBootstrapToken(c clientset.Interface, t
return wait.Poll(framework.Poll, 2*time.Minute, func() (bool, error) { return wait.Poll(framework.Poll, 2*time.Minute, func() (bool, error) {
cfgMap, err := c.CoreV1().ConfigMaps(metav1.NamespacePublic).Get(bootstrapapi.ConfigMapClusterInfo, metav1.GetOptions{}) cfgMap, err := c.CoreV1().ConfigMaps(metav1.NamespacePublic).Get(bootstrapapi.ConfigMapClusterInfo, metav1.GetOptions{})
if err != nil { if err != nil {
framework.Failf("Failed to get cluster-info configMap: %v", err) e2elog.Failf("Failed to get cluster-info configMap: %v", err)
return false, err return false, err
} }
updated, ok := cfgMap.Data[bootstrapapi.JWSSignatureKeyPrefix+tokenID] updated, ok := cfgMap.Data[bootstrapapi.JWSSignatureKeyPrefix+tokenID]
@ -115,7 +116,7 @@ func WaitForSignedClusterInfoByBootstrapTokenToDisappear(c clientset.Interface,
return wait.Poll(framework.Poll, 2*time.Minute, func() (bool, error) { return wait.Poll(framework.Poll, 2*time.Minute, func() (bool, error) {
cfgMap, err := c.CoreV1().ConfigMaps(metav1.NamespacePublic).Get(bootstrapapi.ConfigMapClusterInfo, metav1.GetOptions{}) cfgMap, err := c.CoreV1().ConfigMaps(metav1.NamespacePublic).Get(bootstrapapi.ConfigMapClusterInfo, metav1.GetOptions{})
if err != nil { if err != nil {
framework.Failf("Failed to get cluster-info configMap: %v", err) e2elog.Failf("Failed to get cluster-info configMap: %v", err)
return false, err return false, err
} }
_, ok := cfgMap.Data[bootstrapapi.JWSSignatureKeyPrefix+tokenID] _, ok := cfgMap.Data[bootstrapapi.JWSSignatureKeyPrefix+tokenID]
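In these bootstrap helpers the replaced call sits inside a wait.Poll condition function, so after reporting the failure the error is also returned to stop polling. A condensed sketch reassembled from the hunks above, assuming the same client-go and framework packages shown there (the wrapper name is illustrative):

package bootstrap

import (
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
	bootstrapapi "k8s.io/cluster-bootstrap/token/api"
	"k8s.io/kubernetes/test/e2e/framework"
	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
)

// waitForClusterInfoSignature polls the cluster-info ConfigMap until it carries
// a JWS signature for tokenID; a fetch error fails the test via e2elog.Failf
// and also ends the poll by returning the error.
func waitForClusterInfoSignature(c clientset.Interface, tokenID string) error {
	return wait.Poll(framework.Poll, 2*time.Minute, func() (bool, error) {
		cfgMap, err := c.CoreV1().ConfigMaps(metav1.NamespacePublic).Get(bootstrapapi.ConfigMapClusterInfo, metav1.GetOptions{})
		if err != nil {
			e2elog.Failf("Failed to get cluster-info configMap: %v", err)
			return false, err
		}
		_, ok := cfgMap.Data[bootstrapapi.JWSSignatureKeyPrefix+tokenID]
		return ok, nil
	})
}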

View File

@ -83,7 +83,7 @@ func findRegionForZone(zone string) string {
region, err := exec.Command("gcloud", "compute", "zones", "list", zone, "--quiet", "--format=csv[no-heading](region)").Output() region, err := exec.Command("gcloud", "compute", "zones", "list", zone, "--quiet", "--format=csv[no-heading](region)").Output()
framework.ExpectNoError(err) framework.ExpectNoError(err)
if string(region) == "" { if string(region) == "" {
framework.Failf("Region not found; zone: %s", zone) e2elog.Failf("Region not found; zone: %s", zone)
} }
return string(region) return string(region)
} }

View File

@ -28,6 +28,7 @@ import (
"github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
"github.com/onsi/gomega" "github.com/onsi/gomega"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2enode "k8s.io/kubernetes/test/e2e/framework/node" e2enode "k8s.io/kubernetes/test/e2e/framework/node"
) )
@ -80,7 +81,7 @@ func portClosedTest(f *framework.Framework, pickNode *v1.Node, port int) {
conn, err := net.DialTimeout("tcp", fmt.Sprintf("%s:%d", addr, port), 1*time.Minute) conn, err := net.DialTimeout("tcp", fmt.Sprintf("%s:%d", addr, port), 1*time.Minute)
if err == nil { if err == nil {
conn.Close() conn.Close()
framework.Failf("port %d is not disabled", port) e2elog.Failf("port %d is not disabled", port)
} }
} }
} }

View File

@ -47,7 +47,7 @@ var _ = SIGDescribe("[Disruptive]NodeLease", func() {
gomega.Expect(err).To(gomega.BeNil()) gomega.Expect(err).To(gomega.BeNil())
systemPodsNo = int32(len(systemPods)) systemPodsNo = int32(len(systemPods))
if strings.Index(framework.TestContext.CloudConfig.NodeInstanceGroup, ",") >= 0 { if strings.Index(framework.TestContext.CloudConfig.NodeInstanceGroup, ",") >= 0 {
framework.Failf("Test dose not support cluster setup with more than one MIG: %s", framework.TestContext.CloudConfig.NodeInstanceGroup) e2elog.Failf("Test dose not support cluster setup with more than one MIG: %s", framework.TestContext.CloudConfig.NodeInstanceGroup)
} else { } else {
group = framework.TestContext.CloudConfig.NodeInstanceGroup group = framework.TestContext.CloudConfig.NodeInstanceGroup
} }
@ -70,7 +70,7 @@ var _ = SIGDescribe("[Disruptive]NodeLease", func() {
ginkgo.By("restoring the original node instance group size") ginkgo.By("restoring the original node instance group size")
if err := framework.ResizeGroup(group, int32(framework.TestContext.CloudConfig.NumNodes)); err != nil { if err := framework.ResizeGroup(group, int32(framework.TestContext.CloudConfig.NumNodes)); err != nil {
framework.Failf("Couldn't restore the original node instance group size: %v", err) e2elog.Failf("Couldn't restore the original node instance group size: %v", err)
} }
// In GKE, our current tunneling setup has the potential to hold on to a broken tunnel (from a // In GKE, our current tunneling setup has the potential to hold on to a broken tunnel (from a
// rebooted/deleted node) for up to 5 minutes before all tunnels are dropped and recreated. // rebooted/deleted node) for up to 5 minutes before all tunnels are dropped and recreated.
@ -85,11 +85,11 @@ var _ = SIGDescribe("[Disruptive]NodeLease", func() {
time.Sleep(5 * time.Minute) time.Sleep(5 * time.Minute)
} }
if err := framework.WaitForGroupSize(group, int32(framework.TestContext.CloudConfig.NumNodes)); err != nil { if err := framework.WaitForGroupSize(group, int32(framework.TestContext.CloudConfig.NumNodes)); err != nil {
framework.Failf("Couldn't restore the original node instance group size: %v", err) e2elog.Failf("Couldn't restore the original node instance group size: %v", err)
} }
if err := e2enode.WaitForReadyNodes(c, framework.TestContext.CloudConfig.NumNodes, 10*time.Minute); err != nil { if err := e2enode.WaitForReadyNodes(c, framework.TestContext.CloudConfig.NumNodes, 10*time.Minute); err != nil {
framework.Failf("Couldn't restore the original cluster size: %v", err) e2elog.Failf("Couldn't restore the original cluster size: %v", err)
} }
// Many e2e tests assume that the cluster is fully healthy before they start. Wait until // Many e2e tests assume that the cluster is fully healthy before they start. Wait until
// the cluster is restored to health. // the cluster is restored to health.

View File

@ -168,7 +168,7 @@ func testReboot(c clientset.Interface, rebootCmd string, hook terminationHook) {
e2elog.Logf("Node %s failed reboot test.", n.ObjectMeta.Name) e2elog.Logf("Node %s failed reboot test.", n.ObjectMeta.Name)
} }
} }
framework.Failf("Test failed; at least one node failed to reboot in the time given.") e2elog.Failf("Test failed; at least one node failed to reboot in the time given.")
} }
} }

View File

@ -25,6 +25,7 @@ import (
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/common" "k8s.io/kubernetes/test/e2e/common"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2enode "k8s.io/kubernetes/test/e2e/framework/node" e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
@ -55,7 +56,7 @@ var _ = SIGDescribe("Nodes [Disruptive]", func() {
framework.ExpectNoError(err) framework.ExpectNoError(err)
systemPodsNo = int32(len(systemPods)) systemPodsNo = int32(len(systemPods))
if strings.Index(framework.TestContext.CloudConfig.NodeInstanceGroup, ",") >= 0 { if strings.Index(framework.TestContext.CloudConfig.NodeInstanceGroup, ",") >= 0 {
framework.Failf("Test dose not support cluster setup with more than one MIG: %s", framework.TestContext.CloudConfig.NodeInstanceGroup) e2elog.Failf("Test dose not support cluster setup with more than one MIG: %s", framework.TestContext.CloudConfig.NodeInstanceGroup)
} else { } else {
group = framework.TestContext.CloudConfig.NodeInstanceGroup group = framework.TestContext.CloudConfig.NodeInstanceGroup
} }
@ -80,7 +81,7 @@ var _ = SIGDescribe("Nodes [Disruptive]", func() {
ginkgo.By("restoring the original node instance group size") ginkgo.By("restoring the original node instance group size")
if err := framework.ResizeGroup(group, int32(framework.TestContext.CloudConfig.NumNodes)); err != nil { if err := framework.ResizeGroup(group, int32(framework.TestContext.CloudConfig.NumNodes)); err != nil {
framework.Failf("Couldn't restore the original node instance group size: %v", err) e2elog.Failf("Couldn't restore the original node instance group size: %v", err)
} }
// In GKE, our current tunneling setup has the potential to hold on to a broken tunnel (from a // In GKE, our current tunneling setup has the potential to hold on to a broken tunnel (from a
// rebooted/deleted node) for up to 5 minutes before all tunnels are dropped and recreated. // rebooted/deleted node) for up to 5 minutes before all tunnels are dropped and recreated.
@ -95,11 +96,11 @@ var _ = SIGDescribe("Nodes [Disruptive]", func() {
time.Sleep(5 * time.Minute) time.Sleep(5 * time.Minute)
} }
if err := framework.WaitForGroupSize(group, int32(framework.TestContext.CloudConfig.NumNodes)); err != nil { if err := framework.WaitForGroupSize(group, int32(framework.TestContext.CloudConfig.NumNodes)); err != nil {
framework.Failf("Couldn't restore the original node instance group size: %v", err) e2elog.Failf("Couldn't restore the original node instance group size: %v", err)
} }
if err := e2enode.WaitForReadyNodes(c, int(originalNodeCount), 10*time.Minute); err != nil { if err := e2enode.WaitForReadyNodes(c, int(originalNodeCount), 10*time.Minute); err != nil {
framework.Failf("Couldn't restore the original cluster size: %v", err) e2elog.Failf("Couldn't restore the original cluster size: %v", err)
} }
// Many e2e tests assume that the cluster is fully healthy before they start. Wait until // Many e2e tests assume that the cluster is fully healthy before they start. Wait until
// the cluster is restored to health. // the cluster is restored to health.

View File

@ -75,7 +75,7 @@ var _ = SIGDescribe("Restart [Disruptive]", func() {
} }
if !e2epod.CheckPodsRunningReadyOrSucceeded(f.ClientSet, systemNamespace, originalPodNames, framework.PodReadyBeforeTimeout) { if !e2epod.CheckPodsRunningReadyOrSucceeded(f.ClientSet, systemNamespace, originalPodNames, framework.PodReadyBeforeTimeout) {
printStatusAndLogsForNotReadyPods(f.ClientSet, systemNamespace, originalPodNames, pods) printStatusAndLogsForNotReadyPods(f.ClientSet, systemNamespace, originalPodNames, pods)
framework.Failf("At least one pod wasn't running and ready or succeeded at test start.") e2elog.Failf("At least one pod wasn't running and ready or succeeded at test start.")
} }
}) })
@ -99,7 +99,7 @@ var _ = SIGDescribe("Restart [Disruptive]", func() {
// that the names match because that's implementation specific. // that the names match because that's implementation specific.
ginkgo.By("ensuring the same number of nodes exist after the restart") ginkgo.By("ensuring the same number of nodes exist after the restart")
if len(originalNodes) != len(nodesAfter) { if len(originalNodes) != len(nodesAfter) {
framework.Failf("Had %d nodes before nodes were restarted, but now only have %d", e2elog.Failf("Had %d nodes before nodes were restarted, but now only have %d",
len(originalNodes), len(nodesAfter)) len(originalNodes), len(nodesAfter))
} }
@ -114,7 +114,7 @@ var _ = SIGDescribe("Restart [Disruptive]", func() {
if !e2epod.CheckPodsRunningReadyOrSucceeded(f.ClientSet, systemNamespace, podNamesAfter, remaining) { if !e2epod.CheckPodsRunningReadyOrSucceeded(f.ClientSet, systemNamespace, podNamesAfter, remaining) {
pods := ps.List() pods := ps.List()
printStatusAndLogsForNotReadyPods(f.ClientSet, systemNamespace, podNamesAfter, pods) printStatusAndLogsForNotReadyPods(f.ClientSet, systemNamespace, podNamesAfter, pods)
framework.Failf("At least one pod wasn't running and ready after the restart.") e2elog.Failf("At least one pod wasn't running and ready after the restart.")
} }
}) })
}) })

View File

@ -392,7 +392,7 @@ var _ = SIGDescribe("DNS", func() {
defer func() { defer func() {
e2elog.Logf("Deleting pod %s...", testAgnhostPod.Name) e2elog.Logf("Deleting pod %s...", testAgnhostPod.Name)
if err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(testAgnhostPod.Name, metav1.NewDeleteOptions(0)); err != nil { if err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(testAgnhostPod.Name, metav1.NewDeleteOptions(0)); err != nil {
framework.Failf("ginkgo.Failed to delete pod %s: %v", testAgnhostPod.Name, err) e2elog.Failf("ginkgo.Failed to delete pod %s: %v", testAgnhostPod.Name, err)
} }
}() }()
err = f.WaitForPodRunning(testAgnhostPod.Name) err = f.WaitForPodRunning(testAgnhostPod.Name)
@ -415,13 +415,13 @@ var _ = SIGDescribe("DNS", func() {
ginkgo.By("Verifying customized DNS suffix list is configured on pod...") ginkgo.By("Verifying customized DNS suffix list is configured on pod...")
stdout := runCommand("dns-suffix") stdout := runCommand("dns-suffix")
if !strings.Contains(stdout, testSearchPath) { if !strings.Contains(stdout, testSearchPath) {
framework.Failf("customized DNS suffix list not found configured in pod, expected to contain: %s, got: %s", testSearchPath, stdout) e2elog.Failf("customized DNS suffix list not found configured in pod, expected to contain: %s, got: %s", testSearchPath, stdout)
} }
ginkgo.By("Verifying customized DNS server is configured on pod...") ginkgo.By("Verifying customized DNS server is configured on pod...")
stdout = runCommand("dns-server-list") stdout = runCommand("dns-server-list")
if !strings.Contains(stdout, testServerIP) { if !strings.Contains(stdout, testServerIP) {
framework.Failf("customized DNS server not found in configured in pod, expected to contain: %s, got: %s", testServerIP, stdout) e2elog.Failf("customized DNS server not found in configured in pod, expected to contain: %s, got: %s", testServerIP, stdout)
} }
}) })
@ -441,7 +441,7 @@ var _ = SIGDescribe("DNS", func() {
defer func() { defer func() {
e2elog.Logf("Deleting pod %s...", testServerPod.Name) e2elog.Logf("Deleting pod %s...", testServerPod.Name)
if err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(testServerPod.Name, metav1.NewDeleteOptions(0)); err != nil { if err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(testServerPod.Name, metav1.NewDeleteOptions(0)); err != nil {
framework.Failf("ginkgo.Failed to delete pod %s: %v", testServerPod.Name, err) e2elog.Failf("ginkgo.Failed to delete pod %s: %v", testServerPod.Name, err)
} }
}() }()
err = f.WaitForPodRunning(testServerPod.Name) err = f.WaitForPodRunning(testServerPod.Name)
@ -473,7 +473,7 @@ var _ = SIGDescribe("DNS", func() {
defer func() { defer func() {
e2elog.Logf("Deleting pod %s...", testUtilsPod.Name) e2elog.Logf("Deleting pod %s...", testUtilsPod.Name)
if err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(testUtilsPod.Name, metav1.NewDeleteOptions(0)); err != nil { if err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(testUtilsPod.Name, metav1.NewDeleteOptions(0)); err != nil {
framework.Failf("ginkgo.Failed to delete pod %s: %v", testUtilsPod.Name, err) e2elog.Failf("ginkgo.Failed to delete pod %s: %v", testUtilsPod.Name, err)
} }
}() }()
err = f.WaitForPodRunning(testUtilsPod.Name) err = f.WaitForPodRunning(testUtilsPod.Name)
@ -492,7 +492,7 @@ var _ = SIGDescribe("DNS", func() {
}) })
framework.ExpectNoError(err, "failed to examine resolv,conf file on pod, stdout: %v, stderr: %v, err: %v", stdout, stderr, err) framework.ExpectNoError(err, "failed to examine resolv,conf file on pod, stdout: %v, stderr: %v, err: %v", stdout, stderr, err)
if !strings.Contains(stdout, "ndots:2") { if !strings.Contains(stdout, "ndots:2") {
framework.Failf("customized DNS options not found in resolv.conf, got: %s", stdout) e2elog.Failf("customized DNS options not found in resolv.conf, got: %s", stdout)
} }
ginkgo.By("Verifying customized name server and search path are working...") ginkgo.By("Verifying customized name server and search path are working...")

View File

@ -100,7 +100,7 @@ func (t *dnsTestCommon) checkDNSRecordFrom(name string, predicate func([]string)
}) })
if err != nil { if err != nil {
framework.Failf("dig result did not match: %#v after %v", e2elog.Failf("dig result did not match: %#v after %v",
actual, timeout) actual, timeout)
} }
} }
@ -525,7 +525,7 @@ func assertFilesContain(fileNames []string, fileDir string, pod *v1.Pod, client
if err != nil { if err != nil {
if ctx.Err() != nil { if ctx.Err() != nil {
framework.Failf("Unable to read %s from pod %s/%s: %v", fileName, pod.Namespace, pod.Name, err) e2elog.Failf("Unable to read %s from pod %s/%s: %v", fileName, pod.Namespace, pod.Name, err)
} else { } else {
e2elog.Logf("Unable to read %s from pod %s/%s: %v", fileName, pod.Namespace, pod.Name, err) e2elog.Logf("Unable to read %s from pod %s/%s: %v", fileName, pod.Namespace, pod.Name, err)
} }
@ -553,7 +553,7 @@ func validateDNSResults(f *framework.Framework, pod *v1.Pod, fileNames []string)
podClient.Delete(pod.Name, metav1.NewDeleteOptions(0)) podClient.Delete(pod.Name, metav1.NewDeleteOptions(0))
}() }()
if _, err := podClient.Create(pod); err != nil { if _, err := podClient.Create(pod); err != nil {
framework.Failf("ginkgo.Failed to create pod %s/%s: %v", pod.Namespace, pod.Name, err) e2elog.Failf("ginkgo.Failed to create pod %s/%s: %v", pod.Namespace, pod.Name, err)
} }
framework.ExpectNoError(f.WaitForPodRunning(pod.Name)) framework.ExpectNoError(f.WaitForPodRunning(pod.Name))
@ -561,7 +561,7 @@ func validateDNSResults(f *framework.Framework, pod *v1.Pod, fileNames []string)
ginkgo.By("retrieving the pod") ginkgo.By("retrieving the pod")
pod, err := podClient.Get(pod.Name, metav1.GetOptions{}) pod, err := podClient.Get(pod.Name, metav1.GetOptions{})
if err != nil { if err != nil {
framework.Failf("ginkgo.Failed to get pod %s/%s: %v", pod.Namespace, pod.Name, err) e2elog.Failf("ginkgo.Failed to get pod %s/%s: %v", pod.Namespace, pod.Name, err)
} }
// Try to find results for each expected name. // Try to find results for each expected name.
ginkgo.By("looking for the results for each expected name from probers") ginkgo.By("looking for the results for each expected name from probers")
@ -581,7 +581,7 @@ func validateTargetedProbeOutput(f *framework.Framework, pod *v1.Pod, fileNames
podClient.Delete(pod.Name, metav1.NewDeleteOptions(0)) podClient.Delete(pod.Name, metav1.NewDeleteOptions(0))
}() }()
if _, err := podClient.Create(pod); err != nil { if _, err := podClient.Create(pod); err != nil {
framework.Failf("ginkgo.Failed to create pod %s/%s: %v", pod.Namespace, pod.Name, err) e2elog.Failf("ginkgo.Failed to create pod %s/%s: %v", pod.Namespace, pod.Name, err)
} }
framework.ExpectNoError(f.WaitForPodRunning(pod.Name)) framework.ExpectNoError(f.WaitForPodRunning(pod.Name))
@ -589,7 +589,7 @@ func validateTargetedProbeOutput(f *framework.Framework, pod *v1.Pod, fileNames
ginkgo.By("retrieving the pod") ginkgo.By("retrieving the pod")
pod, err := podClient.Get(pod.Name, metav1.GetOptions{}) pod, err := podClient.Get(pod.Name, metav1.GetOptions{})
if err != nil { if err != nil {
framework.Failf("ginkgo.Failed to get pod %s/%s: %v", pod.Namespace, pod.Name, err) e2elog.Failf("ginkgo.Failed to get pod %s/%s: %v", pod.Namespace, pod.Name, err)
} }
// Try to find the expected value for each expected name. // Try to find the expected value for each expected name.
ginkgo.By("looking for the results for each expected name from probers") ginkgo.By("looking for the results for each expected name from probers")

View File

@ -128,7 +128,7 @@ var _ = SIGDescribe("ClusterDns [Feature:Example]", func() {
pods, err := c.CoreV1().Pods(namespaces[0].Name).List(options) pods, err := c.CoreV1().Pods(namespaces[0].Name).List(options)
if err != nil || pods == nil || len(pods.Items) == 0 { if err != nil || pods == nil || len(pods.Items) == 0 {
framework.Failf("no running pods found") e2elog.Failf("no running pods found")
} }
podName := pods.Items[0].Name podName := pods.Items[0].Name

View File

@ -77,7 +77,7 @@ var _ = SIGDescribe("Firewall rule", func() {
gomega.Expect(nodeList).NotTo(gomega.BeNil()) gomega.Expect(nodeList).NotTo(gomega.BeNil())
nodesNames := jig.GetNodesNames(framework.MaxNodesForEndpointsTests) nodesNames := jig.GetNodesNames(framework.MaxNodesForEndpointsTests)
if len(nodesNames) <= 0 { if len(nodesNames) <= 0 {
framework.Failf("Expect at least 1 node, got: %v", nodesNames) e2elog.Failf("Expect at least 1 node, got: %v", nodesNames)
} }
nodesSet := sets.NewString(nodesNames...) nodesSet := sets.NewString(nodesNames...)
@ -177,7 +177,7 @@ var _ = SIGDescribe("Firewall rule", func() {
ginkgo.It("should have correct firewall rules for e2e cluster", func() { ginkgo.It("should have correct firewall rules for e2e cluster", func() {
nodes := framework.GetReadySchedulableNodesOrDie(cs) nodes := framework.GetReadySchedulableNodesOrDie(cs)
if len(nodes.Items) <= 0 { if len(nodes.Items) <= 0 {
framework.Failf("Expect at least 1 node, got: %v", len(nodes.Items)) e2elog.Failf("Expect at least 1 node, got: %v", len(nodes.Items))
} }
ginkgo.By("Checking if e2e firewall rules are correct") ginkgo.By("Checking if e2e firewall rules are correct")
@ -191,7 +191,7 @@ var _ = SIGDescribe("Firewall rule", func() {
ginkgo.By("Checking well known ports on master and nodes are not exposed externally") ginkgo.By("Checking well known ports on master and nodes are not exposed externally")
nodeAddrs := e2enode.FirstAddress(nodes, v1.NodeExternalIP) nodeAddrs := e2enode.FirstAddress(nodes, v1.NodeExternalIP)
if len(nodeAddrs) == 0 { if len(nodeAddrs) == 0 {
framework.Failf("did not find any node addresses") e2elog.Failf("did not find any node addresses")
} }
masterAddresses := framework.GetAllMasterAddresses(cs) masterAddresses := framework.GetAllMasterAddresses(cs)
@ -208,9 +208,9 @@ var _ = SIGDescribe("Firewall rule", func() {
func assertNotReachableHTTPTimeout(ip string, port int, timeout time.Duration) { func assertNotReachableHTTPTimeout(ip string, port int, timeout time.Duration) {
result := framework.PokeHTTP(ip, port, "/", &framework.HTTPPokeParams{Timeout: timeout}) result := framework.PokeHTTP(ip, port, "/", &framework.HTTPPokeParams{Timeout: timeout})
if result.Status == framework.HTTPError { if result.Status == framework.HTTPError {
framework.Failf("Unexpected error checking for reachability of %s:%d: %v", ip, port, result.Error) e2elog.Failf("Unexpected error checking for reachability of %s:%d: %v", ip, port, result.Error)
} }
if result.Code != 0 { if result.Code != 0 {
framework.Failf("Was unexpectedly able to reach %s:%d", ip, port) e2elog.Failf("Was unexpectedly able to reach %s:%d", ip, port)
} }
} }

View File

@ -201,7 +201,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
if annotations != nil && (annotations[umKey] != "" || annotations[fwKey] != "" || if annotations != nil && (annotations[umKey] != "" || annotations[fwKey] != "" ||
annotations[tpKey] != "" || annotations[fwsKey] != "" || annotations[tpsKey] != "" || annotations[tpKey] != "" || annotations[fwsKey] != "" || annotations[tpsKey] != "" ||
annotations[scKey] != "" || annotations[beKey] != "") { annotations[scKey] != "" || annotations[beKey] != "") {
framework.Failf("unexpected annotations. Expected to not have annotations for urlmap, forwarding rule, target proxy, ssl cert and backends, got: %v", annotations) e2elog.Failf("unexpected annotations. Expected to not have annotations for urlmap, forwarding rule, target proxy, ssl cert and backends, got: %v", annotations)
return true, nil return true, nil
} }
return false, nil return false, nil
@ -210,26 +210,26 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
// Verify that the controller does not create any other resource except instance group. // Verify that the controller does not create any other resource except instance group.
// TODO(59778): Check GCE resources specific to this ingress instead of listing all resources. // TODO(59778): Check GCE resources specific to this ingress instead of listing all resources.
if len(gceController.ListURLMaps()) != 0 { if len(gceController.ListURLMaps()) != 0 {
framework.Failf("unexpected url maps, expected none, got: %v", gceController.ListURLMaps()) e2elog.Failf("unexpected url maps, expected none, got: %v", gceController.ListURLMaps())
} }
if len(gceController.ListGlobalForwardingRules()) != 0 { if len(gceController.ListGlobalForwardingRules()) != 0 {
framework.Failf("unexpected forwarding rules, expected none, got: %v", gceController.ListGlobalForwardingRules()) e2elog.Failf("unexpected forwarding rules, expected none, got: %v", gceController.ListGlobalForwardingRules())
} }
if len(gceController.ListTargetHTTPProxies()) != 0 { if len(gceController.ListTargetHTTPProxies()) != 0 {
framework.Failf("unexpected target http proxies, expected none, got: %v", gceController.ListTargetHTTPProxies()) e2elog.Failf("unexpected target http proxies, expected none, got: %v", gceController.ListTargetHTTPProxies())
} }
if len(gceController.ListTargetHTTPSProxies()) != 0 { if len(gceController.ListTargetHTTPSProxies()) != 0 {
framework.Failf("unexpected target https proxies, expected none, got: %v", gceController.ListTargetHTTPProxies()) e2elog.Failf("unexpected target https proxies, expected none, got: %v", gceController.ListTargetHTTPProxies())
} }
if len(gceController.ListSslCertificates()) != 0 { if len(gceController.ListSslCertificates()) != 0 {
framework.Failf("unexpected ssl certificates, expected none, got: %v", gceController.ListSslCertificates()) e2elog.Failf("unexpected ssl certificates, expected none, got: %v", gceController.ListSslCertificates())
} }
if len(gceController.ListGlobalBackendServices()) != 0 { if len(gceController.ListGlobalBackendServices()) != 0 {
framework.Failf("unexpected backend service, expected none, got: %v", gceController.ListGlobalBackendServices()) e2elog.Failf("unexpected backend service, expected none, got: %v", gceController.ListGlobalBackendServices())
} }
// Controller does not have a list command for firewall rule. We use get instead. // Controller does not have a list command for firewall rule. We use get instead.
if fw, err := gceController.GetFirewallRuleOrError(); err == nil { if fw, err := gceController.GetFirewallRuleOrError(); err == nil {
framework.Failf("unexpected nil error in getting firewall rule, expected firewall NotFound, got firewall: %v", fw) e2elog.Failf("unexpected nil error in getting firewall rule, expected firewall NotFound, got firewall: %v", fw)
} }
// TODO(nikhiljindal): Check the instance group annotation value and verify with a multizone cluster. // TODO(nikhiljindal): Check the instance group annotation value and verify with a multizone cluster.
@ -662,16 +662,16 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
filePath := filepath.Join(framework.TestContext.OutputDir, "mci.yaml") filePath := filepath.Join(framework.TestContext.OutputDir, "mci.yaml")
output, err := framework.RunKubemciWithKubeconfig("remove-clusters", name, "--ingress="+filePath) output, err := framework.RunKubemciWithKubeconfig("remove-clusters", name, "--ingress="+filePath)
if err != nil { if err != nil {
framework.Failf("unexpected error in running kubemci remove-clusters command to remove from all clusters: %s", err) e2elog.Failf("unexpected error in running kubemci remove-clusters command to remove from all clusters: %s", err)
} }
if !strings.Contains(output, "You should use kubemci delete to delete the ingress completely") { if !strings.Contains(output, "You should use kubemci delete to delete the ingress completely") {
framework.Failf("unexpected output in removing an ingress from all clusters, expected the output to include: You should use kubemci delete to delete the ingress completely, actual output: %s", output) e2elog.Failf("unexpected output in removing an ingress from all clusters, expected the output to include: You should use kubemci delete to delete the ingress completely, actual output: %s", output)
} }
// Verify that the ingress is still spread to 1 cluster as expected. // Verify that the ingress is still spread to 1 cluster as expected.
verifyKubemciStatusHas(name, "is spread across 1 cluster") verifyKubemciStatusHas(name, "is spread across 1 cluster")
// remove-clusters should succeed with --force=true // remove-clusters should succeed with --force=true
if _, err := framework.RunKubemciWithKubeconfig("remove-clusters", name, "--ingress="+filePath, "--force=true"); err != nil { if _, err := framework.RunKubemciWithKubeconfig("remove-clusters", name, "--ingress="+filePath, "--force=true"); err != nil {
framework.Failf("unexpected error in running kubemci remove-clusters to remove from all clusters with --force=true: %s", err) e2elog.Failf("unexpected error in running kubemci remove-clusters to remove from all clusters with --force=true: %s", err)
} }
verifyKubemciStatusHas(name, "is spread across 0 cluster") verifyKubemciStatusHas(name, "is spread across 0 cluster")
}) })
@ -765,10 +765,10 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
func verifyKubemciStatusHas(name, expectedSubStr string) { func verifyKubemciStatusHas(name, expectedSubStr string) {
statusStr, err := framework.RunKubemciCmd("get-status", name) statusStr, err := framework.RunKubemciCmd("get-status", name)
if err != nil { if err != nil {
framework.Failf("unexpected error in running kubemci get-status %s: %s", name, err) e2elog.Failf("unexpected error in running kubemci get-status %s: %s", name, err)
} }
if !strings.Contains(statusStr, expectedSubStr) { if !strings.Contains(statusStr, expectedSubStr) {
framework.Failf("expected status to have sub string %s, actual status: %s", expectedSubStr, statusStr) e2elog.Failf("expected status to have sub string %s, actual status: %s", expectedSubStr, statusStr)
} }
} }
@ -843,7 +843,7 @@ func executeBacksideBacksideHTTPSTest(f *framework.Framework, jig *ingress.TestJ
defer func() { defer func() {
ginkgo.By("Cleaning up re-encryption ingress, service and deployment") ginkgo.By("Cleaning up re-encryption ingress, service and deployment")
if errs := jig.DeleteTestResource(f.ClientSet, deployCreated, svcCreated, ingCreated); len(errs) > 0 { if errs := jig.DeleteTestResource(f.ClientSet, deployCreated, svcCreated, ingCreated); len(errs) > 0 {
framework.Failf("ginkgo.Failed to cleanup re-encryption ingress: %v", errs) e2elog.Failf("ginkgo.Failed to cleanup re-encryption ingress: %v", errs)
} }
}() }()
framework.ExpectNoError(err, "ginkgo.Failed to create re-encryption ingress") framework.ExpectNoError(err, "ginkgo.Failed to create re-encryption ingress")

View File

@ -18,6 +18,7 @@ package network
import ( import (
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
"k8s.io/kubernetes/test/e2e/network/scale" "k8s.io/kubernetes/test/e2e/network/scale"
"github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
@ -44,19 +45,19 @@ var _ = SIGDescribe("Loadbalancing: L7 Scalability", func() {
scaleFramework = scale.NewIngressScaleFramework(f.ClientSet, ns, framework.TestContext.CloudConfig) scaleFramework = scale.NewIngressScaleFramework(f.ClientSet, ns, framework.TestContext.CloudConfig)
if err := scaleFramework.PrepareScaleTest(); err != nil { if err := scaleFramework.PrepareScaleTest(); err != nil {
framework.Failf("Unexpected error while preparing ingress scale test: %v", err) e2elog.Failf("Unexpected error while preparing ingress scale test: %v", err)
} }
}) })
ginkgo.AfterEach(func() { ginkgo.AfterEach(func() {
if errs := scaleFramework.CleanupScaleTest(); len(errs) != 0 { if errs := scaleFramework.CleanupScaleTest(); len(errs) != 0 {
framework.Failf("Unexpected error while cleaning up ingress scale test: %v", errs) e2elog.Failf("Unexpected error while cleaning up ingress scale test: %v", errs)
} }
}) })
ginkgo.It("Creating and updating ingresses should happen promptly with small/medium/large amount of ingresses", func() { ginkgo.It("Creating and updating ingresses should happen promptly with small/medium/large amount of ingresses", func() {
if errs := scaleFramework.RunScaleTest(); len(errs) != 0 { if errs := scaleFramework.RunScaleTest(); len(errs) != 0 {
framework.Failf("Unexpected error while running ingress scale test: %v", errs) e2elog.Failf("Unexpected error while running ingress scale test: %v", errs)
} }
}) })

View File

@ -541,7 +541,7 @@ func testCanConnect(f *framework.Framework, ns *v1.Namespace, podName string, se
defer func() { defer func() {
ginkgo.By(fmt.Sprintf("Cleaning up the pod %s", podName)) ginkgo.By(fmt.Sprintf("Cleaning up the pod %s", podName))
if err := f.ClientSet.CoreV1().Pods(ns.Name).Delete(podClient.Name, nil); err != nil { if err := f.ClientSet.CoreV1().Pods(ns.Name).Delete(podClient.Name, nil); err != nil {
framework.Failf("unable to cleanup pod %v: %v", podClient.Name, err) e2elog.Failf("unable to cleanup pod %v: %v", podClient.Name, err)
} }
}() }()
@ -555,7 +555,7 @@ func testCanConnect(f *framework.Framework, ns *v1.Namespace, podName string, se
// Collect pod logs when we see a failure. // Collect pod logs when we see a failure.
logs, logErr := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, podName, fmt.Sprintf("%s-container", podName)) logs, logErr := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, podName, fmt.Sprintf("%s-container", podName))
if logErr != nil { if logErr != nil {
framework.Failf("Error getting container logs: %s", logErr) e2elog.Failf("Error getting container logs: %s", logErr)
} }
// Collect current NetworkPolicies applied in the test namespace. // Collect current NetworkPolicies applied in the test namespace.
@ -575,7 +575,7 @@ func testCanConnect(f *framework.Framework, ns *v1.Namespace, podName string, se
pods = append(pods, fmt.Sprintf("Pod: %s, Status: %s\n", p.Name, p.Status.String())) pods = append(pods, fmt.Sprintf("Pod: %s, Status: %s\n", p.Name, p.Status.String()))
} }
framework.Failf("Pod %s should be able to connect to service %s, but was not able to connect.\nPod logs:\n%s\n\n Current NetworkPolicies:\n\t%v\n\n Pods:\n\t%v\n\n", podName, service.Name, logs, policies.Items, pods) e2elog.Failf("Pod %s should be able to connect to service %s, but was not able to connect.\nPod logs:\n%s\n\n Current NetworkPolicies:\n\t%v\n\n Pods:\n\t%v\n\n", podName, service.Name, logs, policies.Items, pods)
// Dump debug information for the test namespace. // Dump debug information for the test namespace.
framework.DumpDebugInfo(f.ClientSet, f.Namespace.Name) framework.DumpDebugInfo(f.ClientSet, f.Namespace.Name)
@ -588,7 +588,7 @@ func testCannotConnect(f *framework.Framework, ns *v1.Namespace, podName string,
defer func() { defer func() {
ginkgo.By(fmt.Sprintf("Cleaning up the pod %s", podName)) ginkgo.By(fmt.Sprintf("Cleaning up the pod %s", podName))
if err := f.ClientSet.CoreV1().Pods(ns.Name).Delete(podClient.Name, nil); err != nil { if err := f.ClientSet.CoreV1().Pods(ns.Name).Delete(podClient.Name, nil); err != nil {
framework.Failf("unable to cleanup pod %v: %v", podClient.Name, err) e2elog.Failf("unable to cleanup pod %v: %v", podClient.Name, err)
} }
}() }()
@ -601,7 +601,7 @@ func testCannotConnect(f *framework.Framework, ns *v1.Namespace, podName string,
// Collect pod logs when we see a failure. // Collect pod logs when we see a failure.
logs, logErr := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, podName, fmt.Sprintf("%s-container", podName)) logs, logErr := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, podName, fmt.Sprintf("%s-container", podName))
if logErr != nil { if logErr != nil {
framework.Failf("Error getting container logs: %s", logErr) e2elog.Failf("Error getting container logs: %s", logErr)
} }
// Collect current NetworkPolicies applied in the test namespace. // Collect current NetworkPolicies applied in the test namespace.
@ -621,7 +621,7 @@ func testCannotConnect(f *framework.Framework, ns *v1.Namespace, podName string,
pods = append(pods, fmt.Sprintf("Pod: %s, Status: %s\n", p.Name, p.Status.String())) pods = append(pods, fmt.Sprintf("Pod: %s, Status: %s\n", p.Name, p.Status.String()))
} }
framework.Failf("Pod %s should not be able to connect to service %s, but was able to connect.\nPod logs:\n%s\n\n Current NetworkPolicies:\n\t%v\n\n Pods:\n\t %v\n\n", podName, service.Name, logs, policies.Items, pods) e2elog.Failf("Pod %s should not be able to connect to service %s, but was able to connect.\nPod logs:\n%s\n\n Current NetworkPolicies:\n\t%v\n\n Pods:\n\t %v\n\n", podName, service.Name, logs, policies.Items, pods)
// Dump debug information for the test namespace. // Dump debug information for the test namespace.
framework.DumpDebugInfo(f.ClientSet, f.Namespace.Name) framework.DumpDebugInfo(f.ClientSet, f.Namespace.Name)
@ -712,11 +712,11 @@ func createServerPodAndService(f *framework.Framework, namespace *v1.Namespace,
func cleanupServerPodAndService(f *framework.Framework, pod *v1.Pod, service *v1.Service) { func cleanupServerPodAndService(f *framework.Framework, pod *v1.Pod, service *v1.Service) {
ginkgo.By("Cleaning up the server.") ginkgo.By("Cleaning up the server.")
if err := f.ClientSet.CoreV1().Pods(pod.Namespace).Delete(pod.Name, nil); err != nil { if err := f.ClientSet.CoreV1().Pods(pod.Namespace).Delete(pod.Name, nil); err != nil {
framework.Failf("unable to cleanup pod %v: %v", pod.Name, err) e2elog.Failf("unable to cleanup pod %v: %v", pod.Name, err)
} }
ginkgo.By("Cleaning up the server's service.") ginkgo.By("Cleaning up the server's service.")
if err := f.ClientSet.CoreV1().Services(service.Namespace).Delete(service.Name, nil); err != nil { if err := f.ClientSet.CoreV1().Services(service.Namespace).Delete(service.Name, nil); err != nil {
framework.Failf("unable to cleanup svc %v: %v", service.Name, err) e2elog.Failf("unable to cleanup svc %v: %v", service.Name, err)
} }
} }
@ -756,6 +756,6 @@ func createNetworkClientPod(f *framework.Framework, namespace *v1.Namespace, pod
func cleanupNetworkPolicy(f *framework.Framework, policy *networkingv1.NetworkPolicy) { func cleanupNetworkPolicy(f *framework.Framework, policy *networkingv1.NetworkPolicy) {
ginkgo.By("Cleaning up the policy.") ginkgo.By("Cleaning up the policy.")
if err := f.ClientSet.NetworkingV1().NetworkPolicies(policy.Namespace).Delete(policy.Name, nil); err != nil { if err := f.ClientSet.NetworkingV1().NetworkPolicies(policy.Namespace).Delete(policy.Name, nil); err != nil {
framework.Failf("unable to cleanup policy %v: %v", policy.Name, err) e2elog.Failf("unable to cleanup policy %v: %v", policy.Name, err)
} }
} }

View File

@ -23,6 +23,7 @@ import (
"k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/sets"
"k8s.io/kubernetes/pkg/master/ports" "k8s.io/kubernetes/pkg/master/ports"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
"github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
) )
@ -38,10 +39,10 @@ var _ = SIGDescribe("Networking", func() {
ginkgo.By("Executing a successful http request from the external internet") ginkgo.By("Executing a successful http request from the external internet")
resp, err := http.Get("http://google.com") resp, err := http.Get("http://google.com")
if err != nil { if err != nil {
framework.Failf("Unable to connect/talk to the internet: %v", err) e2elog.Failf("Unable to connect/talk to the internet: %v", err)
} }
if resp.StatusCode != http.StatusOK { if resp.StatusCode != http.StatusOK {
framework.Failf("Unexpected error code, expected 200, got, %v (%v)", resp.StatusCode, resp) e2elog.Failf("Unexpected error code, expected 200, got, %v (%v)", resp.StatusCode, resp)
} }
}) })
@ -79,7 +80,7 @@ var _ = SIGDescribe("Networking", func() {
AbsPath(test.path). AbsPath(test.path).
DoRaw() DoRaw()
if err != nil { if err != nil {
framework.Failf("ginkgo.Failed: %v\nBody: %s", err, string(data)) e2elog.Failf("ginkgo.Failed: %v\nBody: %s", err, string(data))
} }
} }
}) })
@ -207,13 +208,13 @@ var _ = SIGDescribe("Networking", func() {
// Check if number of endpoints returned are exactly one. // Check if number of endpoints returned are exactly one.
eps, err := config.GetEndpointsFromTestContainer("http", config.SessionAffinityService.Spec.ClusterIP, framework.ClusterHTTPPort, framework.SessionAffinityChecks) eps, err := config.GetEndpointsFromTestContainer("http", config.SessionAffinityService.Spec.ClusterIP, framework.ClusterHTTPPort, framework.SessionAffinityChecks)
if err != nil { if err != nil {
framework.Failf("ginkgo.Failed to get endpoints from test container, error: %v", err) e2elog.Failf("ginkgo.Failed to get endpoints from test container, error: %v", err)
} }
if len(eps) == 0 { if len(eps) == 0 {
framework.Failf("Unexpected no endpoints return") e2elog.Failf("Unexpected no endpoints return")
} }
if len(eps) > 1 { if len(eps) > 1 {
framework.Failf("Unexpected endpoints return: %v, expect 1 endpoints", eps) e2elog.Failf("Unexpected endpoints return: %v, expect 1 endpoints", eps)
} }
}) })
@ -224,13 +225,13 @@ var _ = SIGDescribe("Networking", func() {
// Check if number of endpoints returned are exactly one. // Check if number of endpoints returned are exactly one.
eps, err := config.GetEndpointsFromTestContainer("udp", config.SessionAffinityService.Spec.ClusterIP, framework.ClusterUDPPort, framework.SessionAffinityChecks) eps, err := config.GetEndpointsFromTestContainer("udp", config.SessionAffinityService.Spec.ClusterIP, framework.ClusterUDPPort, framework.SessionAffinityChecks)
if err != nil { if err != nil {
framework.Failf("ginkgo.Failed to get endpoints from test container, error: %v", err) e2elog.Failf("ginkgo.Failed to get endpoints from test container, error: %v", err)
} }
if len(eps) == 0 { if len(eps) == 0 {
framework.Failf("Unexpected no endpoints return") e2elog.Failf("Unexpected no endpoints return")
} }
if len(eps) > 1 { if len(eps) > 1 {
framework.Failf("Unexpected endpoints return: %v, expect 1 endpoints", eps) e2elog.Failf("Unexpected endpoints return: %v, expect 1 endpoints", eps)
} }
}) })
}) })

View File

@ -87,7 +87,7 @@ func networkingIPerfTest(isIPv6 bool) {
) )
if err != nil { if err != nil {
framework.Failf("Fatal error waiting for iperf server endpoint : %v", err) e2elog.Failf("Fatal error waiting for iperf server endpoint : %v", err)
} }
iperfClientPodLabels := f.CreatePodsPerNodeForSimpleApp( iperfClientPodLabels := f.CreatePodsPerNodeForSimpleApp(
@ -134,9 +134,9 @@ func networkingIPerfTest(isIPv6 bool) {
pods, err2 := iperfClusterVerification.WaitFor(expectedCli, iperfTimeout) pods, err2 := iperfClusterVerification.WaitFor(expectedCli, iperfTimeout)
if err2 != nil { if err2 != nil {
framework.Failf("Error in wait...") e2elog.Failf("Error in wait...")
} else if len(pods) < expectedCli { } else if len(pods) < expectedCli {
framework.Failf("IPerf restuls : Only got %v out of %v, after waiting %v", len(pods), expectedCli, iperfTimeout) e2elog.Failf("IPerf restuls : Only got %v out of %v, after waiting %v", len(pods), expectedCli, iperfTimeout)
} else { } else {
// For each builds up a collection of IPerfRecords // For each builds up a collection of IPerfRecords
iperfClusterVerification.ForEach( iperfClusterVerification.ForEach(
@ -146,7 +146,7 @@ func networkingIPerfTest(isIPv6 bool) {
e2elog.Logf(resultS) e2elog.Logf(resultS)
iperfResults.Add(NewIPerf(resultS)) iperfResults.Add(NewIPerf(resultS))
} else { } else {
framework.Failf("Unexpected error, %v when running forEach on the pods.", err) e2elog.Failf("Unexpected error, %v when running forEach on the pods.", err)
} }
}) })
} }

View File

@ -255,7 +255,7 @@ var _ = SIGDescribe("Proxy", func() {
e2elog.Logf("Pod %s has the following error logs: %s", pods[0].Name, body) e2elog.Logf("Pod %s has the following error logs: %s", pods[0].Name, body)
} }
framework.Failf(strings.Join(errs, "\n")) e2elog.Failf(strings.Join(errs, "\n"))
} }
}) })
}) })

View File

@ -349,7 +349,7 @@ var _ = SIGDescribe("Services", func() {
hosts, err := e2essh.NodeSSHHosts(cs) hosts, err := e2essh.NodeSSHHosts(cs)
framework.ExpectNoError(err, "failed to find external/internal IPs for every node") framework.ExpectNoError(err, "failed to find external/internal IPs for every node")
if len(hosts) == 0 { if len(hosts) == 0 {
framework.Failf("No ssh-able nodes") e2elog.Failf("No ssh-able nodes")
} }
host := hosts[0] host := hosts[0]
@ -374,7 +374,7 @@ var _ = SIGDescribe("Services", func() {
framework.ExpectNoError(err, "failed to create replication controller with service: %s in the namespace: %s", svc3, ns) framework.ExpectNoError(err, "failed to create replication controller with service: %s in the namespace: %s", svc3, ns)
if svc2IP == svc3IP { if svc2IP == svc3IP {
framework.Failf("service IPs conflict: %v", svc2IP) e2elog.Failf("service IPs conflict: %v", svc2IP)
} }
ginkgo.By("verifying service " + svc2 + " is still up") ginkgo.By("verifying service " + svc2 + " is still up")
@ -407,13 +407,13 @@ var _ = SIGDescribe("Services", func() {
framework.ExpectNoError(err, "failed to create replication controller with service: %s in the namespace: %s", svc2, ns) framework.ExpectNoError(err, "failed to create replication controller with service: %s in the namespace: %s", svc2, ns)
if svc1IP == svc2IP { if svc1IP == svc2IP {
framework.Failf("VIPs conflict: %v", svc1IP) e2elog.Failf("VIPs conflict: %v", svc1IP)
} }
hosts, err := e2essh.NodeSSHHosts(cs) hosts, err := e2essh.NodeSSHHosts(cs)
framework.ExpectNoError(err, "failed to find external/internal IPs for every node") framework.ExpectNoError(err, "failed to find external/internal IPs for every node")
if len(hosts) == 0 { if len(hosts) == 0 {
framework.Failf("No ssh-able nodes") e2elog.Failf("No ssh-able nodes")
} }
host := hosts[0] host := hosts[0]
@ -422,7 +422,7 @@ var _ = SIGDescribe("Services", func() {
ginkgo.By(fmt.Sprintf("Restarting kube-proxy on %v", host)) ginkgo.By(fmt.Sprintf("Restarting kube-proxy on %v", host))
if err := framework.RestartKubeProxy(host); err != nil { if err := framework.RestartKubeProxy(host); err != nil {
framework.Failf("error restarting kube-proxy: %v", err) e2elog.Failf("error restarting kube-proxy: %v", err)
} }
framework.ExpectNoError(framework.VerifyServeHostnameServiceUp(cs, ns, host, podNames1, svc1IP, servicePort)) framework.ExpectNoError(framework.VerifyServeHostnameServiceUp(cs, ns, host, podNames1, svc1IP, servicePort))
framework.ExpectNoError(framework.VerifyServeHostnameServiceUp(cs, ns, host, podNames2, svc2IP, servicePort)) framework.ExpectNoError(framework.VerifyServeHostnameServiceUp(cs, ns, host, podNames2, svc2IP, servicePort))
@ -434,7 +434,7 @@ var _ = SIGDescribe("Services", func() {
sudo iptables -t nat -F KUBE-PORTALS-CONTAINER || true`, host, framework.TestContext.Provider) sudo iptables -t nat -F KUBE-PORTALS-CONTAINER || true`, host, framework.TestContext.Provider)
if err != nil || result.Code != 0 { if err != nil || result.Code != 0 {
e2essh.LogResult(result) e2essh.LogResult(result)
framework.Failf("couldn't remove iptable rules: %v", err) e2elog.Failf("couldn't remove iptable rules: %v", err)
} }
framework.ExpectNoError(framework.VerifyServeHostnameServiceUp(cs, ns, host, podNames1, svc1IP, servicePort)) framework.ExpectNoError(framework.VerifyServeHostnameServiceUp(cs, ns, host, podNames1, svc1IP, servicePort))
framework.ExpectNoError(framework.VerifyServeHostnameServiceUp(cs, ns, host, podNames2, svc2IP, servicePort)) framework.ExpectNoError(framework.VerifyServeHostnameServiceUp(cs, ns, host, podNames2, svc2IP, servicePort))
@ -459,7 +459,7 @@ var _ = SIGDescribe("Services", func() {
hosts, err := e2essh.NodeSSHHosts(cs) hosts, err := e2essh.NodeSSHHosts(cs)
framework.ExpectNoError(err, "failed to find external/internal IPs for every node") framework.ExpectNoError(err, "failed to find external/internal IPs for every node")
if len(hosts) == 0 { if len(hosts) == 0 {
framework.Failf("No ssh-able nodes") e2elog.Failf("No ssh-able nodes")
} }
host := hosts[0] host := hosts[0]
@ -468,11 +468,11 @@ var _ = SIGDescribe("Services", func() {
// Restart apiserver // Restart apiserver
ginkgo.By("Restarting apiserver") ginkgo.By("Restarting apiserver")
if err := framework.RestartApiserver(cs); err != nil { if err := framework.RestartApiserver(cs); err != nil {
framework.Failf("error restarting apiserver: %v", err) e2elog.Failf("error restarting apiserver: %v", err)
} }
ginkgo.By("Waiting for apiserver to come up by polling /healthz") ginkgo.By("Waiting for apiserver to come up by polling /healthz")
if err := framework.WaitForApiserverUp(cs); err != nil { if err := framework.WaitForApiserverUp(cs); err != nil {
framework.Failf("error while waiting for apiserver up: %v", err) e2elog.Failf("error while waiting for apiserver up: %v", err)
} }
framework.ExpectNoError(framework.VerifyServeHostnameServiceUp(cs, ns, host, podNames1, svc1IP, servicePort)) framework.ExpectNoError(framework.VerifyServeHostnameServiceUp(cs, ns, host, podNames1, svc1IP, servicePort))
@ -484,7 +484,7 @@ var _ = SIGDescribe("Services", func() {
framework.ExpectNoError(err, "failed to create replication controller with service: %s in the namespace: %s", svc2, ns) framework.ExpectNoError(err, "failed to create replication controller with service: %s in the namespace: %s", svc2, ns)
if svc1IP == svc2IP { if svc1IP == svc2IP {
framework.Failf("VIPs conflict: %v", svc1IP) e2elog.Failf("VIPs conflict: %v", svc1IP)
} }
framework.ExpectNoError(framework.VerifyServeHostnameServiceUp(cs, ns, host, podNames1, svc1IP, servicePort)) framework.ExpectNoError(framework.VerifyServeHostnameServiceUp(cs, ns, host, podNames1, svc1IP, servicePort))
framework.ExpectNoError(framework.VerifyServeHostnameServiceUp(cs, ns, host, podNames2, svc2IP, servicePort)) framework.ExpectNoError(framework.VerifyServeHostnameServiceUp(cs, ns, host, podNames2, svc2IP, servicePort))
@ -520,7 +520,7 @@ var _ = SIGDescribe("Services", func() {
cmd := fmt.Sprintf(`for i in $(seq 1 300); do if ss -ant46 'sport = :%d' | grep ^LISTEN; then exit 0; fi; sleep 1; done; exit 1`, nodePort) cmd := fmt.Sprintf(`for i in $(seq 1 300); do if ss -ant46 'sport = :%d' | grep ^LISTEN; then exit 0; fi; sleep 1; done; exit 1`, nodePort)
stdout, err := framework.RunHostCmd(hostExec.Namespace, hostExec.Name, cmd) stdout, err := framework.RunHostCmd(hostExec.Namespace, hostExec.Name, cmd)
if err != nil { if err != nil {
framework.Failf("expected node port %d to be in use, stdout: %v. err: %v", nodePort, stdout, err) e2elog.Failf("expected node port %d to be in use, stdout: %v. err: %v", nodePort, stdout, err)
} }
}) })
@ -569,7 +569,7 @@ var _ = SIGDescribe("Services", func() {
ginkgo.By("verifying that TCP and UDP use the same port") ginkgo.By("verifying that TCP and UDP use the same port")
if tcpService.Spec.Ports[0].Port != udpService.Spec.Ports[0].Port { if tcpService.Spec.Ports[0].Port != udpService.Spec.Ports[0].Port {
framework.Failf("expected to use the same port for TCP and UDP") e2elog.Failf("expected to use the same port for TCP and UDP")
} }
svcPort := int(tcpService.Spec.Ports[0].Port) svcPort := int(tcpService.Spec.Ports[0].Port)
e2elog.Logf("service port (TCP and UDP): %d", svcPort) e2elog.Logf("service port (TCP and UDP): %d", svcPort)
@ -655,10 +655,10 @@ var _ = SIGDescribe("Services", func() {
tcpService = jig.WaitForLoadBalancerOrFail(ns1, tcpService.Name, loadBalancerCreateTimeout) tcpService = jig.WaitForLoadBalancerOrFail(ns1, tcpService.Name, loadBalancerCreateTimeout)
jig.SanityCheckService(tcpService, v1.ServiceTypeLoadBalancer) jig.SanityCheckService(tcpService, v1.ServiceTypeLoadBalancer)
if int(tcpService.Spec.Ports[0].NodePort) != tcpNodePort { if int(tcpService.Spec.Ports[0].NodePort) != tcpNodePort {
framework.Failf("TCP Spec.Ports[0].NodePort changed (%d -> %d) when not expected", tcpNodePort, tcpService.Spec.Ports[0].NodePort) e2elog.Failf("TCP Spec.Ports[0].NodePort changed (%d -> %d) when not expected", tcpNodePort, tcpService.Spec.Ports[0].NodePort)
} }
if requestedIP != "" && framework.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]) != requestedIP { if requestedIP != "" && framework.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]) != requestedIP {
framework.Failf("unexpected TCP Status.LoadBalancer.Ingress (expected %s, got %s)", requestedIP, framework.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0])) e2elog.Failf("unexpected TCP Status.LoadBalancer.Ingress (expected %s, got %s)", requestedIP, framework.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]))
} }
tcpIngressIP := framework.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]) tcpIngressIP := framework.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0])
e2elog.Logf("TCP load balancer: %s", tcpIngressIP) e2elog.Logf("TCP load balancer: %s", tcpIngressIP)
@ -675,7 +675,7 @@ var _ = SIGDescribe("Services", func() {
// Deleting it after it is attached "demotes" it to an // Deleting it after it is attached "demotes" it to an
// ephemeral IP, which can be auto-released. // ephemeral IP, which can be auto-released.
if err := gceCloud.DeleteRegionAddress(staticIPName, gceCloud.Region()); err != nil { if err := gceCloud.DeleteRegionAddress(staticIPName, gceCloud.Region()); err != nil {
framework.Failf("failed to release static IP %s: %v", staticIPName, err) e2elog.Failf("failed to release static IP %s: %v", staticIPName, err)
} }
staticIPName = "" staticIPName = ""
} }
@ -688,14 +688,14 @@ var _ = SIGDescribe("Services", func() {
udpService = jig.WaitForLoadBalancerOrFail(ns2, udpService.Name, loadBalancerCreateTimeout) udpService = jig.WaitForLoadBalancerOrFail(ns2, udpService.Name, loadBalancerCreateTimeout)
jig.SanityCheckService(udpService, v1.ServiceTypeLoadBalancer) jig.SanityCheckService(udpService, v1.ServiceTypeLoadBalancer)
if int(udpService.Spec.Ports[0].NodePort) != udpNodePort { if int(udpService.Spec.Ports[0].NodePort) != udpNodePort {
framework.Failf("UDP Spec.Ports[0].NodePort changed (%d -> %d) when not expected", udpNodePort, udpService.Spec.Ports[0].NodePort) e2elog.Failf("UDP Spec.Ports[0].NodePort changed (%d -> %d) when not expected", udpNodePort, udpService.Spec.Ports[0].NodePort)
} }
udpIngressIP = framework.GetIngressPoint(&udpService.Status.LoadBalancer.Ingress[0]) udpIngressIP = framework.GetIngressPoint(&udpService.Status.LoadBalancer.Ingress[0])
e2elog.Logf("UDP load balancer: %s", udpIngressIP) e2elog.Logf("UDP load balancer: %s", udpIngressIP)
ginkgo.By("verifying that TCP and UDP use different load balancers") ginkgo.By("verifying that TCP and UDP use different load balancers")
if tcpIngressIP == udpIngressIP { if tcpIngressIP == udpIngressIP {
framework.Failf("Load balancers are not different: %s", framework.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0])) e2elog.Failf("Load balancers are not different: %s", framework.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]))
} }
} }
@ -721,10 +721,10 @@ var _ = SIGDescribe("Services", func() {
tcpNodePortOld := tcpNodePort tcpNodePortOld := tcpNodePort
tcpNodePort = int(tcpService.Spec.Ports[0].NodePort) tcpNodePort = int(tcpService.Spec.Ports[0].NodePort)
if tcpNodePort == tcpNodePortOld { if tcpNodePort == tcpNodePortOld {
framework.Failf("TCP Spec.Ports[0].NodePort (%d) did not change", tcpNodePort) e2elog.Failf("TCP Spec.Ports[0].NodePort (%d) did not change", tcpNodePort)
} }
if framework.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]) != tcpIngressIP { if framework.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]) != tcpIngressIP {
framework.Failf("TCP Status.LoadBalancer.Ingress changed (%s -> %s) when not expected", tcpIngressIP, framework.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0])) e2elog.Failf("TCP Status.LoadBalancer.Ingress changed (%s -> %s) when not expected", tcpIngressIP, framework.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]))
} }
e2elog.Logf("TCP node port: %d", tcpNodePort) e2elog.Logf("TCP node port: %d", tcpNodePort)
@ -738,10 +738,10 @@ var _ = SIGDescribe("Services", func() {
udpNodePortOld := udpNodePort udpNodePortOld := udpNodePort
udpNodePort = int(udpService.Spec.Ports[0].NodePort) udpNodePort = int(udpService.Spec.Ports[0].NodePort)
if udpNodePort == udpNodePortOld { if udpNodePort == udpNodePortOld {
framework.Failf("UDP Spec.Ports[0].NodePort (%d) did not change", udpNodePort) e2elog.Failf("UDP Spec.Ports[0].NodePort (%d) did not change", udpNodePort)
} }
if loadBalancerSupportsUDP && framework.GetIngressPoint(&udpService.Status.LoadBalancer.Ingress[0]) != udpIngressIP { if loadBalancerSupportsUDP && framework.GetIngressPoint(&udpService.Status.LoadBalancer.Ingress[0]) != udpIngressIP {
framework.Failf("UDP Status.LoadBalancer.Ingress changed (%s -> %s) when not expected", udpIngressIP, framework.GetIngressPoint(&udpService.Status.LoadBalancer.Ingress[0])) e2elog.Failf("UDP Status.LoadBalancer.Ingress changed (%s -> %s) when not expected", udpIngressIP, framework.GetIngressPoint(&udpService.Status.LoadBalancer.Ingress[0]))
} }
e2elog.Logf("UDP node port: %d", udpNodePort) e2elog.Logf("UDP node port: %d", udpNodePort)
@ -775,13 +775,13 @@ var _ = SIGDescribe("Services", func() {
svcPortOld := svcPort svcPortOld := svcPort
svcPort = int(tcpService.Spec.Ports[0].Port) svcPort = int(tcpService.Spec.Ports[0].Port)
if svcPort == svcPortOld { if svcPort == svcPortOld {
framework.Failf("TCP Spec.Ports[0].Port (%d) did not change", svcPort) e2elog.Failf("TCP Spec.Ports[0].Port (%d) did not change", svcPort)
} }
if int(tcpService.Spec.Ports[0].NodePort) != tcpNodePort { if int(tcpService.Spec.Ports[0].NodePort) != tcpNodePort {
framework.Failf("TCP Spec.Ports[0].NodePort (%d) changed", tcpService.Spec.Ports[0].NodePort) e2elog.Failf("TCP Spec.Ports[0].NodePort (%d) changed", tcpService.Spec.Ports[0].NodePort)
} }
if framework.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]) != tcpIngressIP { if framework.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]) != tcpIngressIP {
framework.Failf("TCP Status.LoadBalancer.Ingress changed (%s -> %s) when not expected", tcpIngressIP, framework.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0])) e2elog.Failf("TCP Status.LoadBalancer.Ingress changed (%s -> %s) when not expected", tcpIngressIP, framework.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]))
} }
ginkgo.By("changing the UDP service's port") ginkgo.By("changing the UDP service's port")
@ -794,13 +794,13 @@ var _ = SIGDescribe("Services", func() {
jig.SanityCheckService(udpService, v1.ServiceTypeNodePort) jig.SanityCheckService(udpService, v1.ServiceTypeNodePort)
} }
if int(udpService.Spec.Ports[0].Port) != svcPort { if int(udpService.Spec.Ports[0].Port) != svcPort {
framework.Failf("UDP Spec.Ports[0].Port (%d) did not change", udpService.Spec.Ports[0].Port) e2elog.Failf("UDP Spec.Ports[0].Port (%d) did not change", udpService.Spec.Ports[0].Port)
} }
if int(udpService.Spec.Ports[0].NodePort) != udpNodePort { if int(udpService.Spec.Ports[0].NodePort) != udpNodePort {
framework.Failf("UDP Spec.Ports[0].NodePort (%d) changed", udpService.Spec.Ports[0].NodePort) e2elog.Failf("UDP Spec.Ports[0].NodePort (%d) changed", udpService.Spec.Ports[0].NodePort)
} }
if loadBalancerSupportsUDP && framework.GetIngressPoint(&udpService.Status.LoadBalancer.Ingress[0]) != udpIngressIP { if loadBalancerSupportsUDP && framework.GetIngressPoint(&udpService.Status.LoadBalancer.Ingress[0]) != udpIngressIP {
framework.Failf("UDP Status.LoadBalancer.Ingress changed (%s -> %s) when not expected", udpIngressIP, framework.GetIngressPoint(&udpService.Status.LoadBalancer.Ingress[0])) e2elog.Failf("UDP Status.LoadBalancer.Ingress changed (%s -> %s) when not expected", udpIngressIP, framework.GetIngressPoint(&udpService.Status.LoadBalancer.Ingress[0]))
} }
e2elog.Logf("service port (TCP and UDP): %d", svcPort) e2elog.Logf("service port (TCP and UDP): %d", svcPort)
@ -928,11 +928,11 @@ var _ = SIGDescribe("Services", func() {
}) })
jig.SanityCheckService(newService, v1.ServiceTypeNodePort) jig.SanityCheckService(newService, v1.ServiceTypeNodePort)
if len(newService.Spec.Ports) != 2 { if len(newService.Spec.Ports) != 2 {
framework.Failf("new service should have two Ports") e2elog.Failf("new service should have two Ports")
} }
for _, port := range newService.Spec.Ports { for _, port := range newService.Spec.Ports {
if port.NodePort == 0 { if port.NodePort == 0 {
framework.Failf("new service failed to allocate NodePort for Port %s", port.Name) e2elog.Failf("new service failed to allocate NodePort for Port %s", port.Name)
} }
e2elog.Logf("new service allocates NodePort %d for Port %s", port.NodePort, port.Name) e2elog.Logf("new service allocates NodePort %d for Port %s", port.NodePort, port.Name)
@ -1043,7 +1043,7 @@ var _ = SIGDescribe("Services", func() {
defer ginkgo.GinkgoRecover() defer ginkgo.GinkgoRecover()
errs := t.Cleanup() errs := t.Cleanup()
if len(errs) != 0 { if len(errs) != 0 {
framework.Failf("errors in cleanup: %v", errs) e2elog.Failf("errors in cleanup: %v", errs)
} }
}() }()
@ -1074,10 +1074,10 @@ var _ = SIGDescribe("Services", func() {
framework.ExpectNoError(err, "failed to create service: %s in namespace: %s", serviceName, ns) framework.ExpectNoError(err, "failed to create service: %s in namespace: %s", serviceName, ns)
if len(result.Spec.Ports) != 2 { if len(result.Spec.Ports) != 2 {
framework.Failf("got unexpected len(Spec.Ports) for new service: %v", result) e2elog.Failf("got unexpected len(Spec.Ports) for new service: %v", result)
} }
if result.Spec.Ports[0].NodePort != result.Spec.Ports[1].NodePort { if result.Spec.Ports[0].NodePort != result.Spec.Ports[1].NodePort {
framework.Failf("should use same NodePort for new service: %v", result) e2elog.Failf("should use same NodePort for new service: %v", result)
} }
}) })
@ -1093,7 +1093,7 @@ var _ = SIGDescribe("Services", func() {
defer ginkgo.GinkgoRecover() defer ginkgo.GinkgoRecover()
errs := t.Cleanup() errs := t.Cleanup()
if len(errs) != 0 { if len(errs) != 0 {
framework.Failf("errors in cleanup: %v", errs) e2elog.Failf("errors in cleanup: %v", errs)
} }
}() }()
@ -1104,14 +1104,14 @@ var _ = SIGDescribe("Services", func() {
framework.ExpectNoError(err, "failed to create service: %s in namespace: %s", serviceName1, ns) framework.ExpectNoError(err, "failed to create service: %s in namespace: %s", serviceName1, ns)
if result.Spec.Type != v1.ServiceTypeNodePort { if result.Spec.Type != v1.ServiceTypeNodePort {
framework.Failf("got unexpected Spec.Type for new service: %v", result) e2elog.Failf("got unexpected Spec.Type for new service: %v", result)
} }
if len(result.Spec.Ports) != 1 { if len(result.Spec.Ports) != 1 {
framework.Failf("got unexpected len(Spec.Ports) for new service: %v", result) e2elog.Failf("got unexpected len(Spec.Ports) for new service: %v", result)
} }
port := result.Spec.Ports[0] port := result.Spec.Ports[0]
if port.NodePort == 0 { if port.NodePort == 0 {
framework.Failf("got unexpected Spec.Ports[0].NodePort for new service: %v", result) e2elog.Failf("got unexpected Spec.Ports[0].NodePort for new service: %v", result)
} }
ginkgo.By("creating service " + serviceName2 + " with conflicting NodePort") ginkgo.By("creating service " + serviceName2 + " with conflicting NodePort")
@ -1121,7 +1121,7 @@ var _ = SIGDescribe("Services", func() {
service2.Spec.Ports[0].NodePort = port.NodePort service2.Spec.Ports[0].NodePort = port.NodePort
result2, err := t.CreateService(service2) result2, err := t.CreateService(service2)
if err == nil { if err == nil {
framework.Failf("Created service with conflicting NodePort: %v", result2) e2elog.Failf("Created service with conflicting NodePort: %v", result2)
} }
expectedErr := fmt.Sprintf("%d.*port is already allocated", port.NodePort) expectedErr := fmt.Sprintf("%d.*port is already allocated", port.NodePort)
gomega.Expect(fmt.Sprintf("%v", err)).To(gomega.MatchRegexp(expectedErr)) gomega.Expect(fmt.Sprintf("%v", err)).To(gomega.MatchRegexp(expectedErr))
@ -1145,7 +1145,7 @@ var _ = SIGDescribe("Services", func() {
defer ginkgo.GinkgoRecover() defer ginkgo.GinkgoRecover()
errs := t.Cleanup() errs := t.Cleanup()
if len(errs) != 0 { if len(errs) != 0 {
framework.Failf("errors in cleanup: %v", errs) e2elog.Failf("errors in cleanup: %v", errs)
} }
}() }()
@ -1157,17 +1157,17 @@ var _ = SIGDescribe("Services", func() {
framework.ExpectNoError(err, "failed to create service: %s in namespace: %s", serviceName, ns) framework.ExpectNoError(err, "failed to create service: %s in namespace: %s", serviceName, ns)
if service.Spec.Type != v1.ServiceTypeNodePort { if service.Spec.Type != v1.ServiceTypeNodePort {
framework.Failf("got unexpected Spec.Type for new service: %v", service) e2elog.Failf("got unexpected Spec.Type for new service: %v", service)
} }
if len(service.Spec.Ports) != 1 { if len(service.Spec.Ports) != 1 {
framework.Failf("got unexpected len(Spec.Ports) for new service: %v", service) e2elog.Failf("got unexpected len(Spec.Ports) for new service: %v", service)
} }
port := service.Spec.Ports[0] port := service.Spec.Ports[0]
if port.NodePort == 0 { if port.NodePort == 0 {
framework.Failf("got unexpected Spec.Ports[0].nodePort for new service: %v", service) e2elog.Failf("got unexpected Spec.Ports[0].nodePort for new service: %v", service)
} }
if !framework.ServiceNodePortRange.Contains(int(port.NodePort)) { if !framework.ServiceNodePortRange.Contains(int(port.NodePort)) {
framework.Failf("got unexpected (out-of-range) port for new service: %v", service) e2elog.Failf("got unexpected (out-of-range) port for new service: %v", service)
} }
outOfRangeNodePort := 0 outOfRangeNodePort := 0
@ -1183,7 +1183,7 @@ var _ = SIGDescribe("Services", func() {
s.Spec.Ports[0].NodePort = int32(outOfRangeNodePort) s.Spec.Ports[0].NodePort = int32(outOfRangeNodePort)
}) })
if err == nil { if err == nil {
framework.Failf("failed to prevent update of service with out-of-range NodePort: %v", result) e2elog.Failf("failed to prevent update of service with out-of-range NodePort: %v", result)
} }
expectedErr := fmt.Sprintf("%d.*port is not in the valid range", outOfRangeNodePort) expectedErr := fmt.Sprintf("%d.*port is not in the valid range", outOfRangeNodePort)
gomega.Expect(fmt.Sprintf("%v", err)).To(gomega.MatchRegexp(expectedErr)) gomega.Expect(fmt.Sprintf("%v", err)).To(gomega.MatchRegexp(expectedErr))
@ -1198,7 +1198,7 @@ var _ = SIGDescribe("Services", func() {
service.Spec.Ports[0].NodePort = int32(outOfRangeNodePort) service.Spec.Ports[0].NodePort = int32(outOfRangeNodePort)
service, err = t.CreateService(service) service, err = t.CreateService(service)
if err == nil { if err == nil {
framework.Failf("failed to prevent create of service with out-of-range NodePort (%d): %v", outOfRangeNodePort, service) e2elog.Failf("failed to prevent create of service with out-of-range NodePort (%d): %v", outOfRangeNodePort, service)
} }
gomega.Expect(fmt.Sprintf("%v", err)).To(gomega.MatchRegexp(expectedErr)) gomega.Expect(fmt.Sprintf("%v", err)).To(gomega.MatchRegexp(expectedErr))
}) })
@ -1213,7 +1213,7 @@ var _ = SIGDescribe("Services", func() {
defer ginkgo.GinkgoRecover() defer ginkgo.GinkgoRecover()
errs := t.Cleanup() errs := t.Cleanup()
if len(errs) != 0 { if len(errs) != 0 {
framework.Failf("errors in cleanup: %v", errs) e2elog.Failf("errors in cleanup: %v", errs)
} }
}() }()
@ -1225,17 +1225,17 @@ var _ = SIGDescribe("Services", func() {
framework.ExpectNoError(err, "failed to create service: %s in namespace: %s", serviceName, ns) framework.ExpectNoError(err, "failed to create service: %s in namespace: %s", serviceName, ns)
if service.Spec.Type != v1.ServiceTypeNodePort { if service.Spec.Type != v1.ServiceTypeNodePort {
framework.Failf("got unexpected Spec.Type for new service: %v", service) e2elog.Failf("got unexpected Spec.Type for new service: %v", service)
} }
if len(service.Spec.Ports) != 1 { if len(service.Spec.Ports) != 1 {
framework.Failf("got unexpected len(Spec.Ports) for new service: %v", service) e2elog.Failf("got unexpected len(Spec.Ports) for new service: %v", service)
} }
port := service.Spec.Ports[0] port := service.Spec.Ports[0]
if port.NodePort == 0 { if port.NodePort == 0 {
framework.Failf("got unexpected Spec.Ports[0].nodePort for new service: %v", service) e2elog.Failf("got unexpected Spec.Ports[0].nodePort for new service: %v", service)
} }
if !framework.ServiceNodePortRange.Contains(int(port.NodePort)) { if !framework.ServiceNodePortRange.Contains(int(port.NodePort)) {
framework.Failf("got unexpected (out-of-range) port for new service: %v", service) e2elog.Failf("got unexpected (out-of-range) port for new service: %v", service)
} }
nodePort := port.NodePort nodePort := port.NodePort
@ -1255,7 +1255,7 @@ var _ = SIGDescribe("Services", func() {
} }
return true, nil return true, nil
}); pollErr != nil { }); pollErr != nil {
framework.Failf("expected node port (%d) to not be in use in %v, stdout: %v", nodePort, framework.KubeProxyLagTimeout, stdout) e2elog.Failf("expected node port (%d) to not be in use in %v, stdout: %v", nodePort, framework.KubeProxyLagTimeout, stdout)
} }
ginkgo.By(fmt.Sprintf("creating service "+serviceName+" with same NodePort %d", nodePort)) ginkgo.By(fmt.Sprintf("creating service "+serviceName+" with same NodePort %d", nodePort))
@ -1275,7 +1275,7 @@ var _ = SIGDescribe("Services", func() {
defer ginkgo.GinkgoRecover() defer ginkgo.GinkgoRecover()
errs := t.Cleanup() errs := t.Cleanup()
if len(errs) != 0 { if len(errs) != 0 {
framework.Failf("errors in cleanup: %v", errs) e2elog.Failf("errors in cleanup: %v", errs)
} }
}() }()
@ -1347,7 +1347,7 @@ var _ = SIGDescribe("Services", func() {
} }
return true, nil return true, nil
}); pollErr != nil { }); pollErr != nil {
framework.Failf("expected un-ready endpoint for Service %v within %v, stdout: %v", t.Name, framework.KubeProxyLagTimeout, stdout) e2elog.Failf("expected un-ready endpoint for Service %v within %v, stdout: %v", t.Name, framework.KubeProxyLagTimeout, stdout)
} }
ginkgo.By("Scaling down replication controller to zero") ginkgo.By("Scaling down replication controller to zero")
@ -1370,7 +1370,7 @@ var _ = SIGDescribe("Services", func() {
} }
return true, nil return true, nil
}); pollErr != nil { }); pollErr != nil {
framework.Failf("expected un-ready endpoint for Service %v within %v, stdout: %v", t.Name, framework.KubeProxyLagTimeout, stdout) e2elog.Failf("expected un-ready endpoint for Service %v within %v, stdout: %v", t.Name, framework.KubeProxyLagTimeout, stdout)
} }
ginkgo.By("Update service to tolerate unready services again") ginkgo.By("Update service to tolerate unready services again")
@ -1390,7 +1390,7 @@ var _ = SIGDescribe("Services", func() {
} }
return true, nil return true, nil
}); pollErr != nil { }); pollErr != nil {
framework.Failf("expected un-ready endpoint for Service %v within %v, stdout: %v", t.Name, framework.KubeProxyLagTimeout, stdout) e2elog.Failf("expected un-ready endpoint for Service %v within %v, stdout: %v", t.Name, framework.KubeProxyLagTimeout, stdout)
} }
ginkgo.By("Remove pods immediately") ginkgo.By("Remove pods immediately")
@ -1550,7 +1550,7 @@ var _ = SIGDescribe("Services", func() {
e2elog.Logf("Successful curl; stdout: %v", stdout) e2elog.Logf("Successful curl; stdout: %v", stdout)
return true, nil return true, nil
}); pollErr != nil { }); pollErr != nil {
framework.Failf("ginkgo.Failed to hit ILB IP, err: %v", pollErr) e2elog.Failf("ginkgo.Failed to hit ILB IP, err: %v", pollErr)
} }
ginkgo.By("switching to external type LoadBalancer") ginkgo.By("switching to external type LoadBalancer")
@ -1566,7 +1566,7 @@ var _ = SIGDescribe("Services", func() {
lbIngress = &svc.Status.LoadBalancer.Ingress[0] lbIngress = &svc.Status.LoadBalancer.Ingress[0]
return !isInternalEndpoint(lbIngress), nil return !isInternalEndpoint(lbIngress), nil
}); pollErr != nil { }); pollErr != nil {
framework.Failf("Loadbalancer IP not changed to external.") e2elog.Failf("Loadbalancer IP not changed to external.")
} }
// should have an external IP. // should have an external IP.
jig.SanityCheckService(svc, v1.ServiceTypeLoadBalancer) jig.SanityCheckService(svc, v1.ServiceTypeLoadBalancer)
@ -1595,7 +1595,7 @@ var _ = SIGDescribe("Services", func() {
lbIngress = &svc.Status.LoadBalancer.Ingress[0] lbIngress = &svc.Status.LoadBalancer.Ingress[0]
return isInternalEndpoint(lbIngress), nil return isInternalEndpoint(lbIngress), nil
}); pollErr != nil { }); pollErr != nil {
framework.Failf("Loadbalancer IP not changed to internal.") e2elog.Failf("Loadbalancer IP not changed to internal.")
} }
// should have the given static internal IP. // should have the given static internal IP.
jig.SanityCheckService(svc, v1.ServiceTypeLoadBalancer) jig.SanityCheckService(svc, v1.ServiceTypeLoadBalancer)
@ -1616,11 +1616,11 @@ var _ = SIGDescribe("Services", func() {
framework.SkipUnlessProviderIs("gce") framework.SkipUnlessProviderIs("gce")
clusterID, err := gce.GetClusterID(cs) clusterID, err := gce.GetClusterID(cs)
if err != nil { if err != nil {
framework.Failf("framework.GetClusterID(cs) = _, %v; want nil", err) e2elog.Failf("framework.GetClusterID(cs) = _, %v; want nil", err)
} }
gceCloud, err := gce.GetGCECloud() gceCloud, err := gce.GetGCECloud()
if err != nil { if err != nil {
framework.Failf("framework.GetGCECloud() = _, %v; want nil", err) e2elog.Failf("framework.GetGCECloud() = _, %v; want nil", err)
} }
namespace := f.Namespace.Name namespace := f.Namespace.Name
@ -1647,22 +1647,22 @@ var _ = SIGDescribe("Services", func() {
hcName := gcecloud.MakeNodesHealthCheckName(clusterID) hcName := gcecloud.MakeNodesHealthCheckName(clusterID)
hc, err := gceCloud.GetHTTPHealthCheck(hcName) hc, err := gceCloud.GetHTTPHealthCheck(hcName)
if err != nil { if err != nil {
framework.Failf("gceCloud.GetHttpHealthCheck(%q) = _, %v; want nil", hcName, err) e2elog.Failf("gceCloud.GetHttpHealthCheck(%q) = _, %v; want nil", hcName, err)
} }
gomega.Expect(hc.CheckIntervalSec).To(gomega.Equal(gceHcCheckIntervalSeconds)) gomega.Expect(hc.CheckIntervalSec).To(gomega.Equal(gceHcCheckIntervalSeconds))
ginkgo.By("modify the health check interval") ginkgo.By("modify the health check interval")
hc.CheckIntervalSec = gceHcCheckIntervalSeconds - 1 hc.CheckIntervalSec = gceHcCheckIntervalSeconds - 1
if err = gceCloud.UpdateHTTPHealthCheck(hc); err != nil { if err = gceCloud.UpdateHTTPHealthCheck(hc); err != nil {
framework.Failf("gcecloud.UpdateHttpHealthCheck(%#v) = %v; want nil", hc, err) e2elog.Failf("gcecloud.UpdateHttpHealthCheck(%#v) = %v; want nil", hc, err)
} }
ginkgo.By("restart kube-controller-manager") ginkgo.By("restart kube-controller-manager")
if err := framework.RestartControllerManager(); err != nil { if err := framework.RestartControllerManager(); err != nil {
framework.Failf("framework.RestartControllerManager() = %v; want nil", err) e2elog.Failf("framework.RestartControllerManager() = %v; want nil", err)
} }
if err := framework.WaitForControllerManagerUp(); err != nil { if err := framework.WaitForControllerManagerUp(); err != nil {
framework.Failf("framework.WaitForControllerManagerUp() = %v; want nil", err) e2elog.Failf("framework.WaitForControllerManagerUp() = %v; want nil", err)
} }
ginkgo.By("health check should be reconciled") ginkgo.By("health check should be reconciled")
@ -1676,7 +1676,7 @@ var _ = SIGDescribe("Services", func() {
e2elog.Logf("hc.CheckIntervalSec = %v", hc.CheckIntervalSec) e2elog.Logf("hc.CheckIntervalSec = %v", hc.CheckIntervalSec)
return hc.CheckIntervalSec == gceHcCheckIntervalSeconds, nil return hc.CheckIntervalSec == gceHcCheckIntervalSeconds, nil
}); pollErr != nil { }); pollErr != nil {
framework.Failf("Health check %q does not reconcile its check interval to %d.", hcName, gceHcCheckIntervalSeconds) e2elog.Failf("Health check %q does not reconcile its check interval to %d.", hcName, gceHcCheckIntervalSeconds)
} }
}) })
@ -1779,7 +1779,7 @@ var _ = SIGDescribe("Services", func() {
hosts, err := e2essh.NodeSSHHosts(cs) hosts, err := e2essh.NodeSSHHosts(cs)
framework.ExpectNoError(err, "failed to find external/internal IPs for every node") framework.ExpectNoError(err, "failed to find external/internal IPs for every node")
if len(hosts) == 0 { if len(hosts) == 0 {
framework.Failf("No ssh-able nodes") e2elog.Failf("No ssh-able nodes")
} }
host := hosts[0] host := hosts[0]
@ -1826,7 +1826,7 @@ var _ = SIGDescribe("Services", func() {
ginkgo.By("creating a service with no endpoints") ginkgo.By("creating a service with no endpoints")
_, err := jig.CreateServiceWithServicePort(labels, namespace, ports) _, err := jig.CreateServiceWithServicePort(labels, namespace, ports)
if err != nil { if err != nil {
framework.Failf("ginkgo.Failed to create service: %v", err) e2elog.Failf("ginkgo.Failed to create service: %v", err)
} }
nodeName := nodes.Items[0].Name nodeName := nodes.Items[0].Name
@ -1884,7 +1884,7 @@ var _ = SIGDescribe("Services", func() {
ginkgo.By("Manually add load balancer cleanup finalizer to service") ginkgo.By("Manually add load balancer cleanup finalizer to service")
svc.Finalizers = append(svc.Finalizers, "service.kubernetes.io/load-balancer-cleanup") svc.Finalizers = append(svc.Finalizers, "service.kubernetes.io/load-balancer-cleanup")
if _, err := cs.CoreV1().Services(svc.Namespace).Update(svc); err != nil { if _, err := cs.CoreV1().Services(svc.Namespace).Update(svc); err != nil {
framework.Failf("Failed to add finalizer to service %s/%s: %v", svc.Namespace, svc.Name, err) e2elog.Failf("Failed to add finalizer to service %s/%s: %v", svc.Namespace, svc.Name, err)
} }
}) })
@ -1925,7 +1925,7 @@ var _ = SIGDescribe("Services", func() {
func waitForServiceDeletedWithFinalizer(cs clientset.Interface, namespace, name string) { func waitForServiceDeletedWithFinalizer(cs clientset.Interface, namespace, name string) {
ginkgo.By("Delete service with finalizer") ginkgo.By("Delete service with finalizer")
if err := cs.CoreV1().Services(namespace).Delete(name, nil); err != nil { if err := cs.CoreV1().Services(namespace).Delete(name, nil); err != nil {
framework.Failf("Failed to delete service %s/%s", namespace, name) e2elog.Failf("Failed to delete service %s/%s", namespace, name)
} }
ginkgo.By("Wait for service to disappear") ginkgo.By("Wait for service to disappear")
@ -1941,7 +1941,7 @@ func waitForServiceDeletedWithFinalizer(cs clientset.Interface, namespace, name
e2elog.Logf("Service %s/%s still exists with finalizers: %v", namespace, name, svc.Finalizers) e2elog.Logf("Service %s/%s still exists with finalizers: %v", namespace, name, svc.Finalizers)
return false, nil return false, nil
}); pollErr != nil { }); pollErr != nil {
framework.Failf("Failed to wait for service to disappear: %v", pollErr) e2elog.Failf("Failed to wait for service to disappear: %v", pollErr)
} }
} }
@ -1964,7 +1964,7 @@ func waitForServiceUpdatedWithFinalizer(cs clientset.Interface, namespace, name
} }
return true, nil return true, nil
}); pollErr != nil { }); pollErr != nil {
framework.Failf("Failed to wait for service to hasFinalizer=%t: %v", hasFinalizer, pollErr) e2elog.Failf("Failed to wait for service to hasFinalizer=%t: %v", hasFinalizer, pollErr)
} }
} }
@ -2007,7 +2007,7 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() {
serviceLBNames = append(serviceLBNames, cloudprovider.DefaultLoadBalancerName(svc)) serviceLBNames = append(serviceLBNames, cloudprovider.DefaultLoadBalancerName(svc))
healthCheckNodePort := int(svc.Spec.HealthCheckNodePort) healthCheckNodePort := int(svc.Spec.HealthCheckNodePort)
if healthCheckNodePort == 0 { if healthCheckNodePort == 0 {
framework.Failf("Service HealthCheck NodePort was not allocated") e2elog.Failf("Service HealthCheck NodePort was not allocated")
} }
defer func() { defer func() {
jig.ChangeServiceType(svc.Namespace, svc.Name, v1.ServiceTypeClusterIP, loadBalancerCreateTimeout) jig.ChangeServiceType(svc.Namespace, svc.Name, v1.ServiceTypeClusterIP, loadBalancerCreateTimeout)
@ -2032,7 +2032,7 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() {
ginkgo.By("checking if Source IP is preserved") ginkgo.By("checking if Source IP is preserved")
if strings.HasPrefix(clientIP, "10.") { if strings.HasPrefix(clientIP, "10.") {
framework.Failf("Source IP was NOT preserved") e2elog.Failf("Source IP was NOT preserved")
} }
}) })
@ -2058,7 +2058,7 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() {
clientIP := content.String() clientIP := content.String()
e2elog.Logf("ClientIP detected by target pod using NodePort is %s", clientIP) e2elog.Logf("ClientIP detected by target pod using NodePort is %s", clientIP)
if strings.HasPrefix(clientIP, "10.") { if strings.HasPrefix(clientIP, "10.") {
framework.Failf("Source IP was NOT preserved") e2elog.Failf("Source IP was NOT preserved")
} }
} }
}) })
@ -2088,7 +2088,7 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() {
healthCheckNodePort := int(svc.Spec.HealthCheckNodePort) healthCheckNodePort := int(svc.Spec.HealthCheckNodePort)
if healthCheckNodePort == 0 { if healthCheckNodePort == 0 {
framework.Failf("Service HealthCheck NodePort was not allocated") e2elog.Failf("Service HealthCheck NodePort was not allocated")
} }
ips := e2enode.CollectAddresses(nodes, v1.NodeExternalIP) ips := e2enode.CollectAddresses(nodes, v1.NodeExternalIP)
@ -2175,7 +2175,7 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() {
srcIP = strings.TrimSpace(strings.Split(stdout, ":")[0]) srcIP = strings.TrimSpace(strings.Split(stdout, ":")[0])
return srcIP == execPod.Status.PodIP, nil return srcIP == execPod.Status.PodIP, nil
}); pollErr != nil { }); pollErr != nil {
framework.Failf("Source IP not preserved from %v, expected '%v' got '%v'", podName, execPod.Status.PodIP, srcIP) e2elog.Failf("Source IP not preserved from %v, expected '%v' got '%v'", podName, execPod.Status.PodIP, srcIP)
} }
}) })
@ -2186,7 +2186,7 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() {
nodes := jig.GetNodes(framework.MaxNodesForEndpointsTests) nodes := jig.GetNodes(framework.MaxNodesForEndpointsTests)
if len(nodes.Items) < 2 { if len(nodes.Items) < 2 {
framework.Failf("Need at least 2 nodes to verify source ip from a node without endpoint") e2elog.Failf("Need at least 2 nodes to verify source ip from a node without endpoint")
} }
svc := jig.CreateOnlyLocalLoadBalancerService(namespace, serviceName, loadBalancerCreateTimeout, true, nil) svc := jig.CreateOnlyLocalLoadBalancerService(namespace, serviceName, loadBalancerCreateTimeout, true, nil)
@ -2205,7 +2205,7 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() {
svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeCluster svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeCluster
}) })
if svc.Spec.HealthCheckNodePort > 0 { if svc.Spec.HealthCheckNodePort > 0 {
framework.Failf("Service HealthCheck NodePort still present") e2elog.Failf("Service HealthCheck NodePort still present")
} }
endpointNodeMap := jig.GetEndpointNodes(svc) endpointNodeMap := jig.GetEndpointNodes(svc)
@ -2241,7 +2241,7 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() {
return false, nil return false, nil
} }
if pollErr := wait.PollImmediate(framework.Poll, framework.ServiceTestTimeout, pollfn); pollErr != nil { if pollErr := wait.PollImmediate(framework.Poll, framework.ServiceTestTimeout, pollfn); pollErr != nil {
framework.Failf("Kube-proxy still exposing health check on node %v:%v, after ESIPP was turned off. body %s", e2elog.Failf("Kube-proxy still exposing health check on node %v:%v, after ESIPP was turned off. body %s",
nodeName, healthCheckNodePort, body.String()) nodeName, healthCheckNodePort, body.String())
} }
} }
@ -2258,7 +2258,7 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() {
return false, nil return false, nil
}) })
if pollErr != nil { if pollErr != nil {
framework.Failf("Source IP WAS preserved even after ESIPP turned off. Got %v, expected a ten-dot cluster ip.", clientIP) e2elog.Failf("Source IP WAS preserved even after ESIPP turned off. Got %v, expected a ten-dot cluster ip.", clientIP)
} }
// TODO: We need to attempt to create another service with the previously // TODO: We need to attempt to create another service with the previously
@ -2283,7 +2283,7 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() {
return false, nil return false, nil
}) })
if pollErr != nil { if pollErr != nil {
framework.Failf("Source IP (%v) is not the client IP even after ESIPP turned on, expected a public IP.", clientIP) e2elog.Failf("Source IP (%v) is not the client IP even after ESIPP turned on, expected a public IP.", clientIP)
} }
}) })
}) })
@ -2327,7 +2327,7 @@ func execSourceipTest(f *framework.Framework, c clientset.Interface, ns, nodeNam
outputs := strings.Split(strings.TrimSpace(stdout), "=") outputs := strings.Split(strings.TrimSpace(stdout), "=")
if len(outputs) != 2 { if len(outputs) != 2 {
// ginkgo.Fail the test if output format is unexpected. // ginkgo.Fail the test if output format is unexpected.
framework.Failf("exec pod returned unexpected stdout format: [%v]\n", stdout) e2elog.Failf("exec pod returned unexpected stdout format: [%v]\n", stdout)
} }
return execPod.Status.PodIP, outputs[1] return execPod.Status.PodIP, outputs[1]
} }

@ -95,7 +95,7 @@ var _ = SIGDescribe("Service endpoints latency", func() {
} }
if n < 2 { if n < 2 {
failing.Insert("Less than two runs succeeded; aborting.") failing.Insert("Less than two runs succeeded; aborting.")
framework.Failf(strings.Join(failing.List(), "\n")) e2elog.Failf(strings.Join(failing.List(), "\n"))
} }
percentile := func(p int) time.Duration { percentile := func(p int) time.Duration {
est := n * p / 100 est := n * p / 100
@ -122,7 +122,7 @@ var _ = SIGDescribe("Service endpoints latency", func() {
if failing.Len() > 0 { if failing.Len() > 0 {
errList := strings.Join(failing.List(), "\n") errList := strings.Join(failing.List(), "\n")
helpfulInfo := fmt.Sprintf("\n50, 90, 99 percentiles: %v %v %v", p50, p90, p99) helpfulInfo := fmt.Sprintf("\n50, 90, 99 percentiles: %v %v %v", p50, p90, p99)
framework.Failf(errList + helpfulInfo) e2elog.Failf(errList + helpfulInfo)
} }
}) })
}) })

@ -24,7 +24,6 @@ import (
"strconv" "strconv"
"strings" "strings"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log" e2elog "k8s.io/kubernetes/test/e2e/framework/log"
) )
@ -74,7 +73,7 @@ func NewIPerf(csvLine string) *IPerfResult {
csvLine = strings.Trim(csvLine, "\n") csvLine = strings.Trim(csvLine, "\n")
slice := StrSlice(strings.Split(csvLine, ",")) slice := StrSlice(strings.Split(csvLine, ","))
if len(slice) != 9 { if len(slice) != 9 {
framework.Failf("Incorrect fields in the output: %v (%v out of 9)", slice, len(slice)) e2elog.Failf("Incorrect fields in the output: %v (%v out of 9)", slice, len(slice))
} }
i := IPerfResult{} i := IPerfResult{}
i.date = slice.get(0) i.date = slice.get(0)
@ -103,7 +102,7 @@ func (s StrSlice) get(i int) string {
func intOrFail(debugName string, rawValue string) int64 { func intOrFail(debugName string, rawValue string) int64 {
value, err := strconv.ParseInt(rawValue, 10, 64) value, err := strconv.ParseInt(rawValue, 10, 64)
if err != nil { if err != nil {
framework.Failf("Failed parsing value %v from the string '%v' as an integer", debugName, rawValue) e2elog.Failf("Failed parsing value %v from the string '%v' as an integer", debugName, rawValue)
} }
return value return value
} }
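The hunks above show the full shape of the refactor in a single file: the iperf helpers now report failures only through e2elog.Failf, so the k8s.io/kubernetes/test/e2e/framework import can be dropped entirely, while files that still rely on other framework helpers simply gain the e2elog import. A minimal sketch of the resulting call-site pattern, using the Failf signature exactly as it appears throughout this diff; the parseFieldCount helper, its package name, and its argument are hypothetical illustrations, not code from the commit:

    package e2e

    import (
        e2elog "k8s.io/kubernetes/test/e2e/framework/log"
    )

    // parseFieldCount is a hypothetical helper showing the post-refactor pattern:
    // failures go through e2elog.Failf, so the framework package is no longer
    // imported just for Failf.
    func parseFieldCount(fields []string) int {
        if len(fields) != 9 {
            // Before this commit, the call below would have been framework.Failf(...).
            e2elog.Failf("Incorrect fields in the output: %v (%v out of 9)", fields, len(fields))
        }
        return len(fields)
    }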

@ -42,7 +42,7 @@ var _ = SIGDescribe("crictl", func() {
ginkgo.By("Getting all nodes' SSH-able IP addresses") ginkgo.By("Getting all nodes' SSH-able IP addresses")
hosts, err := e2essh.NodeSSHHosts(f.ClientSet) hosts, err := e2essh.NodeSSHHosts(f.ClientSet)
if err != nil { if err != nil {
framework.Failf("Error getting node hostnames: %v", err) e2elog.Failf("Error getting node hostnames: %v", err)
} }
testCases := []struct { testCases := []struct {
@ -60,7 +60,7 @@ var _ = SIGDescribe("crictl", func() {
result, err := e2essh.SSH(testCase.cmd, host, framework.TestContext.Provider) result, err := e2essh.SSH(testCase.cmd, host, framework.TestContext.Provider)
stdout, stderr := strings.TrimSpace(result.Stdout), strings.TrimSpace(result.Stderr) stdout, stderr := strings.TrimSpace(result.Stdout), strings.TrimSpace(result.Stderr)
if err != nil { if err != nil {
framework.Failf("Ran %q on %q, got error %v", testCase.cmd, host, err) e2elog.Failf("Ran %q on %q, got error %v", testCase.cmd, host, err)
} }
// Log the stdout/stderr output. // Log the stdout/stderr output.
// TODO: Verify the output. // TODO: Verify the output.

@ -73,7 +73,7 @@ var _ = SIGDescribe("Events", func() {
podClient.Delete(pod.Name, nil) podClient.Delete(pod.Name, nil)
}() }()
if _, err := podClient.Create(pod); err != nil { if _, err := podClient.Create(pod); err != nil {
framework.Failf("Failed to create pod: %v", err) e2elog.Failf("Failed to create pod: %v", err)
} }
framework.ExpectNoError(f.WaitForPodRunning(pod.Name)) framework.ExpectNoError(f.WaitForPodRunning(pod.Name))
@ -87,7 +87,7 @@ var _ = SIGDescribe("Events", func() {
ginkgo.By("retrieving the pod") ginkgo.By("retrieving the pod")
podWithUID, err := podClient.Get(pod.Name, metav1.GetOptions{}) podWithUID, err := podClient.Get(pod.Name, metav1.GetOptions{})
if err != nil { if err != nil {
framework.Failf("Failed to get pod: %v", err) e2elog.Failf("Failed to get pod: %v", err)
} }
e2elog.Logf("%+v\n", podWithUID) e2elog.Logf("%+v\n", podWithUID)
var events *v1.EventList var events *v1.EventList

@ -152,7 +152,7 @@ func verifyMemoryLimits(c clientset.Interface, expected framework.ResourceUsageP
} }
} }
if len(errList) > 0 { if len(errList) > 0 {
framework.Failf("Memory usage exceeding limits:\n %s", strings.Join(errList, "\n")) e2elog.Failf("Memory usage exceeding limits:\n %s", strings.Join(errList, "\n"))
} }
} }
@ -186,7 +186,7 @@ func verifyCPULimits(expected framework.ContainersCPUSummary, actual framework.N
} }
} }
if len(errList) > 0 { if len(errList) > 0 {
framework.Failf("CPU usage exceeding limits:\n %s", strings.Join(errList, "\n")) e2elog.Failf("CPU usage exceeding limits:\n %s", strings.Join(errList, "\n"))
} }
} }

@ -44,7 +44,7 @@ var _ = SIGDescribe("Pod garbage collector [Feature:PodGarbageCollector] [Slow]"
pod.Status.Phase = v1.PodFailed pod.Status.Phase = v1.PodFailed
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).UpdateStatus(pod) pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).UpdateStatus(pod)
if err != nil { if err != nil {
framework.Failf("err failing pod: %v", err) e2elog.Failf("err failing pod: %v", err)
} }
count++ count++
@ -76,7 +76,7 @@ var _ = SIGDescribe("Pod garbage collector [Feature:PodGarbageCollector] [Slow]"
return true, nil return true, nil
}) })
if pollErr != nil { if pollErr != nil {
framework.Failf("Failed to GC pods within %v, %v pods remaining, error: %v", timeout, len(pods.Items), err) e2elog.Failf("Failed to GC pods within %v, %v pods remaining, error: %v", timeout, len(pods.Items), err)
} }
}) })
}) })

@ -142,7 +142,7 @@ func testPreStop(c clientset.Interface, ns string) {
if err != nil { if err != nil {
if ctx.Err() != nil { if ctx.Err() != nil {
framework.Failf("Error validating prestop: %v", err) e2elog.Failf("Error validating prestop: %v", err)
return true, err return true, err
} }
ginkgo.By(fmt.Sprintf("Error validating prestop: %v", err)) ginkgo.By(fmt.Sprintf("Error validating prestop: %v", err))

@ -47,7 +47,7 @@ var _ = SIGDescribe("SSH", func() {
ginkgo.By("Getting all nodes' SSH-able IP addresses") ginkgo.By("Getting all nodes' SSH-able IP addresses")
hosts, err := e2essh.NodeSSHHosts(f.ClientSet) hosts, err := e2essh.NodeSSHHosts(f.ClientSet)
if err != nil { if err != nil {
framework.Failf("Error getting node hostnames: %v", err) e2elog.Failf("Error getting node hostnames: %v", err)
} }
testCases := []struct { testCases := []struct {
@ -82,16 +82,16 @@ var _ = SIGDescribe("SSH", func() {
result, err := e2essh.SSH(testCase.cmd, host, framework.TestContext.Provider) result, err := e2essh.SSH(testCase.cmd, host, framework.TestContext.Provider)
stdout, stderr := strings.TrimSpace(result.Stdout), strings.TrimSpace(result.Stderr) stdout, stderr := strings.TrimSpace(result.Stdout), strings.TrimSpace(result.Stderr)
if err != testCase.expectedError { if err != testCase.expectedError {
framework.Failf("Ran %s on %s, got error %v, expected %v", testCase.cmd, host, err, testCase.expectedError) e2elog.Failf("Ran %s on %s, got error %v, expected %v", testCase.cmd, host, err, testCase.expectedError)
} }
if testCase.checkStdout && stdout != testCase.expectedStdout { if testCase.checkStdout && stdout != testCase.expectedStdout {
framework.Failf("Ran %s on %s, got stdout '%s', expected '%s'", testCase.cmd, host, stdout, testCase.expectedStdout) e2elog.Failf("Ran %s on %s, got stdout '%s', expected '%s'", testCase.cmd, host, stdout, testCase.expectedStdout)
} }
if stderr != testCase.expectedStderr { if stderr != testCase.expectedStderr {
framework.Failf("Ran %s on %s, got stderr '%s', expected '%s'", testCase.cmd, host, stderr, testCase.expectedStderr) e2elog.Failf("Ran %s on %s, got stderr '%s', expected '%s'", testCase.cmd, host, stderr, testCase.expectedStderr)
} }
if result.Code != testCase.expectedCode { if result.Code != testCase.expectedCode {
framework.Failf("Ran %s on %s, got exit code %d, expected %d", testCase.cmd, host, result.Code, testCase.expectedCode) e2elog.Failf("Ran %s on %s, got exit code %d, expected %d", testCase.cmd, host, result.Code, testCase.expectedCode)
} }
// Show stdout, stderr for logging purposes. // Show stdout, stderr for logging purposes.
if len(stdout) > 0 { if len(stdout) > 0 {
@ -106,7 +106,7 @@ var _ = SIGDescribe("SSH", func() {
// Quickly test that SSH itself errors correctly. // Quickly test that SSH itself errors correctly.
ginkgo.By("SSH'ing to a nonexistent host") ginkgo.By("SSH'ing to a nonexistent host")
if _, err = e2essh.SSH(`echo "hello"`, "i.do.not.exist", framework.TestContext.Provider); err == nil { if _, err = e2essh.SSH(`echo "hello"`, "i.do.not.exist", framework.TestContext.Provider); err == nil {
framework.Failf("Expected error trying to SSH to nonexistent host.") e2elog.Failf("Expected error trying to SSH to nonexistent host.")
} }
}) })
}) })

@ -723,7 +723,7 @@ var _ = SIGDescribe("Density", func() {
case batch.Kind("Job"): case batch.Kind("Job"):
configs[i] = &testutils.JobConfig{RCConfig: *baseConfig} configs[i] = &testutils.JobConfig{RCConfig: *baseConfig}
default: default:
framework.Failf("Unsupported kind: %v", itArg.kind) e2elog.Failf("Unsupported kind: %v", itArg.kind)
} }
} }
@ -787,7 +787,7 @@ var _ = SIGDescribe("Density", func() {
if startTime != metav1.NewTime(time.Time{}) { if startTime != metav1.NewTime(time.Time{}) {
runTimes[p.Name] = startTime runTimes[p.Name] = startTime
} else { } else {
framework.Failf("Pod %v is reported to be running, but none of its containers is", p.Name) e2elog.Failf("Pod %v is reported to be running, but none of its containers is", p.Name)
} }
} }
} }
@ -876,7 +876,7 @@ var _ = SIGDescribe("Density", func() {
waitTimeout := 10 * time.Minute waitTimeout := 10 * time.Minute
for start := time.Now(); len(watchTimes) < watchTimesLen+nodeCount; time.Sleep(10 * time.Second) { for start := time.Now(); len(watchTimes) < watchTimesLen+nodeCount; time.Sleep(10 * time.Second) {
if time.Since(start) < waitTimeout { if time.Since(start) < waitTimeout {
framework.Failf("Timeout reached waiting for all Pods being observed by the watch.") e2elog.Failf("Timeout reached waiting for all Pods being observed by the watch.")
} }
} }

@ -577,7 +577,7 @@ func GenerateConfigsForGroup(
case batch.Kind("Job"): case batch.Kind("Job"):
config = &testutils.JobConfig{RCConfig: *baseConfig} config = &testutils.JobConfig{RCConfig: *baseConfig}
default: default:
framework.Failf("Unsupported kind for config creation: %v", kind) e2elog.Failf("Unsupported kind for config creation: %v", kind)
} }
configs = append(configs, config) configs = append(configs, config)
} }

@ -75,10 +75,10 @@ var _ = SIGDescribe("LimitRange", func() {
select { select {
case event, _ := <-w.ResultChan(): case event, _ := <-w.ResultChan():
if event.Type != watch.Added { if event.Type != watch.Added {
framework.Failf("Failed to observe pod creation: %v", event) e2elog.Failf("Failed to observe pod creation: %v", event)
} }
case <-time.After(framework.ServiceRespondingTimeout): case <-time.After(framework.ServiceRespondingTimeout):
framework.Failf("Timeout while waiting for LimitRange creation") e2elog.Failf("Timeout while waiting for LimitRange creation")
} }
ginkgo.By("Fetching the LimitRange to ensure it has proper values") ginkgo.By("Fetching the LimitRange to ensure it has proper values")

@ -275,7 +275,7 @@ func VerifyJobNCompletions(f *framework.Framework, completions int32) {
} }
} }
if successes != completions { if successes != completions {
framework.Failf("Only got %v completions. Expected %v completions.", successes, completions) e2elog.Failf("Only got %v completions. Expected %v completions.", successes, completions)
} }
} }

@ -430,12 +430,12 @@ var _ = SIGDescribe("PreemptionExecutionPath", func() {
var err error var err error
node, err = cs.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{}) node, err = cs.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
if err != nil { if err != nil {
framework.Failf("error getting node %q: %v", nodeName, err) e2elog.Failf("error getting node %q: %v", nodeName, err)
} }
var ok bool var ok bool
nodeHostNameLabel, ok = node.GetObjectMeta().GetLabels()["kubernetes.io/hostname"] nodeHostNameLabel, ok = node.GetObjectMeta().GetLabels()["kubernetes.io/hostname"]
if !ok { if !ok {
framework.Failf("error getting kubernetes.io/hostname label on node %s", nodeName) e2elog.Failf("error getting kubernetes.io/hostname label on node %s", nodeName)
} }
// update Node API object with a fake resource // update Node API object with a fake resource
@ -581,7 +581,7 @@ var _ = SIGDescribe("PreemptionExecutionPath", func() {
for i, got := range rsPodsSeen { for i, got := range rsPodsSeen {
expected := maxRSPodsSeen[i] expected := maxRSPodsSeen[i]
if got > expected { if got > expected {
framework.Failf("pods of ReplicaSet%d have been over-preempted: expect %v pod names, but got %d", i+1, expected, got) e2elog.Failf("pods of ReplicaSet%d have been over-preempted: expect %v pod names, but got %d", i+1, expected, got)
} }
} }
}) })

@ -328,7 +328,7 @@ func computeCPUMemFraction(cs clientset.Interface, node v1.Node, resource *v1.Re
totalRequestedMemResource := resource.Requests.Memory().Value() totalRequestedMemResource := resource.Requests.Memory().Value()
allpods, err := cs.CoreV1().Pods(metav1.NamespaceAll).List(metav1.ListOptions{}) allpods, err := cs.CoreV1().Pods(metav1.NamespaceAll).List(metav1.ListOptions{})
if err != nil { if err != nil {
framework.Failf("Expect error of invalid, got : %v", err) e2elog.Failf("Expect error of invalid, got : %v", err)
} }
for _, pod := range allpods.Items { for _, pod := range allpods.Items {
if pod.Spec.NodeName == node.Name { if pod.Spec.NodeName == node.Name {

@ -27,6 +27,7 @@ import (
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2enode "k8s.io/kubernetes/test/e2e/framework/node" e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
@ -119,7 +120,7 @@ var _ = SIGDescribe("TaintBasedEvictions [Serial]", func() {
nodeSelector := fields.OneTermEqualSelector("metadata.name", nodeName) nodeSelector := fields.OneTermEqualSelector("metadata.name", nodeName)
nodeList, err := cs.CoreV1().Nodes().List(metav1.ListOptions{FieldSelector: nodeSelector.String()}) nodeList, err := cs.CoreV1().Nodes().List(metav1.ListOptions{FieldSelector: nodeSelector.String()})
if err != nil || len(nodeList.Items) != 1 { if err != nil || len(nodeList.Items) != 1 {
framework.Failf("expected no err, got %v; expected len(nodes) = 1, got %v", err, len(nodeList.Items)) e2elog.Failf("expected no err, got %v; expected len(nodes) = 1, got %v", err, len(nodeList.Items))
} }
node := nodeList.Items[0] node := nodeList.Items[0]
@ -139,7 +140,7 @@ var _ = SIGDescribe("TaintBasedEvictions [Serial]", func() {
} }
if ginkgo.CurrentGinkgoTestDescription().Failed { if ginkgo.CurrentGinkgoTestDescription().Failed {
framework.Failf("Current e2e test has failed, so return from here.") e2elog.Failf("Current e2e test has failed, so return from here.")
return return
} }
@ -156,7 +157,7 @@ var _ = SIGDescribe("TaintBasedEvictions [Serial]", func() {
ginkgo.By(fmt.Sprintf("Expecting to see node %q becomes NotReady", nodeName)) ginkgo.By(fmt.Sprintf("Expecting to see node %q becomes NotReady", nodeName))
if !e2enode.WaitForNodeToBeNotReady(cs, nodeName, time.Minute*3) { if !e2enode.WaitForNodeToBeNotReady(cs, nodeName, time.Minute*3) {
framework.Failf("node %q doesn't turn to NotReady after 3 minutes", nodeName) e2elog.Failf("node %q doesn't turn to NotReady after 3 minutes", nodeName)
} }
ginkgo.By("Expecting to see unreachable=:NoExecute taint is applied") ginkgo.By("Expecting to see unreachable=:NoExecute taint is applied")
err = framework.WaitForNodeHasTaintOrNot(cs, nodeName, taint, true, time.Second*30) err = framework.WaitForNodeHasTaintOrNot(cs, nodeName, taint, true, time.Second*30)
@ -188,7 +189,7 @@ var _ = SIGDescribe("TaintBasedEvictions [Serial]", func() {
seconds, err := getTolerationSeconds(livePod1.Spec.Tolerations) seconds, err := getTolerationSeconds(livePod1.Spec.Tolerations)
framework.ExpectNoError(err) framework.ExpectNoError(err)
if seconds != 200 { if seconds != 200 {
framework.Failf("expect tolerationSeconds of pod1 is 200, but got %v", seconds) e2elog.Failf("expect tolerationSeconds of pod1 is 200, but got %v", seconds)
} }
}) })
}) })

@ -196,7 +196,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() {
timeoutChannel := time.NewTimer(time.Duration(kubeletPodDeletionDelaySeconds+additionalWaitPerDeleteSeconds) * time.Second).C timeoutChannel := time.NewTimer(time.Duration(kubeletPodDeletionDelaySeconds+additionalWaitPerDeleteSeconds) * time.Second).C
select { select {
case <-timeoutChannel: case <-timeoutChannel:
framework.Failf("Failed to evict Pod") e2elog.Failf("Failed to evict Pod")
case <-observedDeletions: case <-observedDeletions:
e2elog.Logf("Noticed Pod eviction. Test successful") e2elog.Logf("Noticed Pod eviction. Test successful")
} }
@ -230,7 +230,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() {
case <-timeoutChannel: case <-timeoutChannel:
e2elog.Logf("Pod wasn't evicted. Test successful") e2elog.Logf("Pod wasn't evicted. Test successful")
case <-observedDeletions: case <-observedDeletions:
framework.Failf("Pod was evicted despite toleration") e2elog.Failf("Pod was evicted despite toleration")
} }
}) })
@ -263,14 +263,14 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() {
case <-timeoutChannel: case <-timeoutChannel:
e2elog.Logf("Pod wasn't evicted") e2elog.Logf("Pod wasn't evicted")
case <-observedDeletions: case <-observedDeletions:
framework.Failf("Pod was evicted despite toleration") e2elog.Failf("Pod was evicted despite toleration")
return return
} }
ginkgo.By("Waiting for Pod to be deleted") ginkgo.By("Waiting for Pod to be deleted")
timeoutChannel = time.NewTimer(time.Duration(kubeletPodDeletionDelaySeconds+additionalWaitPerDeleteSeconds) * time.Second).C timeoutChannel = time.NewTimer(time.Duration(kubeletPodDeletionDelaySeconds+additionalWaitPerDeleteSeconds) * time.Second).C
select { select {
case <-timeoutChannel: case <-timeoutChannel:
framework.Failf("Pod wasn't evicted") e2elog.Failf("Pod wasn't evicted")
case <-observedDeletions: case <-observedDeletions:
e2elog.Logf("Pod was evicted after toleration time run out. Test successful") e2elog.Logf("Pod was evicted after toleration time run out. Test successful")
return return
@ -312,7 +312,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() {
case <-timeoutChannel: case <-timeoutChannel:
e2elog.Logf("Pod wasn't evicted. Proceeding") e2elog.Logf("Pod wasn't evicted. Proceeding")
case <-observedDeletions: case <-observedDeletions:
framework.Failf("Pod was evicted despite toleration") e2elog.Failf("Pod was evicted despite toleration")
return return
} }
e2elog.Logf("Removing taint from Node") e2elog.Logf("Removing taint from Node")
@@ -324,7 +324,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() {
case <-timeoutChannel: case <-timeoutChannel:
e2elog.Logf("Pod wasn't evicted. Test successful") e2elog.Logf("Pod wasn't evicted. Test successful")
case <-observedDeletions: case <-observedDeletions:
framework.Failf("Pod was evicted despite toleration") e2elog.Failf("Pod was evicted despite toleration")
} }
}) })
}) })
@@ -383,9 +383,9 @@ var _ = SIGDescribe("NoExecuteTaintManager Multiple Pods [Serial]", func() {
select { select {
case <-timeoutChannel: case <-timeoutChannel:
if evicted == 0 { if evicted == 0 {
framework.Failf("Failed to evict Pod1.") e2elog.Failf("Failed to evict Pod1.")
} else if evicted == 2 { } else if evicted == 2 {
framework.Failf("Pod1 is evicted. But unexpected Pod2 also get evicted.") e2elog.Failf("Pod1 is evicted. But unexpected Pod2 also get evicted.")
} }
return return
case podName := <-observedDeletions: case podName := <-observedDeletions:
@@ -393,7 +393,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Multiple Pods [Serial]", func() {
if podName == podGroup+"1" { if podName == podGroup+"1" {
e2elog.Logf("Noticed Pod %q gets evicted.", podName) e2elog.Logf("Noticed Pod %q gets evicted.", podName)
} else if podName == podGroup+"2" { } else if podName == podGroup+"2" {
framework.Failf("Unexepected Pod %q gets evicted.", podName) e2elog.Failf("Unexepected Pod %q gets evicted.", podName)
return return
} }
} }
@@ -418,7 +418,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Multiple Pods [Serial]", func() {
framework.ExpectNoError(err) framework.ExpectNoError(err)
nodeHostNameLabel, ok := node.GetObjectMeta().GetLabels()["kubernetes.io/hostname"] nodeHostNameLabel, ok := node.GetObjectMeta().GetLabels()["kubernetes.io/hostname"]
if !ok { if !ok {
framework.Failf("error getting kubernetes.io/hostname label on node %s", nodeName) e2elog.Failf("error getting kubernetes.io/hostname label on node %s", nodeName)
} }
framework.ExpectNoError(err) framework.ExpectNoError(err)
e2elog.Logf("Pod1 is running on %v. Tainting Node", nodeName) e2elog.Logf("Pod1 is running on %v. Tainting Node", nodeName)
@@ -441,7 +441,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Multiple Pods [Serial]", func() {
for evicted != 2 { for evicted != 2 {
select { select {
case <-timeoutChannel: case <-timeoutChannel:
framework.Failf("Failed to evict all Pods. %d pod(s) is not evicted.", 2-evicted) e2elog.Failf("Failed to evict all Pods. %d pod(s) is not evicted.", 2-evicted)
return return
case podName := <-observedDeletions: case podName := <-observedDeletions:
e2elog.Logf("Noticed Pod %q gets evicted.", podName) e2elog.Logf("Noticed Pod %q gets evicted.", podName)

@@ -145,7 +145,7 @@ func OnlyAllowNodeZones(f *framework.Framework, zoneCount int, image string) {
e2elog.Logf("deleting claim %q/%q", pvc.Namespace, pvc.Name) e2elog.Logf("deleting claim %q/%q", pvc.Namespace, pvc.Name)
err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(pvc.Name, nil) err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(pvc.Name, nil)
if err != nil { if err != nil {
framework.Failf("Error deleting claim %q. Error: %v", pvc.Name, err) e2elog.Failf("Error deleting claim %q. Error: %v", pvc.Name, err)
} }
}() }()
} }

@@ -17,6 +17,7 @@ go_library(
"//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//test/e2e/framework:go_default_library", "//test/e2e/framework:go_default_library",
"//test/e2e/framework/log:go_default_library",
"//test/utils/image:go_default_library", "//test/utils/image:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library", "//vendor/github.com/onsi/ginkgo:go_default_library",
"//vendor/github.com/onsi/gomega:go_default_library", "//vendor/github.com/onsi/gomega:go_default_library",

@@ -29,6 +29,7 @@ import (
"k8s.io/apimachinery/pkg/watch" "k8s.io/apimachinery/pkg/watch"
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
"github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
"github.com/onsi/gomega" "github.com/onsi/gomega"
@@ -136,10 +137,10 @@ var _ = SIGDescribe("[Feature:PodPreset] PodPreset", func() {
select { select {
case event, _ := <-w.ResultChan(): case event, _ := <-w.ResultChan():
if event.Type != watch.Added { if event.Type != watch.Added {
framework.Failf("Failed to observe pod creation: %v", event) e2elog.Failf("Failed to observe pod creation: %v", event)
} }
case <-time.After(framework.PodStartTimeout): case <-time.After(framework.PodStartTimeout):
framework.Failf("Timeout while waiting for pod creation") e2elog.Failf("Timeout while waiting for pod creation")
} }
// We need to wait for the pod to be running, otherwise the deletion // We need to wait for the pod to be running, otherwise the deletion
@@ -153,15 +154,15 @@ var _ = SIGDescribe("[Feature:PodPreset] PodPreset", func() {
// check the annotation is there // check the annotation is there
if _, ok := pod.Annotations["podpreset.admission.kubernetes.io/podpreset-hello"]; !ok { if _, ok := pod.Annotations["podpreset.admission.kubernetes.io/podpreset-hello"]; !ok {
framework.Failf("Annotation not found in pod annotations: \n%v\n", pod.Annotations) e2elog.Failf("Annotation not found in pod annotations: \n%v\n", pod.Annotations)
} }
// verify the env is the same // verify the env is the same
if !reflect.DeepEqual(pip.Spec.Env, pod.Spec.Containers[0].Env) { if !reflect.DeepEqual(pip.Spec.Env, pod.Spec.Containers[0].Env) {
framework.Failf("env of pod container does not match the env of the pip: expected %#v, got: %#v", pip.Spec.Env, pod.Spec.Containers[0].Env) e2elog.Failf("env of pod container does not match the env of the pip: expected %#v, got: %#v", pip.Spec.Env, pod.Spec.Containers[0].Env)
} }
if !reflect.DeepEqual(pip.Spec.Env, pod.Spec.InitContainers[0].Env) { if !reflect.DeepEqual(pip.Spec.Env, pod.Spec.InitContainers[0].Env) {
framework.Failf("env of pod init container does not match the env of the pip: expected %#v, got: %#v", pip.Spec.Env, pod.Spec.InitContainers[0].Env) e2elog.Failf("env of pod init container does not match the env of the pip: expected %#v, got: %#v", pip.Spec.Env, pod.Spec.InitContainers[0].Env)
} }
}) })
@@ -256,10 +257,10 @@ var _ = SIGDescribe("[Feature:PodPreset] PodPreset", func() {
select { select {
case event, _ := <-w.ResultChan(): case event, _ := <-w.ResultChan():
if event.Type != watch.Added { if event.Type != watch.Added {
framework.Failf("Failed to observe pod creation: %v", event) e2elog.Failf("Failed to observe pod creation: %v", event)
} }
case <-time.After(framework.PodStartTimeout): case <-time.After(framework.PodStartTimeout):
framework.Failf("Timeout while waiting for pod creation") e2elog.Failf("Timeout while waiting for pod creation")
} }
// We need to wait for the pod to be running, otherwise the deletion // We need to wait for the pod to be running, otherwise the deletion
@@ -273,15 +274,15 @@ var _ = SIGDescribe("[Feature:PodPreset] PodPreset", func() {
// check the annotation is not there // check the annotation is not there
if _, ok := pod.Annotations["podpreset.admission.kubernetes.io/podpreset-hello"]; ok { if _, ok := pod.Annotations["podpreset.admission.kubernetes.io/podpreset-hello"]; ok {
framework.Failf("Annotation found in pod annotations and should not be: \n%v\n", pod.Annotations) e2elog.Failf("Annotation found in pod annotations and should not be: \n%v\n", pod.Annotations)
} }
// verify the env is the same // verify the env is the same
if !reflect.DeepEqual(originalPod.Spec.Containers[0].Env, pod.Spec.Containers[0].Env) { if !reflect.DeepEqual(originalPod.Spec.Containers[0].Env, pod.Spec.Containers[0].Env) {
framework.Failf("env of pod container does not match the env of the original pod: expected %#v, got: %#v", originalPod.Spec.Containers[0].Env, pod.Spec.Containers[0].Env) e2elog.Failf("env of pod container does not match the env of the original pod: expected %#v, got: %#v", originalPod.Spec.Containers[0].Env, pod.Spec.Containers[0].Env)
} }
if !reflect.DeepEqual(originalPod.Spec.InitContainers[0].Env, pod.Spec.InitContainers[0].Env) { if !reflect.DeepEqual(originalPod.Spec.InitContainers[0].Env, pod.Spec.InitContainers[0].Env) {
framework.Failf("env of pod init container does not match the env of the original pod: expected %#v, got: %#v", originalPod.Spec.InitContainers[0].Env, pod.Spec.InitContainers[0].Env) e2elog.Failf("env of pod init container does not match the env of the original pod: expected %#v, got: %#v", originalPod.Spec.InitContainers[0].Env, pod.Spec.InitContainers[0].Env)
} }
}) })

@@ -445,7 +445,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
pvcSize := pvc.Spec.Resources.Requests[v1.ResourceStorage] pvcSize := pvc.Spec.Resources.Requests[v1.ResourceStorage]
if pvcSize.Cmp(newSize) != 0 { if pvcSize.Cmp(newSize) != 0 {
framework.Failf("error updating pvc size %q", pvc.Name) e2elog.Failf("error updating pvc size %q", pvc.Name)
} }
if test.expectFailure { if test.expectFailure {
err = waitForResizingCondition(pvc, m.cs, csiResizingConditionWait) err = waitForResizingCondition(pvc, m.cs, csiResizingConditionWait)
@@ -536,7 +536,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
pvcSize := pvc.Spec.Resources.Requests[v1.ResourceStorage] pvcSize := pvc.Spec.Resources.Requests[v1.ResourceStorage]
if pvcSize.Cmp(newSize) != 0 { if pvcSize.Cmp(newSize) != 0 {
framework.Failf("error updating pvc size %q", pvc.Name) e2elog.Failf("error updating pvc size %q", pvc.Name)
} }
ginkgo.By("Waiting for persistent volume resize to finish") ginkgo.By("Waiting for persistent volume resize to finish")

@@ -45,6 +45,7 @@ import (
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/sets"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
"k8s.io/kubernetes/test/e2e/storage/testpatterns" "k8s.io/kubernetes/test/e2e/storage/testpatterns"
"k8s.io/kubernetes/test/e2e/storage/testsuites" "k8s.io/kubernetes/test/e2e/storage/testsuites"
"k8s.io/kubernetes/test/e2e/storage/utils" "k8s.io/kubernetes/test/e2e/storage/utils"
@@ -159,7 +160,7 @@ func (h *hostpathCSIDriver) PrepareTest(f *framework.Framework) (*testsuites.Per
}, },
h.manifests...) h.manifests...)
if err != nil { if err != nil {
framework.Failf("deploying %s driver: %v", h.driverInfo.Name, err) e2elog.Failf("deploying %s driver: %v", h.driverInfo.Name, err)
} }
return config, func() { return config, func() {
@@ -304,7 +305,7 @@ func (m *mockCSIDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTest
}, },
m.manifests...) m.manifests...)
if err != nil { if err != nil {
framework.Failf("deploying csi mock driver: %v", err) e2elog.Failf("deploying csi mock driver: %v", err)
} }
return config, func() { return config, func() {
@@ -420,7 +421,7 @@ func (g *gcePDCSIDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTes
cleanup, err := f.CreateFromManifests(nil, manifests...) cleanup, err := f.CreateFromManifests(nil, manifests...)
if err != nil { if err != nil {
framework.Failf("deploying csi gce-pd driver: %v", err) e2elog.Failf("deploying csi gce-pd driver: %v", err)
} }
return &testsuites.PerTestConfig{ return &testsuites.PerTestConfig{

@@ -202,7 +202,7 @@ func (n *nfsDriver) CreateVolume(config *testsuites.PerTestConfig, volType testp
case testpatterns.DynamicPV: case testpatterns.DynamicPV:
// Do nothing // Do nothing
default: default:
framework.Failf("Unsupported volType:%v is specified", volType) e2elog.Failf("Unsupported volType:%v is specified", volType)
} }
return nil return nil
} }
@@ -317,14 +317,14 @@ func (v *glusterVolume) DeleteVolume() {
err := cs.CoreV1().Endpoints(ns.Name).Delete(name, nil) err := cs.CoreV1().Endpoints(ns.Name).Delete(name, nil)
if err != nil { if err != nil {
if !errors.IsNotFound(err) { if !errors.IsNotFound(err) {
framework.Failf("Gluster delete endpoints failed: %v", err) e2elog.Failf("Gluster delete endpoints failed: %v", err)
} }
e2elog.Logf("Gluster endpoints %q not found, assuming deleted", name) e2elog.Logf("Gluster endpoints %q not found, assuming deleted", name)
} }
e2elog.Logf("Deleting Gluster server pod %q...", v.serverPod.Name) e2elog.Logf("Deleting Gluster server pod %q...", v.serverPod.Name)
err = framework.DeletePodWithWait(f, cs, v.serverPod) err = framework.DeletePodWithWait(f, cs, v.serverPod)
if err != nil { if err != nil {
framework.Failf("Gluster server pod delete failed: %v", err) e2elog.Failf("Gluster server pod delete failed: %v", err)
} }
} }
@@ -1738,7 +1738,7 @@ func (l *localDriver) CreateVolume(config *testsuites.PerTestConfig, volType tes
ltr: l.ltrMgr.Create(node, l.volumeType, nil), ltr: l.ltrMgr.Create(node, l.volumeType, nil),
} }
default: default:
framework.Failf("Unsupported volType: %v is specified", volType) e2elog.Failf("Unsupported volType: %v is specified", volType)
} }
return nil return nil
} }
@@ -1750,11 +1750,11 @@ func (v *localVolume) DeleteVolume() {
func (l *localDriver) nodeAffinityForNode(node *v1.Node) *v1.VolumeNodeAffinity { func (l *localDriver) nodeAffinityForNode(node *v1.Node) *v1.VolumeNodeAffinity {
nodeKey := "kubernetes.io/hostname" nodeKey := "kubernetes.io/hostname"
if node.Labels == nil { if node.Labels == nil {
framework.Failf("Node does not have labels") e2elog.Failf("Node does not have labels")
} }
nodeValue, found := node.Labels[nodeKey] nodeValue, found := node.Labels[nodeKey]
if !found { if !found {
framework.Failf("Node does not have required label %q", nodeKey) e2elog.Failf("Node does not have required label %q", nodeKey)
} }
return &v1.VolumeNodeAffinity{ return &v1.VolumeNodeAffinity{
Required: &v1.NodeSelector{ Required: &v1.NodeSelector{

@@ -26,6 +26,7 @@ import (
"k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/uuid" "k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"k8s.io/kubernetes/test/e2e/storage/utils" "k8s.io/kubernetes/test/e2e/storage/utils"
imageutils "k8s.io/kubernetes/test/utils/image" imageutils "k8s.io/kubernetes/test/utils/image"
@@ -78,7 +79,7 @@ var _ = utils.SIGDescribe("EmptyDir wrapper volumes", func() {
var err error var err error
if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret); err != nil { if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret); err != nil {
framework.Failf("unable to create test secret %s: %v", secret.Name, err) e2elog.Failf("unable to create test secret %s: %v", secret.Name, err)
} }
configMapVolumeName := "configmap-volume" configMapVolumeName := "configmap-volume"
@@ -95,7 +96,7 @@ var _ = utils.SIGDescribe("EmptyDir wrapper volumes", func() {
} }
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil { if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err) e2elog.Failf("unable to create test configMap %s: %v", configMap.Name, err)
} }
pod := &v1.Pod{ pod := &v1.Pod{
@@ -147,15 +148,15 @@ var _ = utils.SIGDescribe("EmptyDir wrapper volumes", func() {
defer func() { defer func() {
ginkgo.By("Cleaning up the secret") ginkgo.By("Cleaning up the secret")
if err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(secret.Name, nil); err != nil { if err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(secret.Name, nil); err != nil {
framework.Failf("unable to delete secret %v: %v", secret.Name, err) e2elog.Failf("unable to delete secret %v: %v", secret.Name, err)
} }
ginkgo.By("Cleaning up the configmap") ginkgo.By("Cleaning up the configmap")
if err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(configMap.Name, nil); err != nil { if err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(configMap.Name, nil); err != nil {
framework.Failf("unable to delete configmap %v: %v", configMap.Name, err) e2elog.Failf("unable to delete configmap %v: %v", configMap.Name, err)
} }
ginkgo.By("Cleaning up the pod") ginkgo.By("Cleaning up the pod")
if err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(pod.Name, metav1.NewDeleteOptions(0)); err != nil { if err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(pod.Name, metav1.NewDeleteOptions(0)); err != nil {
framework.Failf("unable to delete pod %v: %v", pod.Name, err) e2elog.Failf("unable to delete pod %v: %v", pod.Name, err)
} }
}() }()
}) })
@@ -253,17 +254,17 @@ func createGitServer(f *framework.Framework) (gitURL string, gitRepo string, cle
} }
if gitServerSvc, err = f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(gitServerSvc); err != nil { if gitServerSvc, err = f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(gitServerSvc); err != nil {
framework.Failf("unable to create test git server service %s: %v", gitServerSvc.Name, err) e2elog.Failf("unable to create test git server service %s: %v", gitServerSvc.Name, err)
} }
return "http://" + gitServerSvc.Spec.ClusterIP + ":" + strconv.Itoa(httpPort), "test", func() { return "http://" + gitServerSvc.Spec.ClusterIP + ":" + strconv.Itoa(httpPort), "test", func() {
ginkgo.By("Cleaning up the git server pod") ginkgo.By("Cleaning up the git server pod")
if err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(gitServerPod.Name, metav1.NewDeleteOptions(0)); err != nil { if err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(gitServerPod.Name, metav1.NewDeleteOptions(0)); err != nil {
framework.Failf("unable to delete git server pod %v: %v", gitServerPod.Name, err) e2elog.Failf("unable to delete git server pod %v: %v", gitServerPod.Name, err)
} }
ginkgo.By("Cleaning up the git server svc") ginkgo.By("Cleaning up the git server svc")
if err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(gitServerSvc.Name, nil); err != nil { if err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(gitServerSvc.Name, nil); err != nil {
framework.Failf("unable to delete git server svc %v: %v", gitServerSvc.Name, err) e2elog.Failf("unable to delete git server svc %v: %v", gitServerSvc.Name, err)
} }
} }
} }

@@ -29,6 +29,7 @@ import (
apierrs "k8s.io/apimachinery/pkg/api/errors" apierrs "k8s.io/apimachinery/pkg/api/errors"
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2enode "k8s.io/kubernetes/test/e2e/framework/node" e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2essh "k8s.io/kubernetes/test/e2e/framework/ssh" e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
"k8s.io/kubernetes/test/e2e/framework/testfiles" "k8s.io/kubernetes/test/e2e/framework/testfiles"
@@ -118,7 +119,7 @@ func uninstallFlex(c clientset.Interface, node *v1.Node, vendor, driver string)
} }
if host == "" { if host == "" {
framework.Failf("Error getting node ip : %v", err) e2elog.Failf("Error getting node ip : %v", err)
} }
cmd := fmt.Sprintf("sudo rm -r %s", flexDir) cmd := fmt.Sprintf("sudo rm -r %s", flexDir)
@@ -139,7 +140,7 @@ func sshAndLog(cmd, host string, failOnError bool) {
e2essh.LogResult(result) e2essh.LogResult(result)
framework.ExpectNoError(err) framework.ExpectNoError(err)
if result.Code != 0 && failOnError { if result.Code != 0 && failOnError {
framework.Failf("%s returned non-zero, stderr: %s", cmd, result.Stderr) e2elog.Failf("%s returned non-zero, stderr: %s", cmd, result.Stderr)
} }
} }

@@ -63,7 +63,7 @@ var _ = utils.SIGDescribe("Mounted flexvolume expand[Slow]", func() {
if len(nodeList.Items) != 0 { if len(nodeList.Items) != 0 {
nodeName = nodeList.Items[0].Name nodeName = nodeList.Items[0].Name
} else { } else {
framework.Failf("Unable to find ready and schedulable Node") e2elog.Failf("Unable to find ready and schedulable Node")
} }
nodeKey = "mounted_flexvolume_expand" nodeKey = "mounted_flexvolume_expand"
@@ -107,7 +107,7 @@ var _ = utils.SIGDescribe("Mounted flexvolume expand[Slow]", func() {
if c != nil { if c != nil {
if errs := framework.PVPVCCleanup(c, ns, nil, pvc); len(errs) > 0 { if errs := framework.PVPVCCleanup(c, ns, nil, pvc); len(errs) > 0 {
framework.Failf("AfterEach: Failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs)) e2elog.Failf("AfterEach: Failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs))
} }
pvc, nodeName, isNodeLabeled, nodeLabelValue = nil, "", false, "" pvc, nodeName, isNodeLabeled, nodeLabelValue = nil, "", false, ""
nodeKeyValueLabel = make(map[string]string) nodeKeyValueLabel = make(map[string]string)
@@ -157,7 +157,7 @@ var _ = utils.SIGDescribe("Mounted flexvolume expand[Slow]", func() {
pvcSize := pvc.Spec.Resources.Requests[v1.ResourceStorage] pvcSize := pvc.Spec.Resources.Requests[v1.ResourceStorage]
if pvcSize.Cmp(newSize) != 0 { if pvcSize.Cmp(newSize) != 0 {
framework.Failf("error updating pvc size %q", pvc.Name) e2elog.Failf("error updating pvc size %q", pvc.Name)
} }
ginkgo.By("Waiting for cloudprovider resize to finish") ginkgo.By("Waiting for cloudprovider resize to finish")

@@ -60,7 +60,7 @@ var _ = utils.SIGDescribe("Mounted flexvolume volume expand [Slow] [Feature:Expa
nodeList = framework.GetReadySchedulableNodesOrDie(f.ClientSet) nodeList = framework.GetReadySchedulableNodesOrDie(f.ClientSet)
if len(nodeList.Items) == 0 { if len(nodeList.Items) == 0 {
framework.Failf("unable to find ready and schedulable Node") e2elog.Failf("unable to find ready and schedulable Node")
} }
nodeName = nodeList.Items[0].Name nodeName = nodeList.Items[0].Name
@@ -106,7 +106,7 @@ var _ = utils.SIGDescribe("Mounted flexvolume volume expand [Slow] [Feature:Expa
if c != nil { if c != nil {
if errs := framework.PVPVCCleanup(c, ns, nil, pvc); len(errs) > 0 { if errs := framework.PVPVCCleanup(c, ns, nil, pvc); len(errs) > 0 {
framework.Failf("AfterEach: Failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs)) e2elog.Failf("AfterEach: Failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs))
} }
pvc, nodeName, isNodeLabeled, nodeLabelValue = nil, "", false, "" pvc, nodeName, isNodeLabeled, nodeLabelValue = nil, "", false, ""
nodeKeyValueLabel = make(map[string]string) nodeKeyValueLabel = make(map[string]string)
@@ -161,7 +161,7 @@ var _ = utils.SIGDescribe("Mounted flexvolume volume expand [Slow] [Feature:Expa
pvcSize := pvc.Spec.Resources.Requests[v1.ResourceStorage] pvcSize := pvc.Spec.Resources.Requests[v1.ResourceStorage]
if pvcSize.Cmp(newSize) != 0 { if pvcSize.Cmp(newSize) != 0 {
framework.Failf("error updating pvc size %q", pvc.Name) e2elog.Failf("error updating pvc size %q", pvc.Name)
} }
ginkgo.By("Waiting for cloudprovider resize to finish") ginkgo.By("Waiting for cloudprovider resize to finish")

@@ -62,7 +62,7 @@ var _ = utils.SIGDescribe("Mounted volume expand", func() {
if len(nodeList.Items) != 0 { if len(nodeList.Items) != 0 {
nodeName = nodeList.Items[0].Name nodeName = nodeList.Items[0].Name
} else { } else {
framework.Failf("Unable to find ready and schedulable Node") e2elog.Failf("Unable to find ready and schedulable Node")
} }
nodeKey = "mounted_volume_expand" nodeKey = "mounted_volume_expand"
@@ -102,7 +102,7 @@ var _ = utils.SIGDescribe("Mounted volume expand", func() {
if c != nil { if c != nil {
if errs := framework.PVPVCCleanup(c, ns, nil, pvc); len(errs) > 0 { if errs := framework.PVPVCCleanup(c, ns, nil, pvc); len(errs) > 0 {
framework.Failf("AfterEach: Failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs)) e2elog.Failf("AfterEach: Failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs))
} }
pvc, nodeName, isNodeLabeled, nodeLabelValue = nil, "", false, "" pvc, nodeName, isNodeLabeled, nodeLabelValue = nil, "", false, ""
nodeKeyValueLabel = make(map[string]string) nodeKeyValueLabel = make(map[string]string)
@@ -134,7 +134,7 @@ var _ = utils.SIGDescribe("Mounted volume expand", func() {
pvcSize := pvc.Spec.Resources.Requests[v1.ResourceStorage] pvcSize := pvc.Spec.Resources.Requests[v1.ResourceStorage]
if pvcSize.Cmp(newSize) != 0 { if pvcSize.Cmp(newSize) != 0 {
framework.Failf("error updating pvc size %q", pvc.Name) e2elog.Failf("error updating pvc size %q", pvc.Name)
} }
ginkgo.By("Waiting for cloudprovider resize to finish") ginkgo.By("Waiting for cloudprovider resize to finish")

@@ -157,11 +157,11 @@ var _ = utils.SIGDescribe("NFSPersistentVolumes[Disruptive][Flaky]", func() {
// Delete PV and PVCs // Delete PV and PVCs
if errs := framework.PVPVCCleanup(c, ns, pv1, pvc1); len(errs) > 0 { if errs := framework.PVPVCCleanup(c, ns, pv1, pvc1); len(errs) > 0 {
framework.Failf("AfterEach: Failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs)) e2elog.Failf("AfterEach: Failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs))
} }
pv1, pvc1 = nil, nil pv1, pvc1 = nil, nil
if errs := framework.PVPVCCleanup(c, ns, pv2, pvc2); len(errs) > 0 { if errs := framework.PVPVCCleanup(c, ns, pv2, pvc2); len(errs) > 0 {
framework.Failf("AfterEach: Failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs)) e2elog.Failf("AfterEach: Failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs))
} }
pv2, pvc2 = nil, nil pv2, pvc2 = nil, nil

@@ -366,7 +366,7 @@ var _ = utils.SIGDescribe("Pod Disks", func() {
// if this defer is reached due to an Expect then nested // if this defer is reached due to an Expect then nested
// Expects are lost, so use Failf here // Expects are lost, so use Failf here
if numNodes != origNodeCnt { if numNodes != origNodeCnt {
framework.Failf("defer: Requires current node count (%d) to return to original node count (%d)", numNodes, origNodeCnt) e2elog.Failf("defer: Requires current node count (%d) to return to original node count (%d)", numNodes, origNodeCnt)
} }
} }
}() }()
@@ -520,7 +520,7 @@ func testPDPod(diskNames []string, targetNode types.NodeName, readOnly bool, num
// escape if not a supported provider // escape if not a supported provider
if !(framework.TestContext.Provider == "gce" || framework.TestContext.Provider == "gke" || if !(framework.TestContext.Provider == "gce" || framework.TestContext.Provider == "gke" ||
framework.TestContext.Provider == "aws") { framework.TestContext.Provider == "aws") {
framework.Failf(fmt.Sprintf("func `testPDPod` only supports gce, gke, and aws providers, not %v", framework.TestContext.Provider)) e2elog.Failf(fmt.Sprintf("func `testPDPod` only supports gce, gke, and aws providers, not %v", framework.TestContext.Provider))
} }
containers := make([]v1.Container, numContainers) containers := make([]v1.Container, numContainers)

@@ -109,7 +109,7 @@ var _ = utils.SIGDescribe("PersistentVolumes GCEPD", func() {
if c != nil { if c != nil {
framework.ExpectNoError(framework.DeletePodWithWait(f, c, clientPod)) framework.ExpectNoError(framework.DeletePodWithWait(f, c, clientPod))
if errs := framework.PVPVCCleanup(c, ns, pv, pvc); len(errs) > 0 { if errs := framework.PVPVCCleanup(c, ns, pv, pvc); len(errs) > 0 {
framework.Failf("AfterEach: Failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs)) e2elog.Failf("AfterEach: Failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs))
} }
clientPod, pv, pvc, node = nil, nil, nil, "" clientPod, pv, pvc, node = nil, nil, nil, ""
if diskName != "" { if diskName != "" {

@@ -829,7 +829,7 @@ func cleanupLocalPVCsPVs(config *localTestConfig, volumes []*localTestVolume) {
ginkgo.By("Cleaning up PVC and PV") ginkgo.By("Cleaning up PVC and PV")
errs := framework.PVPVCCleanup(config.client, config.ns, volume.pv, volume.pvc) errs := framework.PVPVCCleanup(config.client, config.ns, volume.pv, volume.pvc)
if len(errs) > 0 { if len(errs) > 0 {
framework.Failf("Failed to delete PV and/or PVC: %v", utilerrors.NewAggregate(errs)) e2elog.Failf("Failed to delete PV and/or PVC: %v", utilerrors.NewAggregate(errs))
} }
} }
} }
@@ -870,11 +870,11 @@ func makeLocalPVConfig(config *localTestConfig, volume *localTestVolume) framewo
// TODO: hostname may not be the best option // TODO: hostname may not be the best option
nodeKey := "kubernetes.io/hostname" nodeKey := "kubernetes.io/hostname"
if volume.ltr.Node.Labels == nil { if volume.ltr.Node.Labels == nil {
framework.Failf("Node does not have labels") e2elog.Failf("Node does not have labels")
} }
nodeValue, found := volume.ltr.Node.Labels[nodeKey] nodeValue, found := volume.ltr.Node.Labels[nodeKey]
if !found { if !found {
framework.Failf("Node does not have required label %q", nodeKey) e2elog.Failf("Node does not have required label %q", nodeKey)
} }
pvConfig := framework.PersistentVolumeConfig{ pvConfig := framework.PersistentVolumeConfig{

@@ -154,7 +154,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() {
ginkgo.AfterEach(func() { ginkgo.AfterEach(func() {
e2elog.Logf("AfterEach: Cleaning up test resources.") e2elog.Logf("AfterEach: Cleaning up test resources.")
if errs := framework.PVPVCCleanup(c, ns, pv, pvc); len(errs) > 0 { if errs := framework.PVPVCCleanup(c, ns, pv, pvc); len(errs) > 0 {
framework.Failf("AfterEach: Failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs)) e2elog.Failf("AfterEach: Failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs))
} }
}) })
@@ -221,7 +221,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() {
for _, e := range errs { for _, e := range errs {
errmsg = append(errmsg, e.Error()) errmsg = append(errmsg, e.Error())
} }
framework.Failf("AfterEach: Failed to delete 1 or more PVs/PVCs. Errors: %v", strings.Join(errmsg, "; ")) e2elog.Failf("AfterEach: Failed to delete 1 or more PVs/PVCs. Errors: %v", strings.Join(errmsg, "; "))
} }
}) })
@@ -270,7 +270,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() {
ginkgo.AfterEach(func() { ginkgo.AfterEach(func() {
e2elog.Logf("AfterEach: Cleaning up test resources.") e2elog.Logf("AfterEach: Cleaning up test resources.")
if errs := framework.PVPVCCleanup(c, ns, pv, pvc); len(errs) > 0 { if errs := framework.PVPVCCleanup(c, ns, pv, pvc); len(errs) > 0 {
framework.Failf("AfterEach: Failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs)) e2elog.Failf("AfterEach: Failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs))
} }
}) })

@@ -92,7 +92,7 @@ var _ = utils.SIGDescribe("PV Protection", func() {
ginkgo.AfterEach(func() { ginkgo.AfterEach(func() {
e2elog.Logf("AfterEach: Cleaning up test resources.") e2elog.Logf("AfterEach: Cleaning up test resources.")
if errs := framework.PVPVCCleanup(client, nameSpace, pv, pvc); len(errs) > 0 { if errs := framework.PVPVCCleanup(client, nameSpace, pv, pvc); len(errs) > 0 {
framework.Failf("AfterEach: Failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs)) e2elog.Failf("AfterEach: Failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs))
} }
}) })

@@ -340,11 +340,11 @@ func testRegionalDelayedBinding(c clientset.Interface, ns string, pvcCount int)
} }
pvs, node := test.TestBindingWaitForFirstConsumerMultiPVC(claims, nil /* node selector */, false /* expect unschedulable */) pvs, node := test.TestBindingWaitForFirstConsumerMultiPVC(claims, nil /* node selector */, false /* expect unschedulable */)
if node == nil { if node == nil {
framework.Failf("unexpected nil node found") e2elog.Failf("unexpected nil node found")
} }
zone, ok := node.Labels[v1.LabelZoneFailureDomain] zone, ok := node.Labels[v1.LabelZoneFailureDomain]
if !ok { if !ok {
framework.Failf("label %s not found on Node", v1.LabelZoneFailureDomain) e2elog.Failf("label %s not found on Node", v1.LabelZoneFailureDomain)
} }
for _, pv := range pvs { for _, pv := range pvs {
checkZoneFromLabelAndAffinity(pv, zone, false) checkZoneFromLabelAndAffinity(pv, zone, false)
@@ -400,11 +400,11 @@ func testRegionalAllowedTopologiesWithDelayedBinding(c clientset.Interface, ns s
} }
pvs, node := test.TestBindingWaitForFirstConsumerMultiPVC(claims, nil /* node selector */, false /* expect unschedulable */) pvs, node := test.TestBindingWaitForFirstConsumerMultiPVC(claims, nil /* node selector */, false /* expect unschedulable */)
if node == nil { if node == nil {
framework.Failf("unexpected nil node found") e2elog.Failf("unexpected nil node found")
} }
nodeZone, ok := node.Labels[v1.LabelZoneFailureDomain] nodeZone, ok := node.Labels[v1.LabelZoneFailureDomain]
if !ok { if !ok {
framework.Failf("label %s not found on Node", v1.LabelZoneFailureDomain) e2elog.Failf("label %s not found on Node", v1.LabelZoneFailureDomain)
} }
zoneFound := false zoneFound := false
for _, zone := range topoZones { for _, zone := range topoZones {
@@ -414,7 +414,7 @@ func testRegionalAllowedTopologiesWithDelayedBinding(c clientset.Interface, ns s
} }
} }
if !zoneFound { if !zoneFound {
framework.Failf("zones specified in AllowedTopologies: %v does not contain zone of node where PV got provisioned: %s", topoZones, nodeZone) e2elog.Failf("zones specified in AllowedTopologies: %v does not contain zone of node where PV got provisioned: %s", topoZones, nodeZone)
} }
for _, pv := range pvs { for _, pv := range pvs {
checkZonesFromLabelAndAffinity(pv, sets.NewString(topoZones...), true) checkZonesFromLabelAndAffinity(pv, sets.NewString(topoZones...), true)

@@ -226,7 +226,7 @@ func createGenericVolumeTestResource(driver TestDriver, config *PerTestConfig, p
r.volType = fmt.Sprintf("%s-dynamicPV", dInfo.Name) r.volType = fmt.Sprintf("%s-dynamicPV", dInfo.Name)
} }
default: default:
framework.Failf("genericVolumeTestResource doesn't support: %s", volType) e2elog.Failf("genericVolumeTestResource doesn't support: %s", volType)
} }
if r.volSource == nil { if r.volSource == nil {
@@ -246,13 +246,13 @@ func (r *genericVolumeTestResource) cleanupResource() {
case testpatterns.PreprovisionedPV: case testpatterns.PreprovisionedPV:
ginkgo.By("Deleting pv and pvc") ginkgo.By("Deleting pv and pvc")
if errs := framework.PVPVCCleanup(f.ClientSet, f.Namespace.Name, r.pv, r.pvc); len(errs) != 0 { if errs := framework.PVPVCCleanup(f.ClientSet, f.Namespace.Name, r.pv, r.pvc); len(errs) != 0 {
framework.Failf("Failed to delete PVC or PV: %v", utilerrors.NewAggregate(errs)) e2elog.Failf("Failed to delete PVC or PV: %v", utilerrors.NewAggregate(errs))
} }
case testpatterns.DynamicPV: case testpatterns.DynamicPV:
ginkgo.By("Deleting pvc") ginkgo.By("Deleting pvc")
// We only delete the PVC so that PV (and disk) can be cleaned up by dynamic provisioner // We only delete the PVC so that PV (and disk) can be cleaned up by dynamic provisioner
if r.pv != nil && r.pv.Spec.PersistentVolumeReclaimPolicy != v1.PersistentVolumeReclaimDelete { if r.pv != nil && r.pv.Spec.PersistentVolumeReclaimPolicy != v1.PersistentVolumeReclaimDelete {
framework.Failf("Test framework does not currently support Dynamically Provisioned Persistent Volume %v specified with reclaim policy that isnt %v", e2elog.Failf("Test framework does not currently support Dynamically Provisioned Persistent Volume %v specified with reclaim policy that isnt %v",
r.pv.Name, v1.PersistentVolumeReclaimDelete) r.pv.Name, v1.PersistentVolumeReclaimDelete)
} }
if r.pvc != nil { if r.pvc != nil {
@@ -264,7 +264,7 @@ func (r *genericVolumeTestResource) cleanupResource() {
} }
} }
default: default:
framework.Failf("Found PVC (%v) or PV (%v) but not running Preprovisioned or Dynamic test pattern", r.pvc, r.pv) e2elog.Failf("Found PVC (%v) or PV (%v) but not running Preprovisioned or Dynamic test pattern", r.pvc, r.pv)
} }
} }
@@ -601,7 +601,7 @@ func validateMigrationVolumeOpCounts(cs clientset.Interface, pluginName string,
for op, count := range newInTreeOps { for op, count := range newInTreeOps {
if count != oldInTreeOps[op] { if count != oldInTreeOps[op] {
framework.Failf("In-tree plugin %v migrated to CSI Driver, however found %v %v metrics for in-tree plugin", pluginName, count-oldInTreeOps[op], op) e2elog.Failf("In-tree plugin %v migrated to CSI Driver, however found %v %v metrics for in-tree plugin", pluginName, count-oldInTreeOps[op], op)
} }
} }
// We don't check for migrated metrics because some negative test cases // We don't check for migrated metrics because some negative test cases
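
The diff above only shows call sites, so for readers unfamiliar with the helper being adopted: a Failf-style function logs the formatted message and then aborts the current ginkgo spec. An illustrative stand-in — not the actual test/e2e/framework/log implementation — might look like this:

package log

import (
	"fmt"
	"time"

	"github.com/onsi/ginkgo"
)

// Failf sketches the expected behaviour: record the failure with a
// timestamp, then fail the running spec, skipping this frame in the trace.
func Failf(format string, args ...interface{}) {
	msg := fmt.Sprintf(format, args...)
	fmt.Printf("%s: FAIL: %s\n", time.Now().Format(time.StampMilli), msg)
	ginkgo.Fail(msg, 1)
}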
