refactor: replace framework.Failf with e2elog.Failf
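The change is mechanical: every remaining framework.Failf call site in the files below now calls e2elog.Failf with the same format string and arguments. A minimal sketch of the pattern, assuming e2elog aliases the k8s.io/kubernetes/test/e2e/framework/log package as in the affected files (checkRootCA is a hypothetical helper for illustration, not code from this commit):

    package example

    import (
        e2elog "k8s.io/kubernetes/test/e2e/framework/log"
    )

    // checkRootCA mirrors the call-site pattern applied throughout this commit:
    // the format string and arguments stay the same, only the package changes.
    func checkRootCA(rootCAs map[string][]byte, secretName string) []byte {
        rootCA, ok := rootCAs[secretName]
        if !ok {
            // before: framework.Failf("Failed to retrieve rootCAs, no recorded secret by name %v", secretName)
            e2elog.Failf("Failed to retrieve rootCAs, no recorded secret by name %v", secretName)
        }
        return rootCA
    }

Each hunk below follows this pattern; no other behavior changes.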
@@ -473,7 +473,7 @@ func (j *TestJig) Update(update func(ing *networkingv1beta1.Ingress)) {
 	for i := 0; i < 3; i++ {
 		j.Ingress, err = j.Client.NetworkingV1beta1().Ingresses(ns).Get(name, metav1.GetOptions{})
 		if err != nil {
-			framework.Failf("failed to get ingress %s/%s: %v", ns, name, err)
+			e2elog.Failf("failed to get ingress %s/%s: %v", ns, name, err)
 		}
 		update(j.Ingress)
 		j.Ingress, err = j.runUpdate(j.Ingress)
@@ -482,10 +482,10 @@ func (j *TestJig) Update(update func(ing *networkingv1beta1.Ingress)) {
 			return
 		}
 		if !apierrs.IsConflict(err) && !apierrs.IsServerTimeout(err) {
-			framework.Failf("failed to update ingress %s/%s: %v", ns, name, err)
+			e2elog.Failf("failed to update ingress %s/%s: %v", ns, name, err)
 		}
 	}
-	framework.Failf("too many retries updating ingress %s/%s", ns, name)
+	e2elog.Failf("too many retries updating ingress %s/%s", ns, name)
 }
 
 // AddHTTPS updates the ingress to add this secret for these hosts.
@@ -543,7 +543,7 @@ func (j *TestJig) GetRootCA(secretName string) (rootCA []byte) {
 	var ok bool
 	rootCA, ok = j.RootCAs[secretName]
 	if !ok {
-		framework.Failf("Failed to retrieve rootCAs, no recorded secret by name %v", secretName)
+		e2elog.Failf("Failed to retrieve rootCAs, no recorded secret by name %v", secretName)
 	}
 	return
 }
@@ -675,7 +675,7 @@ func (j *TestJig) pollIngressWithCert(ing *networkingv1beta1.Ingress, address st
 // WaitForIngress returns when it gets the first 200 response
 func (j *TestJig) WaitForIngress(waitForNodePort bool) {
 	if err := j.WaitForGivenIngressWithTimeout(j.Ingress, waitForNodePort, framework.LoadBalancerPollTimeout); err != nil {
-		framework.Failf("error in waiting for ingress to get an address: %s", err)
+		e2elog.Failf("error in waiting for ingress to get an address: %s", err)
 	}
 }
 
@@ -688,7 +688,7 @@ func (j *TestJig) WaitForIngressToStable() {
 		}
 		return true, nil
 	}); err != nil {
-		framework.Failf("error in waiting for ingress to stablize: %v", err)
+		e2elog.Failf("error in waiting for ingress to stablize: %v", err)
 	}
 }
 
@@ -814,7 +814,7 @@ func (j *TestJig) GetDistinctResponseFromIngress() (sets.String, error) {
 	// Wait for the loadbalancer IP.
 	address, err := j.WaitForIngressAddress(j.Client, j.Ingress.Namespace, j.Ingress.Name, framework.LoadBalancerPollTimeout)
 	if err != nil {
-		framework.Failf("Ingress failed to acquire an IP address within %v", framework.LoadBalancerPollTimeout)
+		e2elog.Failf("Ingress failed to acquire an IP address within %v", framework.LoadBalancerPollTimeout)
 	}
 	responses := sets.NewString()
 	timeoutClient := &http.Client{Timeout: IngressReqTimeout}
@@ -858,7 +858,7 @@ func (cont *NginxIngressController) Init() {
 	pods, err := cont.Client.CoreV1().Pods(cont.Ns).List(metav1.ListOptions{LabelSelector: sel.String()})
 	framework.ExpectNoError(err)
 	if len(pods.Items) == 0 {
-		framework.Failf("Failed to find nginx ingress controller pods with selector %v", sel)
+		e2elog.Failf("Failed to find nginx ingress controller pods with selector %v", sel)
 	}
 	cont.pod = &pods.Items[0]
 	cont.externalIP, err = framework.GetHostExternalAddress(cont.Client, cont.pod)
@@ -43,7 +43,7 @@ func MakeFirewallNameForLBService(name string) string {
 // ConstructFirewallForLBService returns the expected GCE firewall rule for a loadbalancer type service
 func ConstructFirewallForLBService(svc *v1.Service, nodeTag string) *compute.Firewall {
 	if svc.Spec.Type != v1.ServiceTypeLoadBalancer {
-		framework.Failf("can not construct firewall rule for non-loadbalancer type service")
+		e2elog.Failf("can not construct firewall rule for non-loadbalancer type service")
 	}
 	fw := compute.Firewall{}
 	fw.Name = MakeFirewallNameForLBService(cloudprovider.DefaultLoadBalancerName(svc))
@@ -71,7 +71,7 @@ func MakeHealthCheckFirewallNameForLBService(clusterID, name string, isNodesHeal
 // ConstructHealthCheckFirewallForLBService returns the expected GCE firewall rule for a loadbalancer type service
 func ConstructHealthCheckFirewallForLBService(clusterID string, svc *v1.Service, nodeTag string, isNodesHealthCheck bool) *compute.Firewall {
 	if svc.Spec.Type != v1.ServiceTypeLoadBalancer {
-		framework.Failf("can not construct firewall rule for non-loadbalancer type service")
+		e2elog.Failf("can not construct firewall rule for non-loadbalancer type service")
 	}
 	fw := compute.Firewall{}
 	fw.Name = MakeHealthCheckFirewallNameForLBService(clusterID, cloudprovider.DefaultLoadBalancerName(svc), isNodesHealthCheck)
@@ -262,7 +262,7 @@ func (p *Provider) CleanupServiceResources(c clientset.Interface, loadBalancerNa
 		}
 		return true, nil
 	}); pollErr != nil {
-		framework.Failf("Failed to cleanup service GCE resources.")
+		e2elog.Failf("Failed to cleanup service GCE resources.")
 	}
 }
 
@@ -332,7 +332,7 @@ func GetInstanceTags(cloudConfig framework.CloudConfig, instanceName string) *co
 	res, err := gceCloud.ComputeServices().GA.Instances.Get(cloudConfig.ProjectID, cloudConfig.Zone,
 		instanceName).Do()
 	if err != nil {
-		framework.Failf("Failed to get instance tags for %v: %v", instanceName, err)
+		e2elog.Failf("Failed to get instance tags for %v: %v", instanceName, err)
 	}
 	return res.Tags
 }
@@ -346,7 +346,7 @@ func SetInstanceTags(cloudConfig framework.CloudConfig, instanceName, zone strin
 		cloudConfig.ProjectID, zone, instanceName,
 		&compute.Tags{Fingerprint: resTags.Fingerprint, Items: tags}).Do()
 	if err != nil {
-		framework.Failf("failed to set instance tags: %v", err)
+		e2elog.Failf("failed to set instance tags: %v", err)
 	}
 	e2elog.Logf("Sent request to set tags %v on instance: %v", tags, instanceName)
 	return resTags.Items
@@ -788,12 +788,12 @@ func (cont *IngressController) CreateStaticIP(name string) string {
 				e2elog.Logf("Failed to delete static ip %v: %v", name, delErr)
 			}
 		}
-		framework.Failf("Failed to allocate static ip %v: %v", name, err)
+		e2elog.Failf("Failed to allocate static ip %v: %v", name, err)
 	}
 
 	ip, err := gceCloud.GetGlobalAddress(name)
 	if err != nil {
-		framework.Failf("Failed to get newly created static ip %v: %v", name, err)
+		e2elog.Failf("Failed to get newly created static ip %v: %v", name, err)
 	}
 
 	cont.staticIPName = ip.Name
@@ -66,7 +66,7 @@ var _ = ginkgo.Describe("Recreate [Feature:Recreate]", func() {
 		}
 
 		if !e2epod.CheckPodsRunningReadyOrSucceeded(f.ClientSet, systemNamespace, originalPodNames, framework.PodReadyBeforeTimeout) {
-			framework.Failf("At least one pod wasn't running and ready or succeeded at test start.")
+			e2elog.Failf("At least one pod wasn't running and ready or succeeded at test start.")
 		}
 
 	})
@@ -97,12 +97,12 @@ var _ = ginkgo.Describe("Recreate [Feature:Recreate]", func() {
 func testRecreate(c clientset.Interface, ps *testutils.PodStore, systemNamespace string, nodes []v1.Node, podNames []string) {
 	err := RecreateNodes(c, nodes)
 	if err != nil {
-		framework.Failf("Test failed; failed to start the restart instance group command.")
+		e2elog.Failf("Test failed; failed to start the restart instance group command.")
 	}
 
 	err = WaitForNodeBootIdsToChange(c, nodes, framework.RecreateNodeReadyAgainTimeout)
 	if err != nil {
-		framework.Failf("Test failed; failed to recreate at least one node in %v.", framework.RecreateNodeReadyAgainTimeout)
+		e2elog.Failf("Test failed; failed to recreate at least one node in %v.", framework.RecreateNodeReadyAgainTimeout)
 	}
 
 	nodesAfter, err := e2enode.CheckReady(c, len(nodes), framework.RestartNodeReadyAgainTimeout)
@@ -110,7 +110,7 @@ func testRecreate(c clientset.Interface, ps *testutils.PodStore, systemNamespace
 	e2elog.Logf("Got the following nodes after recreate: %v", nodeNames(nodesAfter))
 
 	if len(nodes) != len(nodesAfter) {
-		framework.Failf("Had %d nodes before nodes were recreated, but now only have %d",
+		e2elog.Failf("Had %d nodes before nodes were recreated, but now only have %d",
 			len(nodes), len(nodesAfter))
 	}
 
@@ -120,6 +120,6 @@ func testRecreate(c clientset.Interface, ps *testutils.PodStore, systemNamespace
 	framework.ExpectNoError(err)
 	remaining := framework.RestartPodReadyAgainTimeout - time.Since(podCheckStart)
 	if !e2epod.CheckPodsRunningReadyOrSucceeded(c, systemNamespace, podNamesAfter, remaining) {
-		framework.Failf("At least one pod wasn't running and ready after the restart.")
+		e2elog.Failf("At least one pod wasn't running and ready after the restart.")
 	}
 }
@@ -247,7 +247,7 @@ func NewRBDServer(cs clientset.Interface, namespace string) (config TestConfig,
 
 	secret, err := cs.CoreV1().Secrets(config.Namespace).Create(secret)
 	if err != nil {
-		framework.Failf("Failed to create secrets for Ceph RBD: %v", err)
+		e2elog.Failf("Failed to create secrets for Ceph RBD: %v", err)
 	}
 
 	return config, pod, secret, ip
@@ -485,7 +485,7 @@ func TestVolumeClient(client clientset.Interface, config TestConfig, fsGroup *in
 	}
 	clientPod, err := podsNamespacer.Create(clientPod)
 	if err != nil {
-		framework.Failf("Failed to create %s pod: %v", clientPod.Name, err)
+		e2elog.Failf("Failed to create %s pod: %v", clientPod.Name, err)
 
 	}
 	framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(client, clientPod))