Merge pull request #81982 from s-ito-ts/81427_e2e_scheduling

Use log functions of core framework on test/e2e/scheduling
Authored by Kubernetes Prow Robot on 2019-09-11 02:49:25 -07:00; committed by GitHub.
10 changed files with 104 additions and 114 deletions
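
Every hunk below applies the same mechanical swap: drop the dedicated e2elog import and call the identically named printf-style helpers that the core framework package already exposes. A minimal sketch of the before/after shape of the change, assuming a test file in this package (the helper name here is illustrative, not from the diff):

    package scheduling

    import (
        "k8s.io/kubernetes/test/e2e/framework"
        // Before this PR the file also imported:
        //   e2elog "k8s.io/kubernetes/test/e2e/framework/log"
    )

    // logNodeCount is a hypothetical helper showing the swap.
    func logNodeCount(n int) {
        // Before: e2elog.Logf("found %d schedulable nodes", n)
        framework.Logf("found %d schedulable nodes", n)
    }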

@@ -36,7 +36,6 @@ import (
     priorityutil "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util"
     "k8s.io/kubernetes/test/e2e/common"
     "k8s.io/kubernetes/test/e2e/framework"
-    e2elog "k8s.io/kubernetes/test/e2e/framework/log"
     e2enode "k8s.io/kubernetes/test/e2e/framework/node"
     e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
     testutils "k8s.io/kubernetes/test/utils"
@@ -82,7 +81,7 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() {
         e2enode.WaitForTotalHealthy(cs, time.Minute)
         _, nodeList, err = e2enode.GetMasterAndWorkerNodes(cs)
         if err != nil {
-            e2elog.Logf("Unexpected error occurred: %v", err)
+            framework.Logf("Unexpected error occurred: %v", err)
         }
         // TODO: write a wrapper for ExpectNoErrorWithOffset()
         framework.ExpectNoErrorWithOffset(0, err)
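
The TODO in this hunk asks for a wrapper around ExpectNoErrorWithOffset(). A sketch of what such a wrapper might look like, assuming the usual convention that the offset counts stack frames to skip so the failure is attributed to the caller rather than to the helper itself (the wrapper name is hypothetical):

    // expectNoError is a hypothetical wrapper; offset 1 skips this frame,
    // so a failure is reported at the caller's line, not inside the helper.
    func expectNoError(err error) {
        framework.ExpectNoErrorWithOffset(1, err)
    }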
@@ -187,7 +186,7 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() {
        defer func() {
            // Resize the replication controller to zero to get rid of pods.
            if err := framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, rc.Name); err != nil {
-                framework.Logf("Failed to cleanup replication controller %v: %v.", rc.Name, err)
+                framework.Logf("Failed to cleanup replication controller %v: %v.", rc.Name, err)
            }
        }()
@@ -321,7 +320,7 @@ func createBalancedPodForNodes(f *framework.Framework, cs clientset.Interface, n
                Requests: needCreateResource,
            },
            NodeName: node.Name,
-        }), true, e2elog.Logf)
+        }), true, framework.Logf)

        if err != nil {
            return err
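
The `}), true, framework.Logf)` line works because the call site (not fully visible in this hunk) takes a log function as its final parameter; framework.Logf and e2elog.Logf share the printf-style signature, so only the identifier changes. A self-contained sketch of the pattern, with illustrative names:

    package main

    import "fmt"

    // logFunc matches the shape of framework.Logf / e2elog.Logf.
    type logFunc func(format string, args ...interface{})

    // startAndWait is a hypothetical stand-in for the call in the hunk above.
    func startAndWait(podName string, waitForRunning bool, log logFunc) error {
        log("starting pod %q (waitForRunning=%v)", podName, waitForRunning)
        return nil
    }

    func main() {
        // Any printf-style logger fits, which is all the migration relies on.
        _ = startAndWait("balanced-pod", true, func(format string, args ...interface{}) {
            fmt.Printf(format+"\n", args...)
        })
    }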
@@ -337,16 +336,16 @@ func createBalancedPodForNodes(f *framework.Framework, cs clientset.Interface, n
 }

 func computeCPUMemFraction(cs clientset.Interface, node v1.Node, resource *v1.ResourceRequirements) (float64, float64) {
-    e2elog.Logf("ComputeCPUMemFraction for node: %v", node.Name)
+    framework.Logf("ComputeCPUMemFraction for node: %v", node.Name)
     totalRequestedCPUResource := resource.Requests.Cpu().MilliValue()
     totalRequestedMemResource := resource.Requests.Memory().Value()
     allpods, err := cs.CoreV1().Pods(metav1.NamespaceAll).List(metav1.ListOptions{})
     if err != nil {
-        e2elog.Failf("Expect error of invalid, got : %v", err)
+        framework.Failf("Expect error of invalid, got : %v", err)
     }
     for _, pod := range allpods.Items {
         if pod.Spec.NodeName == node.Name {
-            e2elog.Logf("Pod for on the node: %v, Cpu: %v, Mem: %v", pod.Name, getNonZeroRequests(&pod).MilliCPU, getNonZeroRequests(&pod).Memory)
+            framework.Logf("Pod for on the node: %v, Cpu: %v, Mem: %v", pod.Name, getNonZeroRequests(&pod).MilliCPU, getNonZeroRequests(&pod).Memory)
             // Ignore best effort pods while computing fractions as they won't be taken in account by scheduler.
             if v1qos.GetPodQOS(&pod) == v1.PodQOSBestEffort {
                 continue
@@ -372,8 +371,8 @@ func computeCPUMemFraction(cs clientset.Interface, node v1.Node, resource *v1.Re
         memFraction = floatOne
     }

-    e2elog.Logf("Node: %v, totalRequestedCPUResource: %v, cpuAllocatableMil: %v, cpuFraction: %v", node.Name, totalRequestedCPUResource, cpuAllocatableMil, cpuFraction)
-    e2elog.Logf("Node: %v, totalRequestedMemResource: %v, memAllocatableVal: %v, memFraction: %v", node.Name, totalRequestedMemResource, memAllocatableVal, memFraction)
+    framework.Logf("Node: %v, totalRequestedCPUResource: %v, cpuAllocatableMil: %v, cpuFraction: %v", node.Name, totalRequestedCPUResource, cpuAllocatableMil, cpuFraction)
+    framework.Logf("Node: %v, totalRequestedMemResource: %v, memAllocatableVal: %v, memFraction: %v", node.Name, totalRequestedMemResource, memAllocatableVal, memFraction)

     return cpuFraction, memFraction
 }
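
For context, the quantity the log lines above report is a plain requested/allocatable ratio, capped at 1.0 (the `memFraction = floatOne` branch): requests of non-BestEffort pods on the node are summed, since the comment in the hunk notes the scheduler ignores best-effort pods. A compact sketch with illustrative names, not the test's actual helper:

    package scheduling

    // fraction mirrors the capped ratio computed in computeCPUMemFraction:
    // summed non-BestEffort pod requests divided by the node's allocatable amount.
    func fraction(requested, allocatable int64) float64 {
        f := float64(requested) / float64(allocatable)
        if f > 1.0 {
            f = 1.0 // cap at 1, as the memFraction = floatOne branch does
        }
        return f
    }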