remove dot imports in e2e/node

danielqsj
2019-05-10 12:32:08 +08:00
parent d01c015346
commit 087bc1369e
13 changed files with 200 additions and 200 deletions
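The pattern applied throughout the commit is sketched below on a hypothetical, minimal spec (package and test names are illustrative, not taken from this repository): the Ginkgo and Gomega dot imports become regular named imports, and every DSL call is qualified with its package name.

// Before: dot imports pull the Ginkgo/Gomega DSL into the file's namespace,
// so calls like It(...) and Expect(...) need no package prefix.
//   . "github.com/onsi/ginkgo"
//   . "github.com/onsi/gomega"

// After: named imports, with every call qualified explicitly.
package example_test

import (
	"github.com/onsi/ginkgo"
	"github.com/onsi/gomega"
)

var _ = ginkgo.Describe("example", func() {
	ginkgo.It("adds numbers", func() {
		gomega.Expect(1 + 1).To(gomega.Equal(2))
	})
})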

@@ -32,8 +32,8 @@ import (
e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
testutils "k8s.io/kubernetes/test/utils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
)
// This test checks if node-problem-detector (NPD) runs fine without error on
@@ -45,7 +45,7 @@ var _ = SIGDescribe("NodeProblemDetector [DisabledForLargeClusters]", func() {
)
f := framework.NewDefaultFramework("node-problem-detector")
- BeforeEach(func() {
+ ginkgo.BeforeEach(func() {
framework.SkipUnlessSSHKeyPresent()
framework.SkipUnlessProviderIs(framework.ProvidersWithSSH...)
framework.SkipUnlessProviderIs("gce", "gke")
@@ -53,10 +53,10 @@ var _ = SIGDescribe("NodeProblemDetector [DisabledForLargeClusters]", func() {
framework.WaitForAllNodesHealthy(f.ClientSet, time.Minute)
})
It("should run without error", func() {
By("Getting all nodes and their SSH-able IP addresses")
ginkgo.It("should run without error", func() {
ginkgo.By("Getting all nodes and their SSH-able IP addresses")
nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
- Expect(len(nodes.Items)).NotTo(BeZero())
+ gomega.Expect(len(nodes.Items)).NotTo(gomega.BeZero())
hosts := []string{}
for _, node := range nodes.Items {
for _, addr := range node.Status.Addresses {
@@ -66,7 +66,7 @@ var _ = SIGDescribe("NodeProblemDetector [DisabledForLargeClusters]", func() {
}
}
}
- Expect(len(hosts)).To(Equal(len(nodes.Items)))
+ gomega.Expect(len(hosts)).To(gomega.Equal(len(nodes.Items)))
isStandaloneMode := make(map[string]bool)
cpuUsageStats := make(map[string][]float64)
@@ -84,22 +84,22 @@ var _ = SIGDescribe("NodeProblemDetector [DisabledForLargeClusters]", func() {
result, err := e2essh.SSH(cmd, host, framework.TestContext.Provider)
isStandaloneMode[host] = (err == nil && result.Code == 0)
By(fmt.Sprintf("Check node %q has node-problem-detector process", host))
ginkgo.By(fmt.Sprintf("Check node %q has node-problem-detector process", host))
// Using brackets "[n]" is a trick to prevent grep command itself from
// showing up, because string text "[n]ode-problem-detector" does not
// match regular expression "[n]ode-problem-detector".
psCmd := "ps aux | grep [n]ode-problem-detector"
result, err = e2essh.SSH(psCmd, host, framework.TestContext.Provider)
framework.ExpectNoError(err)
- Expect(result.Code).To(BeZero())
- Expect(result.Stdout).To(ContainSubstring("node-problem-detector"))
+ gomega.Expect(result.Code).To(gomega.BeZero())
+ gomega.Expect(result.Stdout).To(gomega.ContainSubstring("node-problem-detector"))
- By(fmt.Sprintf("Check node-problem-detector is running fine on node %q", host))
+ ginkgo.By(fmt.Sprintf("Check node-problem-detector is running fine on node %q", host))
journalctlCmd := "sudo journalctl -u node-problem-detector"
result, err = e2essh.SSH(journalctlCmd, host, framework.TestContext.Provider)
framework.ExpectNoError(err)
- Expect(result.Code).To(BeZero())
- Expect(result.Stdout).NotTo(ContainSubstring("node-problem-detector.service: Failed"))
+ gomega.Expect(result.Code).To(gomega.BeZero())
+ gomega.Expect(result.Stdout).NotTo(gomega.ContainSubstring("node-problem-detector.service: Failed"))
if isStandaloneMode[host] {
cpuUsage, uptime := getCpuStat(f, host)
@@ -107,29 +107,29 @@ var _ = SIGDescribe("NodeProblemDetector [DisabledForLargeClusters]", func() {
uptimeStats[host] = append(uptimeStats[host], uptime)
}
By(fmt.Sprintf("Inject log to trigger AUFSUmountHung on node %q", host))
ginkgo.By(fmt.Sprintf("Inject log to trigger AUFSUmountHung on node %q", host))
log := "INFO: task umount.aufs:21568 blocked for more than 120 seconds."
injectLogCmd := "sudo sh -c \"echo 'kernel: " + log + "' >> /dev/kmsg\""
_, err = e2essh.SSH(injectLogCmd, host, framework.TestContext.Provider)
framework.ExpectNoError(err)
- Expect(result.Code).To(BeZero())
+ gomega.Expect(result.Code).To(gomega.BeZero())
}
By("Check node-problem-detector can post conditions and events to API server")
ginkgo.By("Check node-problem-detector can post conditions and events to API server")
for _, node := range nodes.Items {
By(fmt.Sprintf("Check node-problem-detector posted KernelDeadlock condition on node %q", node.Name))
Eventually(func() error {
ginkgo.By(fmt.Sprintf("Check node-problem-detector posted KernelDeadlock condition on node %q", node.Name))
gomega.Eventually(func() error {
return verifyNodeCondition(f, "KernelDeadlock", v1.ConditionTrue, "AUFSUmountHung", node.Name)
- }, pollTimeout, pollInterval).Should(Succeed())
+ }, pollTimeout, pollInterval).Should(gomega.Succeed())
- By(fmt.Sprintf("Check node-problem-detector posted AUFSUmountHung event on node %q", node.Name))
+ ginkgo.By(fmt.Sprintf("Check node-problem-detector posted AUFSUmountHung event on node %q", node.Name))
eventListOptions := metav1.ListOptions{FieldSelector: fields.Set{"involvedObject.kind": "Node"}.AsSelector().String()}
- Eventually(func() error {
+ gomega.Eventually(func() error {
return verifyEvents(f, eventListOptions, 1, "AUFSUmountHung", node.Name)
- }, pollTimeout, pollInterval).Should(Succeed())
+ }, pollTimeout, pollInterval).Should(gomega.Succeed())
}
By("Gather node-problem-detector cpu and memory stats")
ginkgo.By("Gather node-problem-detector cpu and memory stats")
numIterations := 60
for i := 1; i <= numIterations; i++ {
for j, host := range hosts {
@@ -217,22 +217,22 @@ func getMemoryStat(f *framework.Framework, host string) (rss, workingSet float64
memCmd := "cat /sys/fs/cgroup/memory/system.slice/node-problem-detector.service/memory.usage_in_bytes && cat /sys/fs/cgroup/memory/system.slice/node-problem-detector.service/memory.stat"
result, err := e2essh.SSH(memCmd, host, framework.TestContext.Provider)
framework.ExpectNoError(err)
- Expect(result.Code).To(BeZero())
+ gomega.Expect(result.Code).To(gomega.BeZero())
lines := strings.Split(result.Stdout, "\n")
memoryUsage, err := strconv.ParseFloat(lines[0], 64)
- Expect(err).To(BeNil())
+ gomega.Expect(err).To(gomega.BeNil())
var totalInactiveFile float64
for _, line := range lines[1:] {
tokens := strings.Split(line, " ")
if tokens[0] == "total_rss" {
rss, err = strconv.ParseFloat(tokens[1], 64)
- Expect(err).To(BeNil())
+ gomega.Expect(err).To(gomega.BeNil())
}
if tokens[0] == "total_inactive_file" {
totalInactiveFile, err = strconv.ParseFloat(tokens[1], 64)
- Expect(err).To(BeNil())
+ gomega.Expect(err).To(gomega.BeNil())
}
}
@@ -253,7 +253,7 @@ func getCpuStat(f *framework.Framework, host string) (usage, uptime float64) {
cpuCmd := "cat /sys/fs/cgroup/cpu/system.slice/node-problem-detector.service/cpuacct.usage && cat /proc/uptime | awk '{print $1}'"
result, err := e2essh.SSH(cpuCmd, host, framework.TestContext.Provider)
framework.ExpectNoError(err)
- Expect(result.Code).To(BeZero())
+ gomega.Expect(result.Code).To(gomega.BeZero())
lines := strings.Split(result.Stdout, "\n")
usage, err = strconv.ParseFloat(lines[0], 64)
@@ -279,6 +279,6 @@ func getNpdPodStat(f *framework.Framework, nodeName string) (cpuUsage, rss, work
hasNpdPod = true
break
}
- Expect(hasNpdPod).To(BeTrue())
+ gomega.Expect(hasNpdPod).To(gomega.BeTrue())
return
}
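One detail worth noting from the process check in the diff above: the ps output is filtered with "grep [n]ode-problem-detector" so that the grep command itself never matches, because the literal text "[n]ode-problem-detector" on grep's own command line does not satisfy the regular expression [n]ode-problem-detector. A minimal sketch of the same check factored into a helper follows; the helper name is hypothetical, the real test calls e2essh.SSH inline.

package example_test

import (
	"strings"

	e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
)

// hasNpdProcess reports whether a node-problem-detector process shows up in
// "ps aux" on the given host, using the bracket trick to exclude grep itself.
func hasNpdProcess(host, provider string) (bool, error) {
	result, err := e2essh.SSH("ps aux | grep [n]ode-problem-detector", host, provider)
	if err != nil {
		return false, err
	}
	// Exit code 0 means grep found at least one matching line.
	return result.Code == 0 && strings.Contains(result.Stdout, "node-problem-detector"), nil
}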