Merge pull request #82291 from danwinship/getnodes

migrate from framework.GetReadySchedulableNodesOrDie to e2enode.GetReadySchedulableNodes
Kubernetes Prow Robot 2019-09-25 11:04:43 -07:00 committed by GitHub
commit df271a1799
GPG Key ID: 4AEE18F83AFDEB23
58 changed files with 308 additions and 239 deletions
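For orientation, the change swaps the old "OrDie" framework helper (which handled failures internally) for the e2enode helper that returns an error for the caller to check. A minimal before/after sketch of the calling pattern, using names exactly as they appear in the changed files below:

Before:
nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)

After:
nodes, err := e2enode.GetReadySchedulableNodes(f.ClientSet)
framework.ExpectNoError(err)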

View File

@@ -66,6 +66,7 @@ go_library(
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/auth:go_default_library",
"//test/e2e/framework/log:go_default_library",
+ "//test/e2e/framework/node:go_default_library",
"//test/e2e/framework/pod:go_default_library",
"//test/e2e/framework/providers/aws:go_default_library",
"//test/e2e/framework/providers/azure:go_default_library",

View File

@@ -84,6 +84,7 @@ go_library(
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/deployment:go_default_library",
"//test/e2e/framework/metrics:go_default_library",
+ "//test/e2e/framework/node:go_default_library",
"//test/e2e/framework/pod:go_default_library",
"//test/e2e/framework/ssh:go_default_library",
"//test/utils:go_default_library",

View File

@@ -39,6 +39,7 @@ import (
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
+ e2enode "k8s.io/kubernetes/test/e2e/framework/node"
"github.com/onsi/ginkgo"
imageutils "k8s.io/kubernetes/test/utils/image"
@@ -48,8 +49,11 @@ import (
// with some wiggle room, to prevent pods being unable to schedule due
// to max pod constraints.
func estimateMaximumPods(c clientset.Interface, min, max int32) int32 {
+ nodes, err := e2enode.GetReadySchedulableNodes(c)
+ framework.ExpectNoError(err)
availablePods := int32(0)
- for _, node := range framework.GetReadySchedulableNodesOrDie(c).Items {
+ for _, node := range nodes.Items {
if q, ok := node.Status.Allocatable["pods"]; ok {
if num, ok := q.AsInt64(); ok {
availablePods += int32(num)

View File

@@ -37,6 +37,7 @@ import (
"k8s.io/kubernetes/pkg/controller/daemon"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
"k8s.io/kubernetes/test/e2e/framework"
+ e2enode "k8s.io/kubernetes/test/e2e/framework/node"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
@@ -159,9 +160,9 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
framework.ExpectNoError(err, "error waiting for daemon pods to be running on no nodes")
ginkgo.By("Change node label to blue, check that daemon pod is launched.")
- nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
- gomega.Expect(len(nodeList.Items)).To(gomega.BeNumerically(">", 0))
- newNode, err := setDaemonSetNodeLabels(c, nodeList.Items[0].Name, nodeSelector)
+ node, err := e2enode.GetRandomReadySchedulableNode(f.ClientSet)
+ framework.ExpectNoError(err)
+ newNode, err := setDaemonSetNodeLabels(c, node.Name, nodeSelector)
framework.ExpectNoError(err, "error setting labels on node")
daemonSetLabels, _ := separateDaemonSetNodeLabels(newNode.Labels)
framework.ExpectEqual(len(daemonSetLabels), 1)
@@ -172,7 +173,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
ginkgo.By("Update the node label to green, and wait for daemons to be unscheduled")
nodeSelector[daemonsetColorLabel] = "green"
- greenNode, err := setDaemonSetNodeLabels(c, nodeList.Items[0].Name, nodeSelector)
+ greenNode, err := setDaemonSetNodeLabels(c, node.Name, nodeSelector)
framework.ExpectNoError(err, "error removing labels on node")
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, ds))
framework.ExpectNoError(err, "error waiting for daemon pod to not be running on nodes")
@@ -222,9 +223,9 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
framework.ExpectNoError(err, "error waiting for daemon pods to be running on no nodes")
ginkgo.By("Change node label to blue, check that daemon pod is launched.")
- nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
- gomega.Expect(len(nodeList.Items)).To(gomega.BeNumerically(">", 0))
- newNode, err := setDaemonSetNodeLabels(c, nodeList.Items[0].Name, nodeSelector)
+ node, err := e2enode.GetRandomReadySchedulableNode(f.ClientSet)
+ framework.ExpectNoError(err)
+ newNode, err := setDaemonSetNodeLabels(c, node.Name, nodeSelector)
framework.ExpectNoError(err, "error setting labels on node")
daemonSetLabels, _ := separateDaemonSetNodeLabels(newNode.Labels)
framework.ExpectEqual(len(daemonSetLabels), 1)
@@ -234,7 +235,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
framework.ExpectNoError(err)
ginkgo.By("Remove the node label and wait for daemons to be unscheduled")
- _, err = setDaemonSetNodeLabels(c, nodeList.Items[0].Name, map[string]string{})
+ _, err = setDaemonSetNodeLabels(c, node.Name, map[string]string{})
framework.ExpectNoError(err, "error removing labels on node")
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, ds))
framework.ExpectNoError(err, "error waiting for daemon pod to not be running on nodes")
@@ -381,13 +382,14 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
rollback of updates to a DaemonSet.
*/
framework.ConformanceIt("should rollback without unnecessary restarts", func() {
- schedulableNodes := framework.GetReadySchedulableNodesOrDie(c)
+ schedulableNodes, err := e2enode.GetReadySchedulableNodes(c)
+ framework.ExpectNoError(err)
gomega.Expect(len(schedulableNodes.Items)).To(gomega.BeNumerically(">", 1), "Conformance test suite needs a cluster with at least 2 nodes.")
framework.Logf("Create a RollingUpdate DaemonSet")
label := map[string]string{daemonsetNameLabel: dsName}
ds := newDaemonSet(dsName, image, label)
ds.Spec.UpdateStrategy = appsv1.DaemonSetUpdateStrategy{Type: appsv1.RollingUpdateDaemonSetStrategyType}
- ds, err := c.AppsV1().DaemonSets(ns).Create(ds)
+ ds, err = c.AppsV1().DaemonSets(ns).Create(ds)
framework.ExpectNoError(err)
framework.Logf("Check that daemon pods launch on every node of the cluster")
@@ -420,7 +422,8 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
framework.Failf("unexpected pod found, image = %s", image)
}
}
- schedulableNodes = framework.GetReadySchedulableNodesOrDie(c)
+ schedulableNodes, err = e2enode.GetReadySchedulableNodes(c)
+ framework.ExpectNoError(err)
if len(schedulableNodes.Items) < 2 {
framework.ExpectEqual(len(existingPods), 0)
} else {
@@ -505,7 +508,10 @@ func separateDaemonSetNodeLabels(labels map[string]string) (map[string]string, m
}

func clearDaemonSetNodeLabels(c clientset.Interface) error {
- nodeList := framework.GetReadySchedulableNodesOrDie(c)
+ nodeList, err := e2enode.GetReadySchedulableNodes(c)
+ if err != nil {
+ return err
+ }
for _, node := range nodeList.Items {
_, err := setDaemonSetNodeLabels(c, node.Name, map[string]string{})
if err != nil {

View File

@@ -492,7 +492,8 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
framework.SkipUnlessSSHKeyPresent()
ginkgo.By("choose a node - we will block all network traffic on this node")
var podOpts metav1.ListOptions
- nodes := framework.GetReadySchedulableNodesOrDie(c)
+ nodes, err := e2enode.GetReadySchedulableNodes(c)
+ framework.ExpectNoError(err)
e2enode.Filter(nodes, func(node v1.Node) bool {
if !e2enode.IsConditionSetAsExpected(&node, v1.NodeReady, true) {
return false

View File

@@ -34,6 +34,7 @@ import (
clientset "k8s.io/client-go/kubernetes"
watchtools "k8s.io/client-go/tools/watch"
"k8s.io/kubernetes/test/e2e/framework"
+ e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
@@ -679,8 +680,8 @@ var _ = SIGDescribe("StatefulSet", func() {
podName := "test-pod"
statefulPodName := ssName + "-0"
ginkgo.By("Looking for a node to schedule stateful set and pod")
- nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
- node := nodes.Items[0]
+ node, err := e2enode.GetRandomReadySchedulableNode(f.ClientSet)
+ framework.ExpectNoError(err)
ginkgo.By("Creating pod with conflicting port in namespace " + f.Namespace.Name)
conflictingPort := v1.ContainerPort{HostPort: 21017, ContainerPort: 21017, Name: "conflict"}
@@ -699,7 +700,7 @@ var _ = SIGDescribe("StatefulSet", func() {
NodeName: node.Name,
},
}
- pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
+ pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
framework.ExpectNoError(err)
ginkgo.By("Creating statefulset with conflicting port in namespace " + f.Namespace.Name)

View File

@@ -62,7 +62,8 @@ var _ = SIGDescribe("[Feature:ClusterSizeAutoscalingScaleUp] [Slow] Autoscaling"
}
// Make sure all nodes are schedulable, otherwise we are in some kind of a problem state.
- nodes = framework.GetReadySchedulableNodesOrDie(f.ClientSet)
+ nodes, err = e2enode.GetReadySchedulableNodes(f.ClientSet)
+ framework.ExpectNoError(err)
schedulableCount := len(nodes.Items)
framework.ExpectEqual(schedulableCount, nodeGroupSize, "not all nodes are schedulable")
})

View File

@@ -36,7 +36,6 @@ import (
imageutils "k8s.io/kubernetes/test/utils/image"
"github.com/onsi/ginkgo"
- "github.com/onsi/gomega"
)

const (
@@ -90,9 +89,9 @@ var _ = framework.KubeDescribe("Cluster size autoscaler scalability [Slow]", fun
framework.ExpectNoError(e2enode.WaitForReadyNodes(c, sum, scaleUpTimeout))
- nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
+ nodes, err := e2enode.GetReadySchedulableNodes(f.ClientSet)
+ framework.ExpectNoError(err)
nodeCount = len(nodes.Items)
- gomega.Expect(nodeCount).NotTo(gomega.BeZero())
cpu := nodes.Items[0].Status.Capacity[v1.ResourceCPU]
mem := nodes.Items[0].Status.Capacity[v1.ResourceMemory]
coresPerNode = int((&cpu).MilliValue() / 1000)
@@ -324,7 +323,8 @@ var _ = framework.KubeDescribe("Cluster size autoscaler scalability [Slow]", fun
time.Sleep(scaleDownTimeout)
ginkgo.By("Checking if the number of nodes is as expected")
- nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
+ nodes, err := e2enode.GetReadySchedulableNodes(f.ClientSet)
+ framework.ExpectNoError(err)
klog.Infof("Nodes: %v, expected: %v", len(nodes.Items), totalNodes)
framework.ExpectEqual(len(nodes.Items), totalNodes)
})

View File

@@ -111,7 +111,8 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
// Give instances time to spin up
framework.ExpectNoError(e2enode.WaitForReadyNodes(c, sum, scaleUpTimeout))
- nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
+ nodes, err := e2enode.GetReadySchedulableNodes(f.ClientSet)
+ framework.ExpectNoError(err)
nodeCount = len(nodes.Items)
coreCount = 0
for _, node := range nodes.Items {
@@ -363,7 +364,9 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
framework.ExpectEqual(status.timestamp.Add(freshStatusLimit).Before(time.Now()), false)
framework.ExpectEqual(status.status, caNoScaleUpStatus)
framework.ExpectEqual(status.ready, status.target)
- framework.ExpectEqual(len(framework.GetReadySchedulableNodesOrDie(f.ClientSet).Items), status.target+unmanagedNodes)
+ nodes, err := e2enode.GetReadySchedulableNodes(f.ClientSet)
+ framework.ExpectNoError(err)
+ framework.ExpectEqual(len(nodes.Items), status.target+unmanagedNodes)
})

ginkgo.It("should increase cluster size if pending pods are small and there is another node pool that is not autoscaled [Feature:ClusterSizeAutoscalingScaleUp]", func() {
@@ -723,7 +726,8 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
runDrainTest(f, originalSizes, f.Namespace.Name, 1, 0, func(increasedSize int) {
ginkgo.By("No nodes should be removed")
time.Sleep(scaleDownTimeout)
- nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
+ nodes, err := e2enode.GetReadySchedulableNodes(f.ClientSet)
+ framework.ExpectNoError(err)
framework.ExpectEqual(len(nodes.Items), increasedSize)
})
})
@@ -919,7 +923,8 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
ReserveMemory(f, "memory-reservation", 100, nodeCount*memAllocatableMb, false, defaultTimeout)
defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "memory-reservation")
time.Sleep(scaleUpTimeout)
- currentNodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
+ currentNodes, err := e2enode.GetReadySchedulableNodes(f.ClientSet)
+ framework.ExpectNoError(err)
framework.Logf("Currently available nodes: %v, nodes available at the start of test: %v, disabled nodes: %v", len(currentNodes.Items), len(nodes.Items), nodesToBreakCount)
framework.ExpectEqual(len(currentNodes.Items), len(nodes.Items)-nodesToBreakCount)
status, err := getClusterwideStatus(c)
@@ -1271,7 +1276,8 @@ func getPoolInitialSize(poolName string) int {
func getPoolSize(f *framework.Framework, poolName string) int {
size := 0
- nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
+ nodeList, err := e2enode.GetReadySchedulableNodes(f.ClientSet)
+ framework.ExpectNoError(err)
for _, node := range nodeList.Items {
if node.Labels[gkeNodepoolNameKey] == poolName {
size++

View File

@@ -33,7 +33,6 @@ import (
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"github.com/onsi/ginkgo"
- "github.com/onsi/gomega"
)

// Constants used in dns-autoscaling test.
@@ -57,11 +56,11 @@ var _ = SIGDescribe("DNS horizontal autoscaling", func() {
framework.SkipUnlessProviderIs("gce", "gke")
c = f.ClientSet
- nodeCount := len(framework.GetReadySchedulableNodesOrDie(c).Items)
- gomega.Expect(nodeCount).NotTo(gomega.BeZero())
+ nodes, err := e2enode.GetReadySchedulableNodes(c)
+ framework.ExpectNoError(err)
+ nodeCount := len(nodes.Items)
ginkgo.By("Collecting original replicas count and DNS scaling params")
- var err error
originDNSReplicasCount, err = getDNSReplicas(c)
framework.ExpectNoError(err)
@@ -236,12 +235,13 @@ func getExpectReplicasFuncLinear(c clientset.Interface, params *DNSParamsLinear)
return func(c clientset.Interface) int {
var replicasFromNodes float64
var replicasFromCores float64
- nodes := framework.GetReadySchedulableNodesOrDie(c).Items
+ nodes, err := e2enode.GetReadySchedulableNodes(c)
+ framework.ExpectNoError(err)
if params.nodesPerReplica > 0 {
- replicasFromNodes = math.Ceil(float64(len(nodes)) / params.nodesPerReplica)
+ replicasFromNodes = math.Ceil(float64(len(nodes.Items)) / params.nodesPerReplica)
}
if params.coresPerReplica > 0 {
- replicasFromCores = math.Ceil(float64(getScheduableCores(nodes)) / params.coresPerReplica)
+ replicasFromCores = math.Ceil(float64(getScheduableCores(nodes.Items)) / params.coresPerReplica)
}
return int(math.Max(1.0, math.Max(replicasFromNodes, replicasFromCores)))
}

View File

@@ -43,8 +43,8 @@ var _ = SIGDescribe("[Feature:CloudProvider][Disruptive] Nodes", func() {
ginkgo.It("should be deleted on API server if it doesn't exist in the cloud provider", func() {
ginkgo.By("deleting a node on the cloud provider")
- nodeDeleteCandidates := framework.GetReadySchedulableNodesOrDie(c)
- nodeToDelete := nodeDeleteCandidates.Items[0]
+ nodeToDelete, err := e2enode.GetRandomReadySchedulableNode(c)
+ framework.ExpectNoError(err)
origNodes, err := e2enode.GetReadyNodesIncludingTainted(c)
if err != nil {
@@ -55,7 +55,7 @@ var _ = SIGDescribe("[Feature:CloudProvider][Disruptive] Nodes", func() {
framework.Logf("Original number of ready nodes: %d", len(origNodes.Items))
- err = framework.DeleteNodeOnCloudProvider(&nodeToDelete)
+ err = framework.DeleteNodeOnCloudProvider(nodeToDelete)
if err != nil {
framework.Failf("failed to delete node %q, err: %q", nodeToDelete.Name, err)
}

View File

@@ -41,9 +41,9 @@ var _ = framework.KubeDescribe("NodeLease", func() {
f := framework.NewDefaultFramework("node-lease-test")
ginkgo.BeforeEach(func() {
- nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
- gomega.Expect(len(nodes.Items)).NotTo(gomega.BeZero())
- nodeName = nodes.Items[0].ObjectMeta.Name
+ node, err := e2enode.GetRandomReadySchedulableNode(f.ClientSet)
+ framework.ExpectNoError(err)
+ nodeName = node.Name
})

ginkgo.Context("when the NodeLease feature is enabled", func() {

View File

@@ -50,7 +50,6 @@ go_library(
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/net:go_default_library",
- "//staging/src/k8s.io/apimachinery/pkg/util/rand:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",

View File

@@ -47,6 +47,7 @@ import (
"k8s.io/client-go/restmapper"
scaleclient "k8s.io/client-go/scale"
e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
+ e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2epsp "k8s.io/kubernetes/test/e2e/framework/psp"
testutils "k8s.io/kubernetes/test/utils"
@@ -567,23 +568,21 @@ func (f *Framework) CreateServiceForSimpleApp(contPort, svcPort int, appName str
// CreatePodsPerNodeForSimpleApp creates pods w/ labels. Useful for tests which make a bunch of pods w/o any networking.
func (f *Framework) CreatePodsPerNodeForSimpleApp(appName string, podSpec func(n v1.Node) v1.PodSpec, maxCount int) map[string]string {
- nodes := GetReadySchedulableNodesOrDie(f.ClientSet)
+ nodes, err := e2enode.GetBoundedReadySchedulableNodes(f.ClientSet, maxCount)
+ ExpectNoError(err)
podLabels := map[string]string{
"app": appName + "-pod",
}
for i, node := range nodes.Items {
- // one per node, but no more than maxCount.
- if i <= maxCount {
- Logf("%v/%v : Creating container with label app=%v-pod", i, maxCount, appName)
- _, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(&v1.Pod{
- ObjectMeta: metav1.ObjectMeta{
- Name: fmt.Sprintf(appName+"-pod-%v", i),
- Labels: podLabels,
- },
- Spec: podSpec(node),
- })
- ExpectNoError(err)
- }
+ Logf("%v/%v : Creating container with label app=%v-pod", i, maxCount, appName)
+ _, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(&v1.Pod{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: fmt.Sprintf(appName+"-pod-%v", i),
+ Labels: podLabels,
+ },
+ Spec: podSpec(node),
+ })
+ ExpectNoError(err)
}
return podLabels
}

View File

@@ -10,6 +10,7 @@ go_library(
"//staging/src/k8s.io/apimachinery/pkg/version:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//test/e2e/framework:go_default_library",
+ "//test/e2e/framework/node:go_default_library",
],
)

View File

@@ -27,6 +27,7 @@ import (
"k8s.io/apimachinery/pkg/version"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
+ e2enode "k8s.io/kubernetes/test/e2e/framework/node"
)

// RealVersion turns a version constants into a version string deployable on
@@ -86,7 +87,10 @@ func CheckMasterVersion(c clientset.Interface, want string) error {
// CheckNodesVersions validates the nodes versions
func CheckNodesVersions(cs clientset.Interface, want string) error {
- l := framework.GetReadySchedulableNodesOrDie(cs)
+ l, err := e2enode.GetReadySchedulableNodes(cs)
+ if err != nil {
+ return err
+ }
for _, n := range l.Items {
// We do prefix trimming and then matching because:
// want looks like: 0.19.3-815-g50e67d4

View File

@@ -32,7 +32,6 @@ import (
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/intstr"
utilnet "k8s.io/apimachinery/pkg/util/net"
- "k8s.io/apimachinery/pkg/util/rand"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/apimachinery/pkg/util/wait"
@@ -591,7 +590,8 @@ func (config *NetworkingTestConfig) setup(selector map[string]string) {
ginkgo.By("Getting node addresses")
ExpectNoError(WaitForAllNodesSchedulable(config.f.ClientSet, 10*time.Minute))
- nodeList := GetReadySchedulableNodesOrDie(config.f.ClientSet)
+ nodeList, err := e2enode.GetReadySchedulableNodes(config.f.ClientSet)
+ ExpectNoError(err)
config.ExternalAddrs = e2enode.FirstAddress(nodeList, v1.NodeExternalIP)

SkipUnlessNodeCountIsAtLeast(2)
@@ -620,28 +620,11 @@
}
}

- // shuffleNodes copies nodes from the specified slice into a copy in random
- // order. It returns a new slice.
- func shuffleNodes(nodes []v1.Node) []v1.Node {
- shuffled := make([]v1.Node, len(nodes))
- perm := rand.Perm(len(nodes))
- for i, j := range perm {
- shuffled[j] = nodes[i]
- }
- return shuffled
- }
-
func (config *NetworkingTestConfig) createNetProxyPods(podName string, selector map[string]string) []*v1.Pod {
ExpectNoError(WaitForAllNodesSchedulable(config.f.ClientSet, 10*time.Minute))
- nodeList := GetReadySchedulableNodesOrDie(config.f.ClientSet)
-
- // To make this test work reasonably fast in large clusters,
- // we limit the number of NetProxyPods to no more than
- // maxNetProxyPodsCount on random nodes.
- nodes := shuffleNodes(nodeList.Items)
- if len(nodes) > maxNetProxyPodsCount {
- nodes = nodes[:maxNetProxyPodsCount]
- }
+ nodeList, err := e2enode.GetBoundedReadySchedulableNodes(config.f.ClientSet, maxNetProxyPodsCount)
+ ExpectNoError(err)
+ nodes := nodeList.Items

// create pods, one for each node
createdPods := make([]*v1.Pod, 0, len(nodes))

View File

@@ -15,6 +15,7 @@ go_library(
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library",
+ "//staging/src/k8s.io/apimachinery/pkg/util/rand:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",

View File

@@ -25,6 +25,7 @@ import (
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
+ "k8s.io/apimachinery/pkg/util/rand"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
@@ -317,7 +318,7 @@ func PickIP(c clientset.Interface) (string, error) {
// GetPublicIps returns a public IP list of nodes.
func GetPublicIps(c clientset.Interface) ([]string, error) {
- nodes, err := GetReadySchedulableNodesOrDie(c)
+ nodes, err := GetReadySchedulableNodes(c)
if err != nil {
return nil, fmt.Errorf("get schedulable and ready nodes error: %s", err)
}
@@ -329,25 +330,57 @@ func GetPublicIps(c clientset.Interface) ([]string, error) {
return ips, nil
}

- // GetReadySchedulableNodesOrDie addresses the common use case of getting nodes you can do work on.
+ // GetReadySchedulableNodes addresses the common use case of getting nodes you can do work on.
// 1) Needs to be schedulable.
// 2) Needs to be ready.
// If EITHER 1 or 2 is not true, most tests will want to ignore the node entirely.
+ // If there are no nodes that are both ready and schedulable, this will return an error.
// TODO: remove references in framework/util.go.
- // TODO: remove "OrDie" suffix.
- func GetReadySchedulableNodesOrDie(c clientset.Interface) (nodes *v1.NodeList, err error) {
+ func GetReadySchedulableNodes(c clientset.Interface) (nodes *v1.NodeList, err error) {
nodes, err = checkWaitListSchedulableNodes(c)
if err != nil {
return nil, fmt.Errorf("listing schedulable nodes error: %s", err)
}
- // previous tests may have cause failures of some nodes. Let's skip
- // 'Not Ready' nodes, just in case (there is no need to fail the test).
Filter(nodes, func(node v1.Node) bool {
return IsNodeSchedulable(&node) && IsNodeUntainted(&node)
})
+ if len(nodes.Items) == 0 {
+ return nil, fmt.Errorf("there are currently no ready, schedulable nodes in the cluster")
+ }
return nodes, nil
}

+ // GetBoundedReadySchedulableNodes is like GetReadySchedulableNodes except that it returns
+ // at most maxNodes nodes. Use this to keep your test case from blowing up when run on a
+ // large cluster.
+ func GetBoundedReadySchedulableNodes(c clientset.Interface, maxNodes int) (nodes *v1.NodeList, err error) {
+ nodes, err = GetReadySchedulableNodes(c)
+ if err != nil {
+ return nil, err
+ }
+ if len(nodes.Items) > maxNodes {
+ shuffled := make([]v1.Node, maxNodes)
+ perm := rand.Perm(len(nodes.Items))
+ for i, j := range perm {
+ if j < len(shuffled) {
+ shuffled[j] = nodes.Items[i]
+ }
+ }
+ nodes.Items = shuffled
+ }
+ return nodes, nil
+ }
+
+ // GetRandomReadySchedulableNode gets a single randomly-selected node which is available for
+ // running pods on. If there are no available nodes it will return an error.
+ func GetRandomReadySchedulableNode(c clientset.Interface) (*v1.Node, error) {
+ nodes, err := GetReadySchedulableNodes(c)
+ if err != nil {
+ return nil, err
+ }
+ return &nodes.Items[rand.Intn(len(nodes.Items))], nil
+ }
+
// GetReadyNodesIncludingTainted returns all ready nodes, even those which are tainted.
// There are cases when we care about tainted nodes
// E.g. in tests related to nodes with gpu we care about nodes despite
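
For reference, a minimal usage sketch (not part of the commit) of how a test might call the three helpers defined above from outside the package, assuming the usual e2enode import alias and a clientset c as used elsewhere in this diff:

// Errors are returned to the caller instead of being handled inside the helpers.
nodes, err := e2enode.GetReadySchedulableNodes(c) // every ready, schedulable, untainted node
framework.ExpectNoError(err)
bounded, err := e2enode.GetBoundedReadySchedulableNodes(c, 3) // at most 3 nodes, picked at random
framework.ExpectNoError(err)
node, err := e2enode.GetRandomReadySchedulableNode(c) // one randomly chosen node
framework.ExpectNoError(err)
framework.Logf("total=%d bounded=%d random=%s", len(nodes.Items), len(bounded.Items), node.Name)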

View File

@@ -338,13 +338,13 @@ func (k *NodeKiller) Run(stopCh <-chan struct{}) {
}

func (k *NodeKiller) pickNodes() []v1.Node {
- nodes := GetReadySchedulableNodesOrDie(k.client)
+ nodes, err := e2enode.GetReadySchedulableNodes(k.client)
+ ExpectNoError(err)
numNodes := int(k.config.FailureRatio * float64(len(nodes.Items)))
- shuffledNodes := shuffleNodes(nodes.Items)
- if len(shuffledNodes) > numNodes {
- return shuffledNodes[:numNodes]
- }
- return shuffledNodes
+
+ nodes, err = e2enode.GetBoundedReadySchedulableNodes(k.client, numNodes)
+ ExpectNoError(err)
+ return nodes.Items
}

func (k *NodeKiller) kill(nodes []v1.Node) {

View File

@@ -353,16 +353,6 @@ func SetInstanceTags(cloudConfig framework.CloudConfig, instanceName, zone strin
return resTags.Items
}

- // GetNodeTags gets k8s node tag from one of the nodes
- func GetNodeTags(c clientset.Interface, cloudConfig framework.CloudConfig) []string {
- nodes := framework.GetReadySchedulableNodesOrDie(c)
- if len(nodes.Items) == 0 {
- framework.Logf("GetNodeTags: Found 0 node.")
- return []string{}
- }
- return GetInstanceTags(cloudConfig, nodes.Items[0].Name).Items
- }
-
// IsGoogleAPIHTTPErrorCode returns true if the error is a google api
// error matching the corresponding HTTP error code.
func IsGoogleAPIHTTPErrorCode(err error, code int) bool {

View File

@@ -264,7 +264,8 @@ func (j *TestJig) CreateLoadBalancerService(namespace, serviceName string, timeo
// GetEndpointNodes returns a map of nodenames:external-ip on which the
// endpoints of the given Service are running.
func (j *TestJig) GetEndpointNodes(svc *v1.Service) map[string][]string {
- nodes := j.GetNodes(MaxNodesForEndpointsTests)
+ nodes, err := e2enode.GetBoundedReadySchedulableNodes(j.Client, MaxNodesForEndpointsTests)
+ framework.ExpectNoError(err)
endpoints, err := j.Client.CoreV1().Endpoints(svc.Namespace).Get(svc.Name, metav1.GetOptions{})
if err != nil {
framework.Failf("Get endpoints for service %s/%s failed (%s)", svc.Namespace, svc.Name, err)
@@ -289,27 +290,6 @@ func (j *TestJig) GetEndpointNodes(svc *v1.Service) map[string][]string {
return nodeMap
}

- // GetNodes returns the first maxNodesForTest nodes. Useful in large clusters
- // where we don't eg: want to create an endpoint per node.
- func (j *TestJig) GetNodes(maxNodesForTest int) (nodes *v1.NodeList) {
- nodes = framework.GetReadySchedulableNodesOrDie(j.Client)
- if len(nodes.Items) <= maxNodesForTest {
- maxNodesForTest = len(nodes.Items)
- }
- nodes.Items = nodes.Items[:maxNodesForTest]
- return nodes
- }
-
- // GetNodesNames returns a list of names of the first maxNodesForTest nodes
- func (j *TestJig) GetNodesNames(maxNodesForTest int) []string {
- nodes := j.GetNodes(maxNodesForTest)
- nodesNames := []string{}
- for _, node := range nodes.Items {
- nodesNames = append(nodesNames, node.Name)
- }
- return nodesNames
- }
-
// WaitForEndpointOnNode waits for a service endpoint on the given node.
func (j *TestJig) WaitForEndpointOnNode(namespace, serviceName, nodeName string) {
err := wait.PollImmediate(framework.Poll, LoadBalancerCreateTimeoutDefault, func() (bool, error) {
@@ -840,7 +820,8 @@ func (j *TestJig) checkNodePortServiceReachability(namespace string, svc *v1.Ser
servicePorts := svc.Spec.Ports

// Consider only 2 nodes for testing
- nodes := j.GetNodes(2)
+ nodes, err := e2enode.GetBoundedReadySchedulableNodes(j.Client, 2)
+ framework.ExpectNoError(err)

j.WaitForAvailableEndpoint(namespace, svc.Name, ServiceEndpointsTimeout)

View File

@@ -26,6 +26,7 @@ import (
clientset "k8s.io/client-go/kubernetes"
restclient "k8s.io/client-go/rest"
"k8s.io/kubernetes/test/e2e/framework"
+ e2enode "k8s.io/kubernetes/test/e2e/framework/node"
)

// GetServicesProxyRequest returns a request for a service proxy.
@@ -110,7 +111,9 @@ func DescribeSvc(ns string) {
// GetServiceLoadBalancerCreationTimeout returns a timeout value for creating a load balancer of a service.
func GetServiceLoadBalancerCreationTimeout(cs clientset.Interface) time.Duration {
- if nodes := framework.GetReadySchedulableNodesOrDie(cs); len(nodes.Items) > LargeClusterMinNodesNumber {
+ nodes, err := e2enode.GetReadySchedulableNodes(cs)
+ framework.ExpectNoError(err)
+ if len(nodes.Items) > LargeClusterMinNodesNumber {
return LoadBalancerCreateTimeoutLarge
}
return LoadBalancerCreateTimeoutDefault

View File

@@ -29,6 +29,7 @@ import (
"k8s.io/component-base/version"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
+ e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)
@@ -80,7 +81,9 @@ func SetupSuite() {
// If NumNodes is not specified then auto-detect how many are scheduleable and not tainted
if TestContext.CloudConfig.NumNodes == DefaultNumNodes {
- TestContext.CloudConfig.NumNodes = len(GetReadySchedulableNodesOrDie(c).Items)
+ nodes, err := e2enode.GetReadySchedulableNodes(c)
+ ExpectNoError(err)
+ TestContext.CloudConfig.NumNodes = len(nodes.Items)
}

// Ensure all pods are running and ready before starting tests (otherwise,

View File

@@ -2897,7 +2897,10 @@ func NewE2ETestNodePreparer(client clientset.Interface, countToStrategy []testut
// PrepareNodes prepares nodes in the cluster.
func (p *E2ETestNodePreparer) PrepareNodes() error {
- nodes := GetReadySchedulableNodesOrDie(p.client)
+ nodes, err := e2enode.GetReadySchedulableNodes(p.client)
+ if err != nil {
+ return err
+ }
numTemplates := 0
for _, v := range p.countToStrategy {
numTemplates += v.Count
@@ -2923,9 +2926,11 @@ func (p *E2ETestNodePreparer) PrepareNodes() error {
// CleanupNodes cleanups nodes in the cluster.
func (p *E2ETestNodePreparer) CleanupNodes() error {
var encounteredError error
- nodes := GetReadySchedulableNodesOrDie(p.client)
+ nodes, err := e2enode.GetReadySchedulableNodes(p.client)
+ if err != nil {
+ return err
+ }
for i := range nodes.Items {
- var err error
name := nodes.Items[i].Name
strategy, found := p.nodeToAppliedStrategy[name]
if found {

View File

@@ -21,6 +21,7 @@ import (
"os/exec"

"k8s.io/kubernetes/test/e2e/framework"
+ e2enode "k8s.io/kubernetes/test/e2e/framework/node"

"github.com/onsi/ginkgo"
)
@@ -98,7 +99,8 @@ func testCreateDeleteNodePool(f *framework.Framework, poolName string) {
// label with the given node pool name.
func nodesWithPoolLabel(f *framework.Framework, poolName string) int {
nodeCount := 0
- nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
+ nodeList, err := e2enode.GetReadySchedulableNodes(f.ClientSet)
+ framework.ExpectNoError(err)
for _, node := range nodeList.Items {
if poolLabel := node.Labels["cloud.google.com/gke-nodepool"]; poolLabel == poolName {
nodeCount++

View File

@@ -16,6 +16,7 @@ go_library(
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/config:go_default_library",
+ "//test/e2e/framework/node:go_default_library",
"//test/e2e/instrumentation/common:go_default_library",
"//test/e2e/instrumentation/logging/elasticsearch:go_default_library",
"//test/e2e/instrumentation/logging/stackdriver:go_default_library",

View File

@@ -27,6 +27,7 @@ import (
"k8s.io/api/core/v1"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/framework/config"
+ e2enode "k8s.io/kubernetes/test/e2e/framework/node"
instrumentation "k8s.io/kubernetes/test/e2e/instrumentation/common"
imageutils "k8s.io/kubernetes/test/utils/image"
)
@@ -76,7 +77,8 @@ var _ = instrumentation.SIGDescribe("Logging soak [Performance] [Slow] [Disrupti
// was produced in each and every pod at least once. The final arg is the timeout for the test to verify all the pods got logs.
func RunLogPodsWithSleepOf(f *framework.Framework, sleep time.Duration, podname string, timeout time.Duration) {
- nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
+ nodes, err := e2enode.GetReadySchedulableNodes(f.ClientSet)
+ framework.ExpectNoError(err)
totalPods := len(nodes.Items)
framework.ExpectNotEqual(totalPods, 0)

View File

@@ -18,6 +18,7 @@ go_library(
"//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//test/e2e/framework:go_default_library",
+ "//test/e2e/framework/node:go_default_library",
"//test/e2e/instrumentation/common:go_default_library",
"//test/e2e/instrumentation/logging/utils:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",

View File

@@ -22,6 +22,7 @@ import (
"time"

"k8s.io/kubernetes/test/e2e/framework"
+ e2enode "k8s.io/kubernetes/test/e2e/framework/node"
instrumentation "k8s.io/kubernetes/test/e2e/instrumentation/common"
"k8s.io/kubernetes/test/e2e/instrumentation/logging/utils"
@@ -42,7 +43,8 @@ var _ = instrumentation.SIGDescribe("Cluster level logging implemented by Stackd
ginkgo.It("should ingest logs from applications running for a prolonged amount of time", func() {
withLogProviderForScope(f, podsScope, func(p *sdLogProvider) {
- nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet).Items
+ nodes, err := e2enode.GetReadySchedulableNodes(f.ClientSet)
+ framework.ExpectNoError(err)
maxPodCount := 10
jobDuration := 30 * time.Minute
linesPerPodPerSecond := 100
@@ -65,7 +67,7 @@ var _ = instrumentation.SIGDescribe("Cluster level logging implemented by Stackd
podsByRun := [][]utils.FiniteLoggingPod{}
for runIdx := 0; runIdx < podRunCount; runIdx++ {
podsInRun := []utils.FiniteLoggingPod{}
- for nodeIdx, node := range nodes {
+ for nodeIdx, node := range nodes.Items {
podName := fmt.Sprintf("job-logs-generator-%d-%d-%d-%d", maxPodCount, linesPerPod, runIdx, nodeIdx)
pod := utils.NewLoadLoggingPod(podName, node.Name, linesPerPod, jobDuration)
pods = append(pods, pod)
@@ -90,7 +92,7 @@ var _ = instrumentation.SIGDescribe("Cluster level logging implemented by Stackd
}()

checker := utils.NewFullIngestionPodLogChecker(p, maxAllowedLostFraction, pods...)
- err := utils.WaitForLogs(checker, ingestionInterval, ingestionTimeout)
+ err = utils.WaitForLogs(checker, ingestionInterval, ingestionTimeout)
framework.ExpectNoError(err)

utils.EnsureLoggingAgentRestartsCount(f, p.LoggingAgentName(), allowedRestarts)

View File

@@ -25,6 +25,7 @@ go_library(
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//test/e2e/framework:go_default_library",
+ "//test/e2e/framework/node:go_default_library",
"//test/e2e/framework/pod:go_default_library",
"//test/utils/image:go_default_library",
"//vendor/k8s.io/utils/integer:go_default_library",

View File

@@ -24,6 +24,7 @@ import (
"k8s.io/apimachinery/pkg/labels"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/test/e2e/framework"
+ e2enode "k8s.io/kubernetes/test/e2e/framework/node"
"k8s.io/utils/integer"
)
@@ -40,7 +41,10 @@ func EnsureLoggingAgentDeployment(f *framework.Framework, appName string) error
agentPerNode[pod.Spec.NodeName]++
}

- nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
+ nodeList, err := e2enode.GetReadySchedulableNodes(f.ClientSet)
+ if err != nil {
+ return fmt.Errorf("failed to get nodes: %v", err)
+ }
for _, node := range nodeList.Items {
agentPodsCount, ok := agentPerNode[node.Name]

View File

@@ -19,11 +19,13 @@ package utils
import (
clientset "k8s.io/client-go/kubernetes"

"k8s.io/kubernetes/test/e2e/framework"
+ e2enode "k8s.io/kubernetes/test/e2e/framework/node"
)

// GetNodeIds returns the list of node names and panics in case of failure.
func GetNodeIds(cs clientset.Interface) []string {
- nodes := framework.GetReadySchedulableNodesOrDie(cs)
+ nodes, err := e2enode.GetReadySchedulableNodes(cs)
+ framework.ExpectNoError(err)
nodeIds := []string{}
for _, n := range nodes.Items {
nodeIds = append(nodeIds, n.Name)

View File

@@ -39,6 +39,7 @@ go_library(
"//test/e2e/framework/config:go_default_library",
"//test/e2e/framework/gpu:go_default_library",
"//test/e2e/framework/metrics:go_default_library",
+ "//test/e2e/framework/node:go_default_library",
"//test/e2e/framework/pod:go_default_library",
"//test/e2e/instrumentation/common:go_default_library",
"//test/e2e/scheduling:go_default_library",

View File

@@ -23,6 +23,7 @@ import (
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/framework/metrics"
+ e2enode "k8s.io/kubernetes/test/e2e/framework/node"
instrumentation "k8s.io/kubernetes/test/e2e/instrumentation/common"

gin "github.com/onsi/ginkgo"
@@ -51,9 +52,9 @@ var _ = instrumentation.SIGDescribe("MetricsGrabber", func() {
gin.It("should grab all metrics from a Kubelet.", func() {
gin.By("Proxying to Node through the API server")
- nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
- gom.Expect(nodes.Items).NotTo(gom.BeEmpty())
- response, err := grabber.GrabFromKubelet(nodes.Items[0].Name)
+ node, err := e2enode.GetRandomReadySchedulableNode(f.ClientSet)
+ framework.ExpectNoError(err)
+ response, err := grabber.GrabFromKubelet(node.Name)
framework.ExpectNoError(err)
gom.Expect(response).NotTo(gom.BeEmpty())
})

View File

@@ -39,9 +39,9 @@ var _ = SIGDescribe("Ports Security Check [Feature:KubeletSecurity]", func() {
var nodeName string

ginkgo.BeforeEach(func() {
- nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
- gomega.Expect(len(nodes.Items)).NotTo(gomega.BeZero())
- node = &nodes.Items[0]
+ var err error
+ node, err = e2enode.GetRandomReadySchedulableNode(f.ClientSet)
+ framework.ExpectNoError(err)
nodeName = node.Name
})

View File

@@ -103,7 +103,8 @@ var _ = SIGDescribe("[Disruptive]NodeLease", func() {
gomega.Expect(err).To(gomega.BeNil())

ginkgo.By("verify node lease exists for every nodes")
- originalNodes := framework.GetReadySchedulableNodesOrDie(c)
+ originalNodes, err := e2enode.GetReadySchedulableNodes(c)
+ framework.ExpectNoError(err)
framework.ExpectEqual(len(originalNodes.Items), framework.TestContext.CloudConfig.NumNodes)

gomega.Eventually(func() error {
@@ -128,7 +129,8 @@ var _ = SIGDescribe("[Disruptive]NodeLease", func() {
gomega.Expect(err).To(gomega.BeNil())
err = e2enode.WaitForReadyNodes(c, framework.TestContext.CloudConfig.NumNodes-1, 10*time.Minute)
gomega.Expect(err).To(gomega.BeNil())
- targetNodes := framework.GetReadySchedulableNodesOrDie(c)
+ targetNodes, err := e2enode.GetReadySchedulableNodes(c)
+ framework.ExpectNoError(err)
framework.ExpectEqual(len(targetNodes.Items), int(targetNumNodes))

ginkgo.By("verify node lease is deleted for the deleted node")


@@ -134,7 +134,8 @@ var _ = SIGDescribe("Reboot [Disruptive] [Feature:Reboot]", func() {
 func testReboot(c clientset.Interface, rebootCmd string, hook terminationHook) {
     // Get all nodes, and kick off the test on each.
-    nodelist := framework.GetReadySchedulableNodesOrDie(c)
+    nodelist, err := e2enode.GetReadySchedulableNodes(c)
+    framework.ExpectNoError(err, "failed to list nodes")
     if hook != nil {
         defer func() {
             framework.Logf("Executing termination hook on nodes")


@@ -47,7 +47,8 @@ var _ = SIGDescribe("[Feature:IPv6DualStackAlphaFeature] [LinuxOnly]", func() {
     ginkgo.It("should have ipv4 and ipv6 internal node ip", func() {
         // TODO (aramase) can switch to new function to get all nodes
-        nodeList := framework.GetReadySchedulableNodesOrDie(cs)
+        nodeList, err := e2enode.GetReadySchedulableNodes(cs)
+        framework.ExpectNoError(err)
         for _, node := range nodeList.Items {
             // get all internal ips for node
@@ -61,7 +62,8 @@ var _ = SIGDescribe("[Feature:IPv6DualStackAlphaFeature] [LinuxOnly]", func() {
     ginkgo.It("should have ipv4 and ipv6 node podCIDRs", func() {
         // TODO (aramase) can switch to new function to get all nodes
-        nodeList := framework.GetReadySchedulableNodesOrDie(cs)
+        nodeList, err := e2enode.GetReadySchedulableNodes(cs)
+        framework.ExpectNoError(err)
         for _, node := range nodeList.Items {
             framework.ExpectEqual(len(node.Spec.PodCIDRs), 2)
@@ -121,12 +123,10 @@ var _ = SIGDescribe("[Feature:IPv6DualStackAlphaFeature] [LinuxOnly]", func() {
         // get all schedulable nodes to determine the number of replicas for pods
         // this is to ensure connectivity from all nodes on cluster
-        nodeList := framework.GetReadySchedulableNodesOrDie(cs)
-        gomega.Expect(nodeList).NotTo(gomega.BeNil())
-
-        if len(nodeList.Items) < 1 {
-            framework.Failf("Expect at least 1 node, got %v", len(nodeList.Items))
-        }
+        // FIXME: tests may be run in large clusters. This test is O(n^2) in the
+        // number of nodes used. It should use GetBoundedReadySchedulableNodes().
+        nodeList, err := e2enode.GetReadySchedulableNodes(cs)
+        framework.ExpectNoError(err)
         replicas := int32(len(nodeList.Items))


@@ -32,7 +32,6 @@ import (
     gcecloud "k8s.io/legacy-cloud-providers/gce"
     "github.com/onsi/ginkgo"
-    "github.com/onsi/gomega"
 )
 const (
@@ -73,11 +72,12 @@ var _ = SIGDescribe("Firewall rule", func() {
         framework.Logf("Got cluster ID: %v", clusterID)
         jig := e2eservice.NewTestJig(cs, serviceName)
-        nodeList := jig.GetNodes(e2eservice.MaxNodesForEndpointsTests)
-        gomega.Expect(nodeList).NotTo(gomega.BeNil())
-        nodesNames := jig.GetNodesNames(e2eservice.MaxNodesForEndpointsTests)
-        if len(nodesNames) <= 0 {
-            framework.Failf("Expect at least 1 node, got: %v", nodesNames)
+        nodeList, err := e2enode.GetBoundedReadySchedulableNodes(cs, e2eservice.MaxNodesForEndpointsTests)
+        framework.ExpectNoError(err)
+        nodesNames := []string{}
+        for _, node := range nodeList.Items {
+            nodesNames = append(nodesNames, node.Name)
         }
         nodesSet := sets.NewString(nodesNames...)
@@ -188,10 +188,8 @@
     })
     ginkgo.It("should have correct firewall rules for e2e cluster", func() {
-        nodes := framework.GetReadySchedulableNodesOrDie(cs)
-        if len(nodes.Items) <= 0 {
-            framework.Failf("Expect at least 1 node, got: %v", len(nodes.Items))
-        }
+        nodes, err := e2enode.GetReadySchedulableNodes(cs)
+        framework.ExpectNoError(err)
         ginkgo.By("Checking if e2e firewall rules are correct")
         for _, expFw := range gce.GetE2eFirewalls(cloudConfig.MasterName, cloudConfig.MasterTag, cloudConfig.NodeTag, cloudConfig.Network, cloudConfig.ClusterIPRange) {
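Where a test would otherwise scale with cluster size, the change uses e2enode.GetBoundedReadySchedulableNodes to cap how many nodes are considered (here with e2eservice.MaxNodesForEndpointsTests). A sketch of that bounded pattern, with boundedNodeNames as a hypothetical helper and signatures assumed from the usage above:

package example

import (
	clientset "k8s.io/client-go/kubernetes"

	"k8s.io/kubernetes/test/e2e/framework"
	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
	e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
)

// boundedNodeNames lists at most e2eservice.MaxNodesForEndpointsTests node
// names, so the test stays cheap on very large clusters.
func boundedNodeNames(cs clientset.Interface) []string {
	nodeList, err := e2enode.GetBoundedReadySchedulableNodes(cs, e2eservice.MaxNodesForEndpointsTests)
	framework.ExpectNoError(err)

	names := []string{}
	for _, node := range nodeList.Items {
		names = append(names, node.Name)
	}
	return names
}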


@@ -51,15 +51,16 @@ var _ = SIGDescribe("Network", func() {
     fr := framework.NewDefaultFramework("network")
     ginkgo.It("should set TCP CLOSE_WAIT timeout", func() {
-        nodes := framework.GetReadySchedulableNodesOrDie(fr.ClientSet)
-        ips := e2enode.CollectAddresses(nodes, v1.NodeInternalIP)
+        nodes, err := e2enode.GetBoundedReadySchedulableNodes(fr.ClientSet, 2)
+        framework.ExpectNoError(err)
         if len(nodes.Items) < 2 {
             framework.Skipf(
                 "Test requires >= 2 Ready nodes, but there are only %v nodes",
                 len(nodes.Items))
         }
+        ips := e2enode.CollectAddresses(nodes, v1.NodeInternalIP)
         type NodeInfo struct {
             node *v1.Node
             name string
@@ -81,7 +82,7 @@
         zero := int64(0)
         // Some distributions (Ubuntu 16.04 etc.) don't support the proc file.
-        _, err := e2essh.IssueSSHCommandWithResult(
+        _, err = e2essh.IssueSSHCommandWithResult(
             "ls /proc/net/nf_conntrack",
             framework.TestContext.Provider,
             clientNodeInfo.node)


@@ -19,12 +19,12 @@ package network
 // Tests network performance using iperf or other containers.
 import (
     "fmt"
-    "math"
     "time"
     "github.com/onsi/ginkgo"
     v1 "k8s.io/api/core/v1"
     "k8s.io/kubernetes/test/e2e/framework"
+    e2enode "k8s.io/kubernetes/test/e2e/framework/node"
     imageutils "k8s.io/kubernetes/test/utils/image"
 )
@@ -53,13 +53,13 @@ func networkingIPerfTest(isIPv6 bool) {
     }
     ginkgo.It(fmt.Sprintf("should transfer ~ 1GB onto the service endpoint %v servers (maximum of %v clients)", numServer, numClient), func() {
-        nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
+        nodes, err := e2enode.GetReadySchedulableNodes(f.ClientSet)
+        framework.ExpectNoError(err)
         totalPods := len(nodes.Items)
         // for a single service, we expect to divide bandwidth between the network. Very crude estimate.
         expectedBandwidth := int(float64(maxBandwidthBits) / float64(totalPods))
-        framework.ExpectNotEqual(totalPods, 0)
         appName := "iperf-e2e"
-        _, err := f.CreateServiceForSimpleAppWithPods(
+        _, err = f.CreateServiceForSimpleAppWithPods(
             8001,
             8002,
             appName,
@@ -108,16 +108,14 @@
         },
         numClient,
     )
+    expectedCli := numClient
+    if len(nodes.Items) < expectedCli {
+        expectedCli = len(nodes.Items)
+    }
     framework.Logf("Reading all perf results to stdout.")
     framework.Logf("date,cli,cliPort,server,serverPort,id,interval,transferBits,bandwidthBits")
-    // Calculate expected number of clients based on total nodes.
-    expectedCli := func() int {
-        nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
-        return int(math.Min(float64(len(nodes.Items)), float64(numClient)))
-    }()
     // Extra 1/10 second per client.
     iperfTimeout := smallClusterTimeout + (time.Duration(expectedCli/10) * time.Second)
     iperfResults := &IPerfResults{}


@@ -33,6 +33,7 @@ import (
     "k8s.io/apimachinery/pkg/util/net"
     clientset "k8s.io/client-go/kubernetes"
     "k8s.io/kubernetes/test/e2e/framework"
+    e2enode "k8s.io/kubernetes/test/e2e/framework/node"
     testutils "k8s.io/kubernetes/test/utils"
     imageutils "k8s.io/kubernetes/test/utils/image"
@@ -287,23 +288,16 @@ func truncate(b []byte, maxLen int) []byte {
     return b2
 }
-func pickNode(cs clientset.Interface) (string, error) {
-    // TODO: investigate why it doesn't work on master Node.
-    nodes := framework.GetReadySchedulableNodesOrDie(cs)
-    if len(nodes.Items) == 0 {
-        return "", fmt.Errorf("no nodes exist, can't test node proxy")
-    }
-    return nodes.Items[0].Name, nil
-}
 func nodeProxyTest(f *framework.Framework, prefix, nodeDest string) {
-    node, err := pickNode(f.ClientSet)
+    // TODO: investigate why it doesn't work on master Node.
+    node, err := e2enode.GetRandomReadySchedulableNode(f.ClientSet)
     framework.ExpectNoError(err)
     // TODO: Change it to test whether all requests succeeded when requests
     // not reaching Kubelet issue is debugged.
     serviceUnavailableErrors := 0
     for i := 0; i < proxyAttempts; i++ {
-        _, status, d, err := doProxy(f, prefix+node+nodeDest, i)
+        _, status, d, err := doProxy(f, prefix+node.Name+nodeDest, i)
         if status == http.StatusServiceUnavailable {
             framework.Logf("ginkgo.Failed proxying node logs due to service unavailable: %v", err)
             time.Sleep(time.Second)


@@ -295,7 +295,8 @@ var _ = SIGDescribe("Services", func() {
         framework.Logf("sourceip-test cluster ip: %s", serviceIP)
         ginkgo.By("Picking 2 Nodes to test whether source IP is preserved or not")
-        nodes := jig.GetNodes(2)
+        nodes, err := e2enode.GetBoundedReadySchedulableNodes(cs, 2)
+        framework.ExpectNoError(err)
         nodeCounts := len(nodes.Items)
         if nodeCounts < 2 {
             framework.Skipf("The test requires at least two ready nodes on %s, but found %v", framework.TestContext.Provider, nodeCounts)
@@ -305,7 +306,7 @@
         serverPodName := "echo-sourceip"
         pod := f.NewAgnhostPod(serverPodName, "netexec", "--http-port", strconv.Itoa(servicePort))
         pod.Labels = jig.Labels
-        _, err := cs.CoreV1().Pods(ns).Create(pod)
+        _, err = cs.CoreV1().Pods(ns).Create(pod)
         framework.ExpectNoError(err)
         framework.ExpectNoError(f.WaitForPodRunning(pod.Name))
         defer func() {
@@ -566,7 +567,9 @@
             loadBalancerLagTimeout = e2eservice.LoadBalancerLagTimeoutAWS
         }
         loadBalancerCreateTimeout := e2eservice.LoadBalancerCreateTimeoutDefault
-        if nodes := framework.GetReadySchedulableNodesOrDie(cs); len(nodes.Items) > e2eservice.LargeClusterMinNodesNumber {
+        nodes, err := e2enode.GetReadySchedulableNodes(cs)
+        framework.ExpectNoError(err)
+        if len(nodes.Items) > e2eservice.LargeClusterMinNodesNumber {
             loadBalancerCreateTimeout = e2eservice.LoadBalancerCreateTimeoutLarge
         }
@@ -1522,7 +1525,9 @@
             loadBalancerLagTimeout = e2eservice.LoadBalancerLagTimeoutAWS
         }
         loadBalancerCreateTimeout := e2eservice.LoadBalancerCreateTimeoutDefault
-        if nodes := framework.GetReadySchedulableNodesOrDie(cs); len(nodes.Items) > e2eservice.LargeClusterMinNodesNumber {
+        nodes, err := e2enode.GetReadySchedulableNodes(cs)
+        framework.ExpectNoError(err)
+        if len(nodes.Items) > e2eservice.LargeClusterMinNodesNumber {
             loadBalancerCreateTimeout = e2eservice.LoadBalancerCreateTimeoutLarge
         }
@@ -1540,7 +1545,6 @@
         // This container is an nginx container listening on port 80
         // See kubernetes/contrib/ingress/echoheaders/nginx.conf for content of response
         jig.RunOrFail(namespace, nil)
-        var err error
         // Make sure acceptPod is running. There are certain chances that pod might be teminated due to unexpected reasons.
         acceptPod, err = cs.CoreV1().Pods(namespace).Get(acceptPod.Name, metav1.GetOptions{})
         framework.ExpectNoError(err, "Unable to get pod %s", acceptPod.Name)
@@ -1598,7 +1602,9 @@
         framework.SkipUnlessProviderIs("azure", "gke", "gce")
         createTimeout := e2eservice.LoadBalancerCreateTimeoutDefault
-        if nodes := framework.GetReadySchedulableNodesOrDie(cs); len(nodes.Items) > e2eservice.LargeClusterMinNodesNumber {
+        nodes, err := e2enode.GetReadySchedulableNodes(cs)
+        framework.ExpectNoError(err)
+        if len(nodes.Items) > e2eservice.LargeClusterMinNodesNumber {
             createTimeout = e2eservice.LoadBalancerCreateTimeoutLarge
         }
@@ -1981,7 +1987,8 @@
         namespace := f.Namespace.Name
         serviceName := "no-pods"
         jig := e2eservice.NewTestJig(cs, serviceName)
-        nodes := jig.GetNodes(e2eservice.MaxNodesForEndpointsTests)
+        nodes, err := e2enode.GetBoundedReadySchedulableNodes(cs, e2eservice.MaxNodesForEndpointsTests)
+        framework.ExpectNoError(err)
         labels := map[string]string{
             "nopods": "nopods",
         }
@@ -1992,7 +1999,7 @@
         }}
         ginkgo.By("creating a service with no endpoints")
-        _, err := jig.CreateServiceWithServicePort(labels, namespace, ports)
+        _, err = jig.CreateServiceWithServicePort(labels, namespace, ports)
         if err != nil {
             framework.Failf("ginkgo.Failed to create service: %v", err)
         }
@@ -2076,7 +2083,9 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() {
         framework.SkipUnlessProviderIs("gce", "gke")
         cs = f.ClientSet
-        if nodes := framework.GetReadySchedulableNodesOrDie(cs); len(nodes.Items) > e2eservice.LargeClusterMinNodesNumber {
+        nodes, err := e2enode.GetReadySchedulableNodes(cs)
+        framework.ExpectNoError(err)
+        if len(nodes.Items) > e2eservice.LargeClusterMinNodesNumber {
             loadBalancerCreateTimeout = e2eservice.LoadBalancerCreateTimeoutLarge
         }
     })
@@ -2162,7 +2171,8 @@
         namespace := f.Namespace.Name
         serviceName := "external-local-nodes"
         jig := e2eservice.NewTestJig(cs, serviceName)
-        nodes := jig.GetNodes(e2eservice.MaxNodesForEndpointsTests)
+        nodes, err := e2enode.GetBoundedReadySchedulableNodes(cs, e2eservice.MaxNodesForEndpointsTests)
+        framework.ExpectNoError(err)
         svc := jig.CreateOnlyLocalLoadBalancerService(namespace, serviceName, loadBalancerCreateTimeout, false,
             func(svc *v1.Service) {
@@ -2285,7 +2295,8 @@
         serviceName := "external-local-update"
         jig := e2eservice.NewTestJig(cs, serviceName)
-        nodes := jig.GetNodes(e2eservice.MaxNodesForEndpointsTests)
+        nodes, err := e2enode.GetBoundedReadySchedulableNodes(cs, e2eservice.MaxNodesForEndpointsTests)
+        framework.ExpectNoError(err)
         if len(nodes.Items) < 2 {
             framework.Failf("Need at least 2 nodes to verify source ip from a node without endpoint")
         }
@@ -2450,7 +2461,8 @@ func execAffinityTestForNonLBServiceWithOptionalTransition(f *framework.Framewor
         framework.ExpectNoError(err, "failed to fetch service: %s in namespace: %s", serviceName, ns)
         var svcIP string
         if serviceType == v1.ServiceTypeNodePort {
-            nodes := framework.GetReadySchedulableNodesOrDie(cs)
+            nodes, err := e2enode.GetReadySchedulableNodes(cs)
+            framework.ExpectNoError(err)
             addrs := e2enode.CollectAddresses(nodes, v1.NodeInternalIP)
             gomega.Expect(len(addrs)).To(gomega.BeNumerically(">", 0), "ginkgo.Failed to get Node internal IP")
             svcIP = addrs[0]


@@ -30,6 +30,7 @@ import (
     clientset "k8s.io/client-go/kubernetes"
     "k8s.io/kubernetes/test/e2e/framework"
     e2ekubelet "k8s.io/kubernetes/test/e2e/framework/kubelet"
+    e2enode "k8s.io/kubernetes/test/e2e/framework/node"
     e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
     e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
     "k8s.io/kubernetes/test/e2e/framework/volume"
@@ -270,18 +271,10 @@ var _ = SIGDescribe("kubelet", func() {
             // nodes we observe initially.
             nodeLabels = make(map[string]string)
             nodeLabels["kubelet_cleanup"] = "true"
-            nodes := framework.GetReadySchedulableNodesOrDie(c)
-            numNodes = len(nodes.Items)
-            framework.ExpectNotEqual(numNodes, 0)
+            nodes, err := e2enode.GetBoundedReadySchedulableNodes(c, maxNodesToCheck)
+            framework.ExpectNoError(err)
             nodeNames = sets.NewString()
-            // If there are a lot of nodes, we don't want to use all of them
-            // (if there are 1000 nodes in the cluster, starting 10 pods/node
-            // will take ~10 minutes today). And there is also deletion phase.
-            // Instead, we choose at most 10 nodes.
-            if numNodes > maxNodesToCheck {
-                numNodes = maxNodesToCheck
-            }
-            for i := 0; i < numNodes; i++ {
+            for i := 0; i < len(nodes.Items); i++ {
                 nodeNames.Insert(nodes.Items[i].Name)
             }
             for nodeName := range nodeNames {


@@ -27,6 +27,7 @@ import (
     kubeletstatsv1alpha1 "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1"
     "k8s.io/kubernetes/test/e2e/framework"
     e2ekubelet "k8s.io/kubernetes/test/e2e/framework/kubelet"
+    e2enode "k8s.io/kubernetes/test/e2e/framework/node"
     e2eperf "k8s.io/kubernetes/test/e2e/framework/perf"
     "k8s.io/kubernetes/test/e2e/perftype"
     testutils "k8s.io/kubernetes/test/utils"
@@ -198,7 +199,8 @@ var _ = SIGDescribe("Kubelet [Serial] [Slow]", func() {
     var rm *e2ekubelet.ResourceMonitor
     ginkgo.BeforeEach(func() {
-        nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
+        nodes, err := e2enode.GetReadySchedulableNodes(f.ClientSet)
+        framework.ExpectNoError(err)
         nodeNames = sets.NewString()
         for _, node := range nodes.Items {
             nodeNames.Insert(node.Name)


@@ -23,6 +23,7 @@ import (
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/util/sets"
     "k8s.io/kubernetes/test/e2e/framework"
+    e2enode "k8s.io/kubernetes/test/e2e/framework/node"
     e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
     imageutils "k8s.io/kubernetes/test/utils/image"
@@ -85,9 +86,8 @@ var _ = SIGDescribe("Mount propagation", func() {
     // propagated to the right places.
     // Pick a node where all pods will run.
-    nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
-    framework.ExpectNotEqual(len(nodes.Items), 0, "No available nodes for scheduling")
-    node := &nodes.Items[0]
+    node, err := e2enode.GetRandomReadySchedulableNode(f.ClientSet)
+    framework.ExpectNoError(err)
     // Fail the test if the namespace is not set. We expect that the
     // namespace is unique and we might delete user data if it's not.
@@ -139,7 +139,7 @@
     // The host mounts one tmpfs to testdir/host and puts a file there so we
    // can check mount propagation from the host to pods.
     cmd := fmt.Sprintf("sudo mkdir %[1]q/host; sudo mount -t tmpfs e2e-mount-propagation-host %[1]q/host; echo host > %[1]q/host/file", hostDir)
-    err := e2essh.IssueSSHCommand(cmd, framework.TestContext.Provider, node)
+    err = e2essh.IssueSSHCommand(cmd, framework.TestContext.Provider, node)
     framework.ExpectNoError(err)
     defer func() {


@@ -58,8 +58,8 @@ var _ = SIGDescribe("NodeProblemDetector [DisabledForLargeClusters]", func() {
     framework.SkipUnlessSSHKeyPresent()
     ginkgo.By("Getting all nodes and their SSH-able IP addresses")
-    nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
-    framework.ExpectNotEqual(len(nodes.Items), 0)
+    nodes, err := e2enode.GetReadySchedulableNodes(f.ClientSet)
+    framework.ExpectNoError(err)
     hosts := []string{}
     for _, node := range nodes.Items {
         for _, addr := range node.Status.Addresses {


@@ -59,6 +59,7 @@ import (
     "k8s.io/kubernetes/pkg/apis/extensions"
     "k8s.io/kubernetes/test/e2e/framework"
     e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
+    e2enode "k8s.io/kubernetes/test/e2e/framework/node"
     "k8s.io/kubernetes/test/e2e/framework/timer"
     testutils "k8s.io/kubernetes/test/utils"
@@ -158,14 +159,14 @@ var _ = SIGDescribe("Load capacity", func() {
     clientset = f.ClientSet
     ns = f.Namespace.Name
-    nodes := framework.GetReadySchedulableNodesOrDie(clientset)
-    nodeCount = len(nodes.Items)
-    gomega.Expect(nodeCount).NotTo(gomega.BeZero())
+    _, err := e2enode.GetRandomReadySchedulableNode(clientset)
+    framework.ExpectNoError(err)
     // Terminating a namespace (deleting the remaining objects from it - which
     // generally means events) can affect the current run. Thus we wait for all
     // terminating namespace to be finally deleted before starting this test.
-    err := framework.CheckTestingNSDeletedExcept(clientset, ns)
+    err = framework.CheckTestingNSDeletedExcept(clientset, ns)
     framework.ExpectNoError(err)
     framework.ExpectNoError(e2emetrics.ResetMetrics(clientset))


@@ -95,7 +95,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
     if err != nil {
         framework.Logf("Unexpected error occurred: %v", err)
     }
-    nodeList, err = e2enode.GetReadySchedulableNodesOrDie(cs)
+    nodeList, err = e2enode.GetReadySchedulableNodes(cs)
     if err != nil {
         framework.Logf("Unexpected error occurred: %v", err)
     }


@@ -37,6 +37,7 @@ go_library(
         "//test/e2e/common:go_default_library",
         "//test/e2e/framework:go_default_library",
         "//test/e2e/framework/job:go_default_library",
+        "//test/e2e/framework/node:go_default_library",
        "//test/e2e/framework/service:go_default_library",
         "//test/e2e/framework/statefulset:go_default_library",
         "//test/e2e/framework/testfiles:go_default_library",


@@ -27,6 +27,7 @@ import (
     "k8s.io/apimachinery/pkg/util/wait"
     clientset "k8s.io/client-go/kubernetes"
     "k8s.io/kubernetes/test/e2e/framework"
+    e2enode "k8s.io/kubernetes/test/e2e/framework/node"
     "github.com/onsi/ginkgo"
 )
@@ -118,7 +119,13 @@ func waitForKubeProxyStaticPodsRunning(c clientset.Interface) error {
             return false, nil
         }
-        numberSchedulableNodes := len(framework.GetReadySchedulableNodesOrDie(c).Items)
+        nodes, err := e2enode.GetReadySchedulableNodes(c)
+        if err != nil {
+            framework.Logf("Failed to get nodes: %v", err)
+            return false, nil
+        }
+        numberSchedulableNodes := len(nodes.Items)
         numberkubeProxyPods := 0
         for _, pod := range pods.Items {
             if pod.Status.Phase == v1.PodRunning {
@@ -176,7 +183,13 @@ func waitForKubeProxyDaemonSetRunning(c clientset.Interface) error {
             return false, nil
         }
-        numberSchedulableNodes := len(framework.GetReadySchedulableNodesOrDie(c).Items)
+        nodes, err := e2enode.GetReadySchedulableNodes(c)
+        if err != nil {
+            framework.Logf("Failed to get nodes: %v", err)
+            return false, nil
+        }
+        numberSchedulableNodes := len(nodes.Items)
         numberkubeProxyPods := int(daemonSets.Items[0].Status.NumberAvailable)
         if numberkubeProxyPods != numberSchedulableNodes {
             framework.Logf("Expect %v kube-proxy DaemonSet pods running, got %v", numberSchedulableNodes, numberkubeProxyPods)


@@ -47,6 +47,7 @@ go_library(
         "//test/e2e/framework:go_default_library",
         "//test/e2e/framework/gpu:go_default_library",
         "//test/e2e/framework/metrics:go_default_library",
+        "//test/e2e/framework/node:go_default_library",
         "//test/utils/image:go_default_library",
         "//vendor/github.com/blang/semver:go_default_library",
         "//vendor/github.com/coreos/go-systemd/util:go_default_library",


@@ -33,6 +33,7 @@ import (
     "k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
     "k8s.io/kubernetes/pkg/kubelet/types"
     "k8s.io/kubernetes/test/e2e/framework"
+    e2enode "k8s.io/kubernetes/test/e2e/framework/node"
     "github.com/onsi/ginkgo"
     "github.com/onsi/gomega"
@@ -179,8 +180,9 @@ func disableCPUManagerInKubelet(f *framework.Framework) (oldCfg *kubeletconfig.K
     // Wait for the Kubelet to be ready.
     gomega.Eventually(func() bool {
-        nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
-        return len(nodeList.Items) == 1
+        nodes, err := e2enode.TotalReady(f.ClientSet)
+        framework.ExpectNoError(err)
+        return nodes == 1
     }, time.Minute, time.Second).Should(gomega.BeTrue())
     return oldCfg
@@ -231,8 +233,9 @@ func enableCPUManagerInKubelet(f *framework.Framework, cleanStateFile bool) (old
     // Wait for the Kubelet to be ready.
     gomega.Eventually(func() bool {
-        nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
-        return len(nodeList.Items) == 1
+        nodes, err := e2enode.TotalReady(f.ClientSet)
+        framework.ExpectNoError(err)
+        return nodes == 1
     }, time.Minute, time.Second).Should(gomega.BeTrue())
     return oldCfg


@@ -24,6 +24,7 @@ import (
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
     "k8s.io/kubernetes/test/e2e/framework"
+    e2enode "k8s.io/kubernetes/test/e2e/framework/node"
     e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
     "k8s.io/kubernetes/test/e2e_node/perf/workloads"
@@ -48,8 +49,9 @@ func setKubeletConfig(f *framework.Framework, cfg *kubeletconfig.KubeletConfigur
     // Wait for the Kubelet to be ready.
     gomega.Eventually(func() bool {
-        nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
-        return len(nodeList.Items) == 1
+        nodes, err := e2enode.TotalReady(f.ClientSet)
+        framework.ExpectNoError(err)
+        return nodes == 1
     }, time.Minute, time.Second).Should(gomega.BeTrue())
 }
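The node-e2e helpers only need a ready-node count, so they move to e2enode.TotalReady inside a gomega.Eventually. A sketch of that wait, assuming TotalReady returns (int, error) as the hunks above suggest; waitForSingleReadyNode is a hypothetical wrapper:

package example

import (
	"time"

	"github.com/onsi/gomega"

	"k8s.io/kubernetes/test/e2e/framework"
	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
)

// waitForSingleReadyNode blocks until exactly one node reports Ready, using
// the count-only helper instead of fetching the full node list each poll.
func waitForSingleReadyNode(f *framework.Framework) {
	gomega.Eventually(func() bool {
		nodes, err := e2enode.TotalReady(f.ClientSet)
		framework.ExpectNoError(err)
		return nodes == 1
	}, time.Minute, time.Second).Should(gomega.BeTrue())
}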


@@ -51,7 +51,7 @@ import (
     "k8s.io/kubernetes/pkg/kubelet/util"
     "k8s.io/kubernetes/test/e2e/framework"
     e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
-    frameworkmetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
+    e2enode "k8s.io/kubernetes/test/e2e/framework/node"
     imageutils "k8s.io/kubernetes/test/utils/image"
     "github.com/onsi/ginkgo"
@@ -345,7 +345,8 @@ func logNodeEvents(f *framework.Framework) {
 }
 func getLocalNode(f *framework.Framework) *v1.Node {
-    nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
+    nodeList, err := e2enode.GetReadySchedulableNodes(f.ClientSet)
+    framework.ExpectNoError(err)
     framework.ExpectEqual(len(nodeList.Items), 1, "Unexpected number of node objects for node e2e. Expects only one node.")
     return &nodeList.Items[0]
 }
@@ -358,7 +359,7 @@ func logKubeletLatencyMetrics(metricNames ...string) {
     for _, key := range metricNames {
         metricSet.Insert(kubeletmetrics.KubeletSubsystem + "_" + key)
     }
-    metric, err := frameworkmetrics.GrabKubeletMetricsWithoutProxy(framework.TestContext.NodeName+":10255", "/metrics")
+    metric, err := e2emetrics.GrabKubeletMetricsWithoutProxy(framework.TestContext.NodeName+":10255", "/metrics")
     if err != nil {
         framework.Logf("Error getting kubelet metrics: %v", err)
     } else {
@@ -367,14 +368,14 @@ func logKubeletLatencyMetrics(metricNames ...string) {
 }
 // returns config related metrics from the local kubelet, filtered to the filterMetricNames passed in
-func getKubeletMetrics(filterMetricNames sets.String) (frameworkmetrics.KubeletMetrics, error) {
+func getKubeletMetrics(filterMetricNames sets.String) (e2emetrics.KubeletMetrics, error) {
     // grab Kubelet metrics
-    ms, err := frameworkmetrics.GrabKubeletMetricsWithoutProxy(framework.TestContext.NodeName+":10255", "/metrics")
+    ms, err := e2emetrics.GrabKubeletMetricsWithoutProxy(framework.TestContext.NodeName+":10255", "/metrics")
     if err != nil {
         return nil, err
     }
-    filtered := frameworkmetrics.NewKubeletMetrics()
+    filtered := e2emetrics.NewKubeletMetrics()
     for name := range ms {
         if !filterMetricNames.Has(name) {
             continue


@@ -67,7 +67,7 @@ go_library(
         "//staging/src/k8s.io/client-go/rest:go_default_library",
         "//staging/src/k8s.io/client-go/util/cert:go_default_library",
         "//staging/src/k8s.io/component-base/version:go_default_library",
-        "//test/e2e/framework:go_default_library",
+        "//test/e2e/framework/node:go_default_library",
         "//test/utils:go_default_library",
         "//vendor/github.com/coreos/etcd/clientv3:go_default_library",
         "//vendor/github.com/go-openapi/spec:go_default_library",


@@ -21,7 +21,7 @@ import (
     "k8s.io/apimachinery/pkg/api/resource"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     clientset "k8s.io/client-go/kubernetes"
-    e2eframework "k8s.io/kubernetes/test/e2e/framework"
+    e2enode "k8s.io/kubernetes/test/e2e/framework/node"
     testutils "k8s.io/kubernetes/test/utils"
     "k8s.io/klog"
@@ -84,7 +84,10 @@ func (p *IntegrationTestNodePreparer) PrepareNodes() error {
         }
     }
-    nodes := e2eframework.GetReadySchedulableNodesOrDie(p.client)
+    nodes, err := e2enode.GetReadySchedulableNodes(p.client)
+    if err != nil {
+        klog.Fatalf("Error listing nodes: %v", err)
+    }
     index := 0
     sum := 0
     for _, v := range p.countToStrategy {
@@ -101,7 +104,10 @@
 // CleanupNodes deletes existing test nodes.
 func (p *IntegrationTestNodePreparer) CleanupNodes() error {
-    nodes := e2eframework.GetReadySchedulableNodesOrDie(p.client)
+    nodes, err := e2enode.GetReadySchedulableNodes(p.client)
+    if err != nil {
+        klog.Fatalf("Error listing nodes: %v", err)
+    }
     for i := range nodes.Items {
         if err := p.client.CoreV1().Nodes().Delete(nodes.Items[i].Name, &metav1.DeleteOptions{}); err != nil {
             klog.Errorf("Error while deleting Node: %v", err)
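The integration node preparer runs outside Ginkgo, so it cannot use framework.ExpectNoError and instead treats a listing failure as fatal via klog. A sketch of that pattern, with listNodesOrDie as a hypothetical helper and the *v1.NodeList return assumed from the usage above:

package example

import (
	v1 "k8s.io/api/core/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/klog"

	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
)

// listNodesOrDie is for integration-test helpers that have no test context to
// fail: a listing error aborts the process rather than a single spec.
func listNodesOrDie(c clientset.Interface) *v1.NodeList {
	nodes, err := e2enode.GetReadySchedulableNodes(c)
	if err != nil {
		klog.Fatalf("Error listing nodes: %v", err)
	}
	return nodes
}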