Refactor and Move node related methods to framework/node package
Signed-off-by: Jiatong Wang <wangjiatong@vmware.com>
@@ -10,10 +10,13 @@ go_library(
    visibility = ["//visibility:public"],
    deps = [
        "//pkg/controller/nodelifecycle:go_default_library",
        "//pkg/scheduler/algorithm/predicates:go_default_library",
        "//pkg/scheduler/nodeinfo:go_default_library",
        "//pkg/util/system:go_default_library",
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
        "//staging/src/k8s.io/client-go/rest:go_default_library",

@@ -21,13 +21,17 @@ import (
	"net"
	"time"

	"k8s.io/api/core/v1"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
	restclient "k8s.io/client-go/rest"
	nodectlr "k8s.io/kubernetes/pkg/controller/nodelifecycle"
	"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
	"k8s.io/kubernetes/pkg/util/system"
	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
	testutils "k8s.io/kubernetes/test/utils"
)

@@ -48,6 +52,14 @@ const (
	proxyTimeout = 2 * time.Minute
)

// PodNode is a pod-node pair indicating which node a given pod is running on
type PodNode struct {
	// Pod represents pod name
	Pod string
	// Node represents node name
	Node string
}

// FirstAddress returns the first address of the given type of each node.
// TODO: Use return type string instead of []string
func FirstAddress(nodelist *v1.NodeList, addrType v1.NodeAddressType) []string {

@@ -315,3 +327,141 @@ func CollectAddresses(nodes *v1.NodeList, addressType v1.NodeAddressType) []string {
	}
	return ips
}

// PickIP picks one public node IP
func PickIP(c clientset.Interface) (string, error) {
	publicIps, err := GetPublicIps(c)
	if err != nil {
		return "", fmt.Errorf("get node public IPs error: %s", err)
	}
	if len(publicIps) == 0 {
		return "", fmt.Errorf("got unexpected number (%d) of public IPs", len(publicIps))
	}
	ip := publicIps[0]
	return ip, nil
}

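// Example usage (illustrative sketch, not part of this commit): a caller that
// needs one reachable node address, e.g. to dial SSH. The function name and
// port are assumptions for illustration only.
func examplePickNodeEndpoint(c clientset.Interface) (string, error) {
	ip, err := PickIP(c) // falls back to InternalIP when no ExternalIP is set
	if err != nil {
		return "", err
	}
	// net.JoinHostPort formats IPv6 addresses correctly.
	return net.JoinHostPort(ip, "22"), nil
}
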
// GetPublicIps returns a public IP list of nodes.
func GetPublicIps(c clientset.Interface) ([]string, error) {
	nodes, err := GetReadySchedulableNodesOrDie(c)
	if err != nil {
		return nil, fmt.Errorf("get schedulable and ready nodes error: %s", err)
	}
	ips := CollectAddresses(nodes, v1.NodeExternalIP)
	if len(ips) == 0 {
		// If ExternalIP isn't set, assume the test programs can reach the InternalIP
		ips = CollectAddresses(nodes, v1.NodeInternalIP)
	}
	return ips, nil
}

// GetReadySchedulableNodesOrDie addresses the common use case of getting nodes you can do work on.
// 1) Needs to be schedulable.
// 2) Needs to be ready.
// If EITHER 1 or 2 is not true, most tests will want to ignore the node entirely.
// TODO: remove references in framework/util.go.
func GetReadySchedulableNodesOrDie(c clientset.Interface) (nodes *v1.NodeList, err error) {
	nodes, err = waitListSchedulableNodesOrDie(c)
	if err != nil {
		return nil, fmt.Errorf("listing schedulable nodes error: %s", err)
	}
	// Previous tests may have caused failures of some nodes. Let's skip
	// 'Not Ready' nodes, just in case (there is no need to fail the test).
	Filter(nodes, func(node v1.Node) bool {
		return isNodeSchedulable(&node) && isNodeUntainted(&node)
	})
	return nodes, nil
}

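// Example usage (illustrative sketch, not part of this commit): tests commonly
// gate on having enough usable nodes before doing any work.
func exampleRequireTwoNodes(c clientset.Interface) error {
	nodes, err := GetReadySchedulableNodesOrDie(c)
	if err != nil {
		return err
	}
	if len(nodes.Items) < 2 {
		return fmt.Errorf("need at least 2 schedulable nodes, got %d", len(nodes.Items))
	}
	return nil
}
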
// GetReadyNodesIncludingTaintedOrDie returns all ready nodes, even those which are tainted.
// There are cases when we care about tainted nodes, e.g. in tests related to
// nodes with GPUs, where we care about them despite the presence of the
// nvidia.com/gpu=present:NoSchedule taint.
func GetReadyNodesIncludingTaintedOrDie(c clientset.Interface) (nodes *v1.NodeList, err error) {
	nodes, err = waitListSchedulableNodesOrDie(c)
	if err != nil {
		return nil, fmt.Errorf("listing schedulable nodes error: %s", err)
	}
	Filter(nodes, func(node v1.Node) bool {
		return isNodeSchedulable(&node)
	})
	return nodes, nil
}

// GetMasterAndWorkerNodesOrDie will return a list of masters and schedulable worker nodes
func GetMasterAndWorkerNodesOrDie(c clientset.Interface) (sets.String, *v1.NodeList, error) {
	nodes := &v1.NodeList{}
	masters := sets.NewString()
	all, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
	if err != nil {
		return nil, nil, fmt.Errorf("get nodes error: %s", err)
	}
	for _, n := range all.Items {
		if system.IsMasterNode(n.Name) {
			masters.Insert(n.Name)
		} else if isNodeSchedulable(&n) && isNodeUntainted(&n) {
			nodes.Items = append(nodes.Items, n)
		}
	}
	return masters, nodes, nil
}

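// Example usage (illustrative sketch, not part of this commit): iterate the
// schedulable workers while leaving masters alone.
func exampleWorkerNames(c clientset.Interface) ([]string, error) {
	_, workers, err := GetMasterAndWorkerNodesOrDie(c)
	if err != nil {
		return nil, err
	}
	names := make([]string, 0, len(workers.Items))
	for _, n := range workers.Items {
		names = append(names, n.Name)
	}
	return names, nil
}
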
// isNodeUntainted tests whether a fake pod can be scheduled on "node", given its current taints.
func isNodeUntainted(node *v1.Node) bool {
	fakePod := &v1.Pod{
		TypeMeta: metav1.TypeMeta{
			Kind:       "Pod",
			APIVersion: "v1",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:      "fake-not-scheduled",
			Namespace: "fake-not-scheduled",
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Name:  "fake-not-scheduled",
					Image: "fake-not-scheduled",
				},
			},
		},
	}
	nodeInfo := schedulernodeinfo.NewNodeInfo()
	nodeInfo.SetNode(node)
	fit, _, err := predicates.PodToleratesNodeTaints(fakePod, nil, nodeInfo)
	if err != nil {
		e2elog.Failf("Can't test predicates for node %s: %v", node.Name, err)
		return false
	}
	return fit
}

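// Illustrative sketch (not part of this commit): a node carrying a NoSchedule
// taint that the fake pod does not tolerate is reported as tainted, so the
// ready-node filters above drop it.
func exampleTaintedNodeIsFiltered() bool {
	node := &v1.Node{
		ObjectMeta: metav1.ObjectMeta{Name: "gpu-node"},
		Spec: v1.NodeSpec{
			Taints: []v1.Taint{
				{Key: "nvidia.com/gpu", Value: "present", Effect: v1.TaintEffectNoSchedule},
			},
		},
	}
	return isNodeUntainted(node) // expected: false
}
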
// isNodeSchedulable returns true if a node is schedulable, i.e. it:
// 1) doesn't have the "unschedulable" field set
// 2) has its Ready condition set to true
// 3) doesn't have a NetworkUnavailable condition set to true
func isNodeSchedulable(node *v1.Node) bool {
	nodeReady := IsConditionSetAsExpected(node, v1.NodeReady, true)
	networkReady := IsConditionUnset(node, v1.NodeNetworkUnavailable) ||
		IsConditionSetAsExpectedSilent(node, v1.NodeNetworkUnavailable, false)
	return !node.Spec.Unschedulable && nodeReady && networkReady
}

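// Illustrative sketch (not part of this commit): a cordoned node
// (spec.unschedulable=true) fails the check even though its Ready condition is true.
func exampleCordonedNodeIsUnschedulable() bool {
	node := &v1.Node{
		Spec: v1.NodeSpec{Unschedulable: true},
		Status: v1.NodeStatus{
			Conditions: []v1.NodeCondition{{Type: v1.NodeReady, Status: v1.ConditionTrue}},
		},
	}
	return isNodeSchedulable(node) // expected: false
}
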
// PodNodePairs returns PodNode pairs for all pods in a namespace
func PodNodePairs(c clientset.Interface, ns string) ([]PodNode, error) {
	var result []PodNode

	podList, err := c.CoreV1().Pods(ns).List(metav1.ListOptions{})
	if err != nil {
		return result, err
	}

	for _, pod := range podList.Items {
		result = append(result, PodNode{
			Pod:  pod.Name,
			Node: pod.Spec.NodeName,
		})
	}

	return result, nil
}

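// Example usage (illustrative sketch, not part of this commit): log where each
// pod in a namespace landed, e.g. when debugging scheduler spreading.
func exampleLogPodPlacement(c clientset.Interface, ns string) error {
	pairs, err := PodNodePairs(c, ns)
	if err != nil {
		return err
	}
	for _, p := range pairs {
		e2elog.Logf("pod %q runs on node %q", p.Pod, p.Node)
	}
	return nil
}
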
@@ -21,7 +21,7 @@ import (
	"regexp"
	"time"

	"k8s.io/api/core/v1"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/apimachinery/pkg/util/wait"

@@ -197,3 +197,12 @@ func waitListSchedulableNodes(c clientset.Interface) (*v1.NodeList, error) {
	}
	return nodes, nil
}

// waitListSchedulableNodesOrDie is a wrapper around listing nodes that supports retries.
func waitListSchedulableNodesOrDie(c clientset.Interface) (*v1.NodeList, error) {
	nodes, err := waitListSchedulableNodes(c)
	if err != nil {
		return nil, fmt.Errorf("error: %s. Non-retryable failure or timed out while listing nodes for e2e cluster", err)
	}
	return nodes, nil
}

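// For context, a minimal sketch of the single-shot listing a retrying wrapper
// like the one above typically guards. The field selector shown here is an
// assumption for illustration, not taken from this commit:
func exampleListSchedulableOnce(c clientset.Interface) (*v1.NodeList, error) {
	opts := metav1.ListOptions{
		// Skip cordoned nodes (spec.unschedulable=true).
		FieldSelector: fields.Set{"spec.unschedulable": "false"}.AsSelector().String(),
	}
	return c.CoreV1().Nodes().List(opts)
}
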
@@ -116,14 +116,6 @@ type ServiceTestJig
	Labels map[string]string
}

// PodNode is a pod-node pair indicating which node a given pod is running on
type PodNode struct {
	// Pod represents pod name
	Pod string
	// Node represents node name
	Node string
}

// NewServiceTestJig allocates and inits a new ServiceTestJig.
func NewServiceTestJig(client clientset.Interface, name string) *ServiceTestJig {
	j := &ServiceTestJig{}

@@ -325,48 +317,6 @@ func (j *ServiceTestJig) CreateLoadBalancerService(namespace, serviceName string
	return svc
}

// GetNodePublicIps returns a public IP list of nodes.
func GetNodePublicIps(c clientset.Interface) ([]string, error) {
	nodes := GetReadySchedulableNodesOrDie(c)

	ips := e2enode.CollectAddresses(nodes, v1.NodeExternalIP)
	if len(ips) == 0 {
		// If ExternalIP isn't set, assume the test programs can reach the InternalIP
		ips = e2enode.CollectAddresses(nodes, v1.NodeInternalIP)
	}
	return ips, nil
}

// PickNodeIP picks one public node IP
func PickNodeIP(c clientset.Interface) string {
	publicIps, err := GetNodePublicIps(c)
	ExpectNoError(err)
	if len(publicIps) == 0 {
		e2elog.Failf("got unexpected number (%d) of public IPs", len(publicIps))
	}
	ip := publicIps[0]
	return ip
}

// PodNodePairs returns PodNode pairs for all pods in a namespace
func PodNodePairs(c clientset.Interface, ns string) ([]PodNode, error) {
	var result []PodNode

	podList, err := c.CoreV1().Pods(ns).List(metav1.ListOptions{})
	if err != nil {
		return result, err
	}

	for _, pod := range podList.Items {
		result = append(result, PodNode{
			Pod:  pod.Name,
			Node: pod.Spec.NodeName,
		})
	}

	return result, nil
}

// GetEndpointNodes returns a map of nodenames:external-ip on which the
// endpoints of the given Service are running.
func (j *ServiceTestJig) GetEndpointNodes(svc *v1.Service) map[string][]string {

@@ -79,7 +79,6 @@ import (
	"k8s.io/kubernetes/pkg/master/ports"
	"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
	"k8s.io/kubernetes/pkg/util/system"
	taintutils "k8s.io/kubernetes/pkg/util/taints"
	"k8s.io/kubernetes/test/e2e/framework/ginkgowrapper"
	e2elog "k8s.io/kubernetes/test/e2e/framework/log"

@@ -1943,6 +1942,7 @@ func isNodeUntainted(node *v1.Node) bool {
// 1) Needs to be schedulable.
// 2) Needs to be ready.
// If EITHER 1 or 2 is not true, most tests will want to ignore the node entirely.
// TODO: remove this function here when references point to e2enode.
func GetReadySchedulableNodesOrDie(c clientset.Interface) (nodes *v1.NodeList) {
	nodes = waitListSchedulableNodesOrDie(c)
	// Previous tests may have caused failures of some nodes. Let's skip

@@ -1953,18 +1953,6 @@ func GetReadySchedulableNodesOrDie(c clientset.Interface) (nodes *v1.NodeList) {
	return nodes
}

// GetReadyNodesIncludingTaintedOrDie returns all ready nodes, even those which are tainted.
// There are cases when we care about tainted nodes, e.g. in tests related to
// nodes with GPUs, where we care about them despite the presence of the
// nvidia.com/gpu=present:NoSchedule taint.
func GetReadyNodesIncludingTaintedOrDie(c clientset.Interface) (nodes *v1.NodeList) {
	nodes = waitListSchedulableNodesOrDie(c)
	e2enode.Filter(nodes, func(node v1.Node) bool {
		return isNodeSchedulable(&node)
	})
	return nodes
}

// WaitForAllNodesSchedulable waits up to timeout for all nodes
// (but TestContext.AllowedNotReadyNodes) to become schedulable.
func WaitForAllNodesSchedulable(c clientset.Interface, timeout time.Duration) error {

@@ -3046,22 +3034,6 @@ func WaitForStableCluster(c clientset.Interface, masterNodes sets.String) int {
	return len(scheduledPods)
}

// GetMasterAndWorkerNodesOrDie will return a list of masters and schedulable worker nodes
func GetMasterAndWorkerNodesOrDie(c clientset.Interface) (sets.String, *v1.NodeList) {
	nodes := &v1.NodeList{}
	masters := sets.NewString()
	all, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
	ExpectNoError(err)
	for _, n := range all.Items {
		if system.IsMasterNode(n.Name) {
			masters.Insert(n.Name)
		} else if isNodeSchedulable(&n) && isNodeUntainted(&n) {
			nodes.Items = append(nodes.Items, n)
		}
	}
	return masters, nodes
}

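// Migration sketch (illustrative, not part of this commit): former callers of
// the removed framework helper should switch to the framework/node package,
// imported elsewhere in this commit as e2enode.
func exampleMigratedCaller(c clientset.Interface) (int, error) {
	masters, workers, err := e2enode.GetMasterAndWorkerNodesOrDie(c)
	if err != nil {
		return 0, err
	}
	return masters.Len() + len(workers.Items), nil
}
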
// ListNamespaceEvents lists the events in the given namespace.
func ListNamespaceEvents(c clientset.Interface, ns string) error {
	ls, err := c.CoreV1().Events(ns).List(metav1.ListOptions{})