move pkg/util/node to component-helpers/node/util (#105347)
Signed-off-by: Neha Lohia <nehapithadiya444@gmail.com>
parent 7b9f4f18fe
commit fa1b6765d5
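For downstream callers this is purely an import-path move: the helpers that used to live in k8s.io/kubernetes/pkg/util/node keep their signatures under k8s.io/component-helpers/node/util. Below is a minimal, hedged sketch of a caller after the move, before the diff itself; the program, node name, and use of the fake clientset are hypothetical and not part of this commit — it only assumes the moved SetNodeCondition keeps the signature shown in the deleted pkg/util/node code at the end of the diff.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/kubernetes/fake"

	// Previously imported as: nodeutil "k8s.io/kubernetes/pkg/util/node"
	nodeutil "k8s.io/component-helpers/node/util"
)

func main() {
	// Fake clientset so the sketch is self-contained; a real caller passes its own clientset.Interface.
	client := fake.NewSimpleClientset(&v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node-a"}})

	// Patch a node condition through the relocated helper; the call site is unchanged
	// apart from the import path.
	cond := v1.NodeCondition{
		Type:   v1.NodeNetworkUnavailable,
		Status: v1.ConditionFalse,
		Reason: "RouteCreated",
	}
	if err := nodeutil.SetNodeCondition(client, types.NodeName("node-a"), cond); err != nil {
		fmt.Println("failed to patch node condition:", err)
	}
}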
@@ -25,7 +25,7 @@ import (
 "k8s.io/klog/v2"
 "k8s.io/mount-utils"

-"k8s.io/component-helpers/node/utils/sysctl"
+"k8s.io/component-helpers/node/util/sysctl"
 )

 // Conntracker is an interface to the global sysctl. Descriptions of the various

@@ -48,7 +48,7 @@ import (
 toolswatch "k8s.io/client-go/tools/watch"
 "k8s.io/component-base/configz"
 "k8s.io/component-base/metrics"
-utilsysctl "k8s.io/component-helpers/node/utils/sysctl"
+utilsysctl "k8s.io/component-helpers/node/util/sysctl"
 "k8s.io/kubernetes/pkg/proxy"
 proxyconfigapi "k8s.io/kubernetes/pkg/proxy/apis/config"
 "k8s.io/kubernetes/pkg/proxy/apis/config/scheme"

@@ -41,7 +41,7 @@ import (
 _ "k8s.io/component-base/metrics/prometheus/version" // for version metric registration
 "k8s.io/component-base/version"
 "k8s.io/component-base/version/verflag"
-fakesysctl "k8s.io/component-helpers/node/utils/sysctl/testing"
+fakesysctl "k8s.io/component-helpers/node/util/sysctl/testing"
 "k8s.io/kubernetes/pkg/api/legacyscheme"
 "k8s.io/kubernetes/pkg/apis/core"
 "k8s.io/kubernetes/pkg/cluster/ports"

@@ -40,6 +40,8 @@
 "k8s.io/api/storage/v1alpha1": "storagev1alpha1",
 "k8s.io/api/storage/v1beta1": "storagev1beta1",
 "k8s.io/apimachinery/pkg/api/errors": "apierrors",
+"k8s.io/component-helpers/node/util": "nodeutil",
+"k8s.io/kubernetes/pkg/controller/util/node": "controllerutil",
 "k8s.io/kubelet/apis/stats/v1alpha1": "kubeletstatsv1alpha1",
 "k8s.io/kubernetes/pkg/controller/apis/config/v1alpha1": "controllerconfigv1alpha1",
 "k8s.io/kubernetes/pkg/kubelet/apis/config/v1beta1": "kubeletconfigv1beta1",

@@ -34,7 +34,7 @@ import (
 clientset "k8s.io/client-go/kubernetes"
 v1core "k8s.io/client-go/kubernetes/typed/core/v1"
 "k8s.io/client-go/tools/record"
-nodeutil "k8s.io/kubernetes/pkg/util/node"
+nodeutil "k8s.io/component-helpers/node/util"
 "k8s.io/legacy-cloud-providers/gce"
 "k8s.io/metrics/pkg/client/clientset/versioned/scheme"
 )
@@ -43,8 +43,8 @@ import (
 "k8s.io/client-go/kubernetes/scheme"
 v1core "k8s.io/client-go/kubernetes/typed/core/v1"
 cloudprovider "k8s.io/cloud-provider"
-nodeutil "k8s.io/kubernetes/pkg/controller/util/node"
-utilnode "k8s.io/kubernetes/pkg/util/node"
+nodeutil "k8s.io/component-helpers/node/util"
+controllerutil "k8s.io/kubernetes/pkg/controller/util/node"
 utiltaints "k8s.io/kubernetes/pkg/util/taints"
 "k8s.io/legacy-cloud-providers/gce"
 netutils "k8s.io/utils/net"

@@ -112,21 +112,21 @@ func NewCloudCIDRAllocator(client clientset.Interface, cloud cloudprovider.Inter
 }

 nodeInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
-AddFunc: nodeutil.CreateAddNodeHandler(ca.AllocateOrOccupyCIDR),
-UpdateFunc: nodeutil.CreateUpdateNodeHandler(func(_, newNode *v1.Node) error {
+AddFunc: controllerutil.CreateAddNodeHandler(ca.AllocateOrOccupyCIDR),
+UpdateFunc: controllerutil.CreateUpdateNodeHandler(func(_, newNode *v1.Node) error {
 if newNode.Spec.PodCIDR == "" {
 return ca.AllocateOrOccupyCIDR(newNode)
 }
 // Even if PodCIDR is assigned, but NetworkUnavailable condition is
 // set to true, we need to process the node to set the condition.
 networkUnavailableTaint := &v1.Taint{Key: v1.TaintNodeNetworkUnavailable, Effect: v1.TaintEffectNoSchedule}
-_, cond := nodeutil.GetNodeCondition(&newNode.Status, v1.NodeNetworkUnavailable)
+_, cond := controllerutil.GetNodeCondition(&newNode.Status, v1.NodeNetworkUnavailable)
 if cond == nil || cond.Status != v1.ConditionFalse || utiltaints.TaintExists(newNode.Spec.Taints, networkUnavailableTaint) {
 return ca.AllocateOrOccupyCIDR(newNode)
 }
 return nil
 }),
-DeleteFunc: nodeutil.CreateDeleteNodeHandler(ca.ReleaseCIDR),
+DeleteFunc: controllerutil.CreateDeleteNodeHandler(ca.ReleaseCIDR),
 })

 klog.V(0).Infof("Using cloud CIDR allocator (provider: %v)", cloud.ProviderName())
@@ -258,11 +258,11 @@ func (ca *cloudCIDRAllocator) updateCIDRAllocation(nodeName string) error {

 cidrStrings, err := ca.cloud.AliasRangesByProviderID(node.Spec.ProviderID)
 if err != nil {
-nodeutil.RecordNodeStatusChange(ca.recorder, node, "CIDRNotAvailable")
+controllerutil.RecordNodeStatusChange(ca.recorder, node, "CIDRNotAvailable")
 return fmt.Errorf("failed to get cidr(s) from provider: %v", err)
 }
 if len(cidrStrings) == 0 {
-nodeutil.RecordNodeStatusChange(ca.recorder, node, "CIDRNotAvailable")
+controllerutil.RecordNodeStatusChange(ca.recorder, node, "CIDRNotAvailable")
 return fmt.Errorf("failed to allocate cidr: Node %v has no CIDRs", node.Name)
 }
 //Can have at most 2 ips (one for v4 and one for v6)

@@ -290,19 +290,19 @@ func (ca *cloudCIDRAllocator) updateCIDRAllocation(nodeName string) error {
 // See https://github.com/kubernetes/kubernetes/pull/42147#discussion_r103357248
 }
 for i := 0; i < cidrUpdateRetries; i++ {
-if err = utilnode.PatchNodeCIDRs(ca.client, types.NodeName(node.Name), cidrStrings); err == nil {
+if err = nodeutil.PatchNodeCIDRs(ca.client, types.NodeName(node.Name), cidrStrings); err == nil {
 klog.InfoS("Set the node PodCIDRs", "nodeName", node.Name, "cidrStrings", cidrStrings)
 break
 }
 }
 }
 if err != nil {
-nodeutil.RecordNodeStatusChange(ca.recorder, node, "CIDRAssignmentFailed")
+controllerutil.RecordNodeStatusChange(ca.recorder, node, "CIDRAssignmentFailed")
 klog.ErrorS(err, "Failed to update the node PodCIDR after multiple attempts", "nodeName", node.Name, "cidrStrings", cidrStrings)
 return err
 }

-err = utilnode.SetNodeCondition(ca.client, types.NodeName(node.Name), v1.NodeCondition{
+err = nodeutil.SetNodeCondition(ca.client, types.NodeName(node.Name), v1.NodeCondition{
 Type: v1.NodeNetworkUnavailable,
 Status: v1.ConditionFalse,
 Reason: "RouteCreated",
@@ -35,7 +35,7 @@ import (
 cloudprovider "k8s.io/cloud-provider"
 "k8s.io/kubernetes/pkg/controller/nodeipam/ipam/cidrset"
 nodesync "k8s.io/kubernetes/pkg/controller/nodeipam/ipam/sync"
-nodeutil "k8s.io/kubernetes/pkg/controller/util/node"
+controllerutil "k8s.io/kubernetes/pkg/controller/util/node"
 "k8s.io/legacy-cloud-providers/gce"
 )

@@ -142,9 +142,9 @@ func (c *Controller) Start(nodeInformer informers.NodeInformer) error {
 }

 nodeInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
-AddFunc: nodeutil.CreateAddNodeHandler(c.onAdd),
-UpdateFunc: nodeutil.CreateUpdateNodeHandler(c.onUpdate),
-DeleteFunc: nodeutil.CreateDeleteNodeHandler(c.onDelete),
+AddFunc: controllerutil.CreateAddNodeHandler(c.onAdd),
+UpdateFunc: controllerutil.CreateUpdateNodeHandler(c.onUpdate),
+DeleteFunc: controllerutil.CreateDeleteNodeHandler(c.onDelete),
 })

 return nil

@@ -36,9 +36,9 @@ import (
 corelisters "k8s.io/client-go/listers/core/v1"
 "k8s.io/client-go/tools/cache"
 "k8s.io/client-go/tools/record"
+nodeutil "k8s.io/component-helpers/node/util"
 "k8s.io/kubernetes/pkg/controller/nodeipam/ipam/cidrset"
-nodeutil "k8s.io/kubernetes/pkg/controller/util/node"
-utilnode "k8s.io/kubernetes/pkg/util/node"
+controllerutil "k8s.io/kubernetes/pkg/controller/util/node"
 )

 // cidrs are reserved, then node resource is patched with them
@@ -135,8 +135,8 @@ func NewCIDRRangeAllocator(client clientset.Interface, nodeInformer informers.No
 }

 nodeInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
-AddFunc: nodeutil.CreateAddNodeHandler(ra.AllocateOrOccupyCIDR),
-UpdateFunc: nodeutil.CreateUpdateNodeHandler(func(_, newNode *v1.Node) error {
+AddFunc: controllerutil.CreateAddNodeHandler(ra.AllocateOrOccupyCIDR),
+UpdateFunc: controllerutil.CreateUpdateNodeHandler(func(_, newNode *v1.Node) error {
 // If the PodCIDRs list is not empty we either:
 // - already processed a Node that already had CIDRs after NC restarted
 // (cidr is marked as used),

@@ -161,7 +161,7 @@ func NewCIDRRangeAllocator(client clientset.Interface, nodeInformer informers.No
 }
 return nil
 }),
-DeleteFunc: nodeutil.CreateDeleteNodeHandler(ra.ReleaseCIDR),
+DeleteFunc: controllerutil.CreateDeleteNodeHandler(ra.ReleaseCIDR),
 })

 return ra, nil

@@ -268,7 +268,7 @@ func (r *rangeAllocator) AllocateOrOccupyCIDR(node *v1.Node) error {
 podCIDR, err := r.cidrSets[idx].AllocateNext()
 if err != nil {
 r.removeNodeFromProcessing(node.Name)
-nodeutil.RecordNodeStatusChange(r.recorder, node, "CIDRNotAvailable")
+controllerutil.RecordNodeStatusChange(r.recorder, node, "CIDRNotAvailable")
 return fmt.Errorf("failed to allocate cidr from cluster cidr at idx:%v: %v", idx, err)
 }
 allocated.allocatedCIDRs[idx] = podCIDR

@@ -370,14 +370,14 @@ func (r *rangeAllocator) updateCIDRsAllocation(data nodeReservedCIDRs) error {

 // If we reached here, it means that the node has no CIDR currently assigned. So we set it.
 for i := 0; i < cidrUpdateRetries; i++ {
-if err = utilnode.PatchNodeCIDRs(r.client, types.NodeName(node.Name), cidrsString); err == nil {
+if err = nodeutil.PatchNodeCIDRs(r.client, types.NodeName(node.Name), cidrsString); err == nil {
 klog.Infof("Set node %v PodCIDR to %v", node.Name, cidrsString)
 return nil
 }
 }
 // failed release back to the pool
 klog.Errorf("Failed to update node %v PodCIDR to %v after multiple attempts: %v", node.Name, cidrsString, err)
-nodeutil.RecordNodeStatusChange(r.recorder, node, "CIDRAssignmentFailed")
+controllerutil.RecordNodeStatusChange(r.recorder, node, "CIDRAssignmentFailed")
 // We accept the fact that we may leak CIDRs here. This is safer than releasing
 // them in case when we don't know if request went through.
 // NodeController restart will return all falsely allocated CIDRs to the pool.
@@ -51,11 +51,11 @@ import (
 "k8s.io/client-go/util/flowcontrol"
 "k8s.io/client-go/util/workqueue"
 "k8s.io/component-base/metrics/prometheus/ratelimiter"
-utilnode "k8s.io/component-helpers/node/topology"
+nodetopology "k8s.io/component-helpers/node/topology"
 kubeletapis "k8s.io/kubelet/pkg/apis"
 "k8s.io/kubernetes/pkg/controller"
 "k8s.io/kubernetes/pkg/controller/nodelifecycle/scheduler"
-nodeutil "k8s.io/kubernetes/pkg/controller/util/node"
+controllerutil "k8s.io/kubernetes/pkg/controller/util/node"
 taintutils "k8s.io/kubernetes/pkg/util/taints"
 )

@@ -487,15 +487,15 @@ func NewNodeLifecycleController(
 nodeGetter := func(name string) (*v1.Node, error) { return nodeLister.Get(name) }
 nc.taintManager = scheduler.NewNoExecuteTaintManager(ctx, kubeClient, podGetter, nodeGetter, nc.getPodsAssignedToNode)
 nodeInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
-AddFunc: nodeutil.CreateAddNodeHandler(func(node *v1.Node) error {
+AddFunc: controllerutil.CreateAddNodeHandler(func(node *v1.Node) error {
 nc.taintManager.NodeUpdated(nil, node)
 return nil
 }),
-UpdateFunc: nodeutil.CreateUpdateNodeHandler(func(oldNode, newNode *v1.Node) error {
+UpdateFunc: controllerutil.CreateUpdateNodeHandler(func(oldNode, newNode *v1.Node) error {
 nc.taintManager.NodeUpdated(oldNode, newNode)
 return nil
 }),
-DeleteFunc: nodeutil.CreateDeleteNodeHandler(func(node *v1.Node) error {
+DeleteFunc: controllerutil.CreateDeleteNodeHandler(func(node *v1.Node) error {
 nc.taintManager.NodeUpdated(node, nil)
 return nil
 }),

@@ -504,16 +504,16 @@ func NewNodeLifecycleController(

 klog.Infof("Controller will reconcile labels.")
 nodeInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
-AddFunc: nodeutil.CreateAddNodeHandler(func(node *v1.Node) error {
+AddFunc: controllerutil.CreateAddNodeHandler(func(node *v1.Node) error {
 nc.nodeUpdateQueue.Add(node.Name)
 nc.nodeEvictionMap.registerNode(node.Name)
 return nil
 }),
-UpdateFunc: nodeutil.CreateUpdateNodeHandler(func(_, newNode *v1.Node) error {
+UpdateFunc: controllerutil.CreateUpdateNodeHandler(func(_, newNode *v1.Node) error {
 nc.nodeUpdateQueue.Add(newNode.Name)
 return nil
 }),
-DeleteFunc: nodeutil.CreateDeleteNodeHandler(func(node *v1.Node) error {
+DeleteFunc: controllerutil.CreateDeleteNodeHandler(func(node *v1.Node) error {
 nc.nodesToRetry.Delete(node.Name)
 nc.nodeEvictionMap.unregisterNode(node.Name)
 return nil
@@ -657,7 +657,7 @@ func (nc *Controller) doNoScheduleTaintingPass(ctx context.Context, nodeName str
 if len(taintsToAdd) == 0 && len(taintsToDel) == 0 {
 return nil
 }
-if !nodeutil.SwapNodeControllerTaint(ctx, nc.kubeClient, taintsToAdd, taintsToDel, node) {
+if !controllerutil.SwapNodeControllerTaint(ctx, nc.kubeClient, taintsToAdd, taintsToDel, node) {
 return fmt.Errorf("failed to swap taints of node %+v", node)
 }
 return nil

@@ -678,7 +678,7 @@ func (nc *Controller) doNoExecuteTaintingPass(ctx context.Context) {
 // retry in 50 millisecond
 return false, 50 * time.Millisecond
 }
-_, condition := nodeutil.GetNodeCondition(&node.Status, v1.NodeReady)
+_, condition := controllerutil.GetNodeCondition(&node.Status, v1.NodeReady)
 // Because we want to mimic NodeStatus.Condition["Ready"] we make "unreachable" and "not ready" taints mutually exclusive.
 taintToAdd := v1.Taint{}
 oppositeTaint := v1.Taint{}

@@ -694,11 +694,10 @@ func (nc *Controller) doNoExecuteTaintingPass(ctx context.Context) {
 klog.V(4).Infof("Node %v was in a taint queue, but it's ready now. Ignoring taint request.", value.Value)
 return true, 0
 }
-result := nodeutil.SwapNodeControllerTaint(ctx, nc.kubeClient, []*v1.Taint{&taintToAdd}, []*v1.Taint{&oppositeTaint}, node)
-
+result := controllerutil.SwapNodeControllerTaint(ctx, nc.kubeClient, []*v1.Taint{&taintToAdd}, []*v1.Taint{&oppositeTaint}, node)
 if result {
 //count the evictionsNumber
-zone := utilnode.GetZoneKey(node)
+zone := nodetopology.GetZoneKey(node)
 evictionsNumber.WithLabelValues(zone).Inc()
 }

@@ -725,7 +724,7 @@ func (nc *Controller) doEvictionPass(ctx context.Context) {
 utilruntime.HandleError(fmt.Errorf("unable to list pods from node %q: %v", value.Value, err))
 return false, 0
 }
-remaining, err := nodeutil.DeletePods(ctx, nc.kubeClient, pods, nc.recorder, value.Value, nodeUID, nc.daemonSetStore)
+remaining, err := controllerutil.DeletePods(ctx, nc.kubeClient, pods, nc.recorder, value.Value, nodeUID, nc.daemonSetStore)
 if err != nil {
 // We are not setting eviction status here.
 // New pods will be handled by zonePodEvictor retry

@@ -741,7 +740,7 @@ func (nc *Controller) doEvictionPass(ctx context.Context) {
 }

 if node != nil {
-zone := utilnode.GetZoneKey(node)
+zone := nodetopology.GetZoneKey(node)
 evictionsNumber.WithLabelValues(zone).Inc()
 }
@@ -768,7 +767,7 @@ func (nc *Controller) monitorNodeHealth(ctx context.Context) error {

 for i := range added {
 klog.V(1).Infof("Controller observed a new Node: %#v", added[i].Name)
-nodeutil.RecordNodeEvent(nc.recorder, added[i].Name, string(added[i].UID), v1.EventTypeNormal, "RegisteredNode", fmt.Sprintf("Registered Node %v in Controller", added[i].Name))
+controllerutil.RecordNodeEvent(nc.recorder, added[i].Name, string(added[i].UID), v1.EventTypeNormal, "RegisteredNode", fmt.Sprintf("Registered Node %v in Controller", added[i].Name))
 nc.knownNodeSet[added[i].Name] = added[i]
 nc.addPodEvictorForNewZone(added[i])
 if nc.runTaintManager {

@@ -780,7 +779,7 @@ func (nc *Controller) monitorNodeHealth(ctx context.Context) error {

 for i := range deleted {
 klog.V(1).Infof("Controller observed a Node deletion: %v", deleted[i].Name)
-nodeutil.RecordNodeEvent(nc.recorder, deleted[i].Name, string(deleted[i].UID), v1.EventTypeNormal, "RemovingNode", fmt.Sprintf("Removing Node %v from Controller", deleted[i].Name))
+controllerutil.RecordNodeEvent(nc.recorder, deleted[i].Name, string(deleted[i].UID), v1.EventTypeNormal, "RemovingNode", fmt.Sprintf("Removing Node %v from Controller", deleted[i].Name))
 delete(nc.knownNodeSet, deleted[i].Name)
 }

@@ -810,7 +809,7 @@ func (nc *Controller) monitorNodeHealth(ctx context.Context) error {

 // Some nodes may be excluded from disruption checking
 if !isNodeExcludedFromDisruptionChecks(node) {
-zoneToNodeConditions[utilnode.GetZoneKey(node)] = append(zoneToNodeConditions[utilnode.GetZoneKey(node)], currentReadyCondition)
+zoneToNodeConditions[nodetopology.GetZoneKey(node)] = append(zoneToNodeConditions[nodetopology.GetZoneKey(node)], currentReadyCondition)
 }

 if currentReadyCondition != nil {

@@ -837,10 +836,10 @@ func (nc *Controller) monitorNodeHealth(ctx context.Context) error {
 switch {
 case currentReadyCondition.Status != v1.ConditionTrue && observedReadyCondition.Status == v1.ConditionTrue:
 // Report node event only once when status changed.
-nodeutil.RecordNodeStatusChange(nc.recorder, node, "NodeNotReady")
+controllerutil.RecordNodeStatusChange(nc.recorder, node, "NodeNotReady")
 fallthrough
 case needsRetry && observedReadyCondition.Status != v1.ConditionTrue:
-if err = nodeutil.MarkPodsNotReady(ctx, nc.kubeClient, nc.recorder, pods, node.Name); err != nil {
+if err = controllerutil.MarkPodsNotReady(ctx, nc.kubeClient, nc.recorder, pods, node.Name); err != nil {
 utilruntime.HandleError(fmt.Errorf("unable to mark all pods NotReady on node %v: %v; queuing for retry", node.Name, err))
 nc.nodesToRetry.Store(node.Name, struct{}{})
 continue

@@ -862,7 +861,7 @@ func (nc *Controller) processTaintBaseEviction(ctx context.Context, node *v1.Nod
 // We want to update the taint straight away if Node is already tainted with the UnreachableTaint
 if taintutils.TaintExists(node.Spec.Taints, UnreachableTaintTemplate) {
 taintToAdd := *NotReadyTaintTemplate
-if !nodeutil.SwapNodeControllerTaint(ctx, nc.kubeClient, []*v1.Taint{&taintToAdd}, []*v1.Taint{UnreachableTaintTemplate}, node) {
+if !controllerutil.SwapNodeControllerTaint(ctx, nc.kubeClient, []*v1.Taint{&taintToAdd}, []*v1.Taint{UnreachableTaintTemplate}, node) {
 klog.Errorf("Failed to instantly swap UnreachableTaint to NotReadyTaint. Will try again in the next cycle.")
 }
 } else if nc.markNodeForTainting(node, v1.ConditionFalse) {

@@ -875,7 +874,7 @@ func (nc *Controller) processTaintBaseEviction(ctx context.Context, node *v1.Nod
 // We want to update the taint straight away if Node is already tainted with the UnreachableTaint
 if taintutils.TaintExists(node.Spec.Taints, NotReadyTaintTemplate) {
 taintToAdd := *UnreachableTaintTemplate
-if !nodeutil.SwapNodeControllerTaint(ctx, nc.kubeClient, []*v1.Taint{&taintToAdd}, []*v1.Taint{NotReadyTaintTemplate}, node) {
+if !controllerutil.SwapNodeControllerTaint(ctx, nc.kubeClient, []*v1.Taint{&taintToAdd}, []*v1.Taint{NotReadyTaintTemplate}, node) {
 klog.Errorf("Failed to instantly swap NotReadyTaint to UnreachableTaint. Will try again in the next cycle.")
 }
 } else if nc.markNodeForTainting(node, v1.ConditionUnknown) {
@@ -962,7 +961,7 @@ func (nc *Controller) tryUpdateNodeHealth(ctx context.Context, node *v1.Node) (t

 var gracePeriod time.Duration
 var observedReadyCondition v1.NodeCondition
-_, currentReadyCondition := nodeutil.GetNodeCondition(&node.Status, v1.NodeReady)
+_, currentReadyCondition := controllerutil.GetNodeCondition(&node.Status, v1.NodeReady)
 if currentReadyCondition == nil {
 // If ready condition is nil, then kubelet (or nodecontroller) never posted node status.
 // A fake ready condition is created, where LastHeartbeatTime and LastTransitionTime is set

@@ -1005,7 +1004,7 @@ func (nc *Controller) tryUpdateNodeHealth(ctx context.Context, node *v1.Node) (t
 var savedCondition *v1.NodeCondition
 var savedLease *coordv1.Lease
 if nodeHealth != nil {
-_, savedCondition = nodeutil.GetNodeCondition(nodeHealth.status, v1.NodeReady)
+_, savedCondition = controllerutil.GetNodeCondition(nodeHealth.status, v1.NodeReady)
 savedLease = nodeHealth.lease
 }

@@ -1077,7 +1076,7 @@ func (nc *Controller) tryUpdateNodeHealth(ctx context.Context, node *v1.Node) (t

 nowTimestamp := nc.now()
 for _, nodeConditionType := range nodeConditionTypes {
-_, currentCondition := nodeutil.GetNodeCondition(&node.Status, nodeConditionType)
+_, currentCondition := controllerutil.GetNodeCondition(&node.Status, nodeConditionType)
 if currentCondition == nil {
 klog.V(2).Infof("Condition %v of node %v was never updated by kubelet", nodeConditionType, node.Name)
 node.Status.Conditions = append(node.Status.Conditions, v1.NodeCondition{

@@ -1100,7 +1099,7 @@ func (nc *Controller) tryUpdateNodeHealth(ctx context.Context, node *v1.Node) (t
 }
 }
 // We need to update currentReadyCondition due to its value potentially changed.
-_, currentReadyCondition = nodeutil.GetNodeCondition(&node.Status, v1.NodeReady)
+_, currentReadyCondition = controllerutil.GetNodeCondition(&node.Status, v1.NodeReady)

 if !apiequality.Semantic.DeepEqual(currentReadyCondition, &observedReadyCondition) {
 if _, err := nc.kubeClient.CoreV1().Nodes().UpdateStatus(ctx, node, metav1.UpdateOptions{}); err != nil {

@@ -1275,7 +1274,7 @@ func (nc *Controller) processPod(ctx context.Context, podItem podUpdateItem) {
 return
 }

-_, currentReadyCondition := nodeutil.GetNodeCondition(nodeHealth.status, v1.NodeReady)
+_, currentReadyCondition := controllerutil.GetNodeCondition(nodeHealth.status, v1.NodeReady)
 if currentReadyCondition == nil {
 // Lack of NodeReady condition may only happen after node addition (or if it will be maliciously deleted).
 // In both cases, the pod will be handled correctly (evicted if needed) during processing

@@ -1295,7 +1294,7 @@ func (nc *Controller) processPod(ctx context.Context, podItem podUpdateItem) {
 }

 if currentReadyCondition.Status != v1.ConditionTrue {
-if err := nodeutil.MarkPodsNotReady(ctx, nc.kubeClient, nc.recorder, pods, nodeName); err != nil {
+if err := controllerutil.MarkPodsNotReady(ctx, nc.kubeClient, nc.recorder, pods, nodeName); err != nil {
 klog.Warningf("Unable to mark pod %+v NotReady on node %v: %v.", podItem, nodeName, err)
 nc.podUpdateQueue.AddRateLimited(podItem)
 }
@@ -1339,7 +1338,7 @@ func (nc *Controller) classifyNodes(allNodes []*v1.Node) (added, deleted, newZon
 added = append(added, allNodes[i])
 } else {
 // Currently, we only consider new zone as updated.
-zone := utilnode.GetZoneKey(allNodes[i])
+zone := nodetopology.GetZoneKey(allNodes[i])
 if _, found := nc.zoneStates[zone]; !found {
 newZoneRepresentatives = append(newZoneRepresentatives, allNodes[i])
 }

@@ -1382,7 +1381,7 @@ func (nc *Controller) ReducedQPSFunc(nodeNum int) float32 {
 func (nc *Controller) addPodEvictorForNewZone(node *v1.Node) {
 nc.evictorLock.Lock()
 defer nc.evictorLock.Unlock()
-zone := utilnode.GetZoneKey(node)
+zone := nodetopology.GetZoneKey(node)
 if _, found := nc.zoneStates[zone]; !found {
 nc.zoneStates[zone] = stateInitial
 if !nc.runTaintManager {

@@ -1403,7 +1402,7 @@ func (nc *Controller) addPodEvictorForNewZone(node *v1.Node) {
 // cancelPodEviction removes any queued evictions, typically because the node is available again. It
 // returns true if an eviction was queued.
 func (nc *Controller) cancelPodEviction(node *v1.Node) bool {
-zone := utilnode.GetZoneKey(node)
+zone := nodetopology.GetZoneKey(node)
 nc.evictorLock.Lock()
 defer nc.evictorLock.Unlock()
 if !nc.nodeEvictionMap.setStatus(node.Name, unmarked) {

@@ -1429,7 +1428,7 @@ func (nc *Controller) evictPods(ctx context.Context, node *v1.Node, pods []*v1.P
 if ok && status == evicted {
 // Node eviction already happened for this node.
 // Handling immediate pod deletion.
-_, err := nodeutil.DeletePods(ctx, nc.kubeClient, pods, nc.recorder, node.Name, string(node.UID), nc.daemonSetStore)
+_, err := controllerutil.DeletePods(ctx, nc.kubeClient, pods, nc.recorder, node.Name, string(node.UID), nc.daemonSetStore)
 if err != nil {
 return false, fmt.Errorf("unable to delete pods from node %q: %v", node.Name, err)
 }

@@ -1438,7 +1437,7 @@ func (nc *Controller) evictPods(ctx context.Context, node *v1.Node, pods []*v1.P
 if !nc.nodeEvictionMap.setStatus(node.Name, toBeEvicted) {
 klog.V(2).Infof("node %v was unregistered in the meantime - skipping setting status", node.Name)
 }
-return nc.zonePodEvictor[utilnode.GetZoneKey(node)].Add(node.Name, string(node.UID)), nil
+return nc.zonePodEvictor[nodetopology.GetZoneKey(node)].Add(node.Name, string(node.UID)), nil
 }

 func (nc *Controller) markNodeForTainting(node *v1.Node, status v1.ConditionStatus) bool {

@@ -1446,17 +1445,17 @@ func (nc *Controller) markNodeForTainting(node *v1.Node, status v1.ConditionStat
 defer nc.evictorLock.Unlock()
 if status == v1.ConditionFalse {
 if !taintutils.TaintExists(node.Spec.Taints, NotReadyTaintTemplate) {
-nc.zoneNoExecuteTainter[utilnode.GetZoneKey(node)].Remove(node.Name)
+nc.zoneNoExecuteTainter[nodetopology.GetZoneKey(node)].Remove(node.Name)
 }
 }

 if status == v1.ConditionUnknown {
 if !taintutils.TaintExists(node.Spec.Taints, UnreachableTaintTemplate) {
-nc.zoneNoExecuteTainter[utilnode.GetZoneKey(node)].Remove(node.Name)
+nc.zoneNoExecuteTainter[nodetopology.GetZoneKey(node)].Remove(node.Name)
 }
 }

-return nc.zoneNoExecuteTainter[utilnode.GetZoneKey(node)].Add(node.Name, string(node.UID))
+return nc.zoneNoExecuteTainter[nodetopology.GetZoneKey(node)].Add(node.Name, string(node.UID))
 }

 func (nc *Controller) markNodeAsReachable(ctx context.Context, node *v1.Node) (bool, error) {
@@ -1472,7 +1471,7 @@ func (nc *Controller) markNodeAsReachable(ctx context.Context, node *v1.Node) (b
 klog.Errorf("Failed to remove taint from node %v: %v", node.Name, err)
 return false, err
 }
-return nc.zoneNoExecuteTainter[utilnode.GetZoneKey(node)].Remove(node.Name), nil
+return nc.zoneNoExecuteTainter[nodetopology.GetZoneKey(node)].Remove(node.Name), nil
 }

 // ComputeZoneState returns a slice of NodeReadyConditions for all Nodes in a given zone.

@@ -1541,7 +1540,7 @@ func (nc *Controller) reconcileNodeLabels(nodeName string) error {
 if len(labelsToUpdate) == 0 {
 return nil
 }
-if !nodeutil.AddOrUpdateLabelsOnNode(nc.kubeClient, labelsToUpdate, node) {
+if !controllerutil.AddOrUpdateLabelsOnNode(nc.kubeClient, labelsToUpdate, node) {
 return fmt.Errorf("failed update labels for node %+v", node)
 }
 return nil

@@ -44,7 +44,7 @@ import (
 "k8s.io/kubernetes/pkg/controller"
 "k8s.io/kubernetes/pkg/controller/nodelifecycle/scheduler"
 "k8s.io/kubernetes/pkg/controller/testutil"
-nodeutil "k8s.io/kubernetes/pkg/controller/util/node"
+controllerutil "k8s.io/kubernetes/pkg/controller/util/node"
 "k8s.io/kubernetes/pkg/util/node"
 taintutils "k8s.io/kubernetes/pkg/util/taints"
 "k8s.io/utils/pointer"

@@ -95,7 +95,7 @@ func (nc *nodeLifecycleController) doEviction(fakeNodeHandler *testutil.FakeNode
 nc.zonePodEvictor[zone].Try(func(value scheduler.TimedValue) (bool, time.Duration) {
 uid, _ := value.UID.(string)
 pods, _ := nc.getPodsAssignedToNode(value.Value)
-nodeutil.DeletePods(context.TODO(), fakeNodeHandler, pods, nc.recorder, value.Value, uid, nc.daemonSetStore)
+controllerutil.DeletePods(context.TODO(), fakeNodeHandler, pods, nc.recorder, value.Value, uid, nc.daemonSetStore)
 _ = nc.nodeEvictionMap.setStatus(value.Value, evicted)
 return true, 0
 })

@@ -729,7 +729,7 @@ func TestMonitorNodeHealthEvictPods(t *testing.T) {
 t.Errorf("unexpected error: %v", err)
 }
 t.Logf("listed pods %d for node %v", len(pods), value.Value)
-nodeutil.DeletePods(context.TODO(), item.fakeNodeHandler, pods, nodeController.recorder, value.Value, nodeUID, nodeController.daemonSetInformer.Lister())
+controllerutil.DeletePods(context.TODO(), item.fakeNodeHandler, pods, nodeController.recorder, value.Value, nodeUID, nodeController.daemonSetInformer.Lister())
 return true, 0
 })
 } else {

@@ -889,7 +889,7 @@ func TestPodStatusChange(t *testing.T) {
 if err != nil {
 t.Errorf("unexpected error: %v", err)
 }
-nodeutil.DeletePods(context.TODO(), item.fakeNodeHandler, pods, nodeController.recorder, value.Value, nodeUID, nodeController.daemonSetStore)
+controllerutil.DeletePods(context.TODO(), item.fakeNodeHandler, pods, nodeController.recorder, value.Value, nodeUID, nodeController.daemonSetStore)
 return true, 0
 })
 }

@@ -3810,7 +3810,7 @@ func TestTryUpdateNodeHealth(t *testing.T) {
 if err != nil {
 t.Fatalf("unexpected error: %v", err)
 }
-_, savedReadyCondition := nodeutil.GetNodeCondition(nodeController.nodeHealthMap.getDeepCopy(test.node.Name).status, v1.NodeReady)
+_, savedReadyCondition := controllerutil.GetNodeCondition(nodeController.nodeHealthMap.getDeepCopy(test.node.Name).status, v1.NodeReady)
 savedStatus := getStatus(savedReadyCondition)
 currentStatus := getStatus(currentReadyCondition)
 if !apiequality.Semantic.DeepEqual(currentStatus, savedStatus) {
@@ -26,8 +26,8 @@ import (
 "k8s.io/apimachinery/pkg/types"
 clientset "k8s.io/client-go/kubernetes"
 corelisters "k8s.io/client-go/listers/core/v1"
+nodeutil "k8s.io/component-helpers/node/util"
 "k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache"
-nodeutil "k8s.io/kubernetes/pkg/util/node"
 )

 // NodeStatusUpdater defines a set of operations for updating the

@@ -48,7 +48,7 @@ import (
 "k8s.io/apimachinery/pkg/util/wait"
 utilfeature "k8s.io/apiserver/pkg/util/feature"
 "k8s.io/client-go/tools/record"
-utilsysctl "k8s.io/component-helpers/node/utils/sysctl"
+utilsysctl "k8s.io/component-helpers/node/util/sysctl"
 internalapi "k8s.io/cri-api/pkg/apis"
 podresourcesapi "k8s.io/kubelet/pkg/apis/podresources/v1"
 kubefeatures "k8s.io/kubernetes/pkg/features"

@@ -36,7 +36,7 @@ import (
 utilerrors "k8s.io/apimachinery/pkg/util/errors"
 utilnet "k8s.io/apimachinery/pkg/util/net"
 utilsets "k8s.io/apimachinery/pkg/util/sets"
-utilsysctl "k8s.io/component-helpers/node/utils/sysctl"
+utilsysctl "k8s.io/component-helpers/node/util/sysctl"
 "k8s.io/klog/v2"
 kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
 kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"

@@ -31,7 +31,7 @@ import (
 "github.com/stretchr/testify/mock"

 utilsets "k8s.io/apimachinery/pkg/util/sets"
-sysctltest "k8s.io/component-helpers/node/utils/sysctl/testing"
+sysctltest "k8s.io/component-helpers/node/util/sysctl/testing"
 kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
 kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
 "k8s.io/kubernetes/pkg/kubelet/dockershim/network"

@@ -31,7 +31,7 @@ import (
 utilerrors "k8s.io/apimachinery/pkg/util/errors"
 utilsets "k8s.io/apimachinery/pkg/util/sets"
 "k8s.io/apimachinery/pkg/util/validation"
-utilsysctl "k8s.io/component-helpers/node/utils/sysctl"
+utilsysctl "k8s.io/component-helpers/node/util/sysctl"
 "k8s.io/klog/v2"
 kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
 kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"

@@ -25,7 +25,7 @@ import (
 "testing"

 utilsets "k8s.io/apimachinery/pkg/util/sets"
-sysctltest "k8s.io/component-helpers/node/utils/sysctl/testing"
+sysctltest "k8s.io/component-helpers/node/util/sysctl/testing"
 kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
 kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
 "k8s.io/kubernetes/pkg/kubelet/dockershim/network"

@@ -34,6 +34,7 @@ import (
 "k8s.io/apimachinery/pkg/util/sets"
 cloudprovider "k8s.io/cloud-provider"
 cloudproviderapi "k8s.io/cloud-provider/api"
+nodeutil "k8s.io/component-helpers/node/util"
 "k8s.io/klog/v2"
 kubeletapis "k8s.io/kubelet/pkg/apis"
 k8s_api_v1 "k8s.io/kubernetes/pkg/apis/core/v1"

@@ -41,7 +42,6 @@ import (
 "k8s.io/kubernetes/pkg/kubelet/events"
 "k8s.io/kubernetes/pkg/kubelet/nodestatus"
 "k8s.io/kubernetes/pkg/kubelet/util"
-nodeutil "k8s.io/kubernetes/pkg/util/node"
 taintutil "k8s.io/kubernetes/pkg/util/taints"
 volutil "k8s.io/kubernetes/pkg/volume/util"
 )
@@ -19,15 +19,16 @@ package status
 import (
 "context"
 "fmt"
-"k8s.io/klog/v2"
 "sync"

+"k8s.io/klog/v2"
+
 apiv1 "k8s.io/api/core/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/types"
 clientset "k8s.io/client-go/kubernetes"
+nodeutil "k8s.io/component-helpers/node/util"
 "k8s.io/kubernetes/pkg/kubelet/metrics"
-nodeutil "k8s.io/kubernetes/pkg/util/node"
 )

 const (

@@ -25,7 +25,7 @@ import (
 clientset "k8s.io/client-go/kubernetes"
 v1core "k8s.io/client-go/kubernetes/typed/core/v1"
 "k8s.io/client-go/tools/events"
-utilsysctl "k8s.io/component-helpers/node/utils/sysctl"
+utilsysctl "k8s.io/component-helpers/node/util/sysctl"
 proxyapp "k8s.io/kubernetes/cmd/kube-proxy/app"
 "k8s.io/kubernetes/pkg/proxy"
 proxyconfig "k8s.io/kubernetes/pkg/proxy/config"

@@ -40,7 +40,7 @@ import (
 "k8s.io/apimachinery/pkg/util/wait"
 utilfeature "k8s.io/apiserver/pkg/util/feature"
 "k8s.io/client-go/tools/events"
-utilsysctl "k8s.io/component-helpers/node/utils/sysctl"
+utilsysctl "k8s.io/component-helpers/node/util/sysctl"
 "k8s.io/klog/v2"
 "k8s.io/kubernetes/pkg/features"
 "k8s.io/kubernetes/pkg/proxy"

@@ -44,7 +44,7 @@ import (
 "k8s.io/apimachinery/pkg/util/wait"
 utilfeature "k8s.io/apiserver/pkg/util/feature"
 "k8s.io/client-go/tools/events"
-utilsysctl "k8s.io/component-helpers/node/utils/sysctl"
+utilsysctl "k8s.io/component-helpers/node/util/sysctl"
 "k8s.io/kubernetes/pkg/features"
 "k8s.io/kubernetes/pkg/proxy"
 "k8s.io/kubernetes/pkg/proxy/healthcheck"

@@ -31,7 +31,7 @@ import (
 utilrand "k8s.io/apimachinery/pkg/util/rand"
 "k8s.io/apimachinery/pkg/util/sets"
 "k8s.io/client-go/tools/events"
-utilsysctl "k8s.io/component-helpers/node/utils/sysctl"
+utilsysctl "k8s.io/component-helpers/node/util/sysctl"
 helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
 netutils "k8s.io/utils/net"

@@ -18,7 +18,6 @@ package node

 import (
 "context"
-"encoding/json"
 "fmt"
 "net"
 "os"

@@ -28,13 +27,9 @@ import (
 "k8s.io/klog/v2"

 v1 "k8s.io/api/core/v1"
-"k8s.io/apimachinery/pkg/api/equality"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-"k8s.io/apimachinery/pkg/types"
-"k8s.io/apimachinery/pkg/util/strategicpatch"
 "k8s.io/apimachinery/pkg/util/wait"
 clientset "k8s.io/client-go/kubernetes"
-v1core "k8s.io/client-go/kubernetes/typed/core/v1"
 netutils "k8s.io/utils/net"
 )
@ -169,189 +164,3 @@ func GetNodeIP(client clientset.Interface, name string) net.IP {
     }
     return nodeIP
 }
-
-type nodeForConditionPatch struct {
-    Status nodeStatusForPatch `json:"status"`
-}
-
-type nodeStatusForPatch struct {
-    Conditions []v1.NodeCondition `json:"conditions"`
-}
-
-// SetNodeCondition updates specific node condition with patch operation.
-func SetNodeCondition(c clientset.Interface, node types.NodeName, condition v1.NodeCondition) error {
-    generatePatch := func(condition v1.NodeCondition) ([]byte, error) {
-        patch := nodeForConditionPatch{
-            Status: nodeStatusForPatch{
-                Conditions: []v1.NodeCondition{
-                    condition,
-                },
-            },
-        }
-        patchBytes, err := json.Marshal(&patch)
-        if err != nil {
-            return nil, err
-        }
-        return patchBytes, nil
-    }
-    condition.LastHeartbeatTime = metav1.NewTime(time.Now())
-    patch, err := generatePatch(condition)
-    if err != nil {
-        return nil
-    }
-    _, err = c.CoreV1().Nodes().PatchStatus(context.TODO(), string(node), patch)
-    return err
-}
-
-type nodeForCIDRMergePatch struct {
-    Spec nodeSpecForMergePatch `json:"spec"`
-}
-
-type nodeSpecForMergePatch struct {
-    PodCIDR  string   `json:"podCIDR"`
-    PodCIDRs []string `json:"podCIDRs,omitempty"`
-}
-
-// PatchNodeCIDR patches the specified node's CIDR to the given value.
-func PatchNodeCIDR(c clientset.Interface, node types.NodeName, cidr string) error {
-    patch := nodeForCIDRMergePatch{
-        Spec: nodeSpecForMergePatch{
-            PodCIDR: cidr,
-        },
-    }
-    patchBytes, err := json.Marshal(&patch)
-    if err != nil {
-        return fmt.Errorf("failed to json.Marshal CIDR: %v", err)
-    }
-
-    if _, err := c.CoreV1().Nodes().Patch(context.TODO(), string(node), types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}); err != nil {
-        return fmt.Errorf("failed to patch node CIDR: %v", err)
-    }
-    return nil
-}
-
-// PatchNodeCIDRs patches the specified node.CIDR=cidrs[0] and node.CIDRs to the given value.
-func PatchNodeCIDRs(c clientset.Interface, node types.NodeName, cidrs []string) error {
-    // set the pod cidrs list and set the old pod cidr field
-    patch := nodeForCIDRMergePatch{
-        Spec: nodeSpecForMergePatch{
-            PodCIDR:  cidrs[0],
-            PodCIDRs: cidrs,
-        },
-    }
-
-    patchBytes, err := json.Marshal(&patch)
-    if err != nil {
-        return fmt.Errorf("failed to json.Marshal CIDR: %v", err)
-    }
-    klog.V(4).Infof("cidrs patch bytes are:%s", string(patchBytes))
-    if _, err := c.CoreV1().Nodes().Patch(context.TODO(), string(node), types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}); err != nil {
-        return fmt.Errorf("failed to patch node CIDR: %v", err)
-    }
-    return nil
-}
-
-// PatchNodeStatus patches node status along with objectmetadata
-func PatchNodeStatus(c v1core.CoreV1Interface, nodeName types.NodeName, oldNode *v1.Node, newNode *v1.Node) (*v1.Node, []byte, error) {
-    patchBytes, err := preparePatchBytesforNodeStatus(nodeName, oldNode, newNode)
-    if err != nil {
-        return nil, nil, err
-    }
-
-    updatedNode, err := c.Nodes().Patch(context.TODO(), string(nodeName), types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}, "status")
-    if err != nil {
-        return nil, nil, fmt.Errorf("failed to patch status %q for node %q: %v", patchBytes, nodeName, err)
-    }
-    return updatedNode, patchBytes, nil
-}
-
-// preparePatchBytesforNodeStatus updates the node objectmetadata and status
-func preparePatchBytesforNodeStatus(nodeName types.NodeName, oldNode *v1.Node, newNode *v1.Node) ([]byte, error) {
-    oldData, err := json.Marshal(oldNode)
-    if err != nil {
-        return nil, fmt.Errorf("failed to Marshal oldData for node %q: %v", nodeName, err)
-    }
-
-    // NodeStatus.Addresses is incorrectly annotated as patchStrategy=merge, which
-    // will cause strategicpatch.CreateTwoWayMergePatch to create an incorrect patch
-    // if it changed.
-    manuallyPatchAddresses := (len(oldNode.Status.Addresses) > 0) && !equality.Semantic.DeepEqual(oldNode.Status.Addresses, newNode.Status.Addresses)
-
-    // Reset spec to make sure only patch for Status or ObjectMeta is generated.
-    // Note that we don't reset ObjectMeta here, because:
-    // 1. This aligns with Nodes().UpdateStatus().
-    // 2. Some component does use this to update node annotations.
-    diffNode := newNode.DeepCopy()
-    diffNode.Spec = oldNode.Spec
-    if manuallyPatchAddresses {
-        diffNode.Status.Addresses = oldNode.Status.Addresses
-    }
-    newData, err := json.Marshal(diffNode)
-    if err != nil {
-        return nil, fmt.Errorf("failed to Marshal newData for node %q: %v", nodeName, err)
-    }
-
-    patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, v1.Node{})
-    if err != nil {
-        return nil, fmt.Errorf("failed to CreateTwoWayMergePatch for node %q: %v", nodeName, err)
-    }
-    if manuallyPatchAddresses {
-        patchBytes, err = fixupPatchForNodeStatusAddresses(patchBytes, newNode.Status.Addresses)
-        if err != nil {
-            return nil, fmt.Errorf("failed to fix up NodeAddresses in patch for node %q: %v", nodeName, err)
-        }
-    }
-
-    return patchBytes, nil
-}
-
-// fixupPatchForNodeStatusAddresses adds a replace-strategy patch for Status.Addresses to
-// the existing patch
-func fixupPatchForNodeStatusAddresses(patchBytes []byte, addresses []v1.NodeAddress) ([]byte, error) {
-    // Given patchBytes='{"status": {"conditions": [ ... ], "phase": ...}}' and
-    // addresses=[{"type": "InternalIP", "address": "10.0.0.1"}], we need to generate:
-    //
-    //   {
-    //     "status": {
-    //       "conditions": [ ... ],
-    //       "phase": ...,
-    //       "addresses": [
-    //         {
-    //           "type": "InternalIP",
-    //           "address": "10.0.0.1"
-    //         },
-    //         {
-    //           "$patch": "replace"
-    //         }
-    //       ]
-    //     }
-    //   }
-
-    var patchMap map[string]interface{}
-    if err := json.Unmarshal(patchBytes, &patchMap); err != nil {
-        return nil, err
-    }
-
-    addrBytes, err := json.Marshal(addresses)
-    if err != nil {
-        return nil, err
-    }
-    var addrArray []interface{}
-    if err := json.Unmarshal(addrBytes, &addrArray); err != nil {
-        return nil, err
-    }
-    addrArray = append(addrArray, map[string]interface{}{"$patch": "replace"})
-
-    status := patchMap["status"]
-    if status == nil {
-        status = map[string]interface{}{}
-        patchMap["status"] = status
-    }
-    statusMap, ok := status.(map[string]interface{})
-    if !ok {
-        return nil, fmt.Errorf("unexpected data in patch")
-    }
-    statusMap["addresses"] = addrArray
-
-    return json.Marshal(patchMap)
-}
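The block deleted above is not dropped; it moves essentially verbatim into staging as k8s.io/component-helpers/node/util, which the later hunks in this commit switch callers to. A rough sketch of what a caller of the relocated SetNodeCondition looks like after the move (the kubeconfig handling and node name below are illustrative assumptions, not part of the commit):

package main

import (
    "time"

    v1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/types"
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/tools/clientcmd"
    nodeutil "k8s.io/component-helpers/node/util"
)

func main() {
    // Build a client from the local kubeconfig; any client-go construction works.
    config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
    if err != nil {
        panic(err)
    }
    client := kubernetes.NewForConfigOrDie(config)

    // Patch the NetworkUnavailable condition of a hypothetical node "node-1"
    // using the relocated helper (same signature as the deleted code above).
    err = nodeutil.SetNodeCondition(client, types.NodeName("node-1"), v1.NodeCondition{
        Type:               v1.NodeNetworkUnavailable,
        Status:             v1.ConditionFalse,
        Reason:             "RouteCreated",
        Message:            "routes are configured",
        LastTransitionTime: metav1.NewTime(time.Now()),
    })
    if err != nil {
        panic(err)
    }
}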
@ -38,8 +38,8 @@ import (
     "k8s.io/apimachinery/pkg/util/sets"
     "k8s.io/apimachinery/pkg/util/wait"
     clientset "k8s.io/client-go/kubernetes"
+    nodeutil "k8s.io/component-helpers/node/util"
     "k8s.io/klog/v2"
-    nodeutil "k8s.io/kubernetes/pkg/util/node"
     "k8s.io/kubernetes/pkg/volume"
     "k8s.io/kubernetes/pkg/volume/util"
 )
@ -233,6 +233,7 @@
 - k8s.io/cloud-provider
 - k8s.io/component-base
 - k8s.io/controller-manager
+- k8s.io/component-helpers
 - k8s.io/klog
 - k8s.io/utils
@ -1295,6 +1295,8 @@ rules:
 branch: master
 - repository: controller-manager
 branch: master
+- repository: component-helpers
+branch: master
 - source:
 branch: release-1.19
 dir: staging/src/k8s.io/cloud-provider
@ -1309,6 +1311,8 @@ rules:
 branch: release-1.19
 - repository: component-base
 branch: release-1.19
+- repository: component-helpers
+branch: release-1.19
 - source:
 branch: release-1.20
 dir: staging/src/k8s.io/cloud-provider
@ -1327,6 +1331,8 @@ rules:
 branch: release-1.20
 - repository: controller-manager
 branch: release-1.20
+- repository: component-helpers
+branch: release-1.20
 - source:
 branch: release-1.21
 dir: staging/src/k8s.io/cloud-provider
@ -1345,6 +1351,8 @@ rules:
 branch: release-1.21
 - repository: controller-manager
 branch: release-1.21
+- repository: component-helpers
+branch: release-1.21
 - source:
 branch: release-1.22
 dir: staging/src/k8s.io/cloud-provider
@ -1363,6 +1371,8 @@ rules:
 branch: release-1.22
 - repository: controller-manager
 branch: release-1.22
+- repository: component-helpers
+branch: release-1.22

 - destination: kube-controller-manager
 library: true
@ -1386,6 +1396,8 @@ rules:
 branch: master
 - repository: cloud-provider
 branch: master
+- repository: component-helpers
+branch: master
 - source:
 branch: release-1.19
 dir: staging/src/k8s.io/kube-controller-manager
@ -1400,6 +1412,8 @@ rules:
 branch: release-1.19
 - repository: client-go
 branch: release-1.19
+- repository: component-helpers
+branch: release-1.19
 - source:
 branch: release-1.20
 dir: staging/src/k8s.io/kube-controller-manager
@ -1420,6 +1434,8 @@ rules:
 branch: release-1.20
 - repository: cloud-provider
 branch: release-1.20
+- repository: component-helpers
+branch: release-1.20
 - source:
 branch: release-1.21
 dir: staging/src/k8s.io/kube-controller-manager
@ -1440,6 +1456,8 @@ rules:
 branch: release-1.21
 - repository: cloud-provider
 branch: release-1.21
+- repository: component-helpers
+branch: release-1.21
 - source:
 branch: release-1.22
 dir: staging/src/k8s.io/kube-controller-manager
@ -1460,6 +1478,8 @@ rules:
 branch: release-1.22
 - repository: cloud-provider
 branch: release-1.22
+- repository: component-helpers
+branch: release-1.22

 - destination: cluster-bootstrap
 library: true
@ -1622,6 +1642,8 @@ rules:
 branch: master
 - repository: mount-utils
 branch: master
+- repository: component-helpers
+branch: master
 - source:
 branch: release-1.19
 dir: staging/src/k8s.io/legacy-cloud-providers
@ -1642,6 +1664,8 @@ rules:
 branch: release-1.19
 - repository: component-base
 branch: release-1.19
+- repository: component-helpers
+branch: release-1.19
 - source:
 branch: release-1.20
 dir: staging/src/k8s.io/legacy-cloud-providers
@ -1664,6 +1688,8 @@ rules:
 branch: release-1.20
 - repository: controller-manager
 branch: release-1.20
+- repository: component-helpers
+branch: release-1.20
 - source:
 branch: release-1.21
 dir: staging/src/k8s.io/legacy-cloud-providers
@ -1686,6 +1712,8 @@ rules:
 branch: release-1.21
 - repository: controller-manager
 branch: release-1.21
+- repository: component-helpers
+branch: release-1.21
 - source:
 branch: release-1.22
 dir: staging/src/k8s.io/legacy-cloud-providers
@ -1710,6 +1738,8 @@ rules:
 branch: release-1.22
 - repository: mount-utils
 branch: release-1.22
+- repository: component-helpers
+branch: release-1.22

 - destination: cri-api
 library: true
@ -40,6 +40,7 @@ import (
     cloudprovider "k8s.io/cloud-provider"
     cloudproviderapi "k8s.io/cloud-provider/api"
     cloudnodeutil "k8s.io/cloud-provider/node/helpers"
+    nodeutil "k8s.io/component-helpers/node/util"
     "k8s.io/klog/v2"
 )

@ -359,7 +360,7 @@ func (cnc *CloudNodeController) updateNodeAddress(ctx context.Context, node *v1.
     }
     newNode := node.DeepCopy()
     newNode.Status.Addresses = nodeAddresses
-    if _, _, err := cloudnodeutil.PatchNodeStatus(cnc.kubeClient.CoreV1(), types.NodeName(node.Name), node, newNode); err != nil {
+    if _, _, err := nodeutil.PatchNodeStatus(cnc.kubeClient.CoreV1(), types.NodeName(node.Name), node, newNode); err != nil {
         klog.Errorf("Error patching node with cloud ip addresses = [%v]", err)
     }
 }
@ -415,7 +416,7 @@ func (cnc *CloudNodeController) syncNode(ctx context.Context, nodeName string) e
     // TODO(wlan0): Move this logic to the route controller using the node taint instead of condition
     // Since there are node taints, do we still need this?
     // This condition marks the node as unusable until routes are initialized in the cloud provider
-    if err := cloudnodeutil.SetNodeCondition(cnc.kubeClient, types.NodeName(nodeName), v1.NodeCondition{
+    if err := nodeutil.SetNodeCondition(cnc.kubeClient, types.NodeName(nodeName), v1.NodeCondition{
         Type:   v1.NodeNetworkUnavailable,
         Status: v1.ConditionTrue,
         Reason: "NoRouteCreated",
@ -36,6 +36,7 @@ import (
     cloudprovider "k8s.io/cloud-provider"
     cloudproviderapi "k8s.io/cloud-provider/api"
     cloudnodeutil "k8s.io/cloud-provider/node/helpers"
+    nodeutil "k8s.io/component-helpers/node/util"
     "k8s.io/klog/v2"
 )

@ -128,7 +129,7 @@ func (c *CloudNodeLifecycleController) MonitorNodes(ctx context.Context) {
     for _, node := range nodes {
         // Default NodeReady status to v1.ConditionUnknown
         status := v1.ConditionUnknown
-        if _, c := cloudnodeutil.GetNodeCondition(&node.Status, v1.NodeReady); c != nil {
+        if _, c := nodeutil.GetNodeCondition(&node.Status, v1.NodeReady); c != nil {
             status = c.Status
         }
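GetNodeCondition is another helper the cloud controllers now take from k8s.io/component-helpers/node/util instead of the cloud-provider copy. A small self-contained sketch of the same lookup pattern used above (the hand-built Node object is only for illustration):

package main

import (
    "fmt"

    v1 "k8s.io/api/core/v1"
    nodeutil "k8s.io/component-helpers/node/util"
)

// isNodeReady reports whether a node's Ready condition is True, defaulting to
// unknown when the condition is missing, mirroring the controller code above.
func isNodeReady(node *v1.Node) bool {
    status := v1.ConditionUnknown
    if _, c := nodeutil.GetNodeCondition(&node.Status, v1.NodeReady); c != nil {
        status = c.Status
    }
    return status == v1.ConditionTrue
}

func main() {
    node := &v1.Node{}
    node.Status.Conditions = []v1.NodeCondition{
        {Type: v1.NodeReady, Status: v1.ConditionTrue},
    }
    fmt.Println(isNodeReady(node)) // true
}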
@ -41,8 +41,8 @@ import (
     "k8s.io/client-go/tools/record"
     clientretry "k8s.io/client-go/util/retry"
     cloudprovider "k8s.io/cloud-provider"
-    cloudnodeutil "k8s.io/cloud-provider/node/helpers"
     "k8s.io/component-base/metrics/prometheus/ratelimiter"
+    nodeutil "k8s.io/component-helpers/node/util"
 )

 const (
@ -290,7 +290,7 @@ func (rc *RouteController) reconcile(ctx context.Context, nodes []*v1.Node, rout
 }

 func (rc *RouteController) updateNetworkingCondition(node *v1.Node, routesCreated bool) error {
-    _, condition := cloudnodeutil.GetNodeCondition(&(node.Status), v1.NodeNetworkUnavailable)
+    _, condition := nodeutil.GetNodeCondition(&(node.Status), v1.NodeNetworkUnavailable)
     if routesCreated && condition != nil && condition.Status == v1.ConditionFalse {
         klog.V(2).Infof("set node %v with NodeNetworkUnavailable=false was canceled because it is already set", node.Name)
         return nil
@ -311,7 +311,7 @@ func (rc *RouteController) updateNetworkingCondition(node *v1.Node, routesCreate
     // patch in the retry loop.
     currentTime := metav1.Now()
     if routesCreated {
-        err = cloudnodeutil.SetNodeCondition(rc.kubeClient, types.NodeName(node.Name), v1.NodeCondition{
+        err = nodeutil.SetNodeCondition(rc.kubeClient, types.NodeName(node.Name), v1.NodeCondition{
             Type:   v1.NodeNetworkUnavailable,
             Status: v1.ConditionFalse,
             Reason: "RouteCreated",
@ -319,7 +319,7 @@ func (rc *RouteController) updateNetworkingCondition(node *v1.Node, routesCreate
             LastTransitionTime: currentTime,
         })
     } else {
-        err = cloudnodeutil.SetNodeCondition(rc.kubeClient, types.NodeName(node.Name), v1.NodeCondition{
+        err = nodeutil.SetNodeCondition(rc.kubeClient, types.NodeName(node.Name), v1.NodeCondition{
             Type:   v1.NodeNetworkUnavailable,
             Status: v1.ConditionTrue,
             Reason: "NoRouteCreated",
@ -30,7 +30,7 @@ import (
     core "k8s.io/client-go/testing"
     cloudprovider "k8s.io/cloud-provider"
     fakecloud "k8s.io/cloud-provider/fake"
-    cloudnodeutil "k8s.io/cloud-provider/node/helpers"
+    nodeutil "k8s.io/component-helpers/node/util"
     netutils "k8s.io/utils/net"
 )

@ -378,7 +378,7 @@ func TestReconcile(t *testing.T) {
     for _, action := range testCase.clientset.Actions() {
         if action.GetVerb() == "update" && action.GetResource().Resource == "nodes" {
             node := action.(core.UpdateAction).GetObject().(*v1.Node)
-            _, condition := cloudnodeutil.GetNodeCondition(&node.Status, v1.NodeNetworkUnavailable)
+            _, condition := nodeutil.GetNodeCondition(&node.Status, v1.NodeNetworkUnavailable)
             if condition == nil {
                 t.Errorf("%d. Missing NodeNetworkUnavailable condition for Node %v", i, node.Name)
             } else {
@ -14,6 +14,7 @@ require (
     k8s.io/apiserver v0.0.0
     k8s.io/client-go v0.0.0
     k8s.io/component-base v0.0.0
+    k8s.io/component-helpers v0.0.0
     k8s.io/controller-manager v0.0.0
     k8s.io/klog/v2 v2.30.0
     k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b
@ -26,5 +27,6 @@ replace (
     k8s.io/client-go => ../client-go
     k8s.io/cloud-provider => ../cloud-provider
     k8s.io/component-base => ../component-base
+    k8s.io/component-helpers => ../component-helpers
     k8s.io/controller-manager => ../controller-manager
 )
staging/src/k8s.io/component-helpers/node/util/cidr.go (new file)
@ -0,0 +1,58 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+    "context"
+    "encoding/json"
+    "fmt"
+
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    "k8s.io/apimachinery/pkg/types"
+    clientset "k8s.io/client-go/kubernetes"
+    "k8s.io/klog/v2"
+)
+
+type nodeForCIDRMergePatch struct {
+    Spec nodeSpecForMergePatch `json:"spec"`
+}
+
+type nodeSpecForMergePatch struct {
+    PodCIDR  string   `json:"podCIDR"`
+    PodCIDRs []string `json:"podCIDRs,omitempty"`
+}
+
+// PatchNodeCIDRs patches the specified node.CIDR=cidrs[0] and node.CIDRs to the given value.
+func PatchNodeCIDRs(c clientset.Interface, node types.NodeName, cidrs []string) error {
+    // set the pod cidrs list and set the old pod cidr field
+    patch := nodeForCIDRMergePatch{
+        Spec: nodeSpecForMergePatch{
+            PodCIDR:  cidrs[0],
+            PodCIDRs: cidrs,
+        },
+    }
+
+    patchBytes, err := json.Marshal(&patch)
+    if err != nil {
+        return fmt.Errorf("failed to json.Marshal CIDR: %v", err)
+    }
+    klog.V(4).Infof("cidrs patch bytes are:%s", string(patchBytes))
+    if _, err := c.CoreV1().Nodes().Patch(context.TODO(), string(node), types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}); err != nil {
+        return fmt.Errorf("failed to patch node CIDR: %v", err)
+    }
+    return nil
+}
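A short usage sketch for the relocated PatchNodeCIDRs, roughly what a CIDR-allocating controller does when assigning pod CIDRs to a node (the in-cluster client construction, node name, and CIDR values are illustrative assumptions):

package main

import (
    "k8s.io/apimachinery/pkg/types"
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/rest"
    nodeutil "k8s.io/component-helpers/node/util"
)

func main() {
    // Assume the caller runs in-cluster; any client-go config source works.
    config, err := rest.InClusterConfig()
    if err != nil {
        panic(err)
    }
    client := kubernetes.NewForConfigOrDie(config)

    // Dual-stack example: the first CIDR also becomes the legacy spec.podCIDR field.
    cidrs := []string{"10.244.1.0/24", "fd00:10:244:1::/64"}
    if err := nodeutil.PatchNodeCIDRs(client, types.NodeName("node-1"), cidrs); err != nil {
        panic(err)
    }
}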
@ -1,5 +1,5 @@
 /*
-Copyright 2019 The Kubernetes Authors.
+Copyright 2021 The Kubernetes Authors.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */

-package helpers
+package util

 import (
     "context"
@ -1,5 +1,5 @@
 /*
-Copyright 2019 The Kubernetes Authors.
+Copyright 2021 The Kubernetes Authors.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@ -14,13 +14,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */

-/*
-
-NOTE: the contents of this file has been copied from k8s.io/kubernetes/pkg/util/node. The reason for duplicating this code is to remove
-dependencies for cloud controller manager.
-*/
-
-package helpers
+package util

 import (
     "context"
@ -19,7 +19,7 @@ package testing
 import (
     "os"

-    "k8s.io/component-helpers/node/utils/sysctl"
+    "k8s.io/component-helpers/node/util/sysctl"
 )

 // Fake is a map-backed implementation of sysctl.Interface, for testing/mocking.
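The map-backed fake moves along with the real implementation, so tests only need the new import path. A hedged sketch of using it in a unit test, assuming the package keeps its NewFake constructor and the sysctl.Interface methods it had under the old path:

package example_test

import (
    "testing"

    fakesysctl "k8s.io/component-helpers/node/util/sysctl/testing"
)

// TestConntrackMax exercises the fake standing in for the real sysctl
// interface: values written with SetSysctl are read back from the in-memory map.
func TestConntrackMax(t *testing.T) {
    fake := fakesysctl.NewFake()
    if err := fake.SetSysctl("net/netfilter/nf_conntrack_max", 131072); err != nil {
        t.Fatal(err)
    }
    got, err := fake.GetSysctl("net/netfilter/nf_conntrack_max")
    if err != nil || got != 131072 {
        t.Fatalf("got %d, err %v", got, err)
    }
}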
@ -17,6 +17,7 @@ replace (
     k8s.io/client-go => ../client-go
     k8s.io/cloud-provider => ../cloud-provider
     k8s.io/component-base => ../component-base
+    k8s.io/component-helpers => ../component-helpers
     k8s.io/controller-manager => ../controller-manager
     k8s.io/kube-controller-manager => ../kube-controller-manager
 )
@ -50,6 +50,7 @@ replace (
     k8s.io/client-go => ../client-go
     k8s.io/cloud-provider => ../cloud-provider
     k8s.io/component-base => ../component-base
+    k8s.io/component-helpers => ../component-helpers
     k8s.io/controller-manager => ../controller-manager
     k8s.io/csi-translation-lib => ../csi-translation-lib
     k8s.io/legacy-cloud-providers => ../legacy-cloud-providers
@ -32,7 +32,7 @@ import (
     "k8s.io/apimachinery/pkg/types"
     "k8s.io/apimachinery/pkg/util/wait"
     v1core "k8s.io/client-go/kubernetes/typed/core/v1"
-    node2 "k8s.io/kubernetes/pkg/util/node"
+    nodeutil "k8s.io/component-helpers/node/util"
     "k8s.io/kubernetes/test/e2e/framework"
 )
@ -51,7 +51,7 @@ var _ = SIGDescribe("OSArchLabelReconciliation [Serial] [Slow] [Disruptive]", fu
         newNode := node.DeepCopy()
         newNode.Labels[v1.LabelOSStable] = "dummyOS"
         newNode.Labels[v1.LabelArchStable] = "dummyArch"
-        _, _, err := node2.PatchNodeStatus(f.ClientSet.CoreV1(), types.NodeName(node.Name), node, newNode)
+        _, _, err := nodeutil.PatchNodeStatus(f.ClientSet.CoreV1(), types.NodeName(node.Name), node, newNode)
         framework.ExpectNoError(err)
         // Restart kubelet
         startKubelet()
@ -70,7 +70,7 @@ var _ = SIGDescribe("OSArchLabelReconciliation [Serial] [Slow] [Disruptive]", fu
         newNode := node.DeepCopy()
         newNode.Labels[v1.LabelOSStable] = "dummyOS"
         newNode.Labels[v1.LabelArchStable] = "dummyArch"
-        _, _, err := node2.PatchNodeStatus(f.ClientSet.CoreV1(), types.NodeName(node.Name), node, newNode)
+        _, _, err := nodeutil.PatchNodeStatus(f.ClientSet.CoreV1(), types.NodeName(node.Name), node, newNode)
         framework.ExpectNoError(err)
         err = waitForNodeLabels(f.ClientSet.CoreV1(), node.Name, 5*time.Minute)
         framework.ExpectNoError(err)
@ -30,7 +30,7 @@ import (
     cloudprovider "k8s.io/cloud-provider"
     "k8s.io/klog/v2"
     "k8s.io/kubernetes/pkg/controller/nodeipam/ipam"
-    nodeutil "k8s.io/kubernetes/pkg/controller/util/node"
+    controllerutil "k8s.io/kubernetes/pkg/controller/util/node"
 )

 // Config represents the test configuration that is being run
@ -150,7 +150,7 @@ func (o *Observer) monitor() {
     nodeInformer := sharedInformer.Core().V1().Nodes().Informer()

     nodeInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
-        AddFunc: nodeutil.CreateAddNodeHandler(func(node *v1.Node) (err error) {
+        AddFunc: controllerutil.CreateAddNodeHandler(func(node *v1.Node) (err error) {
            name := node.GetName()
            if node.Spec.PodCIDR != "" {
                // ignore nodes that have PodCIDR (might be hold over from previous runs that did not get cleaned up)
@ -162,7 +162,7 @@ func (o *Observer) monitor() {
            o.numAdded = o.numAdded + 1
            return
        }),
-        UpdateFunc: nodeutil.CreateUpdateNodeHandler(func(oldNode, newNode *v1.Node) (err error) {
+        UpdateFunc: controllerutil.CreateUpdateNodeHandler(func(oldNode, newNode *v1.Node) (err error) {
            name := newNode.GetName()
            nTime, found := o.timing[name]
            if !found {
vendor/modules.txt (vendored)
@ -2011,8 +2011,9 @@ k8s.io/component-helpers/apps/poddisruptionbudget
 k8s.io/component-helpers/auth/rbac/reconciliation
 k8s.io/component-helpers/auth/rbac/validation
 k8s.io/component-helpers/node/topology
-k8s.io/component-helpers/node/utils/sysctl
-k8s.io/component-helpers/node/utils/sysctl/testing
+k8s.io/component-helpers/node/util
+k8s.io/component-helpers/node/util/sysctl
+k8s.io/component-helpers/node/util/sysctl/testing
 k8s.io/component-helpers/scheduling/corev1
 k8s.io/component-helpers/scheduling/corev1/nodeaffinity
 k8s.io/component-helpers/storage/ephemeral