diff --git a/cmd/kube-proxy/app/conntrack.go b/cmd/kube-proxy/app/conntrack.go
index 7d8de978a16..f1697d3cac6 100644
--- a/cmd/kube-proxy/app/conntrack.go
+++ b/cmd/kube-proxy/app/conntrack.go
@@ -25,7 +25,7 @@ import (
 	"k8s.io/klog/v2"
 	"k8s.io/mount-utils"
 
-	"k8s.io/component-helpers/node/utils/sysctl"
+	"k8s.io/component-helpers/node/util/sysctl"
 )
 
 // Conntracker is an interface to the global sysctl. Descriptions of the various
diff --git a/cmd/kube-proxy/app/server_others.go b/cmd/kube-proxy/app/server_others.go
index b0537c504bd..c0da5b56d50 100644
--- a/cmd/kube-proxy/app/server_others.go
+++ b/cmd/kube-proxy/app/server_others.go
@@ -48,7 +48,7 @@ import (
 	toolswatch "k8s.io/client-go/tools/watch"
 	"k8s.io/component-base/configz"
 	"k8s.io/component-base/metrics"
-	utilsysctl "k8s.io/component-helpers/node/utils/sysctl"
+	utilsysctl "k8s.io/component-helpers/node/util/sysctl"
 	"k8s.io/kubernetes/pkg/proxy"
 	proxyconfigapi "k8s.io/kubernetes/pkg/proxy/apis/config"
 	"k8s.io/kubernetes/pkg/proxy/apis/config/scheme"
diff --git a/cmd/kubemark/hollow-node.go b/cmd/kubemark/hollow-node.go
index ee6690ff82e..c0e08143f2a 100644
--- a/cmd/kubemark/hollow-node.go
+++ b/cmd/kubemark/hollow-node.go
@@ -41,7 +41,7 @@ import (
 	_ "k8s.io/component-base/metrics/prometheus/version" // for version metric registration
 	"k8s.io/component-base/version"
 	"k8s.io/component-base/version/verflag"
-	fakesysctl "k8s.io/component-helpers/node/utils/sysctl/testing"
+	fakesysctl "k8s.io/component-helpers/node/util/sysctl/testing"
 	"k8s.io/kubernetes/pkg/api/legacyscheme"
 	"k8s.io/kubernetes/pkg/apis/core"
 	"k8s.io/kubernetes/pkg/cluster/ports"
diff --git a/hack/.import-aliases b/hack/.import-aliases
index a96af5f6aa8..ea5bbde09b0 100644
--- a/hack/.import-aliases
+++ b/hack/.import-aliases
@@ -40,6 +40,8 @@
   "k8s.io/api/storage/v1alpha1": "storagev1alpha1",
   "k8s.io/api/storage/v1beta1": "storagev1beta1",
   "k8s.io/apimachinery/pkg/api/errors": "apierrors",
+  "k8s.io/component-helpers/node/util": "nodeutil",
+  "k8s.io/kubernetes/pkg/controller/util/node": "controllerutil",
   "k8s.io/kubelet/apis/stats/v1alpha1": "kubeletstatsv1alpha1",
   "k8s.io/kubernetes/pkg/controller/apis/config/v1alpha1": "controllerconfigv1alpha1",
   "k8s.io/kubernetes/pkg/kubelet/apis/config/v1beta1": "kubeletconfigv1beta1",
diff --git a/pkg/controller/nodeipam/ipam/adapter.go b/pkg/controller/nodeipam/ipam/adapter.go
index b722a1bdc74..e3214be114e 100644
--- a/pkg/controller/nodeipam/ipam/adapter.go
+++ b/pkg/controller/nodeipam/ipam/adapter.go
@@ -34,7 +34,7 @@ import (
 	clientset "k8s.io/client-go/kubernetes"
 	v1core "k8s.io/client-go/kubernetes/typed/core/v1"
 	"k8s.io/client-go/tools/record"
-	nodeutil "k8s.io/kubernetes/pkg/util/node"
+	nodeutil "k8s.io/component-helpers/node/util"
 	"k8s.io/legacy-cloud-providers/gce"
 	"k8s.io/metrics/pkg/client/clientset/versioned/scheme"
 )
diff --git a/pkg/controller/nodeipam/ipam/cloud_cidr_allocator.go b/pkg/controller/nodeipam/ipam/cloud_cidr_allocator.go
index 97815e58e73..99187afdd89 100644
--- a/pkg/controller/nodeipam/ipam/cloud_cidr_allocator.go
+++ b/pkg/controller/nodeipam/ipam/cloud_cidr_allocator.go
@@ -43,8 +43,8 @@ import (
 	"k8s.io/client-go/kubernetes/scheme"
 	v1core "k8s.io/client-go/kubernetes/typed/core/v1"
 	cloudprovider "k8s.io/cloud-provider"
-	nodeutil "k8s.io/kubernetes/pkg/controller/util/node"
-	utilnode "k8s.io/kubernetes/pkg/util/node"
+	nodeutil "k8s.io/component-helpers/node/util"
+	controllerutil "k8s.io/kubernetes/pkg/controller/util/node"
 	utiltaints "k8s.io/kubernetes/pkg/util/taints"
 	"k8s.io/legacy-cloud-providers/gce"
 	netutils "k8s.io/utils/net"
@@ -112,21 +112,21 @@ func NewCloudCIDRAllocator(client clientset.Interface, cloud cloudprovider.Inter
 	}
 
 	nodeInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
-		AddFunc: nodeutil.CreateAddNodeHandler(ca.AllocateOrOccupyCIDR),
-		UpdateFunc: nodeutil.CreateUpdateNodeHandler(func(_, newNode *v1.Node) error {
+		AddFunc: controllerutil.CreateAddNodeHandler(ca.AllocateOrOccupyCIDR),
+		UpdateFunc: controllerutil.CreateUpdateNodeHandler(func(_, newNode *v1.Node) error {
 			if newNode.Spec.PodCIDR == "" {
 				return ca.AllocateOrOccupyCIDR(newNode)
 			}
 			// Even if PodCIDR is assigned, but NetworkUnavailable condition is
 			// set to true, we need to process the node to set the condition.
 			networkUnavailableTaint := &v1.Taint{Key: v1.TaintNodeNetworkUnavailable, Effect: v1.TaintEffectNoSchedule}
-			_, cond := nodeutil.GetNodeCondition(&newNode.Status, v1.NodeNetworkUnavailable)
+			_, cond := controllerutil.GetNodeCondition(&newNode.Status, v1.NodeNetworkUnavailable)
 			if cond == nil || cond.Status != v1.ConditionFalse || utiltaints.TaintExists(newNode.Spec.Taints, networkUnavailableTaint) {
 				return ca.AllocateOrOccupyCIDR(newNode)
 			}
 			return nil
 		}),
-		DeleteFunc: nodeutil.CreateDeleteNodeHandler(ca.ReleaseCIDR),
+		DeleteFunc: controllerutil.CreateDeleteNodeHandler(ca.ReleaseCIDR),
 	})
 
 	klog.V(0).Infof("Using cloud CIDR allocator (provider: %v)", cloud.ProviderName())
@@ -258,11 +258,11 @@ func (ca *cloudCIDRAllocator) updateCIDRAllocation(nodeName string) error {
 
 	cidrStrings, err := ca.cloud.AliasRangesByProviderID(node.Spec.ProviderID)
 	if err != nil {
-		nodeutil.RecordNodeStatusChange(ca.recorder, node, "CIDRNotAvailable")
+		controllerutil.RecordNodeStatusChange(ca.recorder, node, "CIDRNotAvailable")
 		return fmt.Errorf("failed to get cidr(s) from provider: %v", err)
 	}
 	if len(cidrStrings) == 0 {
-		nodeutil.RecordNodeStatusChange(ca.recorder, node, "CIDRNotAvailable")
+		controllerutil.RecordNodeStatusChange(ca.recorder, node, "CIDRNotAvailable")
 		return fmt.Errorf("failed to allocate cidr: Node %v has no CIDRs", node.Name)
 	}
 	//Can have at most 2 ips (one for v4 and one for v6)
@@ -290,19 +290,19 @@ func (ca *cloudCIDRAllocator) updateCIDRAllocation(nodeName string) error {
 		// See https://github.com/kubernetes/kubernetes/pull/42147#discussion_r103357248
 	}
 	for i := 0; i < cidrUpdateRetries; i++ {
-		if err = utilnode.PatchNodeCIDRs(ca.client, types.NodeName(node.Name), cidrStrings); err == nil {
+		if err = nodeutil.PatchNodeCIDRs(ca.client, types.NodeName(node.Name), cidrStrings); err == nil {
 			klog.InfoS("Set the node PodCIDRs", "nodeName", node.Name, "cidrStrings", cidrStrings)
 			break
 		}
 	}
 	if err != nil {
-		nodeutil.RecordNodeStatusChange(ca.recorder, node, "CIDRAssignmentFailed")
+		controllerutil.RecordNodeStatusChange(ca.recorder, node, "CIDRAssignmentFailed")
 		klog.ErrorS(err, "Failed to update the node PodCIDR after multiple attempts", "nodeName", node.Name, "cidrStrings", cidrStrings)
 		return err
 	}
 
-	err = utilnode.SetNodeCondition(ca.client, types.NodeName(node.Name), v1.NodeCondition{
+	err = nodeutil.SetNodeCondition(ca.client, types.NodeName(node.Name), v1.NodeCondition{
 		Type:   v1.NodeNetworkUnavailable,
 		Status: v1.ConditionFalse,
 		Reason: "RouteCreated",
diff --git a/pkg/controller/nodeipam/ipam/controller_legacyprovider.go b/pkg/controller/nodeipam/ipam/controller_legacyprovider.go
index 61b40ec248e..4e1af6091e5 100644
--- a/pkg/controller/nodeipam/ipam/controller_legacyprovider.go
+++ b/pkg/controller/nodeipam/ipam/controller_legacyprovider.go
@@ -35,7 +35,7 @@ import (
 	cloudprovider "k8s.io/cloud-provider"
 	"k8s.io/kubernetes/pkg/controller/nodeipam/ipam/cidrset"
 	nodesync "k8s.io/kubernetes/pkg/controller/nodeipam/ipam/sync"
-	nodeutil "k8s.io/kubernetes/pkg/controller/util/node"
+	controllerutil "k8s.io/kubernetes/pkg/controller/util/node"
 	"k8s.io/legacy-cloud-providers/gce"
 )
 
@@ -142,9 +142,9 @@ func (c *Controller) Start(nodeInformer informers.NodeInformer) error {
 	}
 
 	nodeInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
-		AddFunc:    nodeutil.CreateAddNodeHandler(c.onAdd),
-		UpdateFunc: nodeutil.CreateUpdateNodeHandler(c.onUpdate),
-		DeleteFunc: nodeutil.CreateDeleteNodeHandler(c.onDelete),
+		AddFunc:    controllerutil.CreateAddNodeHandler(c.onAdd),
+		UpdateFunc: controllerutil.CreateUpdateNodeHandler(c.onUpdate),
+		DeleteFunc: controllerutil.CreateDeleteNodeHandler(c.onDelete),
 	})
 
 	return nil
diff --git a/pkg/controller/nodeipam/ipam/range_allocator.go b/pkg/controller/nodeipam/ipam/range_allocator.go
index e5603341ab8..3cf6794b228 100644
--- a/pkg/controller/nodeipam/ipam/range_allocator.go
+++ b/pkg/controller/nodeipam/ipam/range_allocator.go
@@ -36,9 +36,9 @@ import (
 	corelisters "k8s.io/client-go/listers/core/v1"
 	"k8s.io/client-go/tools/cache"
 	"k8s.io/client-go/tools/record"
+	nodeutil "k8s.io/component-helpers/node/util"
 	"k8s.io/kubernetes/pkg/controller/nodeipam/ipam/cidrset"
-	nodeutil "k8s.io/kubernetes/pkg/controller/util/node"
-	utilnode "k8s.io/kubernetes/pkg/util/node"
+	controllerutil "k8s.io/kubernetes/pkg/controller/util/node"
 )
 
 // cidrs are reserved, then node resource is patched with them
@@ -135,8 +135,8 @@ func NewCIDRRangeAllocator(client clientset.Interface, nodeInformer informers.No
 	}
 
 	nodeInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
-		AddFunc: nodeutil.CreateAddNodeHandler(ra.AllocateOrOccupyCIDR),
-		UpdateFunc: nodeutil.CreateUpdateNodeHandler(func(_, newNode *v1.Node) error {
+		AddFunc: controllerutil.CreateAddNodeHandler(ra.AllocateOrOccupyCIDR),
+		UpdateFunc: controllerutil.CreateUpdateNodeHandler(func(_, newNode *v1.Node) error {
 			// If the PodCIDRs list is not empty we either:
 			// - already processed a Node that already had CIDRs after NC restarted
 			//   (cidr is marked as used),
@@ -161,7 +161,7 @@ func NewCIDRRangeAllocator(client clientset.Interface, nodeInformer informers.No
 			}
 			return nil
 		}),
-		DeleteFunc: nodeutil.CreateDeleteNodeHandler(ra.ReleaseCIDR),
+		DeleteFunc: controllerutil.CreateDeleteNodeHandler(ra.ReleaseCIDR),
 	})
 
 	return ra, nil
@@ -268,7 +268,7 @@ func (r *rangeAllocator) AllocateOrOccupyCIDR(node *v1.Node) error {
 		podCIDR, err := r.cidrSets[idx].AllocateNext()
 		if err != nil {
 			r.removeNodeFromProcessing(node.Name)
-			nodeutil.RecordNodeStatusChange(r.recorder, node, "CIDRNotAvailable")
+			controllerutil.RecordNodeStatusChange(r.recorder, node, "CIDRNotAvailable")
 			return fmt.Errorf("failed to allocate cidr from cluster cidr at idx:%v: %v", idx, err)
 		}
 		allocated.allocatedCIDRs[idx] = podCIDR
@@ -370,14 +370,14 @@ func (r *rangeAllocator) updateCIDRsAllocation(data nodeReservedCIDRs) error {
 	// If we reached here, it means that the node has no CIDR currently assigned. So we set it.
 	for i := 0; i < cidrUpdateRetries; i++ {
-		if err = utilnode.PatchNodeCIDRs(r.client, types.NodeName(node.Name), cidrsString); err == nil {
+		if err = nodeutil.PatchNodeCIDRs(r.client, types.NodeName(node.Name), cidrsString); err == nil {
 			klog.Infof("Set node %v PodCIDR to %v", node.Name, cidrsString)
 			return nil
 		}
 	}
 	// failed release back to the pool
 	klog.Errorf("Failed to update node %v PodCIDR to %v after multiple attempts: %v", node.Name, cidrsString, err)
-	nodeutil.RecordNodeStatusChange(r.recorder, node, "CIDRAssignmentFailed")
+	controllerutil.RecordNodeStatusChange(r.recorder, node, "CIDRAssignmentFailed")
 	// We accept the fact that we may leak CIDRs here. This is safer than releasing
 	// them in case when we don't know if request went through.
 	// NodeController restart will return all falsely allocated CIDRs to the pool.
diff --git a/pkg/controller/nodelifecycle/node_lifecycle_controller.go b/pkg/controller/nodelifecycle/node_lifecycle_controller.go
index 34bf5951e59..b9a37311e04 100644
--- a/pkg/controller/nodelifecycle/node_lifecycle_controller.go
+++ b/pkg/controller/nodelifecycle/node_lifecycle_controller.go
@@ -51,11 +51,11 @@ import (
 	"k8s.io/client-go/util/flowcontrol"
 	"k8s.io/client-go/util/workqueue"
 	"k8s.io/component-base/metrics/prometheus/ratelimiter"
-	utilnode "k8s.io/component-helpers/node/topology"
+	nodetopology "k8s.io/component-helpers/node/topology"
 	kubeletapis "k8s.io/kubelet/pkg/apis"
 	"k8s.io/kubernetes/pkg/controller"
 	"k8s.io/kubernetes/pkg/controller/nodelifecycle/scheduler"
-	nodeutil "k8s.io/kubernetes/pkg/controller/util/node"
+	controllerutil "k8s.io/kubernetes/pkg/controller/util/node"
 	taintutils "k8s.io/kubernetes/pkg/util/taints"
 )
 
@@ -487,15 +487,15 @@ func NewNodeLifecycleController(
 		nodeGetter := func(name string) (*v1.Node, error) { return nodeLister.Get(name) }
 		nc.taintManager = scheduler.NewNoExecuteTaintManager(ctx, kubeClient, podGetter, nodeGetter, nc.getPodsAssignedToNode)
 		nodeInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
-			AddFunc: nodeutil.CreateAddNodeHandler(func(node *v1.Node) error {
+			AddFunc: controllerutil.CreateAddNodeHandler(func(node *v1.Node) error {
 				nc.taintManager.NodeUpdated(nil, node)
 				return nil
 			}),
-			UpdateFunc: nodeutil.CreateUpdateNodeHandler(func(oldNode, newNode *v1.Node) error {
+			UpdateFunc: controllerutil.CreateUpdateNodeHandler(func(oldNode, newNode *v1.Node) error {
 				nc.taintManager.NodeUpdated(oldNode, newNode)
 				return nil
 			}),
-			DeleteFunc: nodeutil.CreateDeleteNodeHandler(func(node *v1.Node) error {
+			DeleteFunc: controllerutil.CreateDeleteNodeHandler(func(node *v1.Node) error {
 				nc.taintManager.NodeUpdated(node, nil)
 				return nil
 			}),
@@ -504,16 +504,16 @@ func NewNodeLifecycleController(
 
 	klog.Infof("Controller will reconcile labels.")
 	nodeInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
-		AddFunc: nodeutil.CreateAddNodeHandler(func(node *v1.Node) error {
+		AddFunc: controllerutil.CreateAddNodeHandler(func(node *v1.Node) error {
 			nc.nodeUpdateQueue.Add(node.Name)
 			nc.nodeEvictionMap.registerNode(node.Name)
 			return nil
 		}),
-		UpdateFunc: nodeutil.CreateUpdateNodeHandler(func(_, newNode *v1.Node) error {
+		UpdateFunc: controllerutil.CreateUpdateNodeHandler(func(_, newNode *v1.Node) error {
 			nc.nodeUpdateQueue.Add(newNode.Name)
 			return nil
 		}),
-		DeleteFunc: nodeutil.CreateDeleteNodeHandler(func(node *v1.Node) error {
+		DeleteFunc: controllerutil.CreateDeleteNodeHandler(func(node *v1.Node) error {
 			nc.nodesToRetry.Delete(node.Name)
 			nc.nodeEvictionMap.unregisterNode(node.Name)
 			return nil
@@ -657,7 +657,7 @@ func (nc *Controller) doNoScheduleTaintingPass(ctx context.Context, nodeName str
 	if len(taintsToAdd) == 0 && len(taintsToDel) == 0 {
 		return nil
 	}
-	if !nodeutil.SwapNodeControllerTaint(ctx, nc.kubeClient, taintsToAdd, taintsToDel, node) {
+	if !controllerutil.SwapNodeControllerTaint(ctx, nc.kubeClient, taintsToAdd, taintsToDel, node) {
 		return fmt.Errorf("failed to swap taints of node %+v", node)
 	}
 	return nil
@@ -678,7 +678,7 @@ func (nc *Controller) doNoExecuteTaintingPass(ctx context.Context) {
 				// retry in 50 millisecond
 				return false, 50 * time.Millisecond
 			}
-			_, condition := nodeutil.GetNodeCondition(&node.Status, v1.NodeReady)
+			_, condition := controllerutil.GetNodeCondition(&node.Status, v1.NodeReady)
 			// Because we want to mimic NodeStatus.Condition["Ready"] we make "unreachable" and "not ready" taints mutually exclusive.
 			taintToAdd := v1.Taint{}
 			oppositeTaint := v1.Taint{}
@@ -694,11 +694,10 @@ func (nc *Controller) doNoExecuteTaintingPass(ctx context.Context) {
 				klog.V(4).Infof("Node %v was in a taint queue, but it's ready now. Ignoring taint request.", value.Value)
 				return true, 0
 			}
-
-			result := nodeutil.SwapNodeControllerTaint(ctx, nc.kubeClient, []*v1.Taint{&taintToAdd}, []*v1.Taint{&oppositeTaint}, node)
+			result := controllerutil.SwapNodeControllerTaint(ctx, nc.kubeClient, []*v1.Taint{&taintToAdd}, []*v1.Taint{&oppositeTaint}, node)
 			if result {
 				//count the evictionsNumber
-				zone := utilnode.GetZoneKey(node)
+				zone := nodetopology.GetZoneKey(node)
 				evictionsNumber.WithLabelValues(zone).Inc()
 			}
 
@@ -725,7 +724,7 @@ func (nc *Controller) doEvictionPass(ctx context.Context) {
 				utilruntime.HandleError(fmt.Errorf("unable to list pods from node %q: %v", value.Value, err))
 				return false, 0
 			}
-			remaining, err := nodeutil.DeletePods(ctx, nc.kubeClient, pods, nc.recorder, value.Value, nodeUID, nc.daemonSetStore)
+			remaining, err := controllerutil.DeletePods(ctx, nc.kubeClient, pods, nc.recorder, value.Value, nodeUID, nc.daemonSetStore)
 			if err != nil {
 				// We are not setting eviction status here.
 				// New pods will be handled by zonePodEvictor retry
@@ -741,7 +740,7 @@ func (nc *Controller) doEvictionPass(ctx context.Context) {
 			}
 
 			if node != nil {
-				zone := utilnode.GetZoneKey(node)
+				zone := nodetopology.GetZoneKey(node)
 				evictionsNumber.WithLabelValues(zone).Inc()
 			}
 
@@ -768,7 +767,7 @@ func (nc *Controller) monitorNodeHealth(ctx context.Context) error {
 
 	for i := range added {
 		klog.V(1).Infof("Controller observed a new Node: %#v", added[i].Name)
-		nodeutil.RecordNodeEvent(nc.recorder, added[i].Name, string(added[i].UID), v1.EventTypeNormal, "RegisteredNode", fmt.Sprintf("Registered Node %v in Controller", added[i].Name))
+		controllerutil.RecordNodeEvent(nc.recorder, added[i].Name, string(added[i].UID), v1.EventTypeNormal, "RegisteredNode", fmt.Sprintf("Registered Node %v in Controller", added[i].Name))
 		nc.knownNodeSet[added[i].Name] = added[i]
 		nc.addPodEvictorForNewZone(added[i])
 		if nc.runTaintManager {
@@ -780,7 +779,7 @@ func (nc *Controller) monitorNodeHealth(ctx context.Context) error {
 
 	for i := range deleted {
 		klog.V(1).Infof("Controller observed a Node deletion: %v", deleted[i].Name)
-		nodeutil.RecordNodeEvent(nc.recorder, deleted[i].Name, string(deleted[i].UID), v1.EventTypeNormal, "RemovingNode", fmt.Sprintf("Removing Node %v from Controller", deleted[i].Name))
+		controllerutil.RecordNodeEvent(nc.recorder, deleted[i].Name, string(deleted[i].UID), v1.EventTypeNormal, "RemovingNode", fmt.Sprintf("Removing Node %v from Controller", deleted[i].Name))
 		delete(nc.knownNodeSet, deleted[i].Name)
 	}
 
@@ -810,7 +809,7 @@ func (nc *Controller) monitorNodeHealth(ctx context.Context) error {
 
 		// Some nodes may be excluded from disruption checking
 		if !isNodeExcludedFromDisruptionChecks(node) {
-			zoneToNodeConditions[utilnode.GetZoneKey(node)] = append(zoneToNodeConditions[utilnode.GetZoneKey(node)], currentReadyCondition)
+			zoneToNodeConditions[nodetopology.GetZoneKey(node)] = append(zoneToNodeConditions[nodetopology.GetZoneKey(node)], currentReadyCondition)
 		}
 
 		if currentReadyCondition != nil {
@@ -837,10 +836,10 @@ func (nc *Controller) monitorNodeHealth(ctx context.Context) error {
 			switch {
 			case currentReadyCondition.Status != v1.ConditionTrue && observedReadyCondition.Status == v1.ConditionTrue:
 				// Report node event only once when status changed.
-				nodeutil.RecordNodeStatusChange(nc.recorder, node, "NodeNotReady")
+				controllerutil.RecordNodeStatusChange(nc.recorder, node, "NodeNotReady")
 				fallthrough
 			case needsRetry && observedReadyCondition.Status != v1.ConditionTrue:
-				if err = nodeutil.MarkPodsNotReady(ctx, nc.kubeClient, nc.recorder, pods, node.Name); err != nil {
+				if err = controllerutil.MarkPodsNotReady(ctx, nc.kubeClient, nc.recorder, pods, node.Name); err != nil {
 					utilruntime.HandleError(fmt.Errorf("unable to mark all pods NotReady on node %v: %v; queuing for retry", node.Name, err))
 					nc.nodesToRetry.Store(node.Name, struct{}{})
 					continue
@@ -862,7 +861,7 @@ func (nc *Controller) processTaintBaseEviction(ctx context.Context, node *v1.Nod
 		// We want to update the taint straight away if Node is already tainted with the UnreachableTaint
 		if taintutils.TaintExists(node.Spec.Taints, UnreachableTaintTemplate) {
 			taintToAdd := *NotReadyTaintTemplate
-			if !nodeutil.SwapNodeControllerTaint(ctx, nc.kubeClient, []*v1.Taint{&taintToAdd}, []*v1.Taint{UnreachableTaintTemplate}, node) {
+			if !controllerutil.SwapNodeControllerTaint(ctx, nc.kubeClient, []*v1.Taint{&taintToAdd}, []*v1.Taint{UnreachableTaintTemplate}, node) {
 				klog.Errorf("Failed to instantly swap UnreachableTaint to NotReadyTaint. Will try again in the next cycle.")
 			}
 		} else if nc.markNodeForTainting(node, v1.ConditionFalse) {
@@ -875,7 +874,7 @@ func (nc *Controller) processTaintBaseEviction(ctx context.Context, node *v1.Nod
 		// We want to update the taint straight away if Node is already tainted with the UnreachableTaint
 		if taintutils.TaintExists(node.Spec.Taints, NotReadyTaintTemplate) {
 			taintToAdd := *UnreachableTaintTemplate
-			if !nodeutil.SwapNodeControllerTaint(ctx, nc.kubeClient, []*v1.Taint{&taintToAdd}, []*v1.Taint{NotReadyTaintTemplate}, node) {
+			if !controllerutil.SwapNodeControllerTaint(ctx, nc.kubeClient, []*v1.Taint{&taintToAdd}, []*v1.Taint{NotReadyTaintTemplate}, node) {
 				klog.Errorf("Failed to instantly swap NotReadyTaint to UnreachableTaint. Will try again in the next cycle.")
 			}
 		} else if nc.markNodeForTainting(node, v1.ConditionUnknown) {
@@ -962,7 +961,7 @@ func (nc *Controller) tryUpdateNodeHealth(ctx context.Context, node *v1.Node) (t
 	var gracePeriod time.Duration
 	var observedReadyCondition v1.NodeCondition
-	_, currentReadyCondition := nodeutil.GetNodeCondition(&node.Status, v1.NodeReady)
+	_, currentReadyCondition := controllerutil.GetNodeCondition(&node.Status, v1.NodeReady)
 	if currentReadyCondition == nil {
 		// If ready condition is nil, then kubelet (or nodecontroller) never posted node status.
 		// A fake ready condition is created, where LastHeartbeatTime and LastTransitionTime is set
@@ -1005,7 +1004,7 @@ func (nc *Controller) tryUpdateNodeHealth(ctx context.Context, node *v1.Node) (t
 	var savedCondition *v1.NodeCondition
 	var savedLease *coordv1.Lease
 	if nodeHealth != nil {
-		_, savedCondition = nodeutil.GetNodeCondition(nodeHealth.status, v1.NodeReady)
+		_, savedCondition = controllerutil.GetNodeCondition(nodeHealth.status, v1.NodeReady)
 		savedLease = nodeHealth.lease
 	}
 
@@ -1077,7 +1076,7 @@ func (nc *Controller) tryUpdateNodeHealth(ctx context.Context, node *v1.Node) (t
 		nowTimestamp := nc.now()
 		for _, nodeConditionType := range nodeConditionTypes {
-			_, currentCondition := nodeutil.GetNodeCondition(&node.Status, nodeConditionType)
+			_, currentCondition := controllerutil.GetNodeCondition(&node.Status, nodeConditionType)
 			if currentCondition == nil {
 				klog.V(2).Infof("Condition %v of node %v was never updated by kubelet", nodeConditionType, node.Name)
 				node.Status.Conditions = append(node.Status.Conditions, v1.NodeCondition{
@@ -1100,7 +1099,7 @@ func (nc *Controller) tryUpdateNodeHealth(ctx context.Context, node *v1.Node) (t
 			}
 		}
 		// We need to update currentReadyCondition due to its value potentially changed.
-		_, currentReadyCondition = nodeutil.GetNodeCondition(&node.Status, v1.NodeReady)
+		_, currentReadyCondition = controllerutil.GetNodeCondition(&node.Status, v1.NodeReady)
 
 		if !apiequality.Semantic.DeepEqual(currentReadyCondition, &observedReadyCondition) {
 			if _, err := nc.kubeClient.CoreV1().Nodes().UpdateStatus(ctx, node, metav1.UpdateOptions{}); err != nil {
@@ -1275,7 +1274,7 @@ func (nc *Controller) processPod(ctx context.Context, podItem podUpdateItem) {
 		return
 	}
 
-	_, currentReadyCondition := nodeutil.GetNodeCondition(nodeHealth.status, v1.NodeReady)
+	_, currentReadyCondition := controllerutil.GetNodeCondition(nodeHealth.status, v1.NodeReady)
 	if currentReadyCondition == nil {
 		// Lack of NodeReady condition may only happen after node addition (or if it will be maliciously deleted).
 		// In both cases, the pod will be handled correctly (evicted if needed) during processing
@@ -1295,7 +1294,7 @@ func (nc *Controller) processPod(ctx context.Context, podItem podUpdateItem) {
 	}
 
 	if currentReadyCondition.Status != v1.ConditionTrue {
-		if err := nodeutil.MarkPodsNotReady(ctx, nc.kubeClient, nc.recorder, pods, nodeName); err != nil {
+		if err := controllerutil.MarkPodsNotReady(ctx, nc.kubeClient, nc.recorder, pods, nodeName); err != nil {
 			klog.Warningf("Unable to mark pod %+v NotReady on node %v: %v.", podItem, nodeName, err)
 			nc.podUpdateQueue.AddRateLimited(podItem)
 		}
@@ -1339,7 +1338,7 @@ func (nc *Controller) classifyNodes(allNodes []*v1.Node) (added, deleted, newZon
 			added = append(added, allNodes[i])
 		} else {
 			// Currently, we only consider new zone as updated.
-			zone := utilnode.GetZoneKey(allNodes[i])
+			zone := nodetopology.GetZoneKey(allNodes[i])
 			if _, found := nc.zoneStates[zone]; !found {
 				newZoneRepresentatives = append(newZoneRepresentatives, allNodes[i])
 			}
@@ -1382,7 +1381,7 @@ func (nc *Controller) ReducedQPSFunc(nodeNum int) float32 {
 func (nc *Controller) addPodEvictorForNewZone(node *v1.Node) {
 	nc.evictorLock.Lock()
 	defer nc.evictorLock.Unlock()
-	zone := utilnode.GetZoneKey(node)
+	zone := nodetopology.GetZoneKey(node)
 	if _, found := nc.zoneStates[zone]; !found {
 		nc.zoneStates[zone] = stateInitial
 		if !nc.runTaintManager {
@@ -1403,7 +1402,7 @@ func (nc *Controller) addPodEvictorForNewZone(node *v1.Node) {
 // cancelPodEviction removes any queued evictions, typically because the node is available again. It
 // returns true if an eviction was queued.
 func (nc *Controller) cancelPodEviction(node *v1.Node) bool {
-	zone := utilnode.GetZoneKey(node)
+	zone := nodetopology.GetZoneKey(node)
 	nc.evictorLock.Lock()
 	defer nc.evictorLock.Unlock()
 	if !nc.nodeEvictionMap.setStatus(node.Name, unmarked) {
@@ -1429,7 +1428,7 @@ func (nc *Controller) evictPods(ctx context.Context, node *v1.Node, pods []*v1.P
 	if ok && status == evicted {
 		// Node eviction already happened for this node.
 		// Handling immediate pod deletion.
-		_, err := nodeutil.DeletePods(ctx, nc.kubeClient, pods, nc.recorder, node.Name, string(node.UID), nc.daemonSetStore)
+		_, err := controllerutil.DeletePods(ctx, nc.kubeClient, pods, nc.recorder, node.Name, string(node.UID), nc.daemonSetStore)
 		if err != nil {
 			return false, fmt.Errorf("unable to delete pods from node %q: %v", node.Name, err)
 		}
@@ -1438,7 +1437,7 @@ func (nc *Controller) evictPods(ctx context.Context, node *v1.Node, pods []*v1.P
 	if !nc.nodeEvictionMap.setStatus(node.Name, toBeEvicted) {
 		klog.V(2).Infof("node %v was unregistered in the meantime - skipping setting status", node.Name)
 	}
-	return nc.zonePodEvictor[utilnode.GetZoneKey(node)].Add(node.Name, string(node.UID)), nil
+	return nc.zonePodEvictor[nodetopology.GetZoneKey(node)].Add(node.Name, string(node.UID)), nil
 }
 
 func (nc *Controller) markNodeForTainting(node *v1.Node, status v1.ConditionStatus) bool {
@@ -1446,17 +1445,17 @@ func (nc *Controller) markNodeForTainting(node *v1.Node, status v1.ConditionStat
 	defer nc.evictorLock.Unlock()
 	if status == v1.ConditionFalse {
 		if !taintutils.TaintExists(node.Spec.Taints, NotReadyTaintTemplate) {
-			nc.zoneNoExecuteTainter[utilnode.GetZoneKey(node)].Remove(node.Name)
+			nc.zoneNoExecuteTainter[nodetopology.GetZoneKey(node)].Remove(node.Name)
 		}
 	}
 
 	if status == v1.ConditionUnknown {
 		if !taintutils.TaintExists(node.Spec.Taints, UnreachableTaintTemplate) {
-			nc.zoneNoExecuteTainter[utilnode.GetZoneKey(node)].Remove(node.Name)
+			nc.zoneNoExecuteTainter[nodetopology.GetZoneKey(node)].Remove(node.Name)
 		}
 	}
 
-	return nc.zoneNoExecuteTainter[utilnode.GetZoneKey(node)].Add(node.Name, string(node.UID))
+	return nc.zoneNoExecuteTainter[nodetopology.GetZoneKey(node)].Add(node.Name, string(node.UID))
 }
 
 func (nc *Controller) markNodeAsReachable(ctx context.Context, node *v1.Node) (bool, error) {
@@ -1472,7 +1471,7 @@ func (nc *Controller) markNodeAsReachable(ctx context.Context, node *v1.Node) (b
 		klog.Errorf("Failed to remove taint from node %v: %v", node.Name, err)
 		return false, err
 	}
-	return nc.zoneNoExecuteTainter[utilnode.GetZoneKey(node)].Remove(node.Name), nil
+	return nc.zoneNoExecuteTainter[nodetopology.GetZoneKey(node)].Remove(node.Name), nil
 }
 
 // ComputeZoneState returns a slice of NodeReadyConditions for all Nodes in a given zone.
@@ -1541,7 +1540,7 @@ func (nc *Controller) reconcileNodeLabels(nodeName string) error {
 	if len(labelsToUpdate) == 0 {
 		return nil
 	}
-	if !nodeutil.AddOrUpdateLabelsOnNode(nc.kubeClient, labelsToUpdate, node) {
+	if !controllerutil.AddOrUpdateLabelsOnNode(nc.kubeClient, labelsToUpdate, node) {
 		return fmt.Errorf("failed update labels for node %+v", node)
 	}
 	return nil
diff --git a/pkg/controller/nodelifecycle/node_lifecycle_controller_test.go b/pkg/controller/nodelifecycle/node_lifecycle_controller_test.go
index 506e061e235..41fc78cf0d3 100644
--- a/pkg/controller/nodelifecycle/node_lifecycle_controller_test.go
+++ b/pkg/controller/nodelifecycle/node_lifecycle_controller_test.go
@@ -44,7 +44,7 @@ import (
 	"k8s.io/kubernetes/pkg/controller"
 	"k8s.io/kubernetes/pkg/controller/nodelifecycle/scheduler"
 	"k8s.io/kubernetes/pkg/controller/testutil"
-	nodeutil "k8s.io/kubernetes/pkg/controller/util/node"
+	controllerutil "k8s.io/kubernetes/pkg/controller/util/node"
 	"k8s.io/kubernetes/pkg/util/node"
 	taintutils "k8s.io/kubernetes/pkg/util/taints"
 	"k8s.io/utils/pointer"
@@ -95,7 +95,7 @@ func (nc *nodeLifecycleController) doEviction(fakeNodeHandler *testutil.FakeNode
 	nc.zonePodEvictor[zone].Try(func(value scheduler.TimedValue) (bool, time.Duration) {
 		uid, _ := value.UID.(string)
 		pods, _ := nc.getPodsAssignedToNode(value.Value)
-		nodeutil.DeletePods(context.TODO(), fakeNodeHandler, pods, nc.recorder, value.Value, uid, nc.daemonSetStore)
+		controllerutil.DeletePods(context.TODO(), fakeNodeHandler, pods, nc.recorder, value.Value, uid, nc.daemonSetStore)
 		_ = nc.nodeEvictionMap.setStatus(value.Value, evicted)
 		return true, 0
 	})
@@ -729,7 +729,7 @@ func TestMonitorNodeHealthEvictPods(t *testing.T) {
 					t.Errorf("unexpected error: %v", err)
 				}
 				t.Logf("listed pods %d for node %v", len(pods), value.Value)
-				nodeutil.DeletePods(context.TODO(), item.fakeNodeHandler, pods, nodeController.recorder, value.Value, nodeUID, nodeController.daemonSetInformer.Lister())
+				controllerutil.DeletePods(context.TODO(), item.fakeNodeHandler, pods, nodeController.recorder, value.Value, nodeUID, nodeController.daemonSetInformer.Lister())
 				return true, 0
 			})
 		} else {
@@ -889,7 +889,7 @@ func TestPodStatusChange(t *testing.T) {
 			if err != nil {
 				t.Errorf("unexpected error: %v", err)
 			}
-			nodeutil.DeletePods(context.TODO(), item.fakeNodeHandler, pods, nodeController.recorder, value.Value, nodeUID, nodeController.daemonSetStore)
+			controllerutil.DeletePods(context.TODO(), item.fakeNodeHandler, pods, nodeController.recorder, value.Value, nodeUID, nodeController.daemonSetStore)
 			return true, 0
 		})
 	}
@@ -3810,7 +3810,7 @@ func TestTryUpdateNodeHealth(t *testing.T) {
 			if err != nil {
 				t.Fatalf("unexpected error: %v", err)
 			}
-			_, savedReadyCondition := nodeutil.GetNodeCondition(nodeController.nodeHealthMap.getDeepCopy(test.node.Name).status, v1.NodeReady)
+			_, savedReadyCondition := controllerutil.GetNodeCondition(nodeController.nodeHealthMap.getDeepCopy(test.node.Name).status, v1.NodeReady)
 			savedStatus := getStatus(savedReadyCondition)
 			currentStatus := getStatus(currentReadyCondition)
 			if !apiequality.Semantic.DeepEqual(currentStatus, savedStatus) {
diff --git a/pkg/controller/volume/attachdetach/statusupdater/node_status_updater.go b/pkg/controller/volume/attachdetach/statusupdater/node_status_updater.go
index 5acae4ac46f..9396872d570 100644
--- a/pkg/controller/volume/attachdetach/statusupdater/node_status_updater.go
+++ b/pkg/controller/volume/attachdetach/statusupdater/node_status_updater.go
@@ -26,8 +26,8 @@ import (
 	"k8s.io/apimachinery/pkg/types"
 	clientset "k8s.io/client-go/kubernetes"
 	corelisters "k8s.io/client-go/listers/core/v1"
+	nodeutil "k8s.io/component-helpers/node/util"
 	"k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache"
-	nodeutil "k8s.io/kubernetes/pkg/util/node"
 )
 
 // NodeStatusUpdater defines a set of operations for updating the
diff --git a/pkg/kubelet/cm/container_manager_linux.go b/pkg/kubelet/cm/container_manager_linux.go
index 09bd752d556..3892bae081d 100644
--- a/pkg/kubelet/cm/container_manager_linux.go
+++ b/pkg/kubelet/cm/container_manager_linux.go
@@ -48,7 +48,7 @@ import (
 	"k8s.io/apimachinery/pkg/util/wait"
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	"k8s.io/client-go/tools/record"
-	utilsysctl "k8s.io/component-helpers/node/utils/sysctl"
+	utilsysctl "k8s.io/component-helpers/node/util/sysctl"
 	internalapi "k8s.io/cri-api/pkg/apis"
 	podresourcesapi "k8s.io/kubelet/pkg/apis/podresources/v1"
 	kubefeatures "k8s.io/kubernetes/pkg/features"
diff --git a/pkg/kubelet/dockershim/network/kubenet/kubenet_linux.go b/pkg/kubelet/dockershim/network/kubenet/kubenet_linux.go
index 73a6cb6e8c5..d069336011a 100644
--- a/pkg/kubelet/dockershim/network/kubenet/kubenet_linux.go
+++ b/pkg/kubelet/dockershim/network/kubenet/kubenet_linux.go
@@ -36,7 +36,7 @@ import (
 	utilerrors "k8s.io/apimachinery/pkg/util/errors"
 	utilnet "k8s.io/apimachinery/pkg/util/net"
 	utilsets "k8s.io/apimachinery/pkg/util/sets"
-	utilsysctl "k8s.io/component-helpers/node/utils/sysctl"
+	utilsysctl "k8s.io/component-helpers/node/util/sysctl"
 	"k8s.io/klog/v2"
 	kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
 	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
diff --git a/pkg/kubelet/dockershim/network/kubenet/kubenet_linux_test.go b/pkg/kubelet/dockershim/network/kubenet/kubenet_linux_test.go
index f71f9c8c075..e94f93726f5 100644
--- a/pkg/kubelet/dockershim/network/kubenet/kubenet_linux_test.go
+++ b/pkg/kubelet/dockershim/network/kubenet/kubenet_linux_test.go
@@ -31,7 +31,7 @@ import (
 	"github.com/stretchr/testify/mock"
 
 	utilsets "k8s.io/apimachinery/pkg/util/sets"
-	sysctltest "k8s.io/component-helpers/node/utils/sysctl/testing"
+	sysctltest "k8s.io/component-helpers/node/util/sysctl/testing"
 	kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
 	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
 	"k8s.io/kubernetes/pkg/kubelet/dockershim/network"
diff --git a/pkg/kubelet/dockershim/network/plugins.go b/pkg/kubelet/dockershim/network/plugins.go
index b4a500bac0f..a3eea046677 100644
--- a/pkg/kubelet/dockershim/network/plugins.go
+++ b/pkg/kubelet/dockershim/network/plugins.go
@@ -31,7 +31,7 @@ import (
 	utilerrors "k8s.io/apimachinery/pkg/util/errors"
 	utilsets "k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/apimachinery/pkg/util/validation"
-	utilsysctl "k8s.io/component-helpers/node/utils/sysctl"
+	utilsysctl "k8s.io/component-helpers/node/util/sysctl"
 	"k8s.io/klog/v2"
 	kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
 	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
diff --git a/pkg/kubelet/dockershim/network/testing/plugins_test.go b/pkg/kubelet/dockershim/network/testing/plugins_test.go
index 1a65d825fc5..3a6cc61d77a 100644
--- a/pkg/kubelet/dockershim/network/testing/plugins_test.go
+++ b/pkg/kubelet/dockershim/network/testing/plugins_test.go
@@ -25,7 +25,7 @@ import (
 	"testing"
 
 	utilsets "k8s.io/apimachinery/pkg/util/sets"
-	sysctltest "k8s.io/component-helpers/node/utils/sysctl/testing"
+	sysctltest "k8s.io/component-helpers/node/util/sysctl/testing"
 	kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/kubelet/dockershim/network" diff --git a/pkg/kubelet/kubelet_node_status.go b/pkg/kubelet/kubelet_node_status.go index 583c5a1f883..39d0099d932 100644 --- a/pkg/kubelet/kubelet_node_status.go +++ b/pkg/kubelet/kubelet_node_status.go @@ -34,6 +34,7 @@ import ( "k8s.io/apimachinery/pkg/util/sets" cloudprovider "k8s.io/cloud-provider" cloudproviderapi "k8s.io/cloud-provider/api" + nodeutil "k8s.io/component-helpers/node/util" "k8s.io/klog/v2" kubeletapis "k8s.io/kubelet/pkg/apis" k8s_api_v1 "k8s.io/kubernetes/pkg/apis/core/v1" @@ -41,7 +42,6 @@ import ( "k8s.io/kubernetes/pkg/kubelet/events" "k8s.io/kubernetes/pkg/kubelet/nodestatus" "k8s.io/kubernetes/pkg/kubelet/util" - nodeutil "k8s.io/kubernetes/pkg/util/node" taintutil "k8s.io/kubernetes/pkg/util/taints" volutil "k8s.io/kubernetes/pkg/volume/util" ) diff --git a/pkg/kubelet/kubeletconfig/status/status.go b/pkg/kubelet/kubeletconfig/status/status.go index aad5c0b395d..5b7576b7fc0 100644 --- a/pkg/kubelet/kubeletconfig/status/status.go +++ b/pkg/kubelet/kubeletconfig/status/status.go @@ -19,15 +19,16 @@ package status import ( "context" "fmt" - "k8s.io/klog/v2" "sync" + "k8s.io/klog/v2" + apiv1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" clientset "k8s.io/client-go/kubernetes" + nodeutil "k8s.io/component-helpers/node/util" "k8s.io/kubernetes/pkg/kubelet/metrics" - nodeutil "k8s.io/kubernetes/pkg/util/node" ) const ( diff --git a/pkg/kubemark/hollow_proxy.go b/pkg/kubemark/hollow_proxy.go index 390204cc478..8adcbc5f039 100644 --- a/pkg/kubemark/hollow_proxy.go +++ b/pkg/kubemark/hollow_proxy.go @@ -25,7 +25,7 @@ import ( clientset "k8s.io/client-go/kubernetes" v1core "k8s.io/client-go/kubernetes/typed/core/v1" "k8s.io/client-go/tools/events" - utilsysctl "k8s.io/component-helpers/node/utils/sysctl" + utilsysctl "k8s.io/component-helpers/node/util/sysctl" proxyapp "k8s.io/kubernetes/cmd/kube-proxy/app" "k8s.io/kubernetes/pkg/proxy" proxyconfig "k8s.io/kubernetes/pkg/proxy/config" diff --git a/pkg/proxy/iptables/proxier.go b/pkg/proxy/iptables/proxier.go index bb88828cf89..b7739e756cf 100644 --- a/pkg/proxy/iptables/proxier.go +++ b/pkg/proxy/iptables/proxier.go @@ -40,7 +40,7 @@ import ( "k8s.io/apimachinery/pkg/util/wait" utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/client-go/tools/events" - utilsysctl "k8s.io/component-helpers/node/utils/sysctl" + utilsysctl "k8s.io/component-helpers/node/util/sysctl" "k8s.io/klog/v2" "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/proxy" diff --git a/pkg/proxy/ipvs/proxier.go b/pkg/proxy/ipvs/proxier.go index 9211bfda1ba..b932da2bf5b 100644 --- a/pkg/proxy/ipvs/proxier.go +++ b/pkg/proxy/ipvs/proxier.go @@ -44,7 +44,7 @@ import ( "k8s.io/apimachinery/pkg/util/wait" utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/client-go/tools/events" - utilsysctl "k8s.io/component-helpers/node/utils/sysctl" + utilsysctl "k8s.io/component-helpers/node/util/sysctl" "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/proxy" "k8s.io/kubernetes/pkg/proxy/healthcheck" diff --git a/pkg/proxy/util/utils.go b/pkg/proxy/util/utils.go index 5cee78ccdb0..27d343b11fe 100644 --- a/pkg/proxy/util/utils.go +++ b/pkg/proxy/util/utils.go @@ -31,7 +31,7 @@ import ( utilrand "k8s.io/apimachinery/pkg/util/rand" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/client-go/tools/events" - utilsysctl "k8s.io/component-helpers/node/utils/sysctl" + utilsysctl 
"k8s.io/component-helpers/node/util/sysctl" helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" netutils "k8s.io/utils/net" diff --git a/pkg/util/node/node.go b/pkg/util/node/node.go index 3815a1d0ce0..aac738c200c 100644 --- a/pkg/util/node/node.go +++ b/pkg/util/node/node.go @@ -18,7 +18,6 @@ package node import ( "context" - "encoding/json" "fmt" "net" "os" @@ -28,13 +27,9 @@ import ( "k8s.io/klog/v2" v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/equality" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/strategicpatch" "k8s.io/apimachinery/pkg/util/wait" clientset "k8s.io/client-go/kubernetes" - v1core "k8s.io/client-go/kubernetes/typed/core/v1" netutils "k8s.io/utils/net" ) @@ -169,189 +164,3 @@ func GetNodeIP(client clientset.Interface, name string) net.IP { } return nodeIP } - -type nodeForConditionPatch struct { - Status nodeStatusForPatch `json:"status"` -} - -type nodeStatusForPatch struct { - Conditions []v1.NodeCondition `json:"conditions"` -} - -// SetNodeCondition updates specific node condition with patch operation. -func SetNodeCondition(c clientset.Interface, node types.NodeName, condition v1.NodeCondition) error { - generatePatch := func(condition v1.NodeCondition) ([]byte, error) { - patch := nodeForConditionPatch{ - Status: nodeStatusForPatch{ - Conditions: []v1.NodeCondition{ - condition, - }, - }, - } - patchBytes, err := json.Marshal(&patch) - if err != nil { - return nil, err - } - return patchBytes, nil - } - condition.LastHeartbeatTime = metav1.NewTime(time.Now()) - patch, err := generatePatch(condition) - if err != nil { - return nil - } - _, err = c.CoreV1().Nodes().PatchStatus(context.TODO(), string(node), patch) - return err -} - -type nodeForCIDRMergePatch struct { - Spec nodeSpecForMergePatch `json:"spec"` -} - -type nodeSpecForMergePatch struct { - PodCIDR string `json:"podCIDR"` - PodCIDRs []string `json:"podCIDRs,omitempty"` -} - -// PatchNodeCIDR patches the specified node's CIDR to the given value. -func PatchNodeCIDR(c clientset.Interface, node types.NodeName, cidr string) error { - patch := nodeForCIDRMergePatch{ - Spec: nodeSpecForMergePatch{ - PodCIDR: cidr, - }, - } - patchBytes, err := json.Marshal(&patch) - if err != nil { - return fmt.Errorf("failed to json.Marshal CIDR: %v", err) - } - - if _, err := c.CoreV1().Nodes().Patch(context.TODO(), string(node), types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}); err != nil { - return fmt.Errorf("failed to patch node CIDR: %v", err) - } - return nil -} - -// PatchNodeCIDRs patches the specified node.CIDR=cidrs[0] and node.CIDRs to the given value. 
-func PatchNodeCIDRs(c clientset.Interface, node types.NodeName, cidrs []string) error { - // set the pod cidrs list and set the old pod cidr field - patch := nodeForCIDRMergePatch{ - Spec: nodeSpecForMergePatch{ - PodCIDR: cidrs[0], - PodCIDRs: cidrs, - }, - } - - patchBytes, err := json.Marshal(&patch) - if err != nil { - return fmt.Errorf("failed to json.Marshal CIDR: %v", err) - } - klog.V(4).Infof("cidrs patch bytes are:%s", string(patchBytes)) - if _, err := c.CoreV1().Nodes().Patch(context.TODO(), string(node), types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}); err != nil { - return fmt.Errorf("failed to patch node CIDR: %v", err) - } - return nil -} - -// PatchNodeStatus patches node status along with objectmetadata -func PatchNodeStatus(c v1core.CoreV1Interface, nodeName types.NodeName, oldNode *v1.Node, newNode *v1.Node) (*v1.Node, []byte, error) { - patchBytes, err := preparePatchBytesforNodeStatus(nodeName, oldNode, newNode) - if err != nil { - return nil, nil, err - } - - updatedNode, err := c.Nodes().Patch(context.TODO(), string(nodeName), types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}, "status") - if err != nil { - return nil, nil, fmt.Errorf("failed to patch status %q for node %q: %v", patchBytes, nodeName, err) - } - return updatedNode, patchBytes, nil -} - -// preparePatchBytesforNodeStatus updates the node objectmetadata and status -func preparePatchBytesforNodeStatus(nodeName types.NodeName, oldNode *v1.Node, newNode *v1.Node) ([]byte, error) { - oldData, err := json.Marshal(oldNode) - if err != nil { - return nil, fmt.Errorf("failed to Marshal oldData for node %q: %v", nodeName, err) - } - - // NodeStatus.Addresses is incorrectly annotated as patchStrategy=merge, which - // will cause strategicpatch.CreateTwoWayMergePatch to create an incorrect patch - // if it changed. - manuallyPatchAddresses := (len(oldNode.Status.Addresses) > 0) && !equality.Semantic.DeepEqual(oldNode.Status.Addresses, newNode.Status.Addresses) - - // Reset spec to make sure only patch for Status or ObjectMeta is generated. - // Note that we don't reset ObjectMeta here, because: - // 1. This aligns with Nodes().UpdateStatus(). - // 2. Some component does use this to update node annotations. - diffNode := newNode.DeepCopy() - diffNode.Spec = oldNode.Spec - if manuallyPatchAddresses { - diffNode.Status.Addresses = oldNode.Status.Addresses - } - newData, err := json.Marshal(diffNode) - if err != nil { - return nil, fmt.Errorf("failed to Marshal newData for node %q: %v", nodeName, err) - } - - patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, v1.Node{}) - if err != nil { - return nil, fmt.Errorf("failed to CreateTwoWayMergePatch for node %q: %v", nodeName, err) - } - if manuallyPatchAddresses { - patchBytes, err = fixupPatchForNodeStatusAddresses(patchBytes, newNode.Status.Addresses) - if err != nil { - return nil, fmt.Errorf("failed to fix up NodeAddresses in patch for node %q: %v", nodeName, err) - } - } - - return patchBytes, nil -} - -// fixupPatchForNodeStatusAddresses adds a replace-strategy patch for Status.Addresses to -// the existing patch -func fixupPatchForNodeStatusAddresses(patchBytes []byte, addresses []v1.NodeAddress) ([]byte, error) { - // Given patchBytes='{"status": {"conditions": [ ... ], "phase": ...}}' and - // addresses=[{"type": "InternalIP", "address": "10.0.0.1"}], we need to generate: - // - // { - // "status": { - // "conditions": [ ... 
], - // "phase": ..., - // "addresses": [ - // { - // "type": "InternalIP", - // "address": "10.0.0.1" - // }, - // { - // "$patch": "replace" - // } - // ] - // } - // } - - var patchMap map[string]interface{} - if err := json.Unmarshal(patchBytes, &patchMap); err != nil { - return nil, err - } - - addrBytes, err := json.Marshal(addresses) - if err != nil { - return nil, err - } - var addrArray []interface{} - if err := json.Unmarshal(addrBytes, &addrArray); err != nil { - return nil, err - } - addrArray = append(addrArray, map[string]interface{}{"$patch": "replace"}) - - status := patchMap["status"] - if status == nil { - status = map[string]interface{}{} - patchMap["status"] = status - } - statusMap, ok := status.(map[string]interface{}) - if !ok { - return nil, fmt.Errorf("unexpected data in patch") - } - statusMap["addresses"] = addrArray - - return json.Marshal(patchMap) -} diff --git a/pkg/volume/csi/nodeinfomanager/nodeinfomanager.go b/pkg/volume/csi/nodeinfomanager/nodeinfomanager.go index d7c709eb5ba..14f13e5d1ef 100644 --- a/pkg/volume/csi/nodeinfomanager/nodeinfomanager.go +++ b/pkg/volume/csi/nodeinfomanager/nodeinfomanager.go @@ -38,8 +38,8 @@ import ( "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" clientset "k8s.io/client-go/kubernetes" + nodeutil "k8s.io/component-helpers/node/util" "k8s.io/klog/v2" - nodeutil "k8s.io/kubernetes/pkg/util/node" "k8s.io/kubernetes/pkg/volume" "k8s.io/kubernetes/pkg/volume/util" ) diff --git a/staging/publishing/import-restrictions.yaml b/staging/publishing/import-restrictions.yaml index bf8776c1e1e..0b4b9daed83 100644 --- a/staging/publishing/import-restrictions.yaml +++ b/staging/publishing/import-restrictions.yaml @@ -233,6 +233,7 @@ - k8s.io/cloud-provider - k8s.io/component-base - k8s.io/controller-manager + - k8s.io/component-helpers - k8s.io/klog - k8s.io/utils diff --git a/staging/publishing/rules.yaml b/staging/publishing/rules.yaml index c52f78b818f..eea4deca458 100644 --- a/staging/publishing/rules.yaml +++ b/staging/publishing/rules.yaml @@ -1295,6 +1295,8 @@ rules: branch: master - repository: controller-manager branch: master + - repository: component-helpers + branch: master - source: branch: release-1.19 dir: staging/src/k8s.io/cloud-provider @@ -1309,6 +1311,8 @@ rules: branch: release-1.19 - repository: component-base branch: release-1.19 + - repository: component-helpers + branch: release-1.19 - source: branch: release-1.20 dir: staging/src/k8s.io/cloud-provider @@ -1327,6 +1331,8 @@ rules: branch: release-1.20 - repository: controller-manager branch: release-1.20 + - repository: component-helpers + branch: release-1.20 - source: branch: release-1.21 dir: staging/src/k8s.io/cloud-provider @@ -1345,6 +1351,8 @@ rules: branch: release-1.21 - repository: controller-manager branch: release-1.21 + - repository: component-helpers + branch: release-1.21 - source: branch: release-1.22 dir: staging/src/k8s.io/cloud-provider @@ -1363,6 +1371,8 @@ rules: branch: release-1.22 - repository: controller-manager branch: release-1.22 + - repository: component-helpers + branch: release-1.22 - destination: kube-controller-manager library: true @@ -1386,6 +1396,8 @@ rules: branch: master - repository: cloud-provider branch: master + - repository: component-helpers + branch: master - source: branch: release-1.19 dir: staging/src/k8s.io/kube-controller-manager @@ -1400,6 +1412,8 @@ rules: branch: release-1.19 - repository: client-go branch: release-1.19 + - repository: component-helpers + branch: release-1.19 - source: 
branch: release-1.20 dir: staging/src/k8s.io/kube-controller-manager @@ -1420,6 +1434,8 @@ rules: branch: release-1.20 - repository: cloud-provider branch: release-1.20 + - repository: component-helpers + branch: release-1.20 - source: branch: release-1.21 dir: staging/src/k8s.io/kube-controller-manager @@ -1440,6 +1456,8 @@ rules: branch: release-1.21 - repository: cloud-provider branch: release-1.21 + - repository: component-helpers + branch: release-1.21 - source: branch: release-1.22 dir: staging/src/k8s.io/kube-controller-manager @@ -1460,6 +1478,8 @@ rules: branch: release-1.22 - repository: cloud-provider branch: release-1.22 + - repository: component-helpers + branch: release-1.22 - destination: cluster-bootstrap library: true @@ -1622,6 +1642,8 @@ rules: branch: master - repository: mount-utils branch: master + - repository: component-helpers + branch: master - source: branch: release-1.19 dir: staging/src/k8s.io/legacy-cloud-providers @@ -1642,6 +1664,8 @@ rules: branch: release-1.19 - repository: component-base branch: release-1.19 + - repository: component-helpers + branch: release-1.19 - source: branch: release-1.20 dir: staging/src/k8s.io/legacy-cloud-providers @@ -1664,6 +1688,8 @@ rules: branch: release-1.20 - repository: controller-manager branch: release-1.20 + - repository: component-helpers + branch: release-1.20 - source: branch: release-1.21 dir: staging/src/k8s.io/legacy-cloud-providers @@ -1686,6 +1712,8 @@ rules: branch: release-1.21 - repository: controller-manager branch: release-1.21 + - repository: component-helpers + branch: release-1.21 - source: branch: release-1.22 dir: staging/src/k8s.io/legacy-cloud-providers @@ -1710,6 +1738,8 @@ rules: branch: release-1.22 - repository: mount-utils branch: release-1.22 + - repository: component-helpers + branch: release-1.22 - destination: cri-api library: true diff --git a/staging/src/k8s.io/cloud-provider/controllers/node/node_controller.go b/staging/src/k8s.io/cloud-provider/controllers/node/node_controller.go index 9e29bc20f48..ec8d590a742 100644 --- a/staging/src/k8s.io/cloud-provider/controllers/node/node_controller.go +++ b/staging/src/k8s.io/cloud-provider/controllers/node/node_controller.go @@ -40,6 +40,7 @@ import ( cloudprovider "k8s.io/cloud-provider" cloudproviderapi "k8s.io/cloud-provider/api" cloudnodeutil "k8s.io/cloud-provider/node/helpers" + nodeutil "k8s.io/component-helpers/node/util" "k8s.io/klog/v2" ) @@ -359,7 +360,7 @@ func (cnc *CloudNodeController) updateNodeAddress(ctx context.Context, node *v1. } newNode := node.DeepCopy() newNode.Status.Addresses = nodeAddresses - if _, _, err := cloudnodeutil.PatchNodeStatus(cnc.kubeClient.CoreV1(), types.NodeName(node.Name), node, newNode); err != nil { + if _, _, err := nodeutil.PatchNodeStatus(cnc.kubeClient.CoreV1(), types.NodeName(node.Name), node, newNode); err != nil { klog.Errorf("Error patching node with cloud ip addresses = [%v]", err) } } @@ -415,7 +416,7 @@ func (cnc *CloudNodeController) syncNode(ctx context.Context, nodeName string) e // TODO(wlan0): Move this logic to the route controller using the node taint instead of condition // Since there are node taints, do we still need this? 
// This condition marks the node as unusable until routes are initialized in the cloud provider - if err := cloudnodeutil.SetNodeCondition(cnc.kubeClient, types.NodeName(nodeName), v1.NodeCondition{ + if err := nodeutil.SetNodeCondition(cnc.kubeClient, types.NodeName(nodeName), v1.NodeCondition{ Type: v1.NodeNetworkUnavailable, Status: v1.ConditionTrue, Reason: "NoRouteCreated", diff --git a/staging/src/k8s.io/cloud-provider/controllers/nodelifecycle/node_lifecycle_controller.go b/staging/src/k8s.io/cloud-provider/controllers/nodelifecycle/node_lifecycle_controller.go index 6666da6d5f1..5414faf157e 100644 --- a/staging/src/k8s.io/cloud-provider/controllers/nodelifecycle/node_lifecycle_controller.go +++ b/staging/src/k8s.io/cloud-provider/controllers/nodelifecycle/node_lifecycle_controller.go @@ -36,6 +36,7 @@ import ( cloudprovider "k8s.io/cloud-provider" cloudproviderapi "k8s.io/cloud-provider/api" cloudnodeutil "k8s.io/cloud-provider/node/helpers" + nodeutil "k8s.io/component-helpers/node/util" "k8s.io/klog/v2" ) @@ -128,7 +129,7 @@ func (c *CloudNodeLifecycleController) MonitorNodes(ctx context.Context) { for _, node := range nodes { // Default NodeReady status to v1.ConditionUnknown status := v1.ConditionUnknown - if _, c := cloudnodeutil.GetNodeCondition(&node.Status, v1.NodeReady); c != nil { + if _, c := nodeutil.GetNodeCondition(&node.Status, v1.NodeReady); c != nil { status = c.Status } diff --git a/staging/src/k8s.io/cloud-provider/controllers/route/route_controller.go b/staging/src/k8s.io/cloud-provider/controllers/route/route_controller.go index 7824b999c94..028ba04b77f 100644 --- a/staging/src/k8s.io/cloud-provider/controllers/route/route_controller.go +++ b/staging/src/k8s.io/cloud-provider/controllers/route/route_controller.go @@ -41,8 +41,8 @@ import ( "k8s.io/client-go/tools/record" clientretry "k8s.io/client-go/util/retry" cloudprovider "k8s.io/cloud-provider" - cloudnodeutil "k8s.io/cloud-provider/node/helpers" "k8s.io/component-base/metrics/prometheus/ratelimiter" + nodeutil "k8s.io/component-helpers/node/util" ) const ( @@ -290,7 +290,7 @@ func (rc *RouteController) reconcile(ctx context.Context, nodes []*v1.Node, rout } func (rc *RouteController) updateNetworkingCondition(node *v1.Node, routesCreated bool) error { - _, condition := cloudnodeutil.GetNodeCondition(&(node.Status), v1.NodeNetworkUnavailable) + _, condition := nodeutil.GetNodeCondition(&(node.Status), v1.NodeNetworkUnavailable) if routesCreated && condition != nil && condition.Status == v1.ConditionFalse { klog.V(2).Infof("set node %v with NodeNetworkUnavailable=false was canceled because it is already set", node.Name) return nil @@ -311,7 +311,7 @@ func (rc *RouteController) updateNetworkingCondition(node *v1.Node, routesCreate // patch in the retry loop. 
currentTime := metav1.Now() if routesCreated { - err = cloudnodeutil.SetNodeCondition(rc.kubeClient, types.NodeName(node.Name), v1.NodeCondition{ + err = nodeutil.SetNodeCondition(rc.kubeClient, types.NodeName(node.Name), v1.NodeCondition{ Type: v1.NodeNetworkUnavailable, Status: v1.ConditionFalse, Reason: "RouteCreated", @@ -319,7 +319,7 @@ func (rc *RouteController) updateNetworkingCondition(node *v1.Node, routesCreate LastTransitionTime: currentTime, }) } else { - err = cloudnodeutil.SetNodeCondition(rc.kubeClient, types.NodeName(node.Name), v1.NodeCondition{ + err = nodeutil.SetNodeCondition(rc.kubeClient, types.NodeName(node.Name), v1.NodeCondition{ Type: v1.NodeNetworkUnavailable, Status: v1.ConditionTrue, Reason: "NoRouteCreated", diff --git a/staging/src/k8s.io/cloud-provider/controllers/route/route_controller_test.go b/staging/src/k8s.io/cloud-provider/controllers/route/route_controller_test.go index 3678bdf2e19..e9297a2782c 100644 --- a/staging/src/k8s.io/cloud-provider/controllers/route/route_controller_test.go +++ b/staging/src/k8s.io/cloud-provider/controllers/route/route_controller_test.go @@ -30,7 +30,7 @@ import ( core "k8s.io/client-go/testing" cloudprovider "k8s.io/cloud-provider" fakecloud "k8s.io/cloud-provider/fake" - cloudnodeutil "k8s.io/cloud-provider/node/helpers" + nodeutil "k8s.io/component-helpers/node/util" netutils "k8s.io/utils/net" ) @@ -378,7 +378,7 @@ func TestReconcile(t *testing.T) { for _, action := range testCase.clientset.Actions() { if action.GetVerb() == "update" && action.GetResource().Resource == "nodes" { node := action.(core.UpdateAction).GetObject().(*v1.Node) - _, condition := cloudnodeutil.GetNodeCondition(&node.Status, v1.NodeNetworkUnavailable) + _, condition := nodeutil.GetNodeCondition(&node.Status, v1.NodeNetworkUnavailable) if condition == nil { t.Errorf("%d. Missing NodeNetworkUnavailable condition for Node %v", i, node.Name) } else { diff --git a/staging/src/k8s.io/cloud-provider/go.mod b/staging/src/k8s.io/cloud-provider/go.mod index 557a08d3172..9a536f3061f 100644 --- a/staging/src/k8s.io/cloud-provider/go.mod +++ b/staging/src/k8s.io/cloud-provider/go.mod @@ -14,6 +14,7 @@ require ( k8s.io/apiserver v0.0.0 k8s.io/client-go v0.0.0 k8s.io/component-base v0.0.0 + k8s.io/component-helpers v0.0.0 k8s.io/controller-manager v0.0.0 k8s.io/klog/v2 v2.30.0 k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b @@ -26,5 +27,6 @@ replace ( k8s.io/client-go => ../client-go k8s.io/cloud-provider => ../cloud-provider k8s.io/component-base => ../component-base + k8s.io/component-helpers => ../component-helpers k8s.io/controller-manager => ../controller-manager ) diff --git a/staging/src/k8s.io/component-helpers/node/util/cidr.go b/staging/src/k8s.io/component-helpers/node/util/cidr.go new file mode 100644 index 00000000000..4d626ee0041 --- /dev/null +++ b/staging/src/k8s.io/component-helpers/node/util/cidr.go @@ -0,0 +1,58 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package util
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/types"
+	clientset "k8s.io/client-go/kubernetes"
+	"k8s.io/klog/v2"
+)
+
+type nodeForCIDRMergePatch struct {
+	Spec nodeSpecForMergePatch `json:"spec"`
+}
+
+type nodeSpecForMergePatch struct {
+	PodCIDR  string   `json:"podCIDR"`
+	PodCIDRs []string `json:"podCIDRs,omitempty"`
+}
+
+// PatchNodeCIDRs patches the specified node.CIDR=cidrs[0] and node.CIDRs to the given value.
+func PatchNodeCIDRs(c clientset.Interface, node types.NodeName, cidrs []string) error {
+	// set the pod cidrs list and set the old pod cidr field
+	patch := nodeForCIDRMergePatch{
+		Spec: nodeSpecForMergePatch{
+			PodCIDR:  cidrs[0],
+			PodCIDRs: cidrs,
+		},
+	}
+
+	patchBytes, err := json.Marshal(&patch)
+	if err != nil {
+		return fmt.Errorf("failed to json.Marshal CIDR: %v", err)
+	}
+	klog.V(4).Infof("cidrs patch bytes are:%s", string(patchBytes))
+	if _, err := c.CoreV1().Nodes().Patch(context.TODO(), string(node), types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}); err != nil {
+		return fmt.Errorf("failed to patch node CIDR: %v", err)
+	}
+	return nil
+}
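Reviewer note: a minimal sketch of how a caller would exercise the relocated PatchNodeCIDRs helper from its new import path. The fake clientset, node name, and CIDR values below are illustrative only and are not part of this change.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/kubernetes/fake"
	nodeutil "k8s.io/component-helpers/node/util"
)

func main() {
	// Fake clientset pre-seeded with a node; real callers pass their own clientset.Interface.
	client := fake.NewSimpleClientset(&v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node-a"}})

	// Patches spec.podCIDR to the first entry and spec.podCIDRs to the full (possibly dual-stack) list.
	if err := nodeutil.PatchNodeCIDRs(client, types.NodeName("node-a"), []string{"10.0.0.0/24", "fd00::/64"}); err != nil {
		fmt.Println("patch failed:", err)
	}
}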
diff --git a/staging/src/k8s.io/cloud-provider/node/helpers/conditions.go b/staging/src/k8s.io/component-helpers/node/util/conditions.go
similarity index 96%
rename from staging/src/k8s.io/cloud-provider/node/helpers/conditions.go
rename to staging/src/k8s.io/component-helpers/node/util/conditions.go
index 93500075201..3ad4dda8911 100644
--- a/staging/src/k8s.io/cloud-provider/node/helpers/conditions.go
+++ b/staging/src/k8s.io/component-helpers/node/util/conditions.go
@@ -1,5 +1,5 @@
 /*
-Copyright 2019 The Kubernetes Authors.
+Copyright 2021 The Kubernetes Authors.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-package helpers
+package util
 
 import (
 	"context"
diff --git a/staging/src/k8s.io/cloud-provider/node/helpers/status.go b/staging/src/k8s.io/component-helpers/node/util/status.go
similarity index 94%
rename from staging/src/k8s.io/cloud-provider/node/helpers/status.go
rename to staging/src/k8s.io/component-helpers/node/util/status.go
index 6330bd78c41..a3666be3100 100644
--- a/staging/src/k8s.io/cloud-provider/node/helpers/status.go
+++ b/staging/src/k8s.io/component-helpers/node/util/status.go
@@ -1,5 +1,5 @@
 /*
-Copyright 2019 The Kubernetes Authors.
+Copyright 2021 The Kubernetes Authors.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -14,13 +14,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-/*
-
-NOTE: the contents of this file has been copied from k8s.io/kubernetes/pkg/util/node. The reason for duplicating this code is to remove
-dependencies for cloud controller manager.
-
-*/
-
-package helpers
+package util
 
 import (
 	"context"
diff --git a/staging/src/k8s.io/component-helpers/node/utils/sysctl/sysctl.go b/staging/src/k8s.io/component-helpers/node/util/sysctl/sysctl.go
similarity index 100%
rename from staging/src/k8s.io/component-helpers/node/utils/sysctl/sysctl.go
rename to staging/src/k8s.io/component-helpers/node/util/sysctl/sysctl.go
diff --git a/staging/src/k8s.io/component-helpers/node/utils/sysctl/testing/fake.go b/staging/src/k8s.io/component-helpers/node/util/sysctl/testing/fake.go
similarity index 96%
rename from staging/src/k8s.io/component-helpers/node/utils/sysctl/testing/fake.go
rename to staging/src/k8s.io/component-helpers/node/util/sysctl/testing/fake.go
index 2ee5a0f0cd0..cf21c61acdf 100644
--- a/staging/src/k8s.io/component-helpers/node/utils/sysctl/testing/fake.go
+++ b/staging/src/k8s.io/component-helpers/node/util/sysctl/testing/fake.go
@@ -19,7 +19,7 @@ package testing
 import (
 	"os"
 
-	"k8s.io/component-helpers/node/utils/sysctl"
+	"k8s.io/component-helpers/node/util/sysctl"
 )
 
 // Fake is a map-backed implementation of sysctl.Interface, for testing/mocking.
diff --git a/staging/src/k8s.io/kube-controller-manager/go.mod b/staging/src/k8s.io/kube-controller-manager/go.mod
index 2405d3dcc79..d2c13261f3d 100644
--- a/staging/src/k8s.io/kube-controller-manager/go.mod
+++ b/staging/src/k8s.io/kube-controller-manager/go.mod
@@ -17,6 +17,7 @@ replace (
 	k8s.io/client-go => ../client-go
 	k8s.io/cloud-provider => ../cloud-provider
 	k8s.io/component-base => ../component-base
+	k8s.io/component-helpers => ../component-helpers
 	k8s.io/controller-manager => ../controller-manager
 	k8s.io/kube-controller-manager => ../kube-controller-manager
 )
diff --git a/staging/src/k8s.io/legacy-cloud-providers/go.mod b/staging/src/k8s.io/legacy-cloud-providers/go.mod
index 25528984fab..bc9d6c22a43 100644
--- a/staging/src/k8s.io/legacy-cloud-providers/go.mod
+++ b/staging/src/k8s.io/legacy-cloud-providers/go.mod
@@ -50,6 +50,7 @@ replace (
 	k8s.io/client-go => ../client-go
 	k8s.io/cloud-provider => ../cloud-provider
 	k8s.io/component-base => ../component-base
+	k8s.io/component-helpers => ../component-helpers
 	k8s.io/controller-manager => ../controller-manager
 	k8s.io/csi-translation-lib => ../csi-translation-lib
 	k8s.io/legacy-cloud-providers => ../legacy-cloud-providers
diff --git a/test/e2e_node/os_label_rename_test.go b/test/e2e_node/os_label_rename_test.go
index 7efccaf1e8f..080f73f3780 100644
--- a/test/e2e_node/os_label_rename_test.go
+++ b/test/e2e_node/os_label_rename_test.go
@@ -32,7 +32,7 @@ import (
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/wait"
 	v1core "k8s.io/client-go/kubernetes/typed/core/v1"
-	node2 "k8s.io/kubernetes/pkg/util/node"
+	nodeutil "k8s.io/component-helpers/node/util"
 	"k8s.io/kubernetes/test/e2e/framework"
 )
 
@@ -51,7 +51,7 @@ var _ = SIGDescribe("OSArchLabelReconciliation [Serial] [Slow] [Disruptive]", fu
 			newNode := node.DeepCopy()
 			newNode.Labels[v1.LabelOSStable] = "dummyOS"
 			newNode.Labels[v1.LabelArchStable] = "dummyArch"
-			_, _, err := node2.PatchNodeStatus(f.ClientSet.CoreV1(), types.NodeName(node.Name), node, newNode)
+			_, _, err := nodeutil.PatchNodeStatus(f.ClientSet.CoreV1(), types.NodeName(node.Name), node, newNode)
 			framework.ExpectNoError(err)
 			// Restart kubelet
 			startKubelet()
@@ -70,7 +70,7 @@ var _ = SIGDescribe("OSArchLabelReconciliation [Serial] [Slow] [Disruptive]", fu
 			newNode := node.DeepCopy()
 			newNode.Labels[v1.LabelOSStable] = "dummyOS"
 			newNode.Labels[v1.LabelArchStable] = "dummyArch"
-			_, _, err := node2.PatchNodeStatus(f.ClientSet.CoreV1(), types.NodeName(node.Name), node, newNode)
+			_, _, err := nodeutil.PatchNodeStatus(f.ClientSet.CoreV1(), types.NodeName(node.Name), node, newNode)
 			framework.ExpectNoError(err)
 			err = waitForNodeLabels(f.ClientSet.CoreV1(), node.Name, 5*time.Minute)
 			framework.ExpectNoError(err)
diff --git a/test/integration/ipamperf/results.go b/test/integration/ipamperf/results.go
index 7d4d036cd48..466825d35e1 100644
--- a/test/integration/ipamperf/results.go
+++ b/test/integration/ipamperf/results.go
@@ -30,7 +30,7 @@ import (
 	cloudprovider "k8s.io/cloud-provider"
 	"k8s.io/klog/v2"
 	"k8s.io/kubernetes/pkg/controller/nodeipam/ipam"
-	nodeutil "k8s.io/kubernetes/pkg/controller/util/node"
+	controllerutil "k8s.io/kubernetes/pkg/controller/util/node"
 )
 
 // Config represents the test configuration that is being run
@@ -150,7 +150,7 @@ func (o *Observer) monitor() {
 	nodeInformer := sharedInformer.Core().V1().Nodes().Informer()
 
 	nodeInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
-		AddFunc: nodeutil.CreateAddNodeHandler(func(node *v1.Node) (err error) {
+		AddFunc: controllerutil.CreateAddNodeHandler(func(node *v1.Node) (err error) {
 			name := node.GetName()
 			if node.Spec.PodCIDR != "" {
 				// ignore nodes that have PodCIDR (might be hold over from previous runs that did not get cleaned up)
@@ -162,7 +162,7 @@ func (o *Observer) monitor() {
 			o.numAdded = o.numAdded + 1
 			return
 		}),
-		UpdateFunc: nodeutil.CreateUpdateNodeHandler(func(oldNode, newNode *v1.Node) (err error) {
+		UpdateFunc: controllerutil.CreateUpdateNodeHandler(func(oldNode, newNode *v1.Node) (err error) {
 			name := newNode.GetName()
 			nTime, found := o.timing[name]
 			if !found {
diff --git a/vendor/modules.txt b/vendor/modules.txt
index eec891b8703..b25ac69412f 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -2011,8 +2011,9 @@ k8s.io/component-helpers/apps/poddisruptionbudget
 k8s.io/component-helpers/auth/rbac/reconciliation
 k8s.io/component-helpers/auth/rbac/validation
 k8s.io/component-helpers/node/topology
-k8s.io/component-helpers/node/utils/sysctl
-k8s.io/component-helpers/node/utils/sysctl/testing
+k8s.io/component-helpers/node/util
+k8s.io/component-helpers/node/util/sysctl
+k8s.io/component-helpers/node/util/sysctl/testing
 k8s.io/component-helpers/scheduling/corev1
 k8s.io/component-helpers/scheduling/corev1/nodeaffinity
 k8s.io/component-helpers/storage/ephemeral
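Reviewer note: for completeness, a minimal sketch of consuming the condition helpers from their new home, k8s.io/component-helpers/node/util. The node status and condition values below are made up for illustration and are not part of this change.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	nodeutil "k8s.io/component-helpers/node/util"
)

func main() {
	// A node status carrying a single, illustrative NetworkUnavailable condition.
	status := v1.NodeStatus{
		Conditions: []v1.NodeCondition{{
			Type:               v1.NodeNetworkUnavailable,
			Status:             v1.ConditionFalse,
			Reason:             "RouteCreated",
			LastTransitionTime: metav1.Now(),
		}},
	}

	// GetNodeCondition returns the index and a pointer to the matching condition, or -1 and nil.
	if i, cond := nodeutil.GetNodeCondition(&status, v1.NodeNetworkUnavailable); cond != nil {
		fmt.Printf("condition %d: %s=%s (%s)\n", i, cond.Type, cond.Status, cond.Reason)
	}
}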