kubelet: remove unused bandwidth shaping teardown code

Since v1.5 and the removal of --configure-cbr0:

0800df74ab "Remove the legacy networking mode --configure-cbr0"

kubelet hasn't done any shaping operations itself; they have all
been delegated to network plugins like kubenet or to external CNI
plugins. Some shaping code was still left behind in kubelet, so
remove it now that it's unused.
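
For context, the shaping work that kubelet used to do in syncPod now lives entirely in the network plugin. The sketch below shows roughly what that plugin-side flow looks like, using the pkg/util/bandwidth helpers that stay in the tree. It is illustrative only, not actual kubenet code: the applyPodShaping helper, the cbr0 interface name, and the example pod IP are assumptions, while ExtractPodBandwidthResources, BandwidthShaper, NewTCShaper, and the kubernetes.io/ingress-bandwidth / kubernetes.io/egress-bandwidth annotations are the real pieces from pkg/util/bandwidth.

// Illustrative sketch only; applyPodShaping, "cbr0", and the pod IP are
// assumptions, not actual kubenet code.
package main

import (
	"fmt"

	"k8s.io/api/core/v1"
	"k8s.io/kubernetes/pkg/util/bandwidth"
)

// applyPodShaping reads the pod's bandwidth annotations and, when any are
// set, reconciles the tc rules for the pod's IP through a BandwidthShaper.
func applyPodShaping(shaper bandwidth.BandwidthShaper, pod *v1.Pod, podIP string) error {
	// kubernetes.io/ingress-bandwidth and kubernetes.io/egress-bandwidth are
	// parsed into *resource.Quantity values (nil when the annotation is unset).
	ingress, egress, err := bandwidth.ExtractPodBandwidthResources(pod.Annotations)
	if err != nil {
		return err
	}
	if ingress == nil && egress == nil {
		// Pod is not bandwidth-constrained; nothing to shape.
		return nil
	}
	// Make the shaper's state for this pod's /32 match the requested limits.
	return shaper.ReconcileCIDR(fmt.Sprintf("%s/32", podIP), egress, ingress)
}

func main() {
	pod := &v1.Pod{}
	pod.Annotations = map[string]string{
		"kubernetes.io/ingress-bandwidth": "10M",
		"kubernetes.io/egress-bandwidth":  "10M",
	}
	// Linux-only: the tc-based shaper shells out to tc on the given bridge.
	shaper := bandwidth.NewTCShaper("cbr0")
	if err := applyPodShaping(shaper, pod, "10.244.1.5"); err != nil {
		fmt.Println("shaping failed:", err)
	}
}

kubenet already performs this reconciliation when it sets up a pod's network, which is why the kubelet-side copy removed below is redundant.
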
Dan Williams 2017-06-06 10:27:35 -05:00
parent 17e19dfce6
commit 5b8ad3f7c5
6 changed files with 1 addition and 163 deletions

@@ -96,7 +96,6 @@ go_library(
         "//pkg/security/apparmor:go_default_library",
         "//pkg/securitycontext:go_default_library",
         "//pkg/util:go_default_library",
-        "//pkg/util/bandwidth:go_default_library",
         "//pkg/util/dbus:go_default_library",
         "//pkg/util/exec:go_default_library",
         "//pkg/util/io:go_default_library",
@@ -201,7 +200,6 @@ go_test(
         "//pkg/kubelet/util/queue:go_default_library",
         "//pkg/kubelet/util/sliceutils:go_default_library",
         "//pkg/kubelet/volumemanager:go_default_library",
-        "//pkg/util/bandwidth:go_default_library",
         "//pkg/util/mount:go_default_library",
         "//pkg/version:go_default_library",
         "//pkg/volume:go_default_library",

@@ -102,7 +102,6 @@ import (
 	"k8s.io/kubernetes/pkg/kubelet/util/sliceutils"
 	"k8s.io/kubernetes/pkg/kubelet/volumemanager"
 	"k8s.io/kubernetes/pkg/security/apparmor"
-	"k8s.io/kubernetes/pkg/util/bandwidth"
 	utildbus "k8s.io/kubernetes/pkg/util/dbus"
 	utilexec "k8s.io/kubernetes/pkg/util/exec"
 	kubeio "k8s.io/kubernetes/pkg/util/io"
@@ -1027,10 +1026,6 @@ type Kubelet struct {
 	// clusterDomain and clusterDNS.
 	resolverConfig string
 
-	// Optionally shape the bandwidth of a pod
-	// TODO: remove when kubenet plugin is ready
-	shaper bandwidth.BandwidthShaper
-
 	// Information about the ports which are opened by daemons on Node running this Kubelet server.
 	daemonEndpoints *v1.NodeDaemonEndpoints
@@ -1632,28 +1627,6 @@ func (kl *Kubelet) syncPod(o syncPodOptions) error {
 		return err
 	}
 
-	// early successful exit if pod is not bandwidth-constrained
-	if !kl.shapingEnabled() {
-		return nil
-	}
-
-	// Update the traffic shaping for the pod's ingress and egress limits
-	ingress, egress, err := bandwidth.ExtractPodBandwidthResources(pod.Annotations)
-	if err != nil {
-		return err
-	}
-	if egress != nil || ingress != nil {
-		if kubecontainer.IsHostNetworkPod(pod) {
-			kl.recorder.Event(pod, v1.EventTypeWarning, events.HostNetworkNotSupported, "Bandwidth shaping is not currently supported on the host network")
-		} else if kl.shaper != nil {
-			if len(apiPodStatus.PodIP) > 0 {
-				err = kl.shaper.ReconcileCIDR(fmt.Sprintf("%s/32", apiPodStatus.PodIP), egress, ingress)
-			}
-		} else {
-			kl.recorder.Event(pod, v1.EventTypeWarning, events.UndefinedShaper, "Pod requests bandwidth shaping, but the shaper is undefined")
-		}
-	}
-
 	return nil
 }

@@ -25,10 +25,8 @@ import (
 	"github.com/golang/glog"
 
 	"k8s.io/api/core/v1"
-	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/kubernetes/pkg/apis/componentconfig"
 	"k8s.io/kubernetes/pkg/kubelet/network"
-	"k8s.io/kubernetes/pkg/util/bandwidth"
 	utiliptables "k8s.io/kubernetes/pkg/util/iptables"
 )
@@ -244,51 +242,6 @@ func (kl *Kubelet) parseResolvConf(reader io.Reader) (nameservers []string, sear
 	return nameservers, searches, nil
 }
 
-// cleanupBandwidthLimits updates the status of bandwidth-limited containers
-// and ensures that only the appropriate CIDRs are active on the node.
-func (kl *Kubelet) cleanupBandwidthLimits(allPods []*v1.Pod) error {
-	if kl.shaper == nil {
-		return nil
-	}
-	currentCIDRs, err := kl.shaper.GetCIDRs()
-	if err != nil {
-		return err
-	}
-	possibleCIDRs := sets.String{}
-	for ix := range allPods {
-		pod := allPods[ix]
-		ingress, egress, err := bandwidth.ExtractPodBandwidthResources(pod.Annotations)
-		if err != nil {
-			return err
-		}
-		if ingress == nil && egress == nil {
-			glog.V(8).Infof("Not a bandwidth limited container...")
-			continue
-		}
-		status, found := kl.statusManager.GetPodStatus(pod.UID)
-		if !found {
-			// TODO(random-liu): Cleanup status get functions. (issue #20477)
-			s, err := kl.containerRuntime.GetPodStatus(pod.UID, pod.Name, pod.Namespace)
-			if err != nil {
-				return err
-			}
-			status = kl.generateAPIPodStatus(pod, s)
-		}
-		if status.Phase == v1.PodRunning {
-			possibleCIDRs.Insert(fmt.Sprintf("%s/32", status.PodIP))
-		}
-	}
-	for _, cidr := range currentCIDRs {
-		if !possibleCIDRs.Has(cidr) {
-			glog.V(2).Infof("Removing CIDR: %s (%v)", cidr, possibleCIDRs)
-			if err := kl.shaper.Reset(cidr); err != nil {
-				return err
-			}
-		}
-	}
-	return nil
-}
-
 // syncNetworkStatus updates the network state
 func (kl *Kubelet) syncNetworkStatus() {
 	// For cri integration, network state will be updated in updateRuntimeUp,

@@ -26,7 +26,6 @@ import (
 	"github.com/stretchr/testify/require"
 	"k8s.io/api/core/v1"
 	"k8s.io/client-go/tools/record"
-	"k8s.io/kubernetes/pkg/util/bandwidth"
 )
 
 func TestNodeIPParam(t *testing.T) {
@@ -184,85 +183,6 @@ func TestComposeDNSSearch(t *testing.T) {
 	}
 }
 
-func TestCleanupBandwidthLimits(t *testing.T) {
-	testPod := func(name, ingress string) *v1.Pod {
-		pod := podWithUidNameNs("", name, "")
-		if len(ingress) != 0 {
-			pod.Annotations["kubernetes.io/ingress-bandwidth"] = ingress
-		}
-		return pod
-	}
-
-	// TODO(random-liu): We removed the test case for pod status not cached here. We should add a higher
-	// layer status getter function and test that function instead.
-	tests := []struct {
-		status           *v1.PodStatus
-		pods             []*v1.Pod
-		inputCIDRs       []string
-		expectResetCIDRs []string
-		name             string
-	}{
-		{
-			status: &v1.PodStatus{
-				PodIP: "1.2.3.4",
-				Phase: v1.PodRunning,
-			},
-			pods: []*v1.Pod{
-				testPod("foo", "10M"),
-				testPod("bar", ""),
-			},
-			inputCIDRs:       []string{"1.2.3.4/32", "2.3.4.5/32", "5.6.7.8/32"},
-			expectResetCIDRs: []string{"2.3.4.5/32", "5.6.7.8/32"},
-			name:             "pod running",
-		},
-		{
-			status: &v1.PodStatus{
-				PodIP: "1.2.3.4",
-				Phase: v1.PodFailed,
-			},
-			pods: []*v1.Pod{
-				testPod("foo", "10M"),
-				testPod("bar", ""),
-			},
-			inputCIDRs:       []string{"1.2.3.4/32", "2.3.4.5/32", "5.6.7.8/32"},
-			expectResetCIDRs: []string{"1.2.3.4/32", "2.3.4.5/32", "5.6.7.8/32"},
-			name:             "pod not running",
-		},
-		{
-			status: &v1.PodStatus{
-				PodIP: "1.2.3.4",
-				Phase: v1.PodFailed,
-			},
-			pods: []*v1.Pod{
-				testPod("foo", ""),
-				testPod("bar", ""),
-			},
-			inputCIDRs:       []string{"1.2.3.4/32", "2.3.4.5/32", "5.6.7.8/32"},
-			expectResetCIDRs: []string{"1.2.3.4/32", "2.3.4.5/32", "5.6.7.8/32"},
-			name:             "no bandwidth limits",
-		},
-	}
-	for _, test := range tests {
-		shaper := &bandwidth.FakeShaper{
-			CIDRs: test.inputCIDRs,
-		}
-		testKube := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
-		defer testKube.Cleanup()
-		testKube.kubelet.shaper = shaper
-		for _, pod := range test.pods {
-			testKube.kubelet.statusManager.SetPodStatus(pod, *test.status)
-		}
-		err := testKube.kubelet.cleanupBandwidthLimits(test.pods)
-		assert.NoError(t, err, "test [%s]", test.name)
-		assert.EqualValues(t, test.expectResetCIDRs, shaper.ResetCIDRs, "test[%s]", test.name)
-	}
-}
-
 func TestGetIPTablesMark(t *testing.T) {
 	tests := []struct {
 		bit int

@@ -893,12 +893,6 @@ func (kl *Kubelet) HandlePodCleanups() error {
 	// Remove any orphaned mirror pods.
 	kl.podManager.DeleteOrphanedMirrorPods()
 
-	// Clear out any old bandwidth rules
-	err = kl.cleanupBandwidthLimits(allPods)
-	if err != nil {
-		glog.Errorf("Failed cleaning up bandwidth limits: %v", err)
-	}
-
 	// Remove any cgroups in the hierarchy for pods that are no longer running.
 	if kl.cgroupsPerQOS {
 		kl.cleanupOrphanedPodCgroups(cgroupPods, activePods)

@@ -125,7 +125,7 @@ func TestGetPodNetworkStatus(t *testing.T) {
 	}
 }
 
-// TestTeardownBeforeSetUp tests that a `TearDown` call does call
+// TestTeardownCallsShaper tests that a `TearDown` call does call
 // `shaper.Reset`
 func TestTeardownCallsShaper(t *testing.T) {
 	fexec := &exec.FakeExec{