remove deprecated flags LowDiskSpaceThresholdMB and OutOfDiskTransitionFrequency
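Both flags fed the kubelet's legacy out-of-disk handling: LowDiskSpaceThresholdMB set the absolute free-disk floor (in MB) below which new pods were rejected, and OutOfDiskTransitionFrequency set how long the kubelet had to observe sufficient disk before clearing the node's OutOfDisk condition. Comparable protection now comes from the eviction subsystem; a rough, illustrative sketch of equivalent settings (the EvictionHard string field is assumed from the componentconfig API of this era and is not touched by this commit):

// Illustrative only, not part of this change.
c.EvictionHard = "nodefs.available<256Mi,imagefs.available<256Mi" // roughly replaces LowDiskSpaceThresholdMB = 256
c.EvictionPressureTransitionPeriod.Duration = 5 * time.Minute     // roughly replaces the 5m OutOfDiskTransitionFrequency default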
@@ -2334,7 +2334,7 @@ type PodStatus struct {
    // A human readable message indicating details about why the pod is in this state.
    // +optional
    Message string
-   // A brief CamelCase message indicating details about why the pod is in this state. e.g. 'OutOfDisk'
+   // A brief CamelCase message indicating details about why the pod is in this state. e.g. 'Evicted'
    // +optional
    Reason string

@@ -309,10 +309,6 @@ type KubeletConfiguration struct {
    // image garbage collection is never run. Lowest disk usage to garbage
    // collect to.
    ImageGCLowThresholdPercent int32
-   // lowDiskSpaceThresholdMB is the absolute free disk space, in MB, to
-   // maintain. When disk space falls below this threshold, new pods would
-   // be rejected.
-   LowDiskSpaceThresholdMB int32
    // How frequently to calculate and cache volume disk usage for all pods
    VolumeStatsAggPeriod metav1.Duration
    // volumePluginDir is the full path of the directory in which to search

@@ -412,10 +408,6 @@ type KubeletConfiguration struct {
    // run docker daemon with version < 1.9 or an Aufs storage backend.
    // Issue #10959 has more details.
    SerializeImagePulls bool
-   // outOfDiskTransitionFrequency is duration for which the kubelet has to
-   // wait before transitioning out of out-of-disk node condition status.
-   // +optional
-   OutOfDiskTransitionFrequency metav1.Duration
    // nodeLabels to add when registering the node in the cluster.
    NodeLabels map[string]string
    // nonMasqueradeCIDR configures masquerading: traffic to IPs outside this range will use IP masquerade.

@@ -363,9 +363,6 @@ func SetDefaults_KubeletConfiguration(obj *KubeletConfiguration) {
    if obj.KubeAPIBurst == 0 {
        obj.KubeAPIBurst = 10
    }
-   if obj.OutOfDiskTransitionFrequency == zeroDuration {
-       obj.OutOfDiskTransitionFrequency = metav1.Duration{Duration: 5 * time.Minute}
-   }
    if string(obj.HairpinMode) == "" {
        obj.HairpinMode = PromiscuousBridge
    }

@@ -386,10 +386,6 @@ type KubeletConfiguration struct {
    // image garbage collection is never run. Lowest disk usage to garbage
    // collect to. The percent is calculated as this field value out of 100.
    ImageGCLowThresholdPercent *int32 `json:"imageGCLowThresholdPercent"`
-   // lowDiskSpaceThresholdMB is the absolute free disk space, in MB, to
-   // maintain. When disk space falls below this threshold, new pods would
-   // be rejected.
-   LowDiskSpaceThresholdMB int32 `json:"lowDiskSpaceThresholdMB"`
    // How frequently to calculate and cache volume disk usage for all pods
    VolumeStatsAggPeriod metav1.Duration `json:"volumeStatsAggPeriod"`
    // volumePluginDir is the full path of the directory in which to search

@@ -483,9 +479,6 @@ type KubeletConfiguration struct {
    // run docker daemon with version < 1.9 or an Aufs storage backend.
    // Issue #10959 has more details.
    SerializeImagePulls *bool `json:"serializeImagePulls"`
-   // outOfDiskTransitionFrequency is duration for which the kubelet has to
-   // wait before transitioning out of out-of-disk node condition status.
-   OutOfDiskTransitionFrequency metav1.Duration `json:"outOfDiskTransitionFrequency"`
    // nodeLabels to add when registering the node in the cluster.
    NodeLabels map[string]string `json:"nodeLabels"`
    // nonMasqueradeCIDR configures masquerading: traffic to IPs outside this range will use IP masquerade.

@@ -12,7 +12,6 @@ go_library(
    name = "go_default_library",
    srcs = [
        "active_deadline.go",
-       "disk_manager.go",
        "doc.go",
        "kubelet.go",
        "kubelet_cadvisor.go",

@@ -148,7 +147,6 @@ go_test(
    name = "go_default_test",
    srcs = [
        "active_deadline_test.go",
-       "disk_manager_test.go",
        "kubelet_cadvisor_test.go",
        "kubelet_getters_test.go",
        "kubelet_network_test.go",

@@ -1,138 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package kubelet

import (
    "fmt"
    "sync"
    "time"

    "github.com/golang/glog"
    cadvisorapi "github.com/google/cadvisor/info/v2"
    "k8s.io/kubernetes/pkg/kubelet/cadvisor"
)

// Manages policy for diskspace management for disks holding docker images and root fs.

// mb is used to easily convert an int to an mb
const mb = 1024 * 1024

// Implementation is thread-safe.
type diskSpaceManager interface {
    // Checks the available disk space
    IsRootDiskSpaceAvailable() (bool, error)
    IsRuntimeDiskSpaceAvailable() (bool, error)
}

// DiskSpacePolicy defines the free disk for Docker and Root.
type DiskSpacePolicy struct {
    // free disk space threshold for filesystem holding docker images.
    DockerFreeDiskMB int
    // free disk space threshold for root filesystem. Host volumes are created on root fs.
    RootFreeDiskMB int
}

type fsInfo struct {
    Usage int64
    Capacity int64
    Available int64
    Timestamp time.Time
}

type realDiskSpaceManager struct {
    cadvisor cadvisor.Interface
    cachedInfo map[string]fsInfo // cache of filesystem info.
    lock sync.Mutex // protecting cachedInfo.
    policy DiskSpacePolicy // thresholds. Set at creation time.
}

func (dm *realDiskSpaceManager) getFsInfo(fsType string, f func() (cadvisorapi.FsInfo, error)) (fsInfo, error) {
    dm.lock.Lock()
    defer dm.lock.Unlock()
    fsi := fsInfo{}
    if info, ok := dm.cachedInfo[fsType]; ok {
        timeLimit := time.Now().Add(-2 * time.Second)
        if info.Timestamp.After(timeLimit) {
            fsi = info
        }
    }
    if fsi.Timestamp.IsZero() {
        fs, err := f()
        if err != nil {
            return fsInfo{}, err
        }
        fsi.Timestamp = time.Now()
        fsi.Usage = int64(fs.Usage)
        fsi.Capacity = int64(fs.Capacity)
        fsi.Available = int64(fs.Available)
        dm.cachedInfo[fsType] = fsi
    }
    return fsi, nil
}

func (dm *realDiskSpaceManager) IsRuntimeDiskSpaceAvailable() (bool, error) {
    return dm.isSpaceAvailable("runtime", dm.policy.DockerFreeDiskMB, dm.cadvisor.ImagesFsInfo)
}

func (dm *realDiskSpaceManager) IsRootDiskSpaceAvailable() (bool, error) {
    return dm.isSpaceAvailable("root", dm.policy.RootFreeDiskMB, dm.cadvisor.RootFsInfo)
}

func (dm *realDiskSpaceManager) isSpaceAvailable(fsType string, threshold int, f func() (cadvisorapi.FsInfo, error)) (bool, error) {
    fsInfo, err := dm.getFsInfo(fsType, f)
    if err != nil {
        return true, fmt.Errorf("failed to get fs info for %q: %v", fsType, err)
    }
    if fsInfo.Capacity == 0 {
        return true, fmt.Errorf("could not determine capacity for %q fs. Info: %+v", fsType, fsInfo)
    }
    if fsInfo.Available < 0 {
        return true, fmt.Errorf("wrong available space for %q: %+v", fsType, fsInfo)
    }

    if fsInfo.Available < int64(threshold)*mb {
        glog.Infof("Running out of space on disk for %q: available %d MB, threshold %d MB", fsType, fsInfo.Available/mb, threshold)
        return false, nil
    }
    return true, nil
}

func validatePolicy(policy DiskSpacePolicy) error {
    if policy.DockerFreeDiskMB < 0 {
        return fmt.Errorf("free disk space should be non-negative; invalid value %d for docker disk space threshold", policy.DockerFreeDiskMB)
    }
    if policy.RootFreeDiskMB < 0 {
        return fmt.Errorf("free disk space should be non-negative; invalid value %d for root disk space threshold", policy.RootFreeDiskMB)
    }
    return nil
}

func newDiskSpaceManager(cadvisorInterface cadvisor.Interface, policy DiskSpacePolicy) (diskSpaceManager, error) {
    // validate policy
    err := validatePolicy(policy)
    if err != nil {
        return nil, err
    }

    dm := &realDiskSpaceManager{
        cadvisor: cadvisorInterface,
        policy: policy,
        cachedInfo: map[string]fsInfo{},
    }

    return dm, nil
}
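getFsInfo above serves cached filesystem stats for up to two seconds before re-querying cadvisor, so repeated admission checks within one sync loop hit the cache rather than the stats provider. A minimal, self-contained sketch of the same TTL-cache pattern (hypothetical names, not kubelet code):

package main

import (
    "fmt"
    "sync"
    "time"
)

type entry struct {
    value int64
    timestamp time.Time
}

// ttlCache memoizes a fetch function per key, refreshing an entry only
// when it is older than the TTL — the same pattern getFsInfo uses with
// its 2-second window. Errors are not cached, mirroring getFsInfo.
type ttlCache struct {
    mu sync.Mutex
    ttl time.Duration
    entries map[string]entry
}

func (c *ttlCache) get(key string, fetch func() (int64, error)) (int64, error) {
    c.mu.Lock()
    defer c.mu.Unlock()
    if e, ok := c.entries[key]; ok && time.Since(e.timestamp) < c.ttl {
        return e.value, nil // fresh enough; serve from cache
    }
    v, err := fetch()
    if err != nil {
        return 0, err
    }
    c.entries[key] = entry{value: v, timestamp: time.Now()}
    return v, nil
}

func main() {
    c := &ttlCache{ttl: 2 * time.Second, entries: map[string]entry{}}
    calls := 0
    fetch := func() (int64, error) {
        calls++
        return 512, nil
    }
    a, _ := c.get("root", fetch)
    b, _ := c.get("root", fetch) // within the TTL: served from cache
    fmt.Println(a, b, calls)     // 512 512 1
}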
@@ -1,295 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package kubelet

import (
    "fmt"
    "testing"

    cadvisorapi "github.com/google/cadvisor/info/v2"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
    cadvisortest "k8s.io/kubernetes/pkg/kubelet/cadvisor/testing"
)

func testPolicy() DiskSpacePolicy {
    return DiskSpacePolicy{
        DockerFreeDiskMB: 250,
        RootFreeDiskMB: 250,
    }
}

func setUp(t *testing.T) (*assert.Assertions, DiskSpacePolicy, *cadvisortest.Mock) {
    assert := assert.New(t)
    policy := testPolicy()
    c := new(cadvisortest.Mock)
    return assert, policy, c
}

func TestValidPolicy(t *testing.T) {
    assert, policy, c := setUp(t)
    _, err := newDiskSpaceManager(c, policy)
    assert.NoError(err)

    policy = testPolicy()
    policy.DockerFreeDiskMB = -1
    _, err = newDiskSpaceManager(c, policy)
    assert.Error(err)

    policy = testPolicy()
    policy.RootFreeDiskMB = -1
    _, err = newDiskSpaceManager(c, policy)
    assert.Error(err)
}

func TestSpaceAvailable(t *testing.T) {
    assert, policy, mockCadvisor := setUp(t)
    dm, err := newDiskSpaceManager(mockCadvisor, policy)
    assert.NoError(err)

    mockCadvisor.On("ImagesFsInfo").Return(cadvisorapi.FsInfo{
        Usage: 400 * mb,
        Capacity: 1000 * mb,
        Available: 600 * mb,
    }, nil)
    mockCadvisor.On("RootFsInfo").Return(cadvisorapi.FsInfo{
        Usage: 9 * mb,
        Capacity: 10 * mb,
    }, nil)

    ok, err := dm.IsRuntimeDiskSpaceAvailable()
    assert.NoError(err)
    assert.True(ok)

    ok, err = dm.IsRootDiskSpaceAvailable()
    assert.NoError(err)
    assert.False(ok)
}

// TestIsRuntimeDiskSpaceAvailableWithSpace verifies IsRuntimeDiskSpaceAvailable results when
// space is available.
func TestIsRuntimeDiskSpaceAvailableWithSpace(t *testing.T) {
    assert, policy, mockCadvisor := setUp(t)
    dm, err := newDiskSpaceManager(mockCadvisor, policy)
    require.NoError(t, err)

    // 500MB available
    mockCadvisor.On("ImagesFsInfo").Return(cadvisorapi.FsInfo{
        Usage: 9500 * mb,
        Capacity: 10000 * mb,
        Available: 500 * mb,
    }, nil)

    ok, err := dm.IsRuntimeDiskSpaceAvailable()
    assert.NoError(err)
    assert.True(ok)
}

// TestIsRuntimeDiskSpaceAvailableWithoutSpace verifies IsRuntimeDiskSpaceAvailable results when
// space is not available.
func TestIsRuntimeDiskSpaceAvailableWithoutSpace(t *testing.T) {
    // 1MB available
    assert, policy, mockCadvisor := setUp(t)
    mockCadvisor.On("ImagesFsInfo").Return(cadvisorapi.FsInfo{
        Usage: 999 * mb,
        Capacity: 1000 * mb,
        Available: 1 * mb,
    }, nil)

    dm, err := newDiskSpaceManager(mockCadvisor, policy)
    require.NoError(t, err)

    ok, err := dm.IsRuntimeDiskSpaceAvailable()
    assert.NoError(err)
    assert.False(ok)
}

// TestIsRootDiskSpaceAvailableWithSpace verifies IsRootDiskSpaceAvailable results when
// space is available.
func TestIsRootDiskSpaceAvailableWithSpace(t *testing.T) {
    assert, policy, mockCadvisor := setUp(t)
    policy.RootFreeDiskMB = 10
    dm, err := newDiskSpaceManager(mockCadvisor, policy)
    assert.NoError(err)

    // 999MB available
    mockCadvisor.On("RootFsInfo").Return(cadvisorapi.FsInfo{
        Usage: 1 * mb,
        Capacity: 1000 * mb,
        Available: 999 * mb,
    }, nil)

    ok, err := dm.IsRootDiskSpaceAvailable()
    assert.NoError(err)
    assert.True(ok)
}

// TestIsRootDiskSpaceAvailableWithoutSpace verifies IsRootDiskSpaceAvailable results when
// space is not available.
func TestIsRootDiskSpaceAvailableWithoutSpace(t *testing.T) {
    assert, policy, mockCadvisor := setUp(t)
    policy.RootFreeDiskMB = 10
    dm, err := newDiskSpaceManager(mockCadvisor, policy)
    assert.NoError(err)

    // 9MB available
    mockCadvisor.On("RootFsInfo").Return(cadvisorapi.FsInfo{
        Usage: 990 * mb,
        Capacity: 1000 * mb,
        Available: 9 * mb,
    }, nil)

    ok, err := dm.IsRootDiskSpaceAvailable()
    assert.NoError(err)
    assert.False(ok)
}

// TestCache verifies that caching works properly with DiskSpaceAvailable calls
func TestCache(t *testing.T) {
    assert, policy, mockCadvisor := setUp(t)
    dm, err := newDiskSpaceManager(mockCadvisor, policy)
    assert.NoError(err)

    mockCadvisor.On("ImagesFsInfo").Return(cadvisorapi.FsInfo{
        Usage: 400 * mb,
        Capacity: 1000 * mb,
        Available: 300 * mb,
    }, nil).Once()
    mockCadvisor.On("RootFsInfo").Return(cadvisorapi.FsInfo{
        Usage: 500 * mb,
        Capacity: 1000 * mb,
        Available: 500 * mb,
    }, nil).Once()

    // Initial calls which should be recorded in mockCadvisor
    ok, err := dm.IsRuntimeDiskSpaceAvailable()
    assert.NoError(err)
    assert.True(ok)

    ok, err = dm.IsRootDiskSpaceAvailable()
    assert.NoError(err)
    assert.True(ok)

    // Get the current count of calls to mockCadvisor
    cadvisorCallCount := len(mockCadvisor.Calls)

    // Checking for space again shouldn't need to mock as cache would serve it.
    ok, err = dm.IsRuntimeDiskSpaceAvailable()
    assert.NoError(err)
    assert.True(ok)

    ok, err = dm.IsRootDiskSpaceAvailable()
    assert.NoError(err)
    assert.True(ok)

    // Ensure no more calls to the mockCadvisor occurred
    assert.Equal(cadvisorCallCount, len(mockCadvisor.Calls))
}

// TestFsInfoError verifies errors are returned by DiskSpaceAvailable calls
// when FsInfo calls return an error
func TestFsInfoError(t *testing.T) {
    assert, policy, mockCadvisor := setUp(t)
    policy.RootFreeDiskMB = 10
    dm, err := newDiskSpaceManager(mockCadvisor, policy)
    assert.NoError(err)

    mockCadvisor.On("ImagesFsInfo").Return(cadvisorapi.FsInfo{}, fmt.Errorf("can't find fs"))
    mockCadvisor.On("RootFsInfo").Return(cadvisorapi.FsInfo{}, fmt.Errorf("EBUSY"))
    ok, err := dm.IsRuntimeDiskSpaceAvailable()
    assert.Error(err)
    assert.True(ok)
    ok, err = dm.IsRootDiskSpaceAvailable()
    assert.Error(err)
    assert.True(ok)
}

// Test_getFsInfo verifies multiple possible cases for getFsInfo.
func Test_getFsInfo(t *testing.T) {
    assert, policy, mockCadvisor := setUp(t)

    // Sunny day case
    mockCadvisor.On("RootFsInfo").Return(cadvisorapi.FsInfo{
        Usage: 10 * mb,
        Capacity: 100 * mb,
        Available: 90 * mb,
    }, nil).Once()

    dm := &realDiskSpaceManager{
        cadvisor: mockCadvisor,
        policy: policy,
        cachedInfo: map[string]fsInfo{},
    }

    available, err := dm.isSpaceAvailable("root", 10, dm.cadvisor.RootFsInfo)
    assert.True(available)
    assert.NoError(err)

    // Threshold case
    mockCadvisor = new(cadvisortest.Mock)
    mockCadvisor.On("RootFsInfo").Return(cadvisorapi.FsInfo{
        Usage: 9 * mb,
        Capacity: 100 * mb,
        Available: 9 * mb,
    }, nil).Once()

    dm = &realDiskSpaceManager{
        cadvisor: mockCadvisor,
        policy: policy,
        cachedInfo: map[string]fsInfo{},
    }
    available, err = dm.isSpaceAvailable("root", 10, dm.cadvisor.RootFsInfo)
    assert.False(available)
    assert.NoError(err)

    // Frozen case
    mockCadvisor.On("RootFsInfo").Return(cadvisorapi.FsInfo{
        Usage: 9 * mb,
        Capacity: 10 * mb,
        Available: 500 * mb,
    }, nil).Once()

    dm = &realDiskSpaceManager{
        cadvisor: mockCadvisor,
        policy: policy,
        cachedInfo: map[string]fsInfo{},
    }
    available, err = dm.isSpaceAvailable("root", 10, dm.cadvisor.RootFsInfo)
    assert.True(available)
    assert.NoError(err)

    // Capacity error case
    mockCadvisor = new(cadvisortest.Mock)
    mockCadvisor.On("RootFsInfo").Return(cadvisorapi.FsInfo{
        Usage: 9 * mb,
        Capacity: 0,
        Available: 500 * mb,
    }, nil).Once()

    dm = &realDiskSpaceManager{
        cadvisor: mockCadvisor,
        policy: policy,
        cachedInfo: map[string]fsInfo{},
    }
    available, err = dm.isSpaceAvailable("root", 10, dm.cadvisor.RootFsInfo)
    assert.True(available)
    assert.Error(err)
    assert.Contains(fmt.Sprintf("%s", err), "could not determine capacity")

    // Available error case skipped as v2.FSInfo uses uint64 and this
    // can not be less than 0
}
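The deleted tests above lean on testify mocks; .Once() arms a stubbed return for exactly one call, which is how TestCache proves the second round of availability checks never reaches cadvisor. A small self-contained sketch of that idiom (the fetcher interface is a hypothetical stand-in, not the kubelet's cadvisor mock):

package ttl_test

import (
    "testing"

    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/mock"
)

// mockFetcher plays the role cadvisortest.Mock plays above.
type mockFetcher struct {
    mock.Mock
}

func (m *mockFetcher) FsAvailable() (int64, error) {
    args := m.Called()
    return args.Get(0).(int64), args.Error(1)
}

func TestOnceExpectation(t *testing.T) {
    m := new(mockFetcher)
    // .Once() allows the stubbed return for exactly one call; a second
    // call has no matching expectation, which is how a cache hit (or
    // its absence) can be proven.
    m.On("FsAvailable").Return(int64(500), nil).Once()

    v, err := m.FsAvailable()
    assert.NoError(t, err)
    assert.Equal(t, int64(500), v)
    m.AssertExpectations(t)
}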
@@ -53,7 +53,6 @@ const (
    NodeSelectorMismatching = "NodeSelectorMismatching"
    InsufficientFreeCPU = "InsufficientFreeCPU"
    InsufficientFreeMemory = "InsufficientFreeMemory"
-   OutOfDisk = "OutOfDisk"
    HostNetworkNotSupported = "HostNetworkNotSupported"
    UndefinedShaper = "NilShaper"
    NodeRebooted = "Rebooted"

@@ -1363,12 +1363,12 @@ func TestHasNodeConditions(t *testing.T) {
        result bool
    }{
        "has-condition": {
-           inputs: []v1.NodeConditionType{v1.NodeReady, v1.NodeOutOfDisk, v1.NodeMemoryPressure},
+           inputs: []v1.NodeConditionType{v1.NodeReady, v1.NodeDiskPressure, v1.NodeMemoryPressure},
            item: v1.NodeMemoryPressure,
            result: true,
        },
        "does-not-have-condition": {
-           inputs: []v1.NodeConditionType{v1.NodeReady, v1.NodeOutOfDisk},
+           inputs: []v1.NodeConditionType{v1.NodeReady, v1.NodeDiskPressure},
            item: v1.NodeMemoryPressure,
            result: false,
        },

@@ -368,11 +368,6 @@ func NewMainKubelet(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *Dep
        LowThresholdPercent: int(kubeCfg.ImageGCLowThresholdPercent),
    }

-   diskSpacePolicy := DiskSpacePolicy{
-       DockerFreeDiskMB: int(kubeCfg.LowDiskSpaceThresholdMB),
-       RootFreeDiskMB: int(kubeCfg.LowDiskSpaceThresholdMB),
-   }

    enforceNodeAllocatable := kubeCfg.EnforceNodeAllocatable
    if kubeCfg.ExperimentalNodeAllocatableIgnoreEvictionThreshold {
        // Do not provide kubeCfg.EnforceNodeAllocatable to eviction threshold parsing if we are not enforcing Evictions

@@ -416,10 +411,6 @@ func NewMainKubelet(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *Dep
        Namespace: "",
    }

-   diskSpaceManager, err := newDiskSpaceManager(kubeDeps.CAdvisorInterface, diskSpacePolicy)
-   if err != nil {
-       return nil, fmt.Errorf("failed to initialize disk manager: %v", err)
-   }
    containerRefManager := kubecontainer.NewRefManager()

    oomWatcher := NewOOMWatcher(kubeDeps.CAdvisorInterface, kubeDeps.Recorder)

@@ -453,7 +444,6 @@ func NewMainKubelet(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *Dep
        streamingConnectionIdleTimeout: kubeCfg.StreamingConnectionIdleTimeout.Duration,
        recorder: kubeDeps.Recorder,
        cadvisor: kubeDeps.CAdvisorInterface,
-       diskSpaceManager: diskSpaceManager,
        cloud: kubeDeps.Cloud,
        autoDetectCloudProvider: (componentconfigv1alpha1.AutoDetectCloudProvider == kubeCfg.CloudProvider),
        externalCloudProvider: cloudprovider.IsExternal(kubeCfg.CloudProvider),

@@ -475,7 +465,6 @@ func NewMainKubelet(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *Dep
        containerManager: kubeDeps.ContainerManager,
        nodeIP: net.ParseIP(nodeIP),
        clock: clock.RealClock{},
-       outOfDiskTransitionFrequency: kubeCfg.OutOfDiskTransitionFrequency.Duration,
        enableControllerAttachDetach: kubeCfg.EnableControllerAttachDetach,
        iptClient: utilipt.New(utilexec.New(), utildbus.New(), utilipt.ProtocolIpv4),
        makeIPTablesUtilChains: kubeCfg.MakeIPTablesUtilChains,

@@ -924,9 +913,6 @@ type Kubelet struct {
    // Manager for image garbage collection.
    imageManager images.ImageGCManager

-   // Diskspace manager.
-   diskSpaceManager diskSpaceManager

    // Secret manager.
    secretManager secret.Manager

@@ -1044,12 +1030,6 @@ type Kubelet struct {
    // easy to test the code.
    clock clock.Clock

-   // outOfDiskTransitionFrequency specifies the amount of time the kubelet has to be actually
-   // not out of disk before it can transition the node condition status from out-of-disk to
-   // not-out-of-disk. This prevents a pod that causes out-of-disk condition from repeatedly
-   // getting rescheduled onto the node.
-   outOfDiskTransitionFrequency time.Duration

    // handlers called during the tryUpdateNodeStatus cycle
    setNodeStatusFuncs []func(*v1.Node) error

@@ -1650,27 +1630,6 @@ func (kl *Kubelet) deletePod(pod *v1.Pod) error {
    return nil
}

-// isOutOfDisk detects if pods can't fit due to lack of disk space.
-func (kl *Kubelet) isOutOfDisk() bool {
-   // Check disk space once globally and reject or accept all new pods.
-   withinBounds, err := kl.diskSpaceManager.IsRuntimeDiskSpaceAvailable()
-   // Assume enough space in case of errors.
-   if err != nil {
-       glog.Errorf("Failed to check if disk space is available for the runtime: %v", err)
-   } else if !withinBounds {
-       return true
-   }
-
-   withinBounds, err = kl.diskSpaceManager.IsRootDiskSpaceAvailable()
-   // Assume enough space in case of errors.
-   if err != nil {
-       glog.Errorf("Failed to check if disk space is available on the root partition: %v", err)
-   } else if !withinBounds {
-       return true
-   }
-   return false
-}

// rejectPod records an event about the pod with the given reason and message,
// and updates the pod to the failed phase in the status manager.
func (kl *Kubelet) rejectPod(pod *v1.Pod, reason, message string) {

@@ -1697,12 +1656,6 @@ func (kl *Kubelet) canAdmitPod(pods []*v1.Pod, pod *v1.Pod) (bool, string, strin
            return false, result.Reason, result.Message
        }
    }
-   // TODO: When disk space scheduling is implemented (#11976), remove the out-of-disk check here and
-   // add the disk space predicate to predicates.GeneralPredicates.
-   if kl.isOutOfDisk() {
-       glog.Warningf("Failed to admit pod %v - %s", format.Pod(pod), "predicate fails due to OutOfDisk")
-       return false, "OutOfDisk", "cannot be started due to lack of disk space."
-   }

    return true, "", ""
}

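With isOutOfDisk gone, admission decisions in canAdmitPod flow solely through the registered admit handlers visible in the context above; disk exhaustion surfaces as an eviction signal instead of being special-cased here. A rough sketch of the handler-loop shape that remains (podStub, admitResult, and admitHandler are illustrative stand-ins, not the kubelet's actual lifecycle types):

// Illustrative stand-ins only.
type podStub struct {
    name string
}

type admitResult struct {
    Admit   bool
    Reason  string
    Message string
}

type admitHandler interface {
    Admit(pod *podStub) admitResult
}

// canAdmit mirrors the remaining loop: the first handler that rejects
// wins, and there is no longer a trailing out-of-disk check.
func canAdmit(handlers []admitHandler, pod *podStub) (bool, string, string) {
    for _, h := range handlers {
        if result := h.Admit(pod); !result.Admit {
            return false, result.Reason, result.Message
        }
    }
    return true, "", ""
}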
@@ -852,70 +852,6 @@ func (kl *Kubelet) setNodeDiskPressureCondition(node *v1.Node) {
    }
}

-// Set OODCondition for the node.
-func (kl *Kubelet) setNodeOODCondition(node *v1.Node) {
-   currentTime := metav1.NewTime(kl.clock.Now())
-   var nodeOODCondition *v1.NodeCondition
-
-   // Check if NodeOutOfDisk condition already exists and if it does, just pick it up for update.
-   for i := range node.Status.Conditions {
-       if node.Status.Conditions[i].Type == v1.NodeOutOfDisk {
-           nodeOODCondition = &node.Status.Conditions[i]
-       }
-   }
-
-   newOODCondition := false
-   // If the NodeOutOfDisk condition doesn't exist, create one.
-   if nodeOODCondition == nil {
-       nodeOODCondition = &v1.NodeCondition{
-           Type: v1.NodeOutOfDisk,
-           Status: v1.ConditionUnknown,
-       }
-       // nodeOODCondition cannot be appended to node.Status.Conditions here because it gets
-       // copied to the slice. So if we append nodeOODCondition to the slice here none of the
-       // updates we make to nodeOODCondition below are reflected in the slice.
-       newOODCondition = true
-   }
-
-   // Update the heartbeat time irrespective of all the conditions.
-   nodeOODCondition.LastHeartbeatTime = currentTime
-
-   // Note: The conditions below take care of the case when a new NodeOutOfDisk condition is
-   // created and as well as the case when the condition already exists. When a new condition
-   // is created its status is set to v1.ConditionUnknown which matches either
-   // nodeOODCondition.Status != v1.ConditionTrue or
-   // nodeOODCondition.Status != v1.ConditionFalse in the conditions below depending on whether
-   // the kubelet is out of disk or not.
-   if kl.isOutOfDisk() {
-       if nodeOODCondition.Status != v1.ConditionTrue {
-           nodeOODCondition.Status = v1.ConditionTrue
-           nodeOODCondition.Reason = "KubeletOutOfDisk"
-           nodeOODCondition.Message = "out of disk space"
-           nodeOODCondition.LastTransitionTime = currentTime
-           kl.recordNodeStatusEvent(v1.EventTypeNormal, "NodeOutOfDisk")
-       }
-   } else {
-       if nodeOODCondition.Status != v1.ConditionFalse {
-           // Update the out of disk condition when the condition status is unknown even if we
-           // are within the outOfDiskTransitionFrequency duration. We do this to set the
-           // condition status correctly at kubelet startup.
-           if nodeOODCondition.Status == v1.ConditionUnknown || kl.clock.Since(nodeOODCondition.LastTransitionTime.Time) >= kl.outOfDiskTransitionFrequency {
-               nodeOODCondition.Status = v1.ConditionFalse
-               nodeOODCondition.Reason = "KubeletHasSufficientDisk"
-               nodeOODCondition.Message = "kubelet has sufficient disk space available"
-               nodeOODCondition.LastTransitionTime = currentTime
-               kl.recordNodeStatusEvent(v1.EventTypeNormal, "NodeHasSufficientDisk")
-           } else {
-               glog.Infof("Node condition status for OutOfDisk is false, but last transition time is less than %s", kl.outOfDiskTransitionFrequency)
-           }
-       }
-   }
-
-   if newOODCondition {
-       node.Status.Conditions = append(node.Status.Conditions, *nodeOODCondition)
-   }
-}

// Maintains Node.Spec.Unschedulable value from previous run of tryUpdateNodeStatus()
// TODO: why is this a package var?
var oldNodeUnschedulable bool

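The removed setNodeOODCondition follows the usual node-condition protocol: the heartbeat timestamp advances on every status sync, LastTransitionTime moves only when Status actually flips, and the transition out of the bad state is damped by outOfDiskTransitionFrequency. A condensed sketch of that protocol with simplified types (illustrative only; assumes only the standard library "time" package):

// Sketch of the condition-update protocol, not kubelet code.
type conditionStatus string

const (
    condTrue    conditionStatus = "True"
    condFalse   conditionStatus = "False"
    condUnknown conditionStatus = "Unknown"
)

type nodeCondition struct {
    status         conditionStatus
    lastHeartbeat  time.Time
    lastTransition time.Time
}

func updateCondition(c *nodeCondition, outOfDisk bool, now time.Time, damping time.Duration) {
    c.lastHeartbeat = now // heartbeat advances on every sync
    switch {
    case outOfDisk && c.status != condTrue:
        c.status = condTrue
        c.lastTransition = now // transition time moves only on a real flip
    case !outOfDisk && c.status != condFalse:
        // Leaving the bad state is damped by the transition frequency,
        // except at startup while the status is still Unknown.
        if c.status == condUnknown || now.Sub(c.lastTransition) >= damping {
            c.status = condFalse
            c.lastTransition = now
        }
    }
}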
@@ -966,7 +902,6 @@ func (kl *Kubelet) defaultNodeStatusFuncs() []func(*v1.Node) error {
    return []func(*v1.Node) error{
        kl.setNodeAddress,
        withoutError(kl.setNodeStatusInfo),
-       withoutError(kl.setNodeOODCondition),
        withoutError(kl.setNodeMemoryPressureCondition),
        withoutError(kl.setNodeDiskPressureCondition),
        withoutError(kl.setNodeReadyCondition),

@@ -163,22 +163,11 @@ func TestUpdateNewNodeStatus(t *testing.T) {
    }
    mockCadvisor.On("VersionInfo").Return(versionInfo, nil)

-   // Make kubelet report that it has sufficient disk space.
-   require.NoError(t, updateDiskSpacePolicy(kubelet, mockCadvisor, 500, 500, 200, 200, 100, 100))

    expectedNode := &v1.Node{
        ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
        Spec: v1.NodeSpec{},
        Status: v1.NodeStatus{
            Conditions: []v1.NodeCondition{
-               {
-                   Type: v1.NodeOutOfDisk,
-                   Status: v1.ConditionFalse,
-                   Reason: "KubeletHasSufficientDisk",
-                   Message: fmt.Sprintf("kubelet has sufficient disk space available"),
-                   LastHeartbeatTime: metav1.Time{},
-                   LastTransitionTime: metav1.Time{},
-               },
                {
                    Type: v1.NodeMemoryPressure,
                    Status: v1.ConditionFalse,

@@ -256,80 +245,6 @@ func TestUpdateNewNodeStatus(t *testing.T) {
    assert.True(t, apiequality.Semantic.DeepEqual(expectedNode, updatedNode), "%s", diff.ObjectDiff(expectedNode, updatedNode))
}

-func TestUpdateNewNodeOutOfDiskStatusWithTransitionFrequency(t *testing.T) {
-   testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
-   defer testKubelet.Cleanup()
-   kubelet := testKubelet.kubelet
-   kubelet.containerManager = &localCM{
-       ContainerManager: cm.NewStubContainerManager(),
-       allocatable: v1.ResourceList{
-           v1.ResourceCPU: *resource.NewMilliQuantity(200, resource.DecimalSI),
-           v1.ResourceMemory: *resource.NewQuantity(100E6, resource.BinarySI),
-       },
-       capacity: v1.ResourceList{
-           v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
-           v1.ResourceMemory: *resource.NewQuantity(20E9, resource.BinarySI),
-       },
-   }
-
-   kubeClient := testKubelet.fakeKubeClient
-   existingNode := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname}}
-   kubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{existingNode}}).ReactionChain
-   machineInfo := &cadvisorapi.MachineInfo{
-       MachineID: "123",
-       SystemUUID: "abc",
-       BootID: "1b3",
-       NumCores: 2,
-       MemoryCapacity: 1024,
-   }
-   mockCadvisor := testKubelet.fakeCadvisor
-   mockCadvisor.On("Start").Return(nil)
-   mockCadvisor.On("MachineInfo").Return(machineInfo, nil)
-   versionInfo := &cadvisorapi.VersionInfo{
-       KernelVersion: "3.16.0-0.bpo.4-amd64",
-       ContainerOsVersion: "Debian GNU/Linux 7 (wheezy)",
-   }
-   mockCadvisor.On("VersionInfo").Return(versionInfo, nil)
-
-   // Make Kubelet report that it has sufficient disk space.
-   err := updateDiskSpacePolicy(kubelet, mockCadvisor, 500, 500, 200, 200, 100, 100)
-   require.NoError(t, err, "update the disk space manager")
-
-   kubelet.outOfDiskTransitionFrequency = 10 * time.Second
-
-   expectedNodeOutOfDiskCondition := v1.NodeCondition{
-       Type: v1.NodeOutOfDisk,
-       Status: v1.ConditionFalse,
-       Reason: "KubeletHasSufficientDisk",
-       Message: fmt.Sprintf("kubelet has sufficient disk space available"),
-       LastHeartbeatTime: metav1.Time{},
-       LastTransitionTime: metav1.Time{},
-   }
-
-   kubelet.updateRuntimeUp()
-   assert.NoError(t, kubelet.updateNodeStatus())
-
-   actions := kubeClient.Actions()
-   require.Len(t, actions, 2)
-   require.True(t, actions[1].Matches("patch", "nodes"))
-   require.Equal(t, "status", actions[1].GetSubresource())
-
-   updatedNode, err := applyNodeStatusPatch(&existingNode, actions[1].(core.PatchActionImpl).GetPatch())
-   assert.NoError(t, err, "apply the node status patch")
-
-   var oodCondition v1.NodeCondition
-   for i, cond := range updatedNode.Status.Conditions {
-       assert.False(t, cond.LastHeartbeatTime.IsZero(), "LastHeartbeatTime for %v condition is zero", cond.Type)
-       assert.False(t, cond.LastTransitionTime.IsZero(), "LastTransitionTime for %v condition is zero", cond.Type)
-       updatedNode.Status.Conditions[i].LastHeartbeatTime = metav1.Time{}
-       updatedNode.Status.Conditions[i].LastTransitionTime = metav1.Time{}
-       if cond.Type == v1.NodeOutOfDisk {
-           oodCondition = updatedNode.Status.Conditions[i]
-       }
-   }
-   assert.EqualValues(t, expectedNodeOutOfDiskCondition, oodCondition)
-}

func TestUpdateExistingNodeStatus(t *testing.T) {
    testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
    defer testKubelet.Cleanup()

@@ -352,14 +267,6 @@ func TestUpdateExistingNodeStatus(t *testing.T) {
        Spec: v1.NodeSpec{},
        Status: v1.NodeStatus{
            Conditions: []v1.NodeCondition{
-               {
-                   Type: v1.NodeOutOfDisk,
-                   Status: v1.ConditionTrue,
-                   Reason: "KubeletOutOfDisk",
-                   Message: "out of disk space",
-                   LastHeartbeatTime: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
-                   LastTransitionTime: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
-               },
                {
                    Type: v1.NodeMemoryPressure,
                    Status: v1.ConditionFalse,

@@ -414,23 +321,11 @@ func TestUpdateExistingNodeStatus(t *testing.T) {
    }
    mockCadvisor.On("VersionInfo").Return(versionInfo, nil)

-   // Make kubelet report that it is out of disk space.
-   err := updateDiskSpacePolicy(kubelet, mockCadvisor, 500, 500, 50, 50, 100, 100)
-   require.NoError(t, err, "update the disk space manager")

    expectedNode := &v1.Node{
        ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
        Spec: v1.NodeSpec{},
        Status: v1.NodeStatus{
            Conditions: []v1.NodeCondition{
-               {
-                   Type: v1.NodeOutOfDisk,
-                   Status: v1.ConditionTrue,
-                   Reason: "KubeletOutOfDisk",
-                   Message: "out of disk space",
-                   LastHeartbeatTime: metav1.Time{}, // placeholder
-                   LastTransitionTime: metav1.Time{}, // placeholder
-               },
                {
                    Type: v1.NodeMemoryPressure,
                    Status: v1.ConditionFalse,

@@ -524,167 +419,6 @@ func TestUpdateExistingNodeStatus(t *testing.T) {
    assert.True(t, apiequality.Semantic.DeepEqual(expectedNode, updatedNode), "%s", diff.ObjectDiff(expectedNode, updatedNode))
}

-func TestUpdateExistingNodeOutOfDiskStatusWithTransitionFrequency(t *testing.T) {
-   testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
-   defer testKubelet.Cleanup()
-   kubelet := testKubelet.kubelet
-   kubelet.containerManager = &localCM{
-       ContainerManager: cm.NewStubContainerManager(),
-       allocatable: v1.ResourceList{
-           v1.ResourceCPU: *resource.NewMilliQuantity(200, resource.DecimalSI),
-           v1.ResourceMemory: *resource.NewQuantity(100E6, resource.BinarySI),
-       },
-       capacity: v1.ResourceList{
-           v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
-           v1.ResourceMemory: *resource.NewQuantity(20E9, resource.BinarySI),
-       },
-   }
-
-   clock := testKubelet.fakeClock
-   // Do not set nano second, because apiserver function doesn't support nano second. (Only support
-   // RFC3339).
-   clock.SetTime(time.Unix(123456, 0))
-   kubeClient := testKubelet.fakeKubeClient
-   existingNode := v1.Node{
-       ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
-       Spec: v1.NodeSpec{},
-       Status: v1.NodeStatus{
-           Conditions: []v1.NodeCondition{
-               {
-                   Type: v1.NodeReady,
-                   Status: v1.ConditionTrue,
-                   Reason: "KubeletReady",
-                   Message: fmt.Sprintf("kubelet is posting ready status"),
-                   LastHeartbeatTime: metav1.NewTime(clock.Now()),
-                   LastTransitionTime: metav1.NewTime(clock.Now()),
-               },
-               {
-                   Type: v1.NodeOutOfDisk,
-                   Status: v1.ConditionTrue,
-                   Reason: "KubeletOutOfDisk",
-                   Message: "out of disk space",
-                   LastHeartbeatTime: metav1.NewTime(clock.Now()),
-                   LastTransitionTime: metav1.NewTime(clock.Now()),
-               },
-           },
-       },
-   }
-   kubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{existingNode}}).ReactionChain
-   mockCadvisor := testKubelet.fakeCadvisor
-   machineInfo := &cadvisorapi.MachineInfo{
-       MachineID: "123",
-       SystemUUID: "abc",
-       BootID: "1b3",
-       NumCores: 2,
-       MemoryCapacity: 1024,
-   }
-   fsInfo := cadvisorapiv2.FsInfo{
-       Device: "123",
-   }
-   mockCadvisor.On("Start").Return(nil)
-   mockCadvisor.On("MachineInfo").Return(machineInfo, nil)
-   mockCadvisor.On("ImagesFsInfo").Return(fsInfo, nil)
-   mockCadvisor.On("RootFsInfo").Return(fsInfo, nil)
-   versionInfo := &cadvisorapi.VersionInfo{
-       KernelVersion: "3.16.0-0.bpo.4-amd64",
-       ContainerOsVersion: "Debian GNU/Linux 7 (wheezy)",
-       DockerVersion: "1.5.0",
-   }
-   mockCadvisor.On("VersionInfo").Return(versionInfo, nil)
-
-   kubelet.outOfDiskTransitionFrequency = 5 * time.Second
-
-   ood := v1.NodeCondition{
-       Type: v1.NodeOutOfDisk,
-       Status: v1.ConditionTrue,
-       Reason: "KubeletOutOfDisk",
-       Message: "out of disk space",
-       LastHeartbeatTime: metav1.NewTime(clock.Now()), // placeholder
-       LastTransitionTime: metav1.NewTime(clock.Now()), // placeholder
-   }
-   noOod := v1.NodeCondition{
-       Type: v1.NodeOutOfDisk,
-       Status: v1.ConditionFalse,
-       Reason: "KubeletHasSufficientDisk",
-       Message: fmt.Sprintf("kubelet has sufficient disk space available"),
-       LastHeartbeatTime: metav1.NewTime(clock.Now()), // placeholder
-       LastTransitionTime: metav1.NewTime(clock.Now()), // placeholder
-   }
-
-   testCases := []struct {
-       rootFsAvail uint64
-       dockerFsAvail uint64
-       expected v1.NodeCondition
-   }{
-       {
-           // NodeOutOfDisk==false
-           rootFsAvail: 200,
-           dockerFsAvail: 200,
-           expected: ood,
-       },
-       {
-           // NodeOutOfDisk==true
-           rootFsAvail: 50,
-           dockerFsAvail: 200,
-           expected: ood,
-       },
-       {
-           // NodeOutOfDisk==false
-           rootFsAvail: 200,
-           dockerFsAvail: 200,
-           expected: ood,
-       },
-       {
-           // NodeOutOfDisk==true
-           rootFsAvail: 200,
-           dockerFsAvail: 50,
-           expected: ood,
-       },
-       {
-           // NodeOutOfDisk==false
-           rootFsAvail: 200,
-           dockerFsAvail: 200,
-           expected: noOod,
-       },
-   }
-
-   kubelet.updateRuntimeUp()
-   for tcIdx, tc := range testCases {
-       // Step by a second
-       clock.Step(1 * time.Second)
-
-       // Setup expected times.
-       tc.expected.LastHeartbeatTime = metav1.NewTime(clock.Now())
-       // In the last case, there should be a status transition for NodeOutOfDisk
-       if tcIdx == len(testCases)-1 {
-           tc.expected.LastTransitionTime = metav1.NewTime(clock.Now())
-       }
-
-       // Make kubelet report that it has sufficient disk space
-       err := updateDiskSpacePolicy(kubelet, mockCadvisor, 500, 500, tc.rootFsAvail, tc.dockerFsAvail, 100, 100)
-       require.NoError(t, err, "can't update disk space manager")
-       assert.NoError(t, kubelet.updateNodeStatus())
-
-       actions := kubeClient.Actions()
-       assert.Len(t, actions, 2, "test [%d]", tcIdx)
-
-       assert.IsType(t, core.PatchActionImpl{}, actions[1])
-       patchAction := actions[1].(core.PatchActionImpl)
-
-       updatedNode, err := applyNodeStatusPatch(&existingNode, patchAction.GetPatch())
-       require.NoError(t, err, "can't apply node status patch")
-       kubeClient.ClearActions()
-
-       var oodCondition v1.NodeCondition
-       for i, cond := range updatedNode.Status.Conditions {
-           if cond.Type == v1.NodeOutOfDisk {
-               oodCondition = updatedNode.Status.Conditions[i]
-           }
-       }
-       assert.EqualValues(t, tc.expected, oodCondition)
-   }
-}

func TestUpdateNodeStatusWithRuntimeStateError(t *testing.T) {
    testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
    defer testKubelet.Cleanup()

@@ -721,22 +455,11 @@ func TestUpdateNodeStatusWithRuntimeStateError(t *testing.T) {
    }
    mockCadvisor.On("VersionInfo").Return(versionInfo, nil)

-   // Make kubelet report that it has sufficient disk space.
-   require.NoError(t, updateDiskSpacePolicy(kubelet, mockCadvisor, 500, 500, 200, 200, 100, 100))

    expectedNode := &v1.Node{
        ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
        Spec: v1.NodeSpec{},
        Status: v1.NodeStatus{
            Conditions: []v1.NodeCondition{
-               {
-                   Type: v1.NodeOutOfDisk,
-                   Status: v1.ConditionFalse,
-                   Reason: "KubeletHasSufficientDisk",
-                   Message: "kubelet has sufficient disk space available",
-                   LastHeartbeatTime: metav1.Time{},
-                   LastTransitionTime: metav1.Time{},
-               },
                {
                    Type: v1.NodeMemoryPressure,
                    Status: v1.ConditionFalse,

@@ -943,13 +666,13 @@ func TestRegisterWithApiServer(t *testing.T) {
    }
    mockCadvisor.On("VersionInfo").Return(versionInfo, nil)
    mockCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{
-       Usage: 400 * mb,
-       Capacity: 1000 * mb,
-       Available: 600 * mb,
+       Usage: 400,
+       Capacity: 1000,
+       Available: 600,
    }, nil)
    mockCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{
-       Usage: 9 * mb,
-       Capacity: 10 * mb,
+       Usage: 9,
+       Capacity: 10,
    }, nil)

    done := make(chan struct{})

@@ -1178,9 +901,6 @@ func TestUpdateNewNodeStatusTooLargeReservation(t *testing.T) {
    }
    mockCadvisor.On("VersionInfo").Return(versionInfo, nil)

-   // Make kubelet report that it has sufficient disk space.
-   require.NoError(t, updateDiskSpacePolicy(kubelet, mockCadvisor, 500, 500, 200, 200, 100, 100))

    expectedNode := &v1.Node{
        ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
        Spec: v1.NodeSpec{},

@@ -213,11 +213,6 @@ func newTestKubeletWithImageList(
    kubelet.configMapManager = configMapManager
    kubelet.podManager = kubepod.NewBasicPodManager(fakeMirrorClient, kubelet.secretManager, kubelet.configMapManager)
    kubelet.statusManager = status.NewManager(fakeKubeClient, kubelet.podManager, &statustest.FakePodDeletionSafetyProvider{})
-   diskSpaceManager, err := newDiskSpaceManager(mockCadvisor, DiskSpacePolicy{})
-   if err != nil {
-       t.Fatalf("can't initialize disk space manager: %v", err)
-   }
-   kubelet.diskSpaceManager = diskSpaceManager

    kubelet.containerRuntime = fakeRuntime
    kubelet.runtimeCache = containertest.NewFakeRuntimeCache(kubelet.containerRuntime)

@@ -737,25 +732,6 @@ func TestValidateContainerLogStatus(t *testing.T) {
    }
}

-// updateDiskSpacePolicy creates a new DiskSpaceManager with a new policy. This new manager along
-// with the mock FsInfo values added to Cadvisor should make the kubelet report that it has
-// sufficient disk space or it is out of disk, depending on the capacity, availability and
-// threshold values.
-func updateDiskSpacePolicy(kubelet *Kubelet, mockCadvisor *cadvisortest.Mock, rootCap, dockerCap, rootAvail, dockerAvail uint64, rootThreshold, dockerThreshold int) error {
-   dockerimagesFsInfo := cadvisorapiv2.FsInfo{Capacity: rootCap * mb, Available: rootAvail * mb}
-   rootFsInfo := cadvisorapiv2.FsInfo{Capacity: dockerCap * mb, Available: dockerAvail * mb}
-   mockCadvisor.On("ImagesFsInfo").Return(dockerimagesFsInfo, nil)
-   mockCadvisor.On("RootFsInfo").Return(rootFsInfo, nil)
-
-   dsp := DiskSpacePolicy{DockerFreeDiskMB: rootThreshold, RootFreeDiskMB: dockerThreshold}
-   diskSpaceManager, err := newDiskSpaceManager(mockCadvisor, dsp)
-   if err != nil {
-       return err
-   }
-   kubelet.diskSpaceManager = diskSpaceManager
-   return nil
-}

func TestCreateMirrorPod(t *testing.T) {
    for _, updateType := range []kubetypes.SyncPodType{kubetypes.SyncPodCreate, kubetypes.SyncPodUpdate} {
        testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)

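For reference, the deleted helper takes (rootCap, dockerCap, rootAvail, dockerAvail, rootThreshold, dockerThreshold), all sizes in MB; note the quirk that it pairs the root* capacity/availability parameters with the images filesystem and the docker* ones with the root filesystem, matching its DockerFreeDiskMB: rootThreshold assignment. Its call sites, removed elsewhere in this diff, flip the kubelet between sufficient and out-of-disk with a single call:

// Sufficient disk: 200MB available on each filesystem against 100MB thresholds.
require.NoError(t, updateDiskSpacePolicy(kubelet, mockCadvisor, 500, 500, 200, 200, 100, 100))

// Out of disk: only 50MB available against the same 100MB thresholds.
err := updateDiskSpacePolicy(kubelet, mockCadvisor, 500, 500, 50, 50, 100, 100)
require.NoError(t, err, "update the disk space manager")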
@@ -55,19 +55,18 @@ func TestRunOnce(t *testing.T) {
    cadvisor := &cadvisortest.Mock{}
    cadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
    cadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{
-       Usage: 400 * mb,
-       Capacity: 1000 * mb,
-       Available: 600 * mb,
+       Usage: 400,
+       Capacity: 1000,
+       Available: 600,
    }, nil)
    cadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{
-       Usage: 9 * mb,
-       Capacity: 10 * mb,
+       Usage: 9,
+       Capacity: 10,
    }, nil)
    fakeSecretManager := secret.NewFakeManager()
    fakeConfigMapManager := configmap.NewFakeManager()
    podManager := kubepod.NewBasicPodManager(
        podtest.NewFakeMirrorClient(), fakeSecretManager, fakeConfigMapManager)
-   diskSpaceManager, _ := newDiskSpaceManager(cadvisor, DiskSpacePolicy{})
    fakeRuntime := &containertest.FakeRuntime{}
    basePath, err := utiltesting.MkTmpdir("kubelet")
    if err != nil {

@@ -82,7 +81,6 @@ func TestRunOnce(t *testing.T) {
        statusManager: status.NewManager(nil, podManager, &statustest.FakePodDeletionSafetyProvider{}),
        podManager: podManager,
        os: &containertest.FakeOS{},
-       diskSpaceManager: diskSpaceManager,
        containerRuntime: fakeRuntime,
        reasonCache: NewReasonCache(),
        clock: clock.RealClock{},

@@ -134,14 +134,12 @@ func GetHollowKubeletConfig(
    c.MinimumGCAge.Duration = 1 * time.Minute
    c.NodeStatusUpdateFrequency.Duration = 10 * time.Second
    c.SyncFrequency.Duration = 10 * time.Second
-   c.OutOfDiskTransitionFrequency.Duration = 5 * time.Minute
    c.EvictionPressureTransitionPeriod.Duration = 5 * time.Minute
    c.MaxPods = int32(maxPods)
    c.PodsPerCore = int32(podsPerCore)
    c.ClusterDNS = []string{}
    c.ImageGCHighThresholdPercent = 90
    c.ImageGCLowThresholdPercent = 80
-   c.LowDiskSpaceThresholdMB = 256
    c.VolumeStatsAggPeriod.Duration = time.Minute
    c.CgroupRoot = ""
    c.ContainerRuntime = kubetypes.DockerContainerRuntime

@@ -1588,7 +1588,7 @@ func TestPrintPod(t *testing.T) {
            ObjectMeta: metav1.ObjectMeta{Name: "test5"},
            Spec: api.PodSpec{Containers: make([]api.Container, 2)},
            Status: api.PodStatus{
-               Reason: "OutOfDisk",
+               Reason: "podReason",
                Phase: "podPhase",
                ContainerStatuses: []api.ContainerStatus{
                    {Ready: true, RestartCount: 3, State: api.ContainerState{Running: &api.ContainerStateRunning{}}},

@@ -1596,7 +1596,7 @@ func TestPrintPod(t *testing.T) {
                },
            },
        },
-       []metav1alpha1.TableRow{{Cells: []interface{}{"test5", "1/2", "OutOfDisk", 6, "<unknown>"}}},
+       []metav1alpha1.TableRow{{Cells: []interface{}{"test5", "1/2", "podReason", 6, "<unknown>"}}},
    },
}