Expose image list in node status
Change image manager to use repotag
parent 66d3cbf889
commit 27ca7dc71e
@@ -1544,6 +1544,16 @@ type NodeStatus struct {
 	DaemonEndpoints NodeDaemonEndpoints `json:"daemonEndpoints,omitempty"`
 	// Set of ids/uuids to uniquely identify the node.
 	NodeInfo NodeSystemInfo `json:"nodeInfo,omitempty"`
+	// List of container images on this node
+	Images []ContainerImage `json:"images,omitempty"`
+}
+
+// Describe a container image
+type ContainerImage struct {
+	// Names by which this image is known.
+	RepoTags []string `json:"repoTags"`
+	// The size of the image in bytes.
+	Size int64 `json:"size,omitempty"`
 }
 
 type NodePhase string
@@ -1911,6 +1911,17 @@ type NodeStatus struct {
 	// Set of ids/uuids to uniquely identify the node.
 	// More info: http://releases.k8s.io/HEAD/docs/admin/node.md#node-info
 	NodeInfo NodeSystemInfo `json:"nodeInfo,omitempty"`
+	// List of container images on this node
+	Images []ContainerImage `json:"images,omitempty"`
+}
+
+// Describe a container image
+type ContainerImage struct {
+	// Names by which this image is known.
+	// e.g. ["gcr.io/google_containers/hyperkube:v1.0.7", "dockerhub.io/google_containers/hyperkube:v1.0.7"]
+	RepoTags []string `json:"repoTags"`
+	// The size of the image in bytes.
+	Size int64 `json:"size,omitempty"`
 }
 
 type NodePhase string
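As a quick illustration of the wire format these tags produce, here is a minimal, self-contained sketch; the ContainerImage type below is a stand-in copy of the field shown above (the real one lives in pkg/api/v1), and the repo tag and size are invented values:

    package main

    import (
    	"encoding/json"
    	"fmt"
    )

    // Stand-in mirroring the ContainerImage added above, for illustration only.
    type ContainerImage struct {
    	RepoTags []string `json:"repoTags"`
    	Size     int64    `json:"size,omitempty"`
    }

    func main() {
    	img := ContainerImage{
    		RepoTags: []string{"gcr.io/google_containers/hyperkube:v1.0.7"},
    		Size:     123456,
    	}
    	out, _ := json.Marshal(img)
    	fmt.Println(string(out))
    	// {"repoTags":["gcr.io/google_containers/hyperkube:v1.0.7"],"size":123456}
    }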
@@ -319,7 +319,7 @@ type Image struct {
 	// ID of the image.
 	ID string
 	// Other names by which this image is known.
-	Tags []string
+	RepoTags []string
 	// The size of the image in bytes.
 	Size int64
 }
@@ -75,9 +75,9 @@ func toRuntimeImage(image *docker.APIImages) (*kubecontainer.Image, error) {
 	}
 
 	return &kubecontainer.Image{
 		ID:   image.ID,
-		Tags: image.RepoTags,
+		RepoTags: image.RepoTags,
 		Size: image.VirtualSize,
 	}, nil
 }
@@ -75,9 +75,9 @@ func TestToRuntimeImage(t *testing.T) {
 		VirtualSize: 1234,
 	}
 	expected := &kubecontainer.Image{
 		ID:   "aeeea",
-		Tags: []string{"abc", "def"},
+		RepoTags: []string{"abc", "def"},
 		Size: 1234,
 	}
 
 	actual, err := toRuntimeImage(original)
@@ -27,6 +27,7 @@ import (
 	"k8s.io/kubernetes/pkg/client/record"
 	"k8s.io/kubernetes/pkg/kubelet/cadvisor"
 	"k8s.io/kubernetes/pkg/kubelet/container"
+	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
 	"k8s.io/kubernetes/pkg/util"
 	"k8s.io/kubernetes/pkg/util/sets"
 )
@@ -42,6 +43,8 @@ type imageManager interface {
 	// Start async garbage collection of images.
 	Start() error
 
+	GetImageList() ([]kubecontainer.Image, error)
+
 	// TODO(vmarmol): Have this subsume pulls as well.
 }
 
@@ -135,6 +138,15 @@ func (im *realImageManager) Start() error {
 	return nil
 }
 
+// Get a list of images on this node
+func (im *realImageManager) GetImageList() ([]kubecontainer.Image, error) {
+	images, err := im.runtime.ListImages()
+	if err != nil {
+		return nil, err
+	}
+	return images, nil
+}
+
 func (im *realImageManager) detectImages(detected time.Time) error {
 	images, err := im.runtime.ListImages()
 	if err != nil {
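GetImageList is a straight pass-through to the runtime; the explicit error branch is equivalent to returning im.runtime.ListImages() directly. A reduced sketch of the delegation, with stand-in types (fakeRuntime and lister are invented for this example):

    package main

    import "fmt"

    type image struct{ ID string }

    // lister stands in for the container runtime's image-listing surface.
    type lister interface {
    	ListImages() ([]image, error)
    }

    type fakeRuntime struct{ images []image }

    func (f *fakeRuntime) ListImages() ([]image, error) { return f.images, nil }

    // getImageList mirrors realImageManager.GetImageList: delegate to the
    // runtime and propagate any error unchanged.
    func getImageList(r lister) ([]image, error) {
    	return r.ListImages()
    }

    func main() {
    	imgs, err := getImageList(&fakeRuntime{images: []image{{ID: "abc"}}})
    	fmt.Println(imgs, err) // [{abc}] <nil>
    }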
@@ -298,7 +310,7 @@ func isImageUsed(image container.Image, imagesInUse sets.String) bool {
 	if _, ok := imagesInUse[image.ID]; ok {
 		return true
 	}
-	for _, tag := range image.Tags {
+	for _, tag := range image.RepoTags {
 		if _, ok := imagesInUse[tag]; ok {
 			return true
 		}
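The in-use check now matches an image by its ID or by any of its repo tags, which is the behavioral core of the rename. A standalone sketch of the same logic (container.Image and sets.String are reduced to plain Go types for illustration):

    package main

    import "fmt"

    type image struct {
    	ID       string
    	RepoTags []string
    }

    // isImageUsed mirrors the updated lookup: an image counts as used if
    // either its ID or any repo tag appears in the in-use set.
    func isImageUsed(img image, imagesInUse map[string]struct{}) bool {
    	if _, ok := imagesInUse[img.ID]; ok {
    		return true
    	}
    	for _, tag := range img.RepoTags {
    		if _, ok := imagesInUse[tag]; ok {
    			return true
    		}
    	}
    	return false
    }

    func main() {
    	inUse := map[string]struct{}{"potato": {}}
    	img := image{ID: "5678", RepoTags: []string{"potato", "salad"}}
    	fmt.Println(isImageUsed(img, inUse)) // true: matched via a repo tag
    }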
@@ -312,9 +312,9 @@ func TestFreeSpaceImagesAlsoDoesLookupByRepoTags(t *testing.T) {
 	fakeRuntime.ImageList = []container.Image{
 		makeImage(0, 1024),
 		{
 			ID:   "5678",
-			Tags: []string{"potato", "salad"},
+			RepoTags: []string{"potato", "salad"},
 			Size: 2048,
 		},
 	}
 	fakeRuntime.AllPodList = []*container.Pod{
@@ -2818,11 +2818,30 @@ func (kl *Kubelet) setNodeStatusDaemonEndpoints(node *api.Node) {
 	node.Status.DaemonEndpoints = *kl.daemonEndpoints
 }
 
+// Set the images list for this node
+func (kl *Kubelet) setNodeStatusImages(node *api.Node) {
+	// Update image list of this node
+	var imagesOnNode []api.ContainerImage
+	containerImages, err := kl.imageManager.GetImageList()
+	if err != nil {
+		glog.Errorf("Error getting image list: %v", err)
+	} else {
+		for _, image := range containerImages {
+			imagesOnNode = append(imagesOnNode, api.ContainerImage{
+				RepoTags: image.RepoTags,
+				Size:     image.Size,
+			})
+		}
+	}
+	node.Status.Images = imagesOnNode
+}
+
 // Set status for the node.
 func (kl *Kubelet) setNodeStatusInfo(node *api.Node) {
 	kl.setNodeStatusMachineInfo(node)
 	kl.setNodeStatusVersionInfo(node)
 	kl.setNodeStatusDaemonEndpoints(node)
+	kl.setNodeStatusImages(node)
 }
 
 // Set ReadyCondition for the node.
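Note the conversion drops the runtime image ID: only RepoTags and Size are surfaced in node status, and on a listing error the list is reset to nil rather than left stale. A reduced sketch of that mapping (the types here are stand-ins, not the real api package):

    package main

    import "fmt"

    type runtimeImage struct {
    	ID       string
    	RepoTags []string
    	Size     int64
    }

    type containerImage struct {
    	RepoTags []string
    	Size     int64
    }

    // toStatusImages mirrors the loop in setNodeStatusImages: the image ID
    // is intentionally not carried into the node status.
    func toStatusImages(in []runtimeImage) []containerImage {
    	var out []containerImage
    	for _, img := range in {
    		out = append(out, containerImage{RepoTags: img.RepoTags, Size: img.Size})
    	}
    	return out
    }

    func main() {
    	imgs := toStatusImages([]runtimeImage{
    		{ID: "abc", RepoTags: []string{"busybox:latest"}, Size: 123},
    	})
    	fmt.Printf("%+v\n", imgs) // [{RepoTags:[busybox:latest] Size:123}]
    }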
@@ -91,6 +91,18 @@ type TestKubelet struct {
 func newTestKubelet(t *testing.T) *TestKubelet {
 	fakeRuntime := &kubecontainer.FakeRuntime{}
 	fakeRuntime.VersionInfo = "1.15"
+	fakeRuntime.ImageList = []kubecontainer.Image{
+		{
+			ID:       "abc",
+			RepoTags: []string{"gcr.io/google_containers:v1", "gcr.io/google_containers:v2"},
+			Size:     123,
+		},
+		{
+			ID:       "efg",
+			RepoTags: []string{"gcr.io/google_containers:v3", "gcr.io/google_containers:v4"},
+			Size:     456,
+		},
+	}
 	fakeRecorder := &record.FakeRecorder{}
 	fakeKubeClient := &testclient.Fake{}
 	kubelet := &Kubelet{}
@@ -144,6 +156,17 @@ func newTestKubelet(t *testing.T) *TestKubelet {
 
 	kubelet.volumeManager = newVolumeManager()
 	kubelet.containerManager = cm.NewStubContainerManager()
+	fakeNodeRef := &api.ObjectReference{
+		Kind:      "Node",
+		Name:      testKubeletHostname,
+		UID:       types.UID(testKubeletHostname),
+		Namespace: "",
+	}
+	fakeImageGCPolicy := ImageGCPolicy{
+		HighThresholdPercent: 90,
+		LowThresholdPercent:  80,
+	}
+	kubelet.imageManager, err = newImageManager(fakeRuntime, mockCadvisor, fakeRecorder, fakeNodeRef, fakeImageGCPolicy)
 	fakeClock := &util.FakeClock{Time: time.Now()}
 	kubelet.backOff = util.NewBackOff(time.Second, time.Minute)
 	kubelet.backOff.Clock = fakeClock
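The fake GC policy uses a 90/80 split: image garbage collection is expected to kick in once disk usage crosses the high threshold and to free images until usage falls to the low threshold. A sketch of that arithmetic, assuming this reading of ImageGCPolicy (amountToFree is invented for illustration, not kubelet code):

    package main

    import "fmt"

    // amountToFree sketches how a high/low threshold pair translates into
    // an amount to reclaim; it illustrates the intent of ImageGCPolicy
    // rather than the exact kubelet implementation.
    func amountToFree(capacity, usage, highPct, lowPct int64) int64 {
    	if usage*100 < capacity*highPct {
    		return 0 // still below the high-water mark: nothing to reclaim
    	}
    	target := capacity * lowPct / 100
    	return usage - target
    }

    func main() {
    	// capacity 100, usage 95, thresholds 90/80 (any consistent unit):
    	// usage is past 90%, so free down to 80% => reclaim 15.
    	fmt.Println(amountToFree(100, 95, 90, 80)) // 15
    }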
@@ -2557,6 +2580,16 @@ func TestUpdateNewNodeStatus(t *testing.T) {
 				{Type: api.NodeLegacyHostIP, Address: "127.0.0.1"},
 				{Type: api.NodeInternalIP, Address: "127.0.0.1"},
 			},
+			Images: []api.ContainerImage{
+				{
+					RepoTags: []string{"gcr.io/google_containers:v1", "gcr.io/google_containers:v2"},
+					Size:     123,
+				},
+				{
+					RepoTags: []string{"gcr.io/google_containers:v3", "gcr.io/google_containers:v4"},
+					Size:     456,
+				},
+			},
 		},
 	}
 
@@ -2745,6 +2778,16 @@ func testDockerRuntimeVersion(t *testing.T) {
 				{Type: api.NodeLegacyHostIP, Address: "127.0.0.1"},
 				{Type: api.NodeInternalIP, Address: "127.0.0.1"},
 			},
+			Images: []api.ContainerImage{
+				{
+					RepoTags: []string{"gcr.io/google_containers:v1", "gcr.io/google_containers:v2"},
+					Size:     123,
+				},
+				{
+					RepoTags: []string{"gcr.io/google_containers:v3", "gcr.io/google_containers:v4"},
+					Size:     456,
+				},
+			},
 		},
 	}
 
@@ -2905,6 +2948,16 @@ func TestUpdateExistingNodeStatus(t *testing.T) {
 				{Type: api.NodeLegacyHostIP, Address: "127.0.0.1"},
 				{Type: api.NodeInternalIP, Address: "127.0.0.1"},
 			},
+			Images: []api.ContainerImage{
+				{
+					RepoTags: []string{"gcr.io/google_containers:v1", "gcr.io/google_containers:v2"},
+					Size:     123,
+				},
+				{
+					RepoTags: []string{"gcr.io/google_containers:v3", "gcr.io/google_containers:v4"},
+					Size:     456,
+				},
+			},
 		},
 	}
 
@@ -3173,6 +3226,16 @@ func TestUpdateNodeStatusWithoutContainerRuntime(t *testing.T) {
 				{Type: api.NodeLegacyHostIP, Address: "127.0.0.1"},
 				{Type: api.NodeInternalIP, Address: "127.0.0.1"},
 			},
+			Images: []api.ContainerImage{
+				{
+					RepoTags: []string{"gcr.io/google_containers:v1", "gcr.io/google_containers:v2"},
+					Size:     123,
+				},
+				{
+					RepoTags: []string{"gcr.io/google_containers:v3", "gcr.io/google_containers:v4"},
+					Size:     456,
+				},
+			},
 		},
 	}
 	kubelet.runtimeState = newRuntimeState(time.Duration(0), false, "" /* Pod CIDR */, func() error { return nil })
@@ -86,8 +86,8 @@ func (r *Runtime) ListImages() ([]kubecontainer.Image, error) {
 	images := make([]kubecontainer.Image, len(listResp.Images))
 	for i, image := range listResp.Images {
 		images[i] = kubecontainer.Image{
 			ID:   image.Id,
-			Tags: []string{buildImageName(image)},
+			RepoTags: []string{buildImageName(image)},
 			//TODO: fill in the size of the image
 		}
 	}
@@ -283,8 +283,8 @@ func TestListImages(t *testing.T) {
 			},
 			[]kubecontainer.Image{
 				{
 					ID:   "sha512-a2fb8f390702",
-					Tags: []string{"quay.io/coreos/alpine-sh:latest"},
+					RepoTags: []string{"quay.io/coreos/alpine-sh:latest"},
 				},
 			},
 		},
@@ -303,12 +303,12 @@ func TestListImages(t *testing.T) {
 			},
 			[]kubecontainer.Image{
 				{
 					ID:   "sha512-a2fb8f390702",
-					Tags: []string{"quay.io/coreos/alpine-sh:latest"},
+					RepoTags: []string{"quay.io/coreos/alpine-sh:latest"},
 				},
 				{
 					ID:   "sha512-c6b597f42816",
-					Tags: []string{"coreos.com/rkt/stage1-coreos:0.10.0"},
+					RepoTags: []string{"coreos.com/rkt/stage1-coreos:0.10.0"},
 				},
 			},
 		},