ContainerStatus to return container resources
Signed-off-by: ruiwen-zhao <ruiwen@google.com>
parent 6e4b6830f1
commit b7b1200dd3
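This change makes the CRI ContainerStatus call report the container's resource limits (Linux and Windows) alongside its state. As a consumer-side illustration only — statusGetter and verifyMemoryLimit below are hypothetical names, not part of this commit — a client could read the new field like this, mirroring the nil checks the integration tests perform:

package main // illustration only, not part of the commit

import (
	"fmt"

	// Older containerd branches use the v1alpha2 CRI API instead of v1.
	runtime "k8s.io/cri-api/pkg/apis/runtime/v1"
)

// statusGetter is the one CRI method this sketch needs; the runtimeService
// helper used by the integration tests below has a compatible signature.
type statusGetter interface {
	ContainerStatus(containerID string) (*runtime.ContainerStatus, error)
}

// verifyMemoryLimit reads the Resources field that this commit adds to the
// container status and compares the Linux memory limit against want.
func verifyMemoryLimit(svc statusGetter, containerID string, want int64) error {
	status, err := svc.ContainerStatus(containerID)
	if err != nil {
		return fmt.Errorf("failed to get container status: %w", err)
	}
	// Proto getters are nil-safe, so this guards runtimes that do not
	// populate Resources or Resources.Linux.
	if status.GetResources().GetLinux() == nil {
		return fmt.Errorf("no Linux resources in container status: %+v", status)
	}
	if got := status.GetResources().GetLinux().GetMemoryLimitInBytes(); got != want {
		return fmt.Errorf("unexpected memory limit: got %d, want %d", got, want)
	}
	return nil
}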
@@ -58,6 +58,15 @@ func checkMemorySwapLimit(t *testing.T, spec *runtimespec.Spec, memLimit *int64)
 	}
 }
 
+func checkMemoryLimitInContainerStatus(t *testing.T, status *runtime.ContainerStatus, memLimit int64) {
+	t.Helper()
+	require.NotNil(t, status)
+	require.NotNil(t, status.Resources)
+	require.NotNil(t, status.Resources.Linux)
+	require.NotNil(t, status.Resources.Linux.MemoryLimitInBytes)
+	assert.Equal(t, memLimit, status.Resources.Linux.MemoryLimitInBytes)
+}
+
 func getCgroupSwapLimitForTask(t *testing.T, task containerd.Task) uint64 {
 	if cgroups.Mode() == cgroups.Unified {
 		groupPath, err := cgroupsv2.PidGroupPath(int(task.Pid()))
@@ -281,3 +290,52 @@ func TestUpdateContainerResources_MemoryLimit(t *testing.T) {
 	memLimit = getCgroupMemoryLimitForTask(t, task)
 	assert.Equal(t, uint64(800*1024*1024), memLimit)
 }
+
+func TestUpdateContainerResources_StatusUpdated(t *testing.T) {
+	t.Log("Create a sandbox")
+	sb, sbConfig := PodSandboxConfigWithCleanup(t, "sandbox", "update-container-resources")
+
+	pauseImage := images.Get(images.Pause)
+	EnsureImageExists(t, pauseImage)
+
+	t.Log("Create a container with memory limit")
+	cnConfig := ContainerConfig(
+		"container",
+		pauseImage,
+		WithResources(&runtime.LinuxContainerResources{
+			MemoryLimitInBytes: 200 * 1024 * 1024,
+		}),
+	)
+	cn, err := runtimeService.CreateContainer(sb, cnConfig, sbConfig)
+	require.NoError(t, err)
+
+	t.Log("Check memory limit in container status")
+	status, err := runtimeService.ContainerStatus(cn)
+	require.NoError(t, err)
+	checkMemoryLimitInContainerStatus(t, status, 200*1024*1024)
+
+	t.Log("Update container memory limit after created")
+	err = runtimeService.UpdateContainerResources(cn, &runtime.LinuxContainerResources{
+		MemoryLimitInBytes: 400 * 1024 * 1024,
+	}, nil)
+	require.NoError(t, err)
+
+	t.Log("Check memory limit in container status")
+	status, err = runtimeService.ContainerStatus(cn)
+	require.NoError(t, err)
+	checkMemoryLimitInContainerStatus(t, status, 400*1024*1024)
+
+	t.Log("Start the container")
+	require.NoError(t, runtimeService.StartContainer(cn))
+
+	t.Log("Update container memory limit after started")
+	err = runtimeService.UpdateContainerResources(cn, &runtime.LinuxContainerResources{
+		MemoryLimitInBytes: 800 * 1024 * 1024,
+	}, nil)
+	require.NoError(t, err)
+
+	t.Log("Check memory limit in container status")
+	status, err = runtimeService.ContainerStatus(cn)
+	require.NoError(t, err)
+	checkMemoryLimitInContainerStatus(t, status, 800*1024*1024)
+}
@@ -143,6 +143,9 @@ func TestContainerdImage(t *testing.T) {
 		if err != nil {
 			return false, err
 		}
+		if s.Resources == nil || (s.Resources.Linux == nil && s.Resources.Windows == nil) {
+			return false, fmt.Errorf("No Resource field in container status: %+v", s)
+		}
 		return s.GetState() == runtime.ContainerState_CONTAINER_RUNNING, nil
 	}
 	require.NoError(t, Eventually(checkContainer, 100*time.Millisecond, 10*time.Second))
@@ -261,6 +261,7 @@ func (c *criService) CreateContainer(ctx context.Context, r *runtime.CreateConta
 	}()
 
 	status := containerstore.Status{CreatedAt: time.Now().UnixNano()}
+	status = copyResourcesToStatus(spec, status)
 	container, err := containerstore.NewContainer(meta,
 		containerstore.WithStatus(status, containerRootDir),
 		containerstore.WithContainer(cntr),
@@ -60,6 +60,7 @@ func (c *criService) ContainerStatus(ctx context.Context, r *runtime.ContainerSt
 		}
 	}
 	status := toCRIContainerStatus(container, spec, imageRef)
+
 	if status.GetCreatedAt() == 0 {
 		// CRI doesn't allow CreatedAt == 0.
 		info, err := container.Container.Info(ctx)
@@ -119,6 +120,7 @@ func toCRIContainerStatus(container containerstore.Container, spec *runtime.Imag
 		Annotations: meta.Config.GetAnnotations(),
 		Mounts:      meta.Config.GetMounts(),
 		LogPath:     meta.LogPath,
+		Resources:   status.Resources,
 	}
 }
 
@@ -45,8 +45,8 @@ func (c *criService) UpdateContainerResources(ctx context.Context, r *runtime.Up
 	// Update resources in status update transaction, so that:
 	// 1) There won't be race condition with container start.
 	// 2) There won't be concurrent resource update to the same container.
-	if err := container.Status.Update(func(status containerstore.Status) (containerstore.Status, error) {
-		return status, c.updateContainerResources(ctx, container, r, status)
+	if err := container.Status.UpdateSync(func(status containerstore.Status) (containerstore.Status, error) {
+		return c.updateContainerResources(ctx, container, r, status)
 	}); err != nil {
 		return nil, fmt.Errorf("failed to update resources: %w", err)
 	}
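Context for the Update → UpdateSync switch above: both apply the update function while holding the status lock, but UpdateSync also checkpoints the resulting status to disk (see the UpdateSync comment in the store changes further down), which matters now that resource limits are part of Status. A simplified sketch of the pattern — the real store writes the checkpoint atomically, and the types here are reduced to stand-ins:

import (
	"encoding/json"
	"os"
	"sync"

	runtime "k8s.io/cri-api/pkg/apis/runtime/v1"
)

// SketchStatus stands in for containerstore.Status in this illustration.
type SketchStatus struct {
	Resources *runtime.ContainerResources
}

// sketchStore is a minimal status store whose UpdateSync serializes
// concurrent updates and persists the new status before publishing it.
type sketchStore struct {
	sync.Mutex
	path   string // checkpoint file
	status SketchStatus
}

func (s *sketchStore) UpdateSync(u func(SketchStatus) (SketchStatus, error)) error {
	s.Lock()
	defer s.Unlock()
	newStatus, err := u(s.status) // transaction body, e.g. updateContainerResources
	if err != nil {
		return err
	}
	data, err := json.Marshal(&newStatus)
	if err != nil {
		return err
	}
	// Persist first so a crash cannot leave memory ahead of the checkpoint.
	if err := os.WriteFile(s.path, data, 0600); err != nil {
		return err
	}
	s.status = newStatus
	return nil
}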
@@ -56,11 +56,13 @@ func (c *criService) UpdateContainerResources(ctx context.Context, r *runtime.Up
 func (c *criService) updateContainerResources(ctx context.Context,
 	cntr containerstore.Container,
 	r *runtime.UpdateContainerResourcesRequest,
-	status containerstore.Status) (retErr error) {
+	status containerstore.Status) (newStatus containerstore.Status, retErr error) {
+
+	newStatus = status
 	id := cntr.ID
 	// Do not update the container when there is a removal in progress.
 	if status.Removing {
-		return fmt.Errorf("container %q is in removing state", id)
+		return newStatus, fmt.Errorf("container %q is in removing state", id)
 	}
 
 	// Update container spec. If the container is not started yet, updating
@@ -69,15 +71,15 @@ func (c *criService) updateContainerResources(ctx context.Context,
 	// the spec will become our source of truth for resource limits.
 	oldSpec, err := cntr.Container.Spec(ctx)
 	if err != nil {
-		return fmt.Errorf("failed to get container spec: %w", err)
+		return newStatus, fmt.Errorf("failed to get container spec: %w", err)
 	}
 	newSpec, err := updateOCIResource(ctx, oldSpec, r, c.config)
 	if err != nil {
-		return fmt.Errorf("failed to update resource in spec: %w", err)
+		return newStatus, fmt.Errorf("failed to update resource in spec: %w", err)
 	}
 
 	if err := updateContainerSpec(ctx, cntr.Container, newSpec); err != nil {
-		return err
+		return newStatus, err
 	}
 	defer func() {
 		if retErr != nil {
@@ -87,32 +89,35 @@ func (c *criService) updateContainerResources(ctx context.Context,
 			if err := updateContainerSpec(deferCtx, cntr.Container, oldSpec); err != nil {
 				log.G(ctx).WithError(err).Errorf("Failed to update spec %+v for container %q", oldSpec, id)
 			}
+		} else {
+			// Update container status only when the spec is updated
+			newStatus = copyResourcesToStatus(newSpec, status)
 		}
 	}()
 
 	// If container is not running, only update spec is enough, new resource
 	// limit will be applied when container start.
 	if status.State() != runtime.ContainerState_CONTAINER_RUNNING {
-		return nil
+		return newStatus, nil
 	}
 
 	task, err := cntr.Container.Task(ctx, nil)
 	if err != nil {
 		if errdefs.IsNotFound(err) {
 			// Task exited already.
-			return nil
+			return newStatus, nil
 		}
-		return fmt.Errorf("failed to get task: %w", err)
+		return newStatus, fmt.Errorf("failed to get task: %w", err)
 	}
 	// newSpec.Linux / newSpec.Windows won't be nil
 	if err := task.Update(ctx, containerd.WithResources(getResources(newSpec))); err != nil {
 		if errdefs.IsNotFound(err) {
 			// Task exited already.
-			return nil
+			return newStatus, nil
 		}
-		return fmt.Errorf("failed to update resources: %w", err)
+		return newStatus, fmt.Errorf("failed to update resources: %w", err)
 	}
-	return nil
+	return newStatus, nil
 }
 
 // updateContainerSpec updates container spec.
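The deferred block above implements a compensating rollback: the spec update is committed first, undone if a later step fails, and only a successful run copies newSpec into the returned status. Reduced to its generic shape (illustrative only; applyWithRollback is not a containerd function):

import "log"

// applyWithRollback applies a side effect, runs the rest of the operation,
// and undoes the side effect if the rest fails. The named return lets the
// deferred handler observe the final error, exactly as retErr does above.
func applyWithRollback(apply, rollback, rest func() error) (retErr error) {
	if err := apply(); err != nil {
		return err
	}
	defer func() {
		if retErr != nil {
			if err := rollback(); err != nil {
				log.Printf("rollback failed: %v", err)
			}
		}
	}()
	return rest()
}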
@@ -38,6 +38,7 @@ import (
 	"github.com/containerd/containerd/runtime/linux/runctypes"
 	runcoptions "github.com/containerd/containerd/runtime/v2/runc/options"
 	"github.com/containerd/typeurl"
+	runtimespec "github.com/opencontainers/runtime-spec/specs-go"
 	"github.com/sirupsen/logrus"
 
 	runhcsoptions "github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/options"
@@ -429,3 +430,83 @@ func getPassthroughAnnotations(podAnnotations map[string]string,
 	}
 	return passthroughAnnotations
 }
+
+// copyResourcesToStatus copies container resource constraints from spec to
+// container status.
+// This will need updates when new fields are added to ContainerResources.
+func copyResourcesToStatus(spec *runtimespec.Spec, status containerstore.Status) containerstore.Status {
+	status.Resources = &runtime.ContainerResources{}
+	if spec.Linux != nil {
+		status.Resources.Linux = &runtime.LinuxContainerResources{}
+
+		if spec.Process != nil && spec.Process.OOMScoreAdj != nil {
+			status.Resources.Linux.OomScoreAdj = int64(*spec.Process.OOMScoreAdj)
+		}
+
+		if spec.Linux.Resources == nil {
+			return status
+		}
+
+		if spec.Linux.Resources.CPU != nil {
+			if spec.Linux.Resources.CPU.Period != nil {
+				status.Resources.Linux.CpuPeriod = int64(*spec.Linux.Resources.CPU.Period)
+			}
+			if spec.Linux.Resources.CPU.Quota != nil {
+				status.Resources.Linux.CpuQuota = *spec.Linux.Resources.CPU.Quota
+			}
+			if spec.Linux.Resources.CPU.Shares != nil {
+				status.Resources.Linux.CpuShares = int64(*spec.Linux.Resources.CPU.Shares)
+			}
+			status.Resources.Linux.CpusetCpus = spec.Linux.Resources.CPU.Cpus
+			status.Resources.Linux.CpusetMems = spec.Linux.Resources.CPU.Mems
+		}
+
+		if spec.Linux.Resources.Memory != nil {
+			if spec.Linux.Resources.Memory.Limit != nil {
+				status.Resources.Linux.MemoryLimitInBytes = *spec.Linux.Resources.Memory.Limit
+			}
+			if spec.Linux.Resources.Memory.Swap != nil {
+				status.Resources.Linux.MemorySwapLimitInBytes = *spec.Linux.Resources.Memory.Swap
+			}
+		}
+
+		if spec.Linux.Resources.HugepageLimits != nil {
+			hugepageLimits := make([]*runtime.HugepageLimit, 0, len(spec.Linux.Resources.HugepageLimits))
+			for _, l := range spec.Linux.Resources.HugepageLimits {
+				hugepageLimits = append(hugepageLimits, &runtime.HugepageLimit{
+					PageSize: l.Pagesize,
+					Limit:    l.Limit,
+				})
+			}
+			status.Resources.Linux.HugepageLimits = hugepageLimits
+		}
+
+		if spec.Linux.Resources.Unified != nil {
+			status.Resources.Linux.Unified = spec.Linux.Resources.Unified
+		}
+	}
+
+	if spec.Windows != nil {
+		status.Resources.Windows = &runtime.WindowsContainerResources{}
+		if spec.Windows.Resources == nil {
+			return status
+		}
+
+		if spec.Windows.Resources.CPU != nil {
+			if spec.Windows.Resources.CPU.Shares != nil {
+				status.Resources.Windows.CpuShares = int64(*spec.Windows.Resources.CPU.Shares)
+			}
+			if spec.Windows.Resources.CPU.Count != nil {
+				status.Resources.Windows.CpuCount = int64(*spec.Windows.Resources.CPU.Count)
+			}
+			if spec.Windows.Resources.CPU.Maximum != nil {
+				status.Resources.Windows.CpuMaximum = int64(*spec.Windows.Resources.CPU.Maximum)
+			}
+		}
+
+		if spec.Windows.Resources.Memory != nil {
+			if spec.Windows.Resources.Memory.Limit != nil {
+				status.Resources.Windows.MemoryLimitInBytes = int64(*spec.Windows.Resources.Memory.Limit)
+			}
+		}
+
+		// TODO: Figure out how to get RootfsSizeInBytes
+	}
+	return status
+}
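To make the spec-to-status mapping concrete, here is a small illustrative test for copyResourcesToStatus — not part of the commit, and it assumes it sits in the same package so the unexported helper is reachable:

func TestCopyResourcesToStatusSketch(t *testing.T) {
	limit := int64(200 * 1024 * 1024)
	shares := uint64(1024)
	spec := &runtimespec.Spec{
		Linux: &runtimespec.Linux{
			Resources: &runtimespec.LinuxResources{
				CPU:    &runtimespec.LinuxCPU{Shares: &shares},
				Memory: &runtimespec.LinuxMemory{Limit: &limit},
			},
		},
	}
	status := copyResourcesToStatus(spec, containerstore.Status{})
	// The Linux branch above is taken, so Resources.Linux is populated.
	require.NotNil(t, status.Resources.Linux)
	assert.Equal(t, limit, status.Resources.Linux.MemoryLimitInBytes)
	assert.Equal(t, int64(1024), status.Resources.Linux.CpuShares)
}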
@@ -97,6 +97,8 @@ type Status struct {
 	// Unknown indicates that the container status is not fully loaded.
 	// This field doesn't need to be checkpointed.
 	Unknown bool `json:"-"`
+	// Resources has container runtime resource constraints
+	Resources *runtime.ContainerResources
 }
 
 // State returns current state of the container based on the container status.
@@ -203,7 +205,56 @@ type statusStorage struct
 func (s *statusStorage) Get() Status {
 	s.RLock()
 	defer s.RUnlock()
-	return s.status
+	// Deep copy is needed in case some fields in Status are updated after Get()
+	// is called.
+	return deepCopyOf(s.status)
 }
 
+func deepCopyOf(s Status) Status {
+	copy := s
+	// Resources is the only field that is a pointer, and therefore needs
+	// a manual deep copy.
+	// This will need updates when new fields are added to ContainerResources.
+	if s.Resources == nil {
+		return copy
+	}
+	copy.Resources = &runtime.ContainerResources{}
+	if s.Resources != nil && s.Resources.Linux != nil {
+		hugepageLimits := make([]*runtime.HugepageLimit, 0, len(s.Resources.Linux.HugepageLimits))
+		for _, l := range s.Resources.Linux.HugepageLimits {
+			hugepageLimits = append(hugepageLimits, &runtime.HugepageLimit{
+				PageSize: l.PageSize,
+				Limit:    l.Limit,
+			})
+		}
+		copy.Resources = &runtime.ContainerResources{
+			Linux: &runtime.LinuxContainerResources{
+				CpuPeriod:              s.Resources.Linux.CpuPeriod,
+				CpuQuota:               s.Resources.Linux.CpuQuota,
+				CpuShares:              s.Resources.Linux.CpuShares,
+				CpusetCpus:             s.Resources.Linux.CpusetCpus,
+				CpusetMems:             s.Resources.Linux.CpusetMems,
+				MemoryLimitInBytes:     s.Resources.Linux.MemoryLimitInBytes,
+				MemorySwapLimitInBytes: s.Resources.Linux.MemorySwapLimitInBytes,
+				OomScoreAdj:            s.Resources.Linux.OomScoreAdj,
+				Unified:                s.Resources.Linux.Unified,
+				HugepageLimits:         hugepageLimits,
+			},
+		}
+	}
+
+	if s.Resources != nil && s.Resources.Windows != nil {
+		copy.Resources = &runtime.ContainerResources{
+			Windows: &runtime.WindowsContainerResources{
+				CpuShares:          s.Resources.Windows.CpuShares,
+				CpuCount:           s.Resources.Windows.CpuCount,
+				CpuMaximum:         s.Resources.Windows.CpuMaximum,
+				MemoryLimitInBytes: s.Resources.Windows.MemoryLimitInBytes,
+				RootfsSizeInBytes:  s.Resources.Windows.RootfsSizeInBytes,
+			},
+		}
+	}
+	return copy
+}
+
 // UpdateSync updates the container status and the on disk checkpoint.
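Why Get deep-copies: with a shallow copy, every caller of Get would share one ContainerResources value with the store, so a mutation through one returned Status would leak into all the others. A sketch of the property the deep copy buys — demoAliasing is hypothetical and assumes Resources.Linux is populated:

func demoAliasing(s *statusStorage) {
	a := s.Get()
	b := s.Get()
	// Because Get returns deepCopyOf(s.status), a and b own distinct
	// Resources values; this write is invisible through b and the store.
	a.Resources.Linux.MemoryLimitInBytes = 0
	fmt.Println(b.Resources.Linux.MemoryLimitInBytes) // unchanged
}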