ContainerStatus to return container resources

Signed-off-by: ruiwen-zhao <ruiwen@google.com>
Author: ruiwen-zhao
Date: 2022-08-10 03:56:56 +00:00
Parent: 6e4b6830f1
Commit: b7b1200dd3
7 changed files with 215 additions and 14 deletions

View File

@@ -261,6 +261,7 @@ func (c *criService) CreateContainer(ctx context.Context, r *runtime.CreateConta
}()
status := containerstore.Status{CreatedAt: time.Now().UnixNano()}
status = copyResourcesToStatus(spec, status)
container, err := containerstore.NewContainer(meta,
containerstore.WithStatus(status, containerRootDir),
containerstore.WithContainer(cntr),

View File

@@ -60,6 +60,7 @@ func (c *criService) ContainerStatus(ctx context.Context, r *runtime.ContainerSt
}
}
status := toCRIContainerStatus(container, spec, imageRef)
if status.GetCreatedAt() == 0 {
// CRI doesn't allow CreatedAt == 0.
info, err := container.Container.Info(ctx)
@@ -119,6 +120,7 @@ func toCRIContainerStatus(container containerstore.Container, spec *runtime.Imag
Annotations: meta.Config.GetAnnotations(),
Mounts: meta.Config.GetMounts(),
LogPath: meta.LogPath,
Resources: status.Resources,
}
}
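With Resources now populated on the response, a CRI client can read a container's limits straight out of ContainerStatus. A minimal sketch using the nil-safe getters generated for the v1 CRI API (the printResources helper is illustrative, not part of this change):

import (
    "fmt"

    runtime "k8s.io/cri-api/pkg/apis/runtime/v1"
)

// printResources reads the limits that toCRIContainerStatus now fills
// in. The generated getters are nil-safe, so this also handles statuses
// recorded before the Resources field existed.
func printResources(resp *runtime.ContainerStatusResponse) {
    res := resp.GetStatus().GetResources()
    if linux := res.GetLinux(); linux != nil {
        fmt.Printf("cpu: period=%d quota=%d shares=%d\n",
            linux.GetCpuPeriod(), linux.GetCpuQuota(), linux.GetCpuShares())
        fmt.Printf("memory: limit=%d bytes\n", linux.GetMemoryLimitInBytes())
    }
    if windows := res.GetWindows(); windows != nil {
        fmt.Printf("cpu: shares=%d count=%d\n", windows.GetCpuShares(), windows.GetCpuCount())
        fmt.Printf("memory: limit=%d bytes\n", windows.GetMemoryLimitInBytes())
    }
}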

View File

@@ -45,8 +45,8 @@ func (c *criService) UpdateContainerResources(ctx context.Context, r *runtime.Up
// Update resources in status update transaction, so that:
// 1) There won't be a race condition with container start.
// 2) There won't be concurrent resource update to the same container.
if err := container.Status.Update(func(status containerstore.Status) (containerstore.Status, error) {
return status, c.updateContainerResources(ctx, container, r, status)
if err := container.Status.UpdateSync(func(status containerstore.Status) (containerstore.Status, error) {
return c.updateContainerResources(ctx, container, r, status)
}); err != nil {
return nil, fmt.Errorf("failed to update resources: %w", err)
}
@@ -56,11 +56,13 @@ func (c *criService) UpdateContainerResources(ctx context.Context, r *runtime.Up
func (c *criService) updateContainerResources(ctx context.Context,
cntr containerstore.Container,
r *runtime.UpdateContainerResourcesRequest,
status containerstore.Status) (retErr error) {
status containerstore.Status) (newStatus containerstore.Status, retErr error) {
newStatus = status
id := cntr.ID
// Do not update the container when there is a removal in progress.
if status.Removing {
return fmt.Errorf("container %q is in removing state", id)
return newStatus, fmt.Errorf("container %q is in removing state", id)
}
// Update container spec. If the container is not started yet, updating
@@ -69,15 +71,15 @@ func (c *criService) updateContainerResources(ctx context.Context,
// the spec will become our source of truth for resource limits.
oldSpec, err := cntr.Container.Spec(ctx)
if err != nil {
return fmt.Errorf("failed to get container spec: %w", err)
return newStatus, fmt.Errorf("failed to get container spec: %w", err)
}
newSpec, err := updateOCIResource(ctx, oldSpec, r, c.config)
if err != nil {
return fmt.Errorf("failed to update resource in spec: %w", err)
return newStatus, fmt.Errorf("failed to update resource in spec: %w", err)
}
if err := updateContainerSpec(ctx, cntr.Container, newSpec); err != nil {
return err
return newStatus, err
}
defer func() {
if retErr != nil {
@@ -87,32 +89,35 @@ func (c *criService) updateContainerResources(ctx context.Context,
if err := updateContainerSpec(deferCtx, cntr.Container, oldSpec); err != nil {
log.G(ctx).WithError(err).Errorf("Failed to update spec %+v for container %q", oldSpec, id)
}
} else {
// Update container status only when the spec is updated
newStatus = copyResourcesToStatus(newSpec, status)
}
}()
// If the container is not running, updating the spec is enough; the new
// resource limits will be applied when the container starts.
if status.State() != runtime.ContainerState_CONTAINER_RUNNING {
return nil
return newStatus, nil
}
task, err := cntr.Container.Task(ctx, nil)
if err != nil {
if errdefs.IsNotFound(err) {
// Task exited already.
return nil
return newStatus, nil
}
return fmt.Errorf("failed to get task: %w", err)
return newStatus, fmt.Errorf("failed to get task: %w", err)
}
// newSpec.Linux / newSpec.Windows won't be nil
if err := task.Update(ctx, containerd.WithResources(getResources(newSpec))); err != nil {
if errdefs.IsNotFound(err) {
// Task exited already.
return nil
return newStatus, nil
}
return fmt.Errorf("failed to update resources: %w", err)
return newStatus, fmt.Errorf("failed to update resources: %w", err)
}
return nil
return newStatus, nil
}
// updateContainerSpec updates container spec.
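The move from Update to UpdateSync is what lets the resource copy land in the store: the callback now returns the status to commit, and the store persists that return value atomically. A simplified sketch of the contract, assuming the statusStorage type shown in the last hunk below (updateSyncSketch is a hypothetical name; the real UpdateSync also checkpoints to disk):

func (s *statusStorage) updateSyncSketch(u func(Status) (Status, error)) error {
    // Hold the write lock for the whole transaction so a concurrent
    // container start or a second resource update cannot interleave.
    s.Lock()
    defer s.Unlock()
    newStatus, err := u(s.status)
    if err != nil {
        return err
    }
    // The real UpdateSync writes newStatus to the on-disk checkpoint
    // here before making it visible to readers.
    s.status = newStatus
    return nil
}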

View File

@@ -38,6 +38,7 @@ import (
"github.com/containerd/containerd/runtime/linux/runctypes"
runcoptions "github.com/containerd/containerd/runtime/v2/runc/options"
"github.com/containerd/typeurl"
runtimespec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/sirupsen/logrus"
runhcsoptions "github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/options"
@@ -429,3 +430,83 @@ func getPassthroughAnnotations(podAnnotations map[string]string,
}
return passthroughAnnotations
}
// copyResourcesToStatus copies container resource constraints from the
// spec to the container status.
// This will need updates when new fields are added to ContainerResources.
func copyResourcesToStatus(spec *runtimespec.Spec, status containerstore.Status) containerstore.Status {
status.Resources = &runtime.ContainerResources{}
if spec.Linux != nil {
status.Resources.Linux = &runtime.LinuxContainerResources{}
if spec.Process != nil && spec.Process.OOMScoreAdj != nil {
status.Resources.Linux.OomScoreAdj = int64(*spec.Process.OOMScoreAdj)
}
if spec.Linux.Resources == nil {
return status
}
if spec.Linux.Resources.CPU != nil {
if spec.Linux.Resources.CPU.Period != nil {
status.Resources.Linux.CpuPeriod = int64(*spec.Linux.Resources.CPU.Period)
}
if spec.Linux.Resources.CPU.Quota != nil {
status.Resources.Linux.CpuQuota = *spec.Linux.Resources.CPU.Quota
}
if spec.Linux.Resources.CPU.Shares != nil {
status.Resources.Linux.CpuShares = int64(*spec.Linux.Resources.CPU.Shares)
}
status.Resources.Linux.CpusetCpus = spec.Linux.Resources.CPU.Cpus
status.Resources.Linux.CpusetMems = spec.Linux.Resources.CPU.Mems
}
if spec.Linux.Resources.Memory != nil {
if spec.Linux.Resources.Memory.Limit != nil {
status.Resources.Linux.MemoryLimitInBytes = *spec.Linux.Resources.Memory.Limit
}
if spec.Linux.Resources.Memory.Swap != nil {
status.Resources.Linux.MemorySwapLimitInBytes = *spec.Linux.Resources.Memory.Swap
}
}
if spec.Linux.Resources.HugepageLimits != nil {
// Allocate with zero length: append would otherwise leave nil entries at the front.
hugepageLimits := make([]*runtime.HugepageLimit, 0, len(spec.Linux.Resources.HugepageLimits))
for _, l := range spec.Linux.Resources.HugepageLimits {
hugepageLimits = append(hugepageLimits, &runtime.HugepageLimit{
PageSize: l.Pagesize,
Limit: l.Limit,
})
}
status.Resources.Linux.HugepageLimits = hugepageLimits
}
if spec.Linux.Resources.Unified != nil {
status.Resources.Linux.Unified = spec.Linux.Resources.Unified
}
}
if spec.Windows != nil {
status.Resources.Windows = &runtime.WindowsContainerResources{}
if spec.Windows.Resources == nil {
return status
}
if spec.Windows.Resources.CPU != nil {
// Guard each pointer field separately: Count and Maximum may be nil
// even when Shares is set.
if spec.Windows.Resources.CPU.Shares != nil {
status.Resources.Windows.CpuShares = int64(*spec.Windows.Resources.CPU.Shares)
}
if spec.Windows.Resources.CPU.Count != nil {
status.Resources.Windows.CpuCount = int64(*spec.Windows.Resources.CPU.Count)
}
if spec.Windows.Resources.CPU.Maximum != nil {
status.Resources.Windows.CpuMaximum = int64(*spec.Windows.Resources.CPU.Maximum)
}
}
if spec.Windows.Resources.Memory != nil {
if spec.Windows.Resources.Memory.Limit != nil {
status.Resources.Windows.MemoryLimitInBytes = int64(*spec.Windows.Resources.Memory.Limit)
}
}
// TODO: Figure out how to get RootfsSizeInBytes
}
return status
}
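A test-style sketch of the new helper's mapping, assuming it lives in the same package (values are arbitrary; the point is that optional OCI pointer fields become plain CRI values):

func TestCopyResourcesToStatus(t *testing.T) {
    period := uint64(100000)
    quota := int64(50000)
    memory := int64(256 << 20) // 256 MiB
    spec := &runtimespec.Spec{
        Linux: &runtimespec.Linux{
            Resources: &runtimespec.LinuxResources{
                CPU:    &runtimespec.LinuxCPU{Period: &period, Quota: &quota},
                Memory: &runtimespec.LinuxMemory{Limit: &memory},
            },
        },
    }
    status := copyResourcesToStatus(spec, containerstore.Status{})
    if got := status.Resources.Linux.CpuPeriod; got != 100000 {
        t.Errorf("CpuPeriod = %d, want 100000", got)
    }
    if got := status.Resources.Linux.MemoryLimitInBytes; got != memory {
        t.Errorf("MemoryLimitInBytes = %d, want %d", got, memory)
    }
}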

View File

@@ -97,6 +97,8 @@ type Status struct {
// Unknown indicates that the container status is not fully loaded.
// This field doesn't need to be checkpointed.
Unknown bool `json:"-"`
// Resources holds the container's runtime resource constraints.
Resources *runtime.ContainerResources
}
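Unlike Unknown, the Resources field carries no json:"-" tag, so it is serialized with the status checkpoint and survives a containerd restart. A rough sketch of that round trip, assuming the checkpoint path uses encoding/json as the rest of the status store does (checkpointRoundTripSketch is a hypothetical helper):

func checkpointRoundTripSketch() (Status, error) {
    st := Status{
        CreatedAt: time.Now().UnixNano(),
        Resources: &runtime.ContainerResources{
            Linux: &runtime.LinuxContainerResources{MemoryLimitInBytes: 256 << 20},
        },
    }
    data, err := json.Marshal(st)
    if err != nil {
        return Status{}, err
    }
    var loaded Status
    err = json.Unmarshal(data, &loaded)
    // loaded.Resources.Linux.MemoryLimitInBytes == 268435456, while
    // loaded.Unknown is always false: json:"-" keeps it out of the file.
    return loaded, err
}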
// State returns current state of the container based on the container status.
@@ -203,7 +205,56 @@ type statusStorage struct {
func (s *statusStorage) Get() Status {
s.RLock()
defer s.RUnlock()
return s.status
// Deep copy is needed in case some fields in Status are updated after Get()
// is called.
return deepCopyOf(s.status)
}
func deepCopyOf(s Status) Status {
statusCopy := s
// Resources is the only field that is a pointer, and therefore needs
// a manual deep copy.
// This will need updates when new fields are added to ContainerResources.
if s.Resources == nil {
return statusCopy
}
statusCopy.Resources = &runtime.ContainerResources{}
if s.Resources.Linux != nil {
// Allocate with zero length: append would otherwise leave nil entries at the front.
hugepageLimits := make([]*runtime.HugepageLimit, 0, len(s.Resources.Linux.HugepageLimits))
for _, l := range s.Resources.Linux.HugepageLimits {
hugepageLimits = append(hugepageLimits, &runtime.HugepageLimit{
PageSize: l.PageSize,
Limit: l.Limit,
})
}
// Unified is a map, which is also a reference type, so copy it element-wise.
var unified map[string]string
if s.Resources.Linux.Unified != nil {
unified = make(map[string]string, len(s.Resources.Linux.Unified))
for k, v := range s.Resources.Linux.Unified {
unified[k] = v
}
}
statusCopy.Resources.Linux = &runtime.LinuxContainerResources{
CpuPeriod: s.Resources.Linux.CpuPeriod,
CpuQuota: s.Resources.Linux.CpuQuota,
CpuShares: s.Resources.Linux.CpuShares,
CpusetCpus: s.Resources.Linux.CpusetCpus,
CpusetMems: s.Resources.Linux.CpusetMems,
MemoryLimitInBytes: s.Resources.Linux.MemoryLimitInBytes,
MemorySwapLimitInBytes: s.Resources.Linux.MemorySwapLimitInBytes,
OomScoreAdj: s.Resources.Linux.OomScoreAdj,
Unified: unified,
HugepageLimits: hugepageLimits,
}
}
if s.Resources.Windows != nil {
statusCopy.Resources.Windows = &runtime.WindowsContainerResources{
CpuShares: s.Resources.Windows.CpuShares,
CpuCount: s.Resources.Windows.CpuCount,
CpuMaximum: s.Resources.Windows.CpuMaximum,
MemoryLimitInBytes: s.Resources.Windows.MemoryLimitInBytes,
RootfsSizeInBytes: s.Resources.Windows.RootfsSizeInBytes,
}
}
return statusCopy
}
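The effect of the copy is easiest to see from the caller's side: mutating what Get() returns must not change what the store holds. A small sketch (deepCopyDemo is hypothetical):

func deepCopyDemo(s *statusStorage) {
    got := s.Get()
    if got.Resources != nil && got.Resources.Linux != nil {
        // This writes to the caller's copy only. Without deepCopyOf,
        // it would mutate the shared Resources pointer under readers.
        got.Resources.Linux.MemoryLimitInBytes = 0
    }
    // A subsequent s.Get() still reports the original limit.
}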
// UpdateSync updates the container status and the on disk checkpoint.