Merge pull request #7413 from samuelkarp/cri-integration-sandboxed
cri-integration: propagate ENABLE_CRI_SANDBOXES
commit e14dca4a40
@@ -261,6 +261,7 @@ func (c *criService) CreateContainer(ctx context.Context, r *runtime.CreateConta
 	}()
 
 	status := containerstore.Status{CreatedAt: time.Now().UnixNano()}
+	status = copyResourcesToStatus(spec, status)
 	container, err := containerstore.NewContainer(meta,
 		containerstore.WithStatus(status, containerRootDir),
 		containerstore.WithContainer(cntr),
@@ -119,6 +119,7 @@ func toCRIContainerStatus(container containerstore.Container, spec *runtime.Imag
 		Annotations: meta.Config.GetAnnotations(),
 		Mounts:      meta.Config.GetMounts(),
 		LogPath:     meta.LogPath,
+		Resources:   status.Resources,
 	}
 }
 
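Together, the two hunks above make a container's resource limits visible through the CRI: CreateContainer seeds status.Resources from the generated OCI spec, and toCRIContainerStatus returns it in ContainerStatus responses. A minimal client-side sketch of reading the new field, assuming an already-dialed runtime.RuntimeServiceClient; printContainerCPU and client are hypothetical names, not part of this change:

	import (
		"context"
		"fmt"

		runtime "k8s.io/cri-api/pkg/apis/runtime/v1"
	)

	// printContainerCPU fetches a container's CRI status and prints the CPU
	// limits that this change starts populating.
	func printContainerCPU(ctx context.Context, client runtime.RuntimeServiceClient, id string) error {
		resp, err := client.ContainerStatus(ctx, &runtime.ContainerStatusRequest{ContainerId: id})
		if err != nil {
			return err
		}
		// The generated getters are nil-safe, so missing fields read as zero.
		if linux := resp.GetStatus().GetResources().GetLinux(); linux != nil {
			fmt.Printf("cpu period=%d quota=%d shares=%d\n",
				linux.GetCpuPeriod(), linux.GetCpuQuota(), linux.GetCpuShares())
		}
		return nil
	}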
@@ -24,13 +24,14 @@ import (
 	gocontext "context"
 	"fmt"
 
+	"github.com/containerd/typeurl"
+	runtimespec "github.com/opencontainers/runtime-spec/specs-go"
+	runtime "k8s.io/cri-api/pkg/apis/runtime/v1"
+
 	"github.com/containerd/containerd"
 	"github.com/containerd/containerd/containers"
 	"github.com/containerd/containerd/errdefs"
 	"github.com/containerd/containerd/log"
-	"github.com/containerd/typeurl"
-	runtimespec "github.com/opencontainers/runtime-spec/specs-go"
-	runtime "k8s.io/cri-api/pkg/apis/runtime/v1"
 
 	containerstore "github.com/containerd/containerd/pkg/cri/store/container"
 	ctrdutil "github.com/containerd/containerd/pkg/cri/util"
@@ -45,8 +46,8 @@ func (c *criService) UpdateContainerResources(ctx context.Context, r *runtime.Up
 	// Update resources in status update transaction, so that:
 	// 1) There won't be race condition with container start.
 	// 2) There won't be concurrent resource update to the same container.
-	if err := container.Status.Update(func(status containerstore.Status) (containerstore.Status, error) {
-		return status, c.updateContainerResources(ctx, container, r, status)
+	if err := container.Status.UpdateSync(func(status containerstore.Status) (containerstore.Status, error) {
+		return c.updateContainerResources(ctx, container, r, status)
 	}); err != nil {
 		return nil, fmt.Errorf("failed to update resources: %w", err)
 	}
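The switch from Update to UpdateSync above is about durability, not just naming: in the CRI container store, Update mutates only the in-memory status, while UpdateSync also rewrites the on-disk status checkpoint, so the resource values recorded by this change survive a containerd restart. A paraphrased sketch of the store interface involved (based on pkg/cri/store/container; illustrative, not a verbatim copy):

	// Status is the CRI container status kept by the store (fields elided here).
	type Status struct{ /* ... */ }

	// UpdateFunc receives a copy of the current status and returns the new one.
	type UpdateFunc func(Status) (Status, error)

	// StatusStorage is the rough shape of the per-container status store.
	type StatusStorage interface {
		Get() (Status, error)
		Update(UpdateFunc) error     // in-memory update only
		UpdateSync(UpdateFunc) error // update plus on-disk checkpoint
		Delete() error
	}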
@@ -56,11 +57,13 @@ func (c *criService) UpdateContainerResources(ctx context.Context, r *runtime.Up
 func (c *criService) updateContainerResources(ctx context.Context,
 	cntr containerstore.Container,
 	r *runtime.UpdateContainerResourcesRequest,
-	status containerstore.Status) (retErr error) {
+	status containerstore.Status) (newStatus containerstore.Status, retErr error) {
+
+	newStatus = status
 	id := cntr.ID
 	// Do not update the container when there is a removal in progress.
 	if status.Removing {
-		return fmt.Errorf("container %q is in removing state", id)
+		return newStatus, fmt.Errorf("container %q is in removing state", id)
 	}
 
 	// Update container spec. If the container is not started yet, updating
@@ -69,15 +72,15 @@ func (c *criService) updateContainerResources(ctx context.Context,
 	// the spec will become our source of truth for resource limits.
 	oldSpec, err := cntr.Container.Spec(ctx)
 	if err != nil {
-		return fmt.Errorf("failed to get container spec: %w", err)
+		return newStatus, fmt.Errorf("failed to get container spec: %w", err)
 	}
 	newSpec, err := updateOCIResource(ctx, oldSpec, r, c.config)
 	if err != nil {
-		return fmt.Errorf("failed to update resource in spec: %w", err)
+		return newStatus, fmt.Errorf("failed to update resource in spec: %w", err)
 	}
 
 	if err := updateContainerSpec(ctx, cntr.Container, newSpec); err != nil {
-		return err
+		return newStatus, err
 	}
 	defer func() {
 		if retErr != nil {
@@ -87,32 +90,35 @@ func (c *criService) updateContainerResources(ctx context.Context,
 			if err := updateContainerSpec(deferCtx, cntr.Container, oldSpec); err != nil {
 				log.G(ctx).WithError(err).Errorf("Failed to update spec %+v for container %q", oldSpec, id)
 			}
-		}
+		} else {
+			// Update container status only when the spec is updated
+			newStatus = copyResourcesToStatus(newSpec, status)
+		}
 	}()
 
 	// If container is not running, only update spec is enough, new resource
 	// limit will be applied when container start.
 	if status.State() != runtime.ContainerState_CONTAINER_RUNNING {
-		return nil
+		return newStatus, nil
 	}
 
 	task, err := cntr.Container.Task(ctx, nil)
 	if err != nil {
 		if errdefs.IsNotFound(err) {
 			// Task exited already.
-			return nil
+			return newStatus, nil
 		}
-		return fmt.Errorf("failed to get task: %w", err)
+		return newStatus, fmt.Errorf("failed to get task: %w", err)
 	}
 	// newSpec.Linux / newSpec.Windows won't be nil
 	if err := task.Update(ctx, containerd.WithResources(getResources(newSpec))); err != nil {
 		if errdefs.IsNotFound(err) {
 			// Task exited already.
-			return nil
+			return newStatus, nil
 		}
-		return fmt.Errorf("failed to update resources: %w", err)
+		return newStatus, fmt.Errorf("failed to update resources: %w", err)
 	}
-	return nil
+	return newStatus, nil
 }
 
 // updateContainerSpec updates container spec.
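The deferred closure above is a compensating-action pattern: the spec update is applied eagerly, rolled back if any later step fails, and mirrored into the returned status only on success. Stripped of the CRI specifics, the shape is roughly this (hypothetical helper, illustration only):

	// applyWithRollback applies a change, arranges a best-effort rollback that
	// fires only if a later step fails, and then runs the remaining steps.
	// The named return lets the deferred closure observe the final error.
	func applyWithRollback(apply, rollback, rest func() error) (retErr error) {
		if err := apply(); err != nil {
			return err
		}
		defer func() {
			if retErr != nil {
				// Roll back best-effort; don't mask the original error.
				_ = rollback()
			}
		}()
		return rest()
	}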
@@ -24,6 +24,10 @@ import (
 	"strconv"
 	"strings"
 
+	"github.com/containerd/typeurl"
+	runtimespec "github.com/opencontainers/runtime-spec/specs-go"
+	"github.com/sirupsen/logrus"
+
 	"github.com/containerd/containerd"
 	"github.com/containerd/containerd/containers"
 	"github.com/containerd/containerd/errdefs"
@@ -37,8 +41,6 @@ import (
 	"github.com/containerd/containerd/reference/docker"
 	"github.com/containerd/containerd/runtime/linux/runctypes"
 	runcoptions "github.com/containerd/containerd/runtime/v2/runc/options"
-	"github.com/containerd/typeurl"
-	"github.com/sirupsen/logrus"
 
 	runhcsoptions "github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/options"
 	imagedigest "github.com/opencontainers/go-digest"
@@ -431,3 +433,83 @@ func getPassthroughAnnotations(podAnnotations map[string]string,
 	}
 	return passthroughAnnotations
 }
+
+// copyResourcesToStatus copies container resource constraints from the spec to
+// the container status.
+// This will need updates when new fields are added to ContainerResources.
+func copyResourcesToStatus(spec *runtimespec.Spec, status containerstore.Status) containerstore.Status {
+	status.Resources = &runtime.ContainerResources{}
+	if spec.Linux != nil {
+		status.Resources.Linux = &runtime.LinuxContainerResources{}
+
+		if spec.Process != nil && spec.Process.OOMScoreAdj != nil {
+			status.Resources.Linux.OomScoreAdj = int64(*spec.Process.OOMScoreAdj)
+		}
+
+		if spec.Linux.Resources == nil {
+			return status
+		}
+
+		if spec.Linux.Resources.CPU != nil {
+			if spec.Linux.Resources.CPU.Period != nil {
+				status.Resources.Linux.CpuPeriod = int64(*spec.Linux.Resources.CPU.Period)
+			}
+			if spec.Linux.Resources.CPU.Quota != nil {
+				status.Resources.Linux.CpuQuota = *spec.Linux.Resources.CPU.Quota
+			}
+			if spec.Linux.Resources.CPU.Shares != nil {
+				status.Resources.Linux.CpuShares = int64(*spec.Linux.Resources.CPU.Shares)
+			}
+			status.Resources.Linux.CpusetCpus = spec.Linux.Resources.CPU.Cpus
+			status.Resources.Linux.CpusetMems = spec.Linux.Resources.CPU.Mems
+		}
+
+		if spec.Linux.Resources.Memory != nil {
+			if spec.Linux.Resources.Memory.Limit != nil {
+				status.Resources.Linux.MemoryLimitInBytes = *spec.Linux.Resources.Memory.Limit
+			}
+			if spec.Linux.Resources.Memory.Swap != nil {
+				status.Resources.Linux.MemorySwapLimitInBytes = *spec.Linux.Resources.Memory.Swap
+			}
+		}
+
+		if spec.Linux.Resources.HugepageLimits != nil {
+			// Allocate with zero length so append does not leave leading nils.
+			hugepageLimits := make([]*runtime.HugepageLimit, 0, len(spec.Linux.Resources.HugepageLimits))
+			for _, l := range spec.Linux.Resources.HugepageLimits {
+				hugepageLimits = append(hugepageLimits, &runtime.HugepageLimit{
+					PageSize: l.Pagesize,
+					Limit:    l.Limit,
+				})
+			}
+			status.Resources.Linux.HugepageLimits = hugepageLimits
+		}
+
+		if spec.Linux.Resources.Unified != nil {
+			status.Resources.Linux.Unified = spec.Linux.Resources.Unified
+		}
+	}
+
+	if spec.Windows != nil {
+		status.Resources.Windows = &runtime.WindowsContainerResources{}
+		if spec.Windows.Resources == nil {
+			return status
+		}
+
+		// Each Windows CPU field is an optional pointer, so check them
+		// individually before dereferencing.
+		if spec.Windows.Resources.CPU != nil {
+			if spec.Windows.Resources.CPU.Shares != nil {
+				status.Resources.Windows.CpuShares = int64(*spec.Windows.Resources.CPU.Shares)
+			}
+			if spec.Windows.Resources.CPU.Count != nil {
+				status.Resources.Windows.CpuCount = int64(*spec.Windows.Resources.CPU.Count)
+			}
+			if spec.Windows.Resources.CPU.Maximum != nil {
+				status.Resources.Windows.CpuMaximum = int64(*spec.Windows.Resources.CPU.Maximum)
+			}
+		}
+
+		if spec.Windows.Resources.Memory != nil {
+			if spec.Windows.Resources.Memory.Limit != nil {
+				status.Resources.Windows.MemoryLimitInBytes = int64(*spec.Windows.Resources.Memory.Limit)
+			}
+		}
+
+		// TODO: Figure out how to get RootfsSizeInBytes
+	}
+	return status
+}
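A quick illustration of what the new helper produces for a small Linux spec; the values are arbitrary, and the call only compiles inside the same package, since copyResourcesToStatus is unexported:

	period := uint64(100000)
	quota := int64(50000)
	spec := &runtimespec.Spec{
		Linux: &runtimespec.Linux{
			Resources: &runtimespec.LinuxResources{
				CPU: &runtimespec.LinuxCPU{Period: &period, Quota: &quota, Cpus: "0-1"},
			},
		},
	}

	status := copyResourcesToStatus(spec, containerstore.Status{})
	// status.Resources.Linux now reports CpuPeriod=100000, CpuQuota=50000 and
	// CpusetCpus="0-1"; anything not set in the spec stays at its zero value.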
@@ -165,8 +165,12 @@ run_containerd() {
   local report_dir=$1
   fi
   CMD=""
-  if [ ! -z "${sudo}" ]; then
+  if [ -n "${sudo}" ]; then
     CMD+="${sudo} "
+    # sudo strips environment variables, so add ENABLE_CRI_SANDBOXES back if present
+    if [ -n "${ENABLE_CRI_SANDBOXES}" ]; then
+      CMD+="ENABLE_CRI_SANDBOXES='${ENABLE_CRI_SANDBOXES}' "
+    fi
   fi
   CMD+="${PWD}/bin/containerd"
 
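For context on the script change: sudo's default env_reset policy scrubs most of the caller's environment before running the target command, so a variable exported by the test harness (for example ENABLE_CRI_SANDBOXES=1, a hypothetical value) would otherwise never reach containerd once the script wraps the binary in ${sudo}. Re-stating ENABLE_CRI_SANDBOXES='...' ahead of the command works because sudo treats leading VAR=value arguments as explicit environment assignments for the command it runs.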