Adds Windows resource limits support
This allows running Windows Containers to have their resource limits updated through containerd. CPU resource limit updates are supported on Windows Server 20H2 and newer; on older versions, hcsshim will raise an Unimplemented error.

Signed-off-by: Claudiu Belu <cbelu@cloudbasesolutions.com>
parent 5b29542cbe
commit 2bc77b8a28
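With this change, the Windows field of the CRI UpdateContainerResourcesRequest is honoured by the containerd CRI plugin. The sketch below is illustrative only and not part of this commit: it assumes the caller already holds a connected CRI runtime client (for example, dialed against containerd's Windows named-pipe endpoint), and the helper name, container ID, and timeout are assumptions.

package main

import (
    "context"
    "time"

    runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
)

// updateWindowsLimits is an illustrative sketch only. It assumes the caller
// already holds a CRI runtime client connected to containerd.
func updateWindowsLimits(client runtimeapi.RuntimeServiceClient, containerID string) error {
    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
    defer cancel()

    _, err := client.UpdateContainerResources(ctx, &runtimeapi.UpdateContainerResourcesRequest{
        ContainerId: containerID,
        // For Windows containers only the Windows field is set; Linux stays nil.
        Windows: &runtimeapi.WindowsContainerResources{
            // Memory limit updates work regardless of the Windows version.
            MemoryLimitInBytes: 800 * 1024 * 1024,
            // CPU limit updates require Windows Server 20H2 or newer; on older
            // versions hcsshim raises an Unimplemented error.
            CpuMaximum: 5000, // portion of total CPU times 100, i.e. 50%
        },
    })
    return err
}

The request carries both a Linux and a Windows resources field; as the diff below shows, the CRI plugin's platform-specific updateOCIResource/getResources implementations pick whichever applies to the container's OS.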
@@ -67,7 +67,7 @@ func TestUpdateContainerResources(t *testing.T) {
    t.Log("Update container memory limit after created")
    err = runtimeService.UpdateContainerResources(cn, &runtime.LinuxContainerResources{
        MemoryLimitInBytes: 400 * 1024 * 1024,
    })
    }, nil)
    require.NoError(t, err)

    t.Log("Check memory limit in container OCI spec")

@@ -90,7 +90,7 @@ func TestUpdateContainerResources(t *testing.T) {
    t.Log("Update container memory limit after started")
    err = runtimeService.UpdateContainerResources(cn, &runtime.LinuxContainerResources{
        MemoryLimitInBytes: 800 * 1024 * 1024,
    })
    }, nil)
    require.NoError(t, err)

    t.Log("Check memory limit in container OCI spec")
@@ -63,7 +63,7 @@ type ContainerManager interface {
    // ContainerStatus returns the status of the container.
    ContainerStatus(containerID string, opts ...grpc.CallOption) (*runtimeapi.ContainerStatus, error)
    // UpdateContainerResources updates the cgroup resources for the container.
    UpdateContainerResources(containerID string, resources *runtimeapi.LinuxContainerResources, opts ...grpc.CallOption) error
    UpdateContainerResources(containerID string, resources *runtimeapi.LinuxContainerResources, windowsResources *runtimeapi.WindowsContainerResources, opts ...grpc.CallOption) error
    // ExecSync executes a command in the container, and returns the stdout output.
    // If command exits with a non-zero exit code, an error is returned.
    ExecSync(containerID string, cmd []string, timeout time.Duration, opts ...grpc.CallOption) (stdout []byte, stderr []byte, err error)
@@ -361,7 +361,7 @@ func (r *RuntimeService) ContainerStatus(containerID string, opts ...grpc.CallOp
}

// UpdateContainerResources updates a containers resource config
func (r *RuntimeService) UpdateContainerResources(containerID string, resources *runtimeapi.LinuxContainerResources, opts ...grpc.CallOption) error {
func (r *RuntimeService) UpdateContainerResources(containerID string, resources *runtimeapi.LinuxContainerResources, windowsResources *runtimeapi.WindowsContainerResources, opts ...grpc.CallOption) error {
    klog.V(10).Infof("[RuntimeService] UpdateContainerResources (containerID=%v, timeout=%v)", containerID, r.timeout)
    ctx, cancel := getContextWithTimeout(r.timeout)
    defer cancel()

@@ -369,6 +369,7 @@ func (r *RuntimeService) UpdateContainerResources(containerID string, resources
    _, err := r.runtimeClient.UpdateContainerResources(ctx, &runtimeapi.UpdateContainerResourcesRequest{
        ContainerId: containerID,
        Linux:       resources,
        Windows:     windowsResources,
    }, opts...)
    if err != nil {
        klog.Errorf("UpdateContainerResources %q from runtime service failed: %v", containerID, err)
@@ -111,12 +111,15 @@ func TestTruncIndex(t *testing.T) {
    require.NoError(t, err)
    assert.Equal(t, cn, cStats.Attributes.Id)

    t.Logf("Update container memory limit after started")
    if goruntime.GOOS != "windows" {
        // TODO(claudiub): remove this when UpdateContainerResources works on running Windows Containers.
        // https://github.com/containerd/containerd/issues/5187
        t.Logf("Update container memory limit after started")
        err = runtimeService.UpdateContainerResources(cnTruncIndex, &runtimeapi.LinuxContainerResources{
            MemoryLimitInBytes: 50 * 1024 * 1024,
        }, nil)
        assert.NoError(t, err)
    } else {
        err = runtimeService.UpdateContainerResources(cnTruncIndex, nil, &runtimeapi.WindowsContainerResources{
            MemoryLimitInBytes: 50 * 1024 * 1024,
        })
        assert.NoError(t, err)
    }
@@ -174,9 +174,6 @@ func WithWindowsResources(resources *runtime.WindowsContainerResources) oci.Spec
        if s.Windows.Resources == nil {
            s.Windows.Resources = &runtimespec.WindowsResources{}
        }
        if s.Windows.Resources.CPU == nil {
            s.Windows.Resources.CPU = &runtimespec.WindowsCPUResources{}
        }
        if s.Windows.Resources.Memory == nil {
            s.Windows.Resources.Memory = &runtimespec.WindowsMemoryResources{}
        }

@@ -187,6 +184,9 @@ func WithWindowsResources(resources *runtime.WindowsContainerResources) oci.Spec
            max    = uint16(resources.GetCpuMaximum())
            limit  = uint64(resources.GetMemoryLimitInBytes())
        )
        if s.Windows.Resources.CPU == nil && (count != 0 || shares != 0 || max != 0) {
            s.Windows.Resources.CPU = &runtimespec.WindowsCPUResources{}
        }
        if count != 0 {
            s.Windows.Resources.CPU.Count = &count
        }
pkg/cri/server/container_update_resources.go (new file, 131 lines)
@@ -0,0 +1,131 @@
//go:build !darwin && !freebsd
// +build !darwin,!freebsd

/*
Copyright The containerd Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package server

import (
    gocontext "context"

    "github.com/containerd/containerd"
    "github.com/containerd/containerd/containers"
    "github.com/containerd/containerd/errdefs"
    "github.com/containerd/containerd/log"
    "github.com/containerd/typeurl"
    runtimespec "github.com/opencontainers/runtime-spec/specs-go"
    "github.com/pkg/errors"
    "golang.org/x/net/context"
    runtime "k8s.io/cri-api/pkg/apis/runtime/v1"

    containerstore "github.com/containerd/containerd/pkg/cri/store/container"
    ctrdutil "github.com/containerd/containerd/pkg/cri/util"
)

// UpdateContainerResources updates ContainerConfig of the container.
func (c *criService) UpdateContainerResources(ctx context.Context, r *runtime.UpdateContainerResourcesRequest) (retRes *runtime.UpdateContainerResourcesResponse, retErr error) {
    container, err := c.containerStore.Get(r.GetContainerId())
    if err != nil {
        return nil, errors.Wrap(err, "failed to find container")
    }
    // Update resources in status update transaction, so that:
    // 1) There won't be race condition with container start.
    // 2) There won't be concurrent resource update to the same container.
    if err := container.Status.Update(func(status containerstore.Status) (containerstore.Status, error) {
        return status, c.updateContainerResources(ctx, container, r, status)
    }); err != nil {
        return nil, errors.Wrap(err, "failed to update resources")
    }
    return &runtime.UpdateContainerResourcesResponse{}, nil
}

func (c *criService) updateContainerResources(ctx context.Context,
    cntr containerstore.Container,
    r *runtime.UpdateContainerResourcesRequest,
    status containerstore.Status) (retErr error) {
    id := cntr.ID
    // Do not update the container when there is a removal in progress.
    if status.Removing {
        return errors.Errorf("container %q is in removing state", id)
    }

    // Update container spec. If the container is not started yet, updating
    // spec makes sure that the resource limits are correct when start;
    // if the container is already started, updating spec is still required,
    // the spec will become our source of truth for resource limits.
    oldSpec, err := cntr.Container.Spec(ctx)
    if err != nil {
        return errors.Wrap(err, "failed to get container spec")
    }
    newSpec, err := updateOCIResource(ctx, oldSpec, r, c.config)
    if err != nil {
        return errors.Wrap(err, "failed to update resource in spec")
    }

    if err := updateContainerSpec(ctx, cntr.Container, newSpec); err != nil {
        return err
    }
    defer func() {
        if retErr != nil {
            deferCtx, deferCancel := ctrdutil.DeferContext()
            defer deferCancel()
            // Reset spec on error.
            if err := updateContainerSpec(deferCtx, cntr.Container, oldSpec); err != nil {
                log.G(ctx).WithError(err).Errorf("Failed to update spec %+v for container %q", oldSpec, id)
            }
        }
    }()

    // If container is not running, only update spec is enough, new resource
    // limit will be applied when container start.
    if status.State() != runtime.ContainerState_CONTAINER_RUNNING {
        return nil
    }

    task, err := cntr.Container.Task(ctx, nil)
    if err != nil {
        if errdefs.IsNotFound(err) {
            // Task exited already.
            return nil
        }
        return errors.Wrap(err, "failed to get task")
    }
    // newSpec.Linux / newSpec.Windows won't be nil
    if err := task.Update(ctx, containerd.WithResources(getResources(newSpec))); err != nil {
        if errdefs.IsNotFound(err) {
            // Task exited already.
            return nil
        }
        return errors.Wrap(err, "failed to update resources")
    }
    return nil
}

// updateContainerSpec updates container spec.
func updateContainerSpec(ctx context.Context, cntr containerd.Container, spec *runtimespec.Spec) error {
    any, err := typeurl.MarshalAny(spec)
    if err != nil {
        return errors.Wrapf(err, "failed to marshal spec %+v", spec)
    }
    if err := cntr.Update(ctx, func(ctx gocontext.Context, client *containerd.Client, c *containers.Container) error {
        c.Spec = any
        return nil
    }); err != nil {
        return errors.Wrap(err, "failed to update container spec")
    }
    return nil
}
@@ -17,122 +17,20 @@
package server

import (
    gocontext "context"

    "github.com/containerd/containerd"
    "github.com/containerd/containerd/containers"
    "github.com/containerd/containerd/errdefs"
    "github.com/containerd/containerd/log"
    "github.com/containerd/typeurl"
    runtimespec "github.com/opencontainers/runtime-spec/specs-go"
    "github.com/pkg/errors"
    "golang.org/x/net/context"
    runtime "k8s.io/cri-api/pkg/apis/runtime/v1"

    criconfig "github.com/containerd/containerd/pkg/cri/config"
    "github.com/containerd/containerd/pkg/cri/opts"
    containerstore "github.com/containerd/containerd/pkg/cri/store/container"
    "github.com/containerd/containerd/pkg/cri/util"
    ctrdutil "github.com/containerd/containerd/pkg/cri/util"
)

// UpdateContainerResources updates ContainerConfig of the container.
func (c *criService) UpdateContainerResources(ctx context.Context, r *runtime.UpdateContainerResourcesRequest) (retRes *runtime.UpdateContainerResourcesResponse, retErr error) {
    container, err := c.containerStore.Get(r.GetContainerId())
    if err != nil {
        return nil, errors.Wrap(err, "failed to find container")
    }
    // Update resources in status update transaction, so that:
    // 1) There won't be race condition with container start.
    // 2) There won't be concurrent resource update to the same container.
    if err := container.Status.Update(func(status containerstore.Status) (containerstore.Status, error) {
        return status, c.updateContainerResources(ctx, container, r.GetLinux(), status)
    }); err != nil {
        return nil, errors.Wrap(err, "failed to update resources")
    }
    return &runtime.UpdateContainerResourcesResponse{}, nil
}
// updateOCIResource updates container resource limit.
func updateOCIResource(ctx context.Context, spec *runtimespec.Spec, r *runtime.UpdateContainerResourcesRequest,
    config criconfig.Config) (*runtimespec.Spec, error) {

func (c *criService) updateContainerResources(ctx context.Context,
    cntr containerstore.Container,
    resources *runtime.LinuxContainerResources,
    status containerstore.Status) (retErr error) {
    id := cntr.ID
    // Do not update the container when there is a removal in progress.
    if status.Removing {
        return errors.Errorf("container %q is in removing state", id)
    }

    // Update container spec. If the container is not started yet, updating
    // spec makes sure that the resource limits are correct when start;
    // if the container is already started, updating spec is still required,
    // the spec will become our source of truth for resource limits.
    oldSpec, err := cntr.Container.Spec(ctx)
    if err != nil {
        return errors.Wrap(err, "failed to get container spec")
    }
    newSpec, err := updateOCILinuxResource(ctx, oldSpec, resources,
        c.config.TolerateMissingHugetlbController, c.config.DisableHugetlbController)
    if err != nil {
        return errors.Wrap(err, "failed to update resource in spec")
    }

    if err := updateContainerSpec(ctx, cntr.Container, newSpec); err != nil {
        return err
    }
    defer func() {
        if retErr != nil {
            deferCtx, deferCancel := ctrdutil.DeferContext()
            defer deferCancel()
            // Reset spec on error.
            if err := updateContainerSpec(deferCtx, cntr.Container, oldSpec); err != nil {
                log.G(ctx).WithError(err).Errorf("Failed to update spec %+v for container %q", oldSpec, id)
            }
        }
    }()

    // If container is not running, only update spec is enough, new resource
    // limit will be applied when container start.
    if status.State() != runtime.ContainerState_CONTAINER_RUNNING {
        return nil
    }

    task, err := cntr.Container.Task(ctx, nil)
    if err != nil {
        if errdefs.IsNotFound(err) {
            // Task exited already.
            return nil
        }
        return errors.Wrap(err, "failed to get task")
    }
    // newSpec.Linux won't be nil
    if err := task.Update(ctx, containerd.WithResources(newSpec.Linux.Resources)); err != nil {
        if errdefs.IsNotFound(err) {
            // Task exited already.
            return nil
        }
        return errors.Wrap(err, "failed to update resources")
    }
    return nil
}

// updateContainerSpec updates container spec.
func updateContainerSpec(ctx context.Context, cntr containerd.Container, spec *runtimespec.Spec) error {
    any, err := typeurl.MarshalAny(spec)
    if err != nil {
        return errors.Wrapf(err, "failed to marshal spec %+v", spec)
    }
    if err := cntr.Update(ctx, func(ctx gocontext.Context, client *containerd.Client, c *containers.Container) error {
        c.Spec = any
        return nil
    }); err != nil {
        return errors.Wrap(err, "failed to update container spec")
    }
    return nil
}

// updateOCILinuxResource updates container resource limit.
func updateOCILinuxResource(ctx context.Context, spec *runtimespec.Spec, new *runtime.LinuxContainerResources,
    tolerateMissingHugetlbController, disableHugetlbController bool) (*runtimespec.Spec, error) {
    // Copy to make sure old spec is not changed.
    var cloned runtimespec.Spec
    if err := util.DeepCopy(&cloned, spec); err != nil {

@@ -141,8 +39,12 @@ func updateOCILinuxResource(ctx context.Context, spec *runtimespec.Spec, new *ru
    if cloned.Linux == nil {
        cloned.Linux = &runtimespec.Linux{}
    }
    if err := opts.WithResources(new, tolerateMissingHugetlbController, disableHugetlbController)(ctx, nil, nil, &cloned); err != nil {
    if err := opts.WithResources(r.GetLinux(), config.TolerateMissingHugetlbController, config.DisableHugetlbController)(ctx, nil, nil, &cloned); err != nil {
        return nil, errors.Wrap(err, "unable to set linux container resources")
    }
    return &cloned, nil
}

func getResources(spec *runtimespec.Spec) interface{} {
    return spec.Linux.Resources
}
@@ -24,6 +24,8 @@ import (
    "github.com/stretchr/testify/assert"
    "google.golang.org/protobuf/proto"
    runtime "k8s.io/cri-api/pkg/apis/runtime/v1"

    criconfig "github.com/containerd/containerd/pkg/cri/config"
)

func TestUpdateOCILinuxResource(t *testing.T) {

@@ -31,7 +33,7 @@ func TestUpdateOCILinuxResource(t *testing.T) {
    *oomscoreadj = -500
    for desc, test := range map[string]struct {
        spec      *runtimespec.Spec
        resources *runtime.LinuxContainerResources
        request   *runtime.UpdateContainerResourcesRequest
        expected  *runtimespec.Spec
        expectErr bool
    }{

@@ -52,15 +54,17 @@ func TestUpdateOCILinuxResource(t *testing.T) {
                },
            },
        },
        resources: &runtime.LinuxContainerResources{
            CpuPeriod: 6666,
            CpuQuota: 5555,
            CpuShares: 4444,
            MemoryLimitInBytes: 54321,
            OomScoreAdj: 500,
            CpusetCpus: "4-5",
            CpusetMems: "6-7",
            Unified: map[string]string{"memory.min": "1507328", "memory.swap.max": "0"},
        request: &runtime.UpdateContainerResourcesRequest{
            Linux: &runtime.LinuxContainerResources{
                CpuPeriod: 6666,
                CpuQuota: 5555,
                CpuShares: 4444,
                MemoryLimitInBytes: 54321,
                OomScoreAdj: 500,
                CpusetCpus: "4-5",
                CpusetMems: "6-7",
                Unified: map[string]string{"memory.min": "1507328", "memory.swap.max": "0"},
            },
        },
        expected: &runtimespec.Spec{
            Process: &runtimespec.Process{OOMScoreAdj: oomscoreadj},

@@ -96,12 +100,14 @@ func TestUpdateOCILinuxResource(t *testing.T) {
                },
            },
        },
        resources: &runtime.LinuxContainerResources{
            CpuQuota: 5555,
            CpuShares: 4444,
            MemoryLimitInBytes: 54321,
            OomScoreAdj: 500,
            CpusetMems: "6-7",
        request: &runtime.UpdateContainerResourcesRequest{
            Linux: &runtime.LinuxContainerResources{
                CpuQuota: 5555,
                CpuShares: 4444,
                MemoryLimitInBytes: 54321,
                OomScoreAdj: 500,
                CpusetMems: "6-7",
            },
        },
        expected: &runtimespec.Spec{
            Process: &runtimespec.Process{OOMScoreAdj: oomscoreadj},

@@ -129,15 +135,17 @@ func TestUpdateOCILinuxResource(t *testing.T) {
                },
            },
        },
        resources: &runtime.LinuxContainerResources{
            CpuPeriod: 6666,
            CpuQuota: 5555,
            CpuShares: 4444,
            MemoryLimitInBytes: 54321,
            OomScoreAdj: 500,
            CpusetCpus: "4-5",
            CpusetMems: "6-7",
            Unified: map[string]string{"memory.min": "65536", "memory.swap.max": "1024"},
        request: &runtime.UpdateContainerResourcesRequest{
            Linux: &runtime.LinuxContainerResources{
                CpuPeriod: 6666,
                CpuQuota: 5555,
                CpuShares: 4444,
                MemoryLimitInBytes: 54321,
                OomScoreAdj: 500,
                CpusetCpus: "4-5",
                CpusetMems: "6-7",
                Unified: map[string]string{"memory.min": "65536", "memory.swap.max": "1024"},
            },
        },
        expected: &runtimespec.Spec{
            Process: &runtimespec.Process{OOMScoreAdj: oomscoreadj},

@@ -173,15 +181,17 @@ func TestUpdateOCILinuxResource(t *testing.T) {
                },
            },
        },
        resources: &runtime.LinuxContainerResources{
            CpuPeriod: 6666,
            CpuQuota: 5555,
            CpuShares: 4444,
            MemoryLimitInBytes: 54321,
            OomScoreAdj: 500,
            CpusetCpus: "4-5",
            CpusetMems: "6-7",
            Unified: map[string]string{"memory.min": "1507328", "memory.swap.max": "1024"},
        request: &runtime.UpdateContainerResourcesRequest{
            Linux: &runtime.LinuxContainerResources{
                CpuPeriod: 6666,
                CpuQuota: 5555,
                CpuShares: 4444,
                MemoryLimitInBytes: 54321,
                OomScoreAdj: 500,
                CpusetCpus: "4-5",
                CpusetMems: "6-7",
                Unified: map[string]string{"memory.min": "1507328", "memory.swap.max": "1024"},
            },
        },
        expected: &runtimespec.Spec{
            Process: &runtimespec.Process{OOMScoreAdj: oomscoreadj},

@@ -202,7 +212,13 @@ func TestUpdateOCILinuxResource(t *testing.T) {
        },
    } {
        t.Logf("TestCase %q", desc)
        got, err := updateOCILinuxResource(context.Background(), test.spec, test.resources, false, false)
        config := criconfig.Config{
            PluginConfig: criconfig.PluginConfig{
                TolerateMissingHugetlbController: false,
                DisableHugetlbController: false,
            },
        }
        got, err := updateOCIResource(context.Background(), test.spec, test.request, config)
        if test.expectErr {
            assert.Error(t, err)
        } else {
@@ -17,13 +17,34 @@
package server

import (
    "github.com/containerd/containerd/errdefs"
    runtimespec "github.com/opencontainers/runtime-spec/specs-go"
    "github.com/pkg/errors"
    "golang.org/x/net/context"
    runtime "k8s.io/cri-api/pkg/apis/runtime/v1"

    criconfig "github.com/containerd/containerd/pkg/cri/config"
    "github.com/containerd/containerd/pkg/cri/opts"
    "github.com/containerd/containerd/pkg/cri/util"
)

// UpdateContainerResources updates ContainerConfig of the container.
// TODO(windows): Figure out whether windows support this.
func (c *criService) UpdateContainerResources(ctx context.Context, r *runtime.UpdateContainerResourcesRequest) (*runtime.UpdateContainerResourcesResponse, error) {
    return nil, errdefs.ErrNotImplemented
// updateOCIResource updates container resource limit.
func updateOCIResource(ctx context.Context, spec *runtimespec.Spec, r *runtime.UpdateContainerResourcesRequest,
    config criconfig.Config) (*runtimespec.Spec, error) {

    // Copy to make sure old spec is not changed.
    var cloned runtimespec.Spec
    if err := util.DeepCopy(&cloned, spec); err != nil {
        return nil, errors.Wrap(err, "failed to deep copy")
    }
    if cloned.Windows == nil {
        cloned.Windows = &runtimespec.Windows{}
    }
    if err := opts.WithWindowsResources(r.GetWindows())(ctx, nil, nil, &cloned); err != nil {
        return nil, errors.Wrap(err, "unable to set windows container resources")
    }
    return &cloned, nil
}

func getResources(spec *runtimespec.Spec) interface{} {
    return spec.Windows.Resources
}
@@ -878,7 +878,7 @@ func (in *instrumentedService) UpdateContainerResources(ctx context.Context, r *
    if err := in.checkInitialized(); err != nil {
        return nil, err
    }
    log.G(ctx).Infof("UpdateContainerResources for %q with %+v", r.GetContainerId(), r.GetLinux())
    log.G(ctx).Infof("UpdateContainerResources for %q with Linux: %+v / Windows: %+v", r.GetContainerId(), r.GetLinux(), r.GetWindows())
    defer func() {
        if err != nil {
            log.G(ctx).WithError(err).Errorf("UpdateContainerResources for %q failed", r.GetContainerId())

@@ -894,7 +894,7 @@ func (in *instrumentedAlphaService) UpdateContainerResources(ctx context.Context
    if err := in.checkInitialized(); err != nil {
        return nil, err
    }
    log.G(ctx).Infof("UpdateContainerResources for %q with %+v", r.GetContainerId(), r.GetLinux())
    log.G(ctx).Infof("UpdateContainerResources for %q with Linux: %+v / Windows: %+v", r.GetContainerId(), r.GetLinux(), r.GetWindows())
    defer func() {
        if err != nil {
            log.G(ctx).WithError(err).Errorf("UpdateContainerResources for %q failed", r.GetContainerId())