Adds Windows resource limits support

This allows the resource limits of running Windows Containers to be
updated through containerd. CPU resource limit support is added for
Windows Server 20H2 and newer; on older versions, hcsshim will raise
an Unimplemented error.

Signed-off-by: Claudiu Belu <cbelu@cloudbasesolutions.com>
Claudiu Belu 2021-07-21 19:41:59 -07:00
parent 5b29542cbe
commit 2bc77b8a28
10 changed files with 233 additions and 159 deletions


@@ -67,7 +67,7 @@ func TestUpdateContainerResources(t *testing.T) {
 	t.Log("Update container memory limit after created")
 	err = runtimeService.UpdateContainerResources(cn, &runtime.LinuxContainerResources{
 		MemoryLimitInBytes: 400 * 1024 * 1024,
-	})
+	}, nil)
 	require.NoError(t, err)

 	t.Log("Check memory limit in container OCI spec")
@@ -90,7 +90,7 @@ func TestUpdateContainerResources(t *testing.T) {
 	t.Log("Update container memory limit after started")
 	err = runtimeService.UpdateContainerResources(cn, &runtime.LinuxContainerResources{
 		MemoryLimitInBytes: 800 * 1024 * 1024,
-	})
+	}, nil)
 	require.NoError(t, err)

 	t.Log("Check memory limit in container OCI spec")


@@ -63,7 +63,7 @@ type ContainerManager interface {
 	// ContainerStatus returns the status of the container.
 	ContainerStatus(containerID string, opts ...grpc.CallOption) (*runtimeapi.ContainerStatus, error)
 	// UpdateContainerResources updates the cgroup resources for the container.
-	UpdateContainerResources(containerID string, resources *runtimeapi.LinuxContainerResources, opts ...grpc.CallOption) error
+	UpdateContainerResources(containerID string, resources *runtimeapi.LinuxContainerResources, windowsResources *runtimeapi.WindowsContainerResources, opts ...grpc.CallOption) error
 	// ExecSync executes a command in the container, and returns the stdout output.
 	// If command exits with a non-zero exit code, an error is returned.
 	ExecSync(containerID string, cmd []string, timeout time.Duration, opts ...grpc.CallOption) (stdout []byte, stderr []byte, err error)
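
Usage sketch (not part of this change set): with the extended signature, a caller passes either the Linux or the Windows resource struct and nil for the other. The resourceUpdater interface below mirrors the ContainerManager change above and is declared locally so the sketch is self-contained; the updateLimits helper, the isWindows flag, and the limit values are illustrative assumptions, not code from this commit.

package main

import (
	"google.golang.org/grpc"
	runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
)

// resourceUpdater mirrors the UpdateContainerResources signature introduced
// by this commit; declared locally to keep the sketch self-contained.
type resourceUpdater interface {
	UpdateContainerResources(containerID string, resources *runtimeapi.LinuxContainerResources, windowsResources *runtimeapi.WindowsContainerResources, opts ...grpc.CallOption) error
}

// updateLimits is a hypothetical helper: exactly one of the two resource
// structs is non-nil, matching how the tests in this commit call the client.
func updateLimits(client resourceUpdater, containerID string, isWindows bool) error {
	if isWindows {
		return client.UpdateContainerResources(containerID, nil, &runtimeapi.WindowsContainerResources{
			MemoryLimitInBytes: 200 * 1024 * 1024, // illustrative value
			CpuCount:           2,                 // CPU limits require Windows Server 20H2 or newer
		})
	}
	return client.UpdateContainerResources(containerID, &runtimeapi.LinuxContainerResources{
		MemoryLimitInBytes: 200 * 1024 * 1024, // illustrative value
	}, nil)
}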


@@ -361,7 +361,7 @@ func (r *RuntimeService) ContainerStatus(containerID string, opts ...grpc.CallOp
 }

 // UpdateContainerResources updates a containers resource config
-func (r *RuntimeService) UpdateContainerResources(containerID string, resources *runtimeapi.LinuxContainerResources, opts ...grpc.CallOption) error {
+func (r *RuntimeService) UpdateContainerResources(containerID string, resources *runtimeapi.LinuxContainerResources, windowsResources *runtimeapi.WindowsContainerResources, opts ...grpc.CallOption) error {
 	klog.V(10).Infof("[RuntimeService] UpdateContainerResources (containerID=%v, timeout=%v)", containerID, r.timeout)
 	ctx, cancel := getContextWithTimeout(r.timeout)
 	defer cancel()
@@ -369,6 +369,7 @@ func (r *RuntimeService) UpdateContainerResources(containerID string, resources
 	_, err := r.runtimeClient.UpdateContainerResources(ctx, &runtimeapi.UpdateContainerResourcesRequest{
 		ContainerId: containerID,
 		Linux:       resources,
+		Windows:     windowsResources,
 	}, opts...)
 	if err != nil {
 		klog.Errorf("UpdateContainerResources %q from runtime service failed: %v", containerID, err)


@@ -111,12 +111,15 @@ func TestTruncIndex(t *testing.T) {
 	require.NoError(t, err)
 	assert.Equal(t, cn, cStats.Attributes.Id)

-	if goruntime.GOOS != "windows" {
-		// TODO(claudiub): remove this when UpdateContainerResources works on running Windows Containers.
-		// https://github.com/containerd/containerd/issues/5187
-		t.Logf("Update container memory limit after started")
+	t.Logf("Update container memory limit after started")
+	if goruntime.GOOS != "windows" {
 		err = runtimeService.UpdateContainerResources(cnTruncIndex, &runtimeapi.LinuxContainerResources{
 			MemoryLimitInBytes: 50 * 1024 * 1024,
+		}, nil)
+		assert.NoError(t, err)
+	} else {
+		err = runtimeService.UpdateContainerResources(cnTruncIndex, nil, &runtimeapi.WindowsContainerResources{
+			MemoryLimitInBytes: 50 * 1024 * 1024,
 		})
 		assert.NoError(t, err)
 	}


@@ -174,9 +174,6 @@ func WithWindowsResources(resources *runtime.WindowsContainerResources) oci.Spec
 		if s.Windows.Resources == nil {
 			s.Windows.Resources = &runtimespec.WindowsResources{}
 		}
-		if s.Windows.Resources.CPU == nil {
-			s.Windows.Resources.CPU = &runtimespec.WindowsCPUResources{}
-		}
 		if s.Windows.Resources.Memory == nil {
 			s.Windows.Resources.Memory = &runtimespec.WindowsMemoryResources{}
 		}
@@ -187,6 +184,9 @@ func WithWindowsResources(resources *runtime.WindowsContainerResources) oci.Spec
 			max    = uint16(resources.GetCpuMaximum())
 			limit  = uint64(resources.GetMemoryLimitInBytes())
 		)
+		if s.Windows.Resources.CPU == nil && (count != 0 || shares != 0 || max != 0) {
+			s.Windows.Resources.CPU = &runtimespec.WindowsCPUResources{}
+		}
 		if count != 0 {
 			s.Windows.Resources.CPU.Count = &count
 		}


@@ -0,0 +1,131 @@
+//go:build !darwin && !freebsd
+// +build !darwin,!freebsd
+
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package server
+
+import (
+	gocontext "context"
+
+	"github.com/containerd/containerd"
+	"github.com/containerd/containerd/containers"
+	"github.com/containerd/containerd/errdefs"
+	"github.com/containerd/containerd/log"
+	"github.com/containerd/typeurl"
+	runtimespec "github.com/opencontainers/runtime-spec/specs-go"
+	"github.com/pkg/errors"
+	"golang.org/x/net/context"
+	runtime "k8s.io/cri-api/pkg/apis/runtime/v1"
+
+	containerstore "github.com/containerd/containerd/pkg/cri/store/container"
+	ctrdutil "github.com/containerd/containerd/pkg/cri/util"
+)
+
+// UpdateContainerResources updates ContainerConfig of the container.
+func (c *criService) UpdateContainerResources(ctx context.Context, r *runtime.UpdateContainerResourcesRequest) (retRes *runtime.UpdateContainerResourcesResponse, retErr error) {
+	container, err := c.containerStore.Get(r.GetContainerId())
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to find container")
+	}
+	// Update resources in status update transaction, so that:
+	// 1) There won't be race condition with container start.
+	// 2) There won't be concurrent resource update to the same container.
+	if err := container.Status.Update(func(status containerstore.Status) (containerstore.Status, error) {
+		return status, c.updateContainerResources(ctx, container, r, status)
+	}); err != nil {
+		return nil, errors.Wrap(err, "failed to update resources")
+	}
+	return &runtime.UpdateContainerResourcesResponse{}, nil
+}
+
+func (c *criService) updateContainerResources(ctx context.Context,
+	cntr containerstore.Container,
+	r *runtime.UpdateContainerResourcesRequest,
+	status containerstore.Status) (retErr error) {
+	id := cntr.ID
+	// Do not update the container when there is a removal in progress.
+	if status.Removing {
+		return errors.Errorf("container %q is in removing state", id)
+	}
+
+	// Update container spec. If the container is not started yet, updating
+	// spec makes sure that the resource limits are correct when start;
+	// if the container is already started, updating spec is still required,
+	// the spec will become our source of truth for resource limits.
+	oldSpec, err := cntr.Container.Spec(ctx)
+	if err != nil {
+		return errors.Wrap(err, "failed to get container spec")
+	}
+	newSpec, err := updateOCIResource(ctx, oldSpec, r, c.config)
+	if err != nil {
+		return errors.Wrap(err, "failed to update resource in spec")
+	}
+
+	if err := updateContainerSpec(ctx, cntr.Container, newSpec); err != nil {
+		return err
+	}
+	defer func() {
+		if retErr != nil {
+			deferCtx, deferCancel := ctrdutil.DeferContext()
+			defer deferCancel()
+			// Reset spec on error.
+			if err := updateContainerSpec(deferCtx, cntr.Container, oldSpec); err != nil {
+				log.G(ctx).WithError(err).Errorf("Failed to update spec %+v for container %q", oldSpec, id)
+			}
+		}
+	}()
+
+	// If container is not running, only update spec is enough, new resource
+	// limit will be applied when container start.
+	if status.State() != runtime.ContainerState_CONTAINER_RUNNING {
+		return nil
+	}
+
+	task, err := cntr.Container.Task(ctx, nil)
+	if err != nil {
+		if errdefs.IsNotFound(err) {
+			// Task exited already.
+			return nil
+		}
+		return errors.Wrap(err, "failed to get task")
+	}
+	// newSpec.Linux / newSpec.Windows won't be nil
+	if err := task.Update(ctx, containerd.WithResources(getResources(newSpec))); err != nil {
+		if errdefs.IsNotFound(err) {
+			// Task exited already.
+			return nil
+		}
+		return errors.Wrap(err, "failed to update resources")
+	}
+	return nil
+}
+
+// updateContainerSpec updates container spec.
+func updateContainerSpec(ctx context.Context, cntr containerd.Container, spec *runtimespec.Spec) error {
+	any, err := typeurl.MarshalAny(spec)
+	if err != nil {
+		return errors.Wrapf(err, "failed to marshal spec %+v", spec)
+	}
+	if err := cntr.Update(ctx, func(ctx gocontext.Context, client *containerd.Client, c *containers.Container) error {
+		c.Spec = any
+		return nil
+	}); err != nil {
+		return errors.Wrap(err, "failed to update container spec")
+	}
+	return nil
+}


@@ -17,122 +17,20 @@
 package server

 import (
-	gocontext "context"
-
-	"github.com/containerd/containerd"
-	"github.com/containerd/containerd/containers"
-	"github.com/containerd/containerd/errdefs"
-	"github.com/containerd/containerd/log"
-	"github.com/containerd/typeurl"
 	runtimespec "github.com/opencontainers/runtime-spec/specs-go"
 	"github.com/pkg/errors"
 	"golang.org/x/net/context"
 	runtime "k8s.io/cri-api/pkg/apis/runtime/v1"

+	criconfig "github.com/containerd/containerd/pkg/cri/config"
 	"github.com/containerd/containerd/pkg/cri/opts"
-	containerstore "github.com/containerd/containerd/pkg/cri/store/container"
 	"github.com/containerd/containerd/pkg/cri/util"
-	ctrdutil "github.com/containerd/containerd/pkg/cri/util"
)

-// UpdateContainerResources updates ContainerConfig of the container.
-func (c *criService) UpdateContainerResources(ctx context.Context, r *runtime.UpdateContainerResourcesRequest) (retRes *runtime.UpdateContainerResourcesResponse, retErr error) {
-	container, err := c.containerStore.Get(r.GetContainerId())
-	if err != nil {
-		return nil, errors.Wrap(err, "failed to find container")
-	}
-	// Update resources in status update transaction, so that:
-	// 1) There won't be race condition with container start.
-	// 2) There won't be concurrent resource update to the same container.
-	if err := container.Status.Update(func(status containerstore.Status) (containerstore.Status, error) {
-		return status, c.updateContainerResources(ctx, container, r.GetLinux(), status)
-	}); err != nil {
-		return nil, errors.Wrap(err, "failed to update resources")
-	}
-	return &runtime.UpdateContainerResourcesResponse{}, nil
-}
-
-func (c *criService) updateContainerResources(ctx context.Context,
-	cntr containerstore.Container,
-	resources *runtime.LinuxContainerResources,
-	status containerstore.Status) (retErr error) {
-	id := cntr.ID
-	// Do not update the container when there is a removal in progress.
-	if status.Removing {
-		return errors.Errorf("container %q is in removing state", id)
-	}
-
-	// Update container spec. If the container is not started yet, updating
-	// spec makes sure that the resource limits are correct when start;
-	// if the container is already started, updating spec is still required,
-	// the spec will become our source of truth for resource limits.
-	oldSpec, err := cntr.Container.Spec(ctx)
-	if err != nil {
-		return errors.Wrap(err, "failed to get container spec")
-	}
-	newSpec, err := updateOCILinuxResource(ctx, oldSpec, resources,
-		c.config.TolerateMissingHugetlbController, c.config.DisableHugetlbController)
-	if err != nil {
-		return errors.Wrap(err, "failed to update resource in spec")
-	}
-
-	if err := updateContainerSpec(ctx, cntr.Container, newSpec); err != nil {
-		return err
-	}
-	defer func() {
-		if retErr != nil {
-			deferCtx, deferCancel := ctrdutil.DeferContext()
-			defer deferCancel()
-			// Reset spec on error.
-			if err := updateContainerSpec(deferCtx, cntr.Container, oldSpec); err != nil {
-				log.G(ctx).WithError(err).Errorf("Failed to update spec %+v for container %q", oldSpec, id)
-			}
-		}
-	}()
-
-	// If container is not running, only update spec is enough, new resource
-	// limit will be applied when container start.
-	if status.State() != runtime.ContainerState_CONTAINER_RUNNING {
-		return nil
-	}
-
-	task, err := cntr.Container.Task(ctx, nil)
-	if err != nil {
-		if errdefs.IsNotFound(err) {
-			// Task exited already.
-			return nil
-		}
-		return errors.Wrap(err, "failed to get task")
-	}
-	// newSpec.Linux won't be nil
-	if err := task.Update(ctx, containerd.WithResources(newSpec.Linux.Resources)); err != nil {
-		if errdefs.IsNotFound(err) {
-			// Task exited already.
-			return nil
-		}
-		return errors.Wrap(err, "failed to update resources")
-	}
-	return nil
-}
-
-// updateContainerSpec updates container spec.
-func updateContainerSpec(ctx context.Context, cntr containerd.Container, spec *runtimespec.Spec) error {
-	any, err := typeurl.MarshalAny(spec)
-	if err != nil {
-		return errors.Wrapf(err, "failed to marshal spec %+v", spec)
-	}
-	if err := cntr.Update(ctx, func(ctx gocontext.Context, client *containerd.Client, c *containers.Container) error {
-		c.Spec = any
-		return nil
-	}); err != nil {
-		return errors.Wrap(err, "failed to update container spec")
-	}
-	return nil
-}
-
-// updateOCILinuxResource updates container resource limit.
-func updateOCILinuxResource(ctx context.Context, spec *runtimespec.Spec, new *runtime.LinuxContainerResources,
-	tolerateMissingHugetlbController, disableHugetlbController bool) (*runtimespec.Spec, error) {
+// updateOCIResource updates container resource limit.
+func updateOCIResource(ctx context.Context, spec *runtimespec.Spec, r *runtime.UpdateContainerResourcesRequest,
+	config criconfig.Config) (*runtimespec.Spec, error) {
 	// Copy to make sure old spec is not changed.
 	var cloned runtimespec.Spec
 	if err := util.DeepCopy(&cloned, spec); err != nil {
@@ -141,8 +39,12 @@ func updateOCILinuxResource(ctx context.Context, spec *runtimespec.Spec, new *ru
 	if cloned.Linux == nil {
 		cloned.Linux = &runtimespec.Linux{}
 	}
-	if err := opts.WithResources(new, tolerateMissingHugetlbController, disableHugetlbController)(ctx, nil, nil, &cloned); err != nil {
+	if err := opts.WithResources(r.GetLinux(), config.TolerateMissingHugetlbController, config.DisableHugetlbController)(ctx, nil, nil, &cloned); err != nil {
 		return nil, errors.Wrap(err, "unable to set linux container resources")
 	}
 	return &cloned, nil
 }
+
+func getResources(spec *runtimespec.Spec) interface{} {
+	return spec.Linux.Resources
+}


@@ -24,6 +24,8 @@ import (
 	"github.com/stretchr/testify/assert"
 	"google.golang.org/protobuf/proto"
 	runtime "k8s.io/cri-api/pkg/apis/runtime/v1"
+
+	criconfig "github.com/containerd/containerd/pkg/cri/config"
 )

 func TestUpdateOCILinuxResource(t *testing.T) {
@@ -31,7 +33,7 @@ func TestUpdateOCILinuxResource(t *testing.T) {
 	*oomscoreadj = -500
 	for desc, test := range map[string]struct {
 		spec      *runtimespec.Spec
-		resources *runtime.LinuxContainerResources
+		request   *runtime.UpdateContainerResourcesRequest
 		expected  *runtimespec.Spec
 		expectErr bool
 	}{
@@ -52,7 +54,8 @@ func TestUpdateOCILinuxResource(t *testing.T) {
 					},
 				},
 			},
-			resources: &runtime.LinuxContainerResources{
+			request: &runtime.UpdateContainerResourcesRequest{
+				Linux: &runtime.LinuxContainerResources{
 				CpuPeriod:          6666,
 				CpuQuota:           5555,
 				CpuShares:          4444,
@@ -62,6 +65,7 @@ func TestUpdateOCILinuxResource(t *testing.T) {
 				CpusetMems:         "6-7",
 				Unified:            map[string]string{"memory.min": "1507328", "memory.swap.max": "0"},
 				},
+			},
 			expected: &runtimespec.Spec{
 				Process: &runtimespec.Process{OOMScoreAdj: oomscoreadj},
 				Linux: &runtimespec.Linux{
@@ -96,13 +100,15 @@ func TestUpdateOCILinuxResource(t *testing.T) {
 					},
 				},
 			},
-			resources: &runtime.LinuxContainerResources{
+			request: &runtime.UpdateContainerResourcesRequest{
+				Linux: &runtime.LinuxContainerResources{
 				CpuQuota:           5555,
 				CpuShares:          4444,
 				MemoryLimitInBytes: 54321,
 				OomScoreAdj:        500,
 				CpusetMems:         "6-7",
 				},
+			},
 			expected: &runtimespec.Spec{
 				Process: &runtimespec.Process{OOMScoreAdj: oomscoreadj},
 				Linux: &runtimespec.Linux{
@@ -129,7 +135,8 @@ func TestUpdateOCILinuxResource(t *testing.T) {
 					},
 				},
 			},
-			resources: &runtime.LinuxContainerResources{
+			request: &runtime.UpdateContainerResourcesRequest{
+				Linux: &runtime.LinuxContainerResources{
 				CpuPeriod:          6666,
 				CpuQuota:           5555,
 				CpuShares:          4444,
@@ -139,6 +146,7 @@ func TestUpdateOCILinuxResource(t *testing.T) {
 				CpusetMems:         "6-7",
 				Unified:            map[string]string{"memory.min": "65536", "memory.swap.max": "1024"},
 				},
+			},
 			expected: &runtimespec.Spec{
 				Process: &runtimespec.Process{OOMScoreAdj: oomscoreadj},
 				Linux: &runtimespec.Linux{
@@ -173,7 +181,8 @@ func TestUpdateOCILinuxResource(t *testing.T) {
 					},
 				},
 			},
-			resources: &runtime.LinuxContainerResources{
+			request: &runtime.UpdateContainerResourcesRequest{
+				Linux: &runtime.LinuxContainerResources{
 				CpuPeriod:          6666,
 				CpuQuota:           5555,
 				CpuShares:          4444,
@@ -183,6 +192,7 @@ func TestUpdateOCILinuxResource(t *testing.T) {
 				CpusetMems:         "6-7",
 				Unified:            map[string]string{"memory.min": "1507328", "memory.swap.max": "1024"},
 				},
+			},
 			expected: &runtimespec.Spec{
 				Process: &runtimespec.Process{OOMScoreAdj: oomscoreadj},
 				Linux: &runtimespec.Linux{
@@ -202,7 +212,13 @@ func TestUpdateOCILinuxResource(t *testing.T) {
 		},
 	} {
 		t.Logf("TestCase %q", desc)
-		got, err := updateOCILinuxResource(context.Background(), test.spec, test.resources, false, false)
+		config := criconfig.Config{
+			PluginConfig: criconfig.PluginConfig{
+				TolerateMissingHugetlbController: false,
+				DisableHugetlbController:         false,
+			},
+		}
+		got, err := updateOCIResource(context.Background(), test.spec, test.request, config)
 		if test.expectErr {
 			assert.Error(t, err)
 		} else {


@@ -17,13 +17,34 @@
 package server

 import (
-	"github.com/containerd/containerd/errdefs"
+	runtimespec "github.com/opencontainers/runtime-spec/specs-go"
+	"github.com/pkg/errors"
 	"golang.org/x/net/context"
 	runtime "k8s.io/cri-api/pkg/apis/runtime/v1"
+
+	criconfig "github.com/containerd/containerd/pkg/cri/config"
+	"github.com/containerd/containerd/pkg/cri/opts"
+	"github.com/containerd/containerd/pkg/cri/util"
 )

-// UpdateContainerResources updates ContainerConfig of the container.
-// TODO(windows): Figure out whether windows support this.
-func (c *criService) UpdateContainerResources(ctx context.Context, r *runtime.UpdateContainerResourcesRequest) (*runtime.UpdateContainerResourcesResponse, error) {
-	return nil, errdefs.ErrNotImplemented
+// updateOCIResource updates container resource limit.
+func updateOCIResource(ctx context.Context, spec *runtimespec.Spec, r *runtime.UpdateContainerResourcesRequest,
+	config criconfig.Config) (*runtimespec.Spec, error) {
+	// Copy to make sure old spec is not changed.
+	var cloned runtimespec.Spec
+	if err := util.DeepCopy(&cloned, spec); err != nil {
+		return nil, errors.Wrap(err, "failed to deep copy")
+	}
+	if cloned.Windows == nil {
+		cloned.Windows = &runtimespec.Windows{}
+	}
+	if err := opts.WithWindowsResources(r.GetWindows())(ctx, nil, nil, &cloned); err != nil {
+		return nil, errors.Wrap(err, "unable to set windows container resources")
+	}
+	return &cloned, nil
+}
+
+func getResources(spec *runtimespec.Spec) interface{} {
+	return spec.Windows.Resources
 }


@@ -878,7 +878,7 @@ func (in *instrumentedService) UpdateContainerResources(ctx context.Context, r *
 	if err := in.checkInitialized(); err != nil {
 		return nil, err
 	}
-	log.G(ctx).Infof("UpdateContainerResources for %q with %+v", r.GetContainerId(), r.GetLinux())
+	log.G(ctx).Infof("UpdateContainerResources for %q with Linux: %+v / Windows: %+v", r.GetContainerId(), r.GetLinux(), r.GetWindows())
 	defer func() {
 		if err != nil {
 			log.G(ctx).WithError(err).Errorf("UpdateContainerResources for %q failed", r.GetContainerId())
@@ -894,7 +894,7 @@ func (in *instrumentedAlphaService) UpdateContainerResources(ctx context.Context
 	if err := in.checkInitialized(); err != nil {
 		return nil, err
 	}
-	log.G(ctx).Infof("UpdateContainerResources for %q with %+v", r.GetContainerId(), r.GetLinux())
+	log.G(ctx).Infof("UpdateContainerResources for %q with Linux: %+v / Windows: %+v", r.GetContainerId(), r.GetLinux(), r.GetWindows())
 	defer func() {
 		if err != nil {
 			log.G(ctx).WithError(err).Errorf("UpdateContainerResources for %q failed", r.GetContainerId())