Update Windows runtime to use snapshotter and differ layers
This changes the Windows runtime to use snapshotter- and differ-created layers, and updates the ctr commands to use the snapshotter and differ.

Signed-off-by: Darren Stahl <darst@microsoft.com>
parent a5a9f91832
commit dcff993653
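For orientation, the sketch below approximates the container-creation flow the updated ctr command moves to: instead of passing raw HCSSHIM layer folders through a --layer flag, the client resolves an image and asks containerd to back the rootfs with a snapshot, mirroring the new newContainer() in the diff. The daemon address, namespace, image reference, snapshotter/runtime names, and IDs are illustrative assumptions, not values taken from this commit.

package main

import (
    "context"
    "log"

    "github.com/containerd/containerd"
    "github.com/containerd/containerd/namespaces"
    "github.com/containerd/containerd/oci"
)

func main() {
    // Assumed default named-pipe address for containerd on Windows.
    client, err := containerd.New(`\\.\pipe\containerd-containerd`)
    if err != nil {
        log.Fatal(err)
    }
    defer client.Close()

    ctx := namespaces.WithNamespace(context.Background(), "default")

    // Assumes the image was already pulled; the reference is hypothetical.
    image, err := client.GetImage(ctx, "docker.io/library/nanoserver:latest")
    if err != nil {
        log.Fatal(err)
    }

    // Spec options mirror the new ctr path: image config plus process args.
    specOpts := []oci.SpecOpts{
        oci.WithImageConfig(image),
        oci.WithProcessArgs("cmd", "/c", "echo", "hello"),
    }

    // Container options wire the image, the Windows snapshotter, a fresh
    // snapshot for the rootfs, and the Windows runtime, replacing the old
    // spec.Windows.LayerFolders plumbing.
    container, err := client.NewContainer(ctx, "example",
        containerd.WithNewSpec(specOpts...),
        containerd.WithImage(image),
        containerd.WithSnapshotter("windows"),
        containerd.WithNewSnapshot("example-snapshot", image),
        containerd.WithRuntime("windows", nil),
    )
    if err != nil {
        log.Fatal(err)
    }
    defer container.Delete(ctx, containerd.WithSnapshotCleanup)
}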
@@ -7,32 +7,12 @@ import (
     "github.com/containerd/containerd"
     "github.com/containerd/containerd/cmd/ctr/commands"
     "github.com/containerd/containerd/containers"
-    "github.com/containerd/containerd/errdefs"
     "github.com/containerd/containerd/oci"
     specs "github.com/opencontainers/runtime-spec/specs-go"
-    "github.com/pkg/errors"
     "github.com/sirupsen/logrus"
     "github.com/urfave/cli"
 )
 
-func init() {
-    Command.Flags = append(Command.Flags, cli.StringSliceFlag{
-        Name:  "layer",
-        Usage: "HCSSHIM Layers to be used",
-    })
-}
-
-func withLayers(context *cli.Context) oci.SpecOpts {
-    return func(ctx gocontext.Context, client oci.Client, c *containers.Container, s *specs.Spec) error {
-        l := context.StringSlice("layer")
-        if l == nil {
-            return errors.Wrap(errdefs.ErrInvalidArgument, "base layers must be specified with `--layer`")
-        }
-        s.Windows.LayerFolders = l
-        return nil
-    }
-}
-
 func withTTY(terminal bool) oci.SpecOpts {
     if !terminal {
         return func(ctx gocontext.Context, client oci.Client, c *containers.Container, s *specs.Spec) error {
@@ -51,36 +31,39 @@ func withTTY(terminal bool) oci.SpecOpts {
 
 func newContainer(ctx gocontext.Context, client *containerd.Client, context *cli.Context) (containerd.Container, error) {
     var (
-        // ref = context.Args().First()
+        ref  = context.Args().First()
         id   = context.Args().Get(1)
         args = context.Args()[2:]
-        tty          = context.Bool("tty")
-        labelStrings = context.StringSlice("label")
     )
 
-    labels := commands.LabelArgs(labelStrings)
-    // TODO(mlaventure): get base image once we have a snapshotter
-
-    opts := []oci.SpecOpts{
-        // TODO(mlaventure): use oci.WithImageConfig once we have a snapshotter
-        withLayers(context),
-        oci.WithEnv(context.StringSlice("env")),
-        withMounts(context),
-        withTTY(tty),
+    image, err := client.GetImage(ctx, ref)
+    if err != nil {
+        return nil, err
     }
 
+    var (
+        opts  []oci.SpecOpts
+        cOpts []containerd.NewContainerOpts
+    )
+    opts = append(opts, oci.WithImageConfig(image))
+    opts = append(opts, oci.WithEnv(context.StringSlice("env")))
+    opts = append(opts, withMounts(context))
     if len(args) > 0 {
         opts = append(opts, oci.WithProcessArgs(args...))
     }
     if cwd := context.String("cwd"); cwd != "" {
         opts = append(opts, oci.WithProcessCwd(cwd))
     }
-    return client.NewContainer(ctx, id,
-        containerd.WithNewSpec(opts...),
-        containerd.WithContainerLabels(labels),
-        containerd.WithRuntime(context.String("runtime"), nil),
-        // TODO(mlaventure): containerd.WithImage(image),
-    )
+    opts = append(opts, withTTY(context.Bool("tty")))
+
+    cOpts = append(cOpts, containerd.WithContainerLabels(commands.LabelArgs(context.StringSlice("label"))))
+    cOpts = append(cOpts, containerd.WithImage(image))
+    cOpts = append(cOpts, containerd.WithSnapshotter(context.String("snapshotter")))
+    cOpts = append(cOpts, containerd.WithNewSnapshot(id, image))
+    cOpts = append(cOpts, containerd.WithRuntime(context.String("runtime"), nil))
+
+    cOpts = append([]containerd.NewContainerOpts{containerd.WithNewSpec(opts...)}, cOpts...)
+    return client.NewContainer(ctx, id, cOpts...)
 }
 
 func getNewTaskOpts(_ *cli.Context) []containerd.NewTaskOpts {
@@ -140,7 +140,7 @@ func (s *windowsDiff) Apply(ctx context.Context, desc ocispec.Descriptor, mounts
 // DiffMounts creates a diff between the given mounts and uploads the result
 // to the content store.
 func (s *windowsDiff) DiffMounts(ctx context.Context, lower, upper []mount.Mount, opts ...diff.Opt) (d ocispec.Descriptor, err error) {
-    panic("not implemented on Windows")
+    return emptyDesc, errdefs.ErrNotImplemented
 }
 
 type readCounter struct {
@@ -39,11 +39,6 @@ func (m *Mount) Mount(target string) error {
     if err = hcsshim.PrepareLayer(di, layerID, parentLayerPaths); err != nil {
         return errors.Wrapf(err, "failed to prepare layer %s", m.Source)
     }
-    defer func() {
-        if err != nil {
-            hcsshim.UnprepareLayer(di, layerID)
-        }
-    }()
     return nil
 }
 
@@ -67,10 +62,12 @@ func (m *Mount) GetParentPaths() ([]string, error) {
 
 // Unmount the mount at the provided path
 func Unmount(mount string, flags int) error {
-    home, layerID := filepath.Split(mount)
-    var di = hcsshim.DriverInfo{
-        HomeDir: home,
-    }
+    var (
+        home, layerID = filepath.Split(mount)
+        di            = hcsshim.DriverInfo{
+            HomeDir: home,
+        }
+    )
 
     if err := hcsshim.UnprepareLayer(di, layerID); err != nil {
         return errors.Wrapf(err, "failed to unprepare layer %s", mount)
@@ -1,16 +0,0 @@
-// +build windows
-
-package windows
-
-import (
-    "context"
-
-    "github.com/containerd/containerd/log"
-    "github.com/containerd/containerd/snapshots/storage"
-)
-
-func rollbackWithLogging(ctx context.Context, t storage.Transactor) {
-    if err := t.Rollback(); err != nil {
-        log.G(ctx).WithError(err).Warn("failed to rollback transaction")
-    }
-}
@@ -41,12 +41,12 @@ type snapshotter struct {
 
 // NewSnapshotter returns a new windows snapshotter
 func NewSnapshotter(root string) (snapshots.Snapshotter, error) {
-    fsType, err := getFileSystemType(string(root[0]))
+    fsType, err := getFileSystemType(root)
     if err != nil {
         return nil, err
     }
-    if strings.ToLower(fsType) == "refs" {
-        return nil, errors.Wrapf(errdefs.ErrInvalidArgument, "%s is on an ReFS volume - ReFS volumes are not supported", root)
+    if strings.ToLower(fsType) != "ntfs" {
+        return nil, errors.Wrapf(errdefs.ErrInvalidArgument, "%s is not on an NTFS volume - only NTFS volumes are supported", root)
     }
 
     if err := os.MkdirAll(root, 0700); err != nil {
@@ -83,11 +83,7 @@ func (s *snapshotter) Stat(ctx context.Context, key string) (snapshots.Info, err
     defer t.Rollback()
 
     _, info, _, err := storage.GetInfo(ctx, key)
-    if err != nil {
-        return snapshots.Info{}, err
-    }
-
-    return info, nil
+    return info, err
 }
 
 func (s *snapshotter) Update(ctx context.Context, info snapshots.Info, fieldpaths ...string) (snapshots.Info, error) {
@@ -95,13 +91,7 @@ func (s *snapshotter) Update(ctx context.Context, info snapshots.Info, fieldpath
     if err != nil {
         return snapshots.Info{}, err
     }
-
-    var committed bool
-    defer func() {
-        if committed == false {
-            rollbackWithLogging(ctx, t)
-        }
-    }()
+    defer t.Rollback()
 
     info, err = storage.UpdateInfo(ctx, info, fieldpaths...)
     if err != nil {
@@ -111,7 +101,6 @@ func (s *snapshotter) Update(ctx context.Context, info snapshots.Info, fieldpath
     if err := t.Commit(); err != nil {
         return snapshots.Info{}, err
     }
-    committed = true
 
     return info, nil
 }
@@ -156,6 +145,7 @@ func (s *snapshotter) Mounts(ctx context.Context, key string) ([]mount.Mount, er
         return nil, err
     }
     defer t.Rollback()
+
     snapshot, err := storage.GetSnapshot(ctx, key)
     if err != nil {
         return nil, errors.Wrap(err, "failed to get snapshot mount")
@@ -168,13 +158,8 @@ func (s *snapshotter) Commit(ctx context.Context, name, key string, opts ...snap
     if err != nil {
         return err
     }
-
-    var committed bool
-    defer func() {
-        if committed == false {
-            rollbackWithLogging(ctx, t)
-        }
-    }()
+    defer t.Rollback()
 
     usage := fs.Usage{
         Size: 0,
     }
@@ -186,7 +171,6 @@ func (s *snapshotter) Commit(ctx context.Context, name, key string, opts ...snap
     if err := t.Commit(); err != nil {
         return err
     }
-    committed = true
     return nil
 }
 
@@ -197,13 +181,7 @@ func (s *snapshotter) Remove(ctx context.Context, key string) error {
     if err != nil {
         return err
     }
-
-    var committed bool
-    defer func() {
-        if committed == false {
-            rollbackWithLogging(ctx, t)
-        }
-    }()
+    defer t.Rollback()
 
     id, _, err := storage.Remove(ctx, key)
     if err != nil {
@@ -217,15 +195,13 @@ func (s *snapshotter) Remove(ctx context.Context, key string) error {
         return err
     }
 
-    err = t.Commit()
-    if err != nil {
+    if err := t.Commit(); err != nil {
         if err1 := os.Rename(renamed, path); err1 != nil {
             // May cause inconsistent data on disk
             log.G(ctx).WithError(err1).WithField("path", renamed).Errorf("Failed to rename after failed commit")
         }
         return errors.Wrap(err, "failed to commit")
     }
-    committed = true
 
     if err := hcsshim.DestroyLayer(s.info, renamedID); err != nil {
         // Must be cleaned up, any "rm-*" could be removed if no active transactions
@@ -242,6 +218,7 @@ func (s *snapshotter) Walk(ctx context.Context, fn func(context.Context, snapsho
         return err
     }
     defer t.Rollback()
+
     return storage.WalkInfo(ctx, fn)
 }
 
@@ -251,9 +228,7 @@ func (s *snapshotter) Close() error {
 }
 
 func (s *snapshotter) mounts(sn storage.Snapshot) []mount.Mount {
-    var (
-        roFlag string
-    )
+    var roFlag string
 
     if sn.Kind == snapshots.KindView {
         roFlag = "ro"
@@ -288,13 +263,7 @@ func (s *snapshotter) createSnapshot(ctx context.Context, kind snapshots.Kind, k
     if err != nil {
         return nil, err
     }
-
-    var committed bool
-    defer func() {
-        if committed == false {
-            rollbackWithLogging(ctx, t)
-        }
-    }()
+    defer t.Rollback()
 
     newSnapshot, err := storage.CreateSnapshot(ctx, kind, key, parent, opts...)
     if err != nil {
@@ -328,7 +297,6 @@ func (s *snapshotter) createSnapshot(ctx context.Context, kind snapshots.Kind, k
     if err := t.Commit(); err != nil {
         return nil, errors.Wrap(err, "commit failed")
     }
-    committed = true
 
     return s.mounts(newSnapshot), nil
 }
@@ -343,17 +311,19 @@ func (s *snapshotter) parentIDsToParentPaths(parentIDs []string) []string {
 
 // getFileSystemType obtains the type of a file system through GetVolumeInformation
 // https://msdn.microsoft.com/en-us/library/windows/desktop/aa364993(v=vs.85).aspx
-func getFileSystemType(drive string) (fsType string, hr error) {
+func getFileSystemType(path string) (fsType string, hr error) {
+    drive := filepath.VolumeName(path)
+    if len(drive) != 2 {
+        return "", errors.New("getFileSystemType path must start with a drive letter")
+    }
+
     var (
         modkernel32              = windows.NewLazySystemDLL("kernel32.dll")
         procGetVolumeInformation = modkernel32.NewProc("GetVolumeInformationW")
         buf                      = make([]uint16, 255)
         size                     = windows.MAX_PATH + 1
     )
-    if len(drive) != 1 {
-        return "", errors.New("getFileSystemType must be called with a drive letter")
-    }
-    drive += `:\`
+    drive += `\`
     n := uintptr(unsafe.Pointer(nil))
     r0, _, _ := syscall.Syscall9(procGetVolumeInformation.Addr(), 8, uintptr(unsafe.Pointer(windows.StringToUTF16Ptr(drive))), n, n, n, n, n, uintptr(unsafe.Pointer(&buf[0])), uintptr(size), 0)
     if int32(r0) < 0 {
@@ -4,14 +4,12 @@ package windows
 
 import (
     "context"
-    "fmt"
     "os"
     "path/filepath"
     "strings"
 
     "github.com/Microsoft/hcsshim"
     "github.com/containerd/containerd/errdefs"
-    "github.com/containerd/containerd/log"
     specs "github.com/opencontainers/runtime-spec/specs-go"
     "github.com/pkg/errors"
 )
@@ -49,14 +47,14 @@ func newWindowsContainerConfig(ctx context.Context, owner, id string, spec *spec
     }
     conf.IgnoreFlushesDuringBoot = spec.Windows.IgnoreFlushesDuringBoot
 
-    if len(spec.Windows.LayerFolders) < 1 {
+    if len(spec.Windows.LayerFolders) < 2 {
         return nil, errors.Wrap(errdefs.ErrInvalidArgument,
-            "spec.Windows.LayerFolders must have at least 1 layers")
+            "spec.Windows.LayerFolders must have at least 2 layers")
     }
     var (
-        layerFolders    = spec.Windows.LayerFolders
-        homeDir         = filepath.Dir(layerFolders[0])
-        layerFolderPath = filepath.Join(homeDir, id)
+        layerFolderPath = spec.Windows.LayerFolders[0]
+        layerFolders    = spec.Windows.LayerFolders[1:]
+        layerID         = filepath.Base(layerFolderPath)
     )
 
     // TODO: use the create request Mount for those
@@ -71,39 +69,12 @@ func newWindowsContainerConfig(ctx context.Context, owner, id string, spec *spec
             Path: layerPath,
         })
     }
 
-    var (
-        di = hcsshim.DriverInfo{
-            Flavour: 1, // filter driver
-            HomeDir: homeDir,
-        }
-    )
     conf.LayerFolderPath = layerFolderPath
 
-    // TODO: Once there is a snapshotter for windows, this can be deleted.
-    // The R/W Layer should come from the Rootfs Mounts provided
-    //
-    // Windows doesn't support creating a container with a readonly
-    // filesystem, so always create a RW one
-    if err = hcsshim.CreateSandboxLayer(di, id, layerFolders[0], layerFolders); err != nil {
-        return nil, errors.Wrapf(err, "failed to create sandbox layer for %s: layers: %#v, driverInfo: %#v",
-            id, layerFolders, di)
+    var di = hcsshim.DriverInfo{
+        HomeDir: filepath.Dir(layerFolderPath),
     }
-    defer func() {
-        if err != nil {
-            removeLayer(ctx, conf.LayerFolderPath)
-        }
-    }()
-
-    if err = hcsshim.ActivateLayer(di, id); err != nil {
-        return nil, errors.Wrapf(err, "failed to activate layer %s", conf.LayerFolderPath)
-    }
-
-    if err = hcsshim.PrepareLayer(di, id, layerFolders); err != nil {
-        return nil, errors.Wrapf(err, "failed to prepare layer %s", conf.LayerFolderPath)
-    }
-
-    conf.VolumePath, err = hcsshim.GetLayerMountPath(di, id)
+    conf.VolumePath, err = hcsshim.GetLayerMountPath(di, layerID)
     if err != nil {
         return nil, errors.Wrapf(err, "failed to getmount path for layer %s: driverInfo: %#v", id, di)
     }
@@ -146,41 +117,6 @@ func newWindowsContainerConfig(ctx context.Context, owner, id string, spec *spec
     return conf, nil
 }
 
-// removeLayer deletes the given layer, all associated containers must have
-// been shutdown for this to succeed.
-func removeLayer(ctx context.Context, path string) error {
-    var (
-        err        error
-        layerID    = filepath.Base(path)
-        parentPath = filepath.Dir(path)
-        di         = hcsshim.DriverInfo{
-            Flavour: 1, // filter driver
-            HomeDir: parentPath,
-        }
-    )
-
-    if err = hcsshim.UnprepareLayer(di, layerID); err != nil {
-        log.G(ctx).WithError(err).Warnf("failed to unprepare layer %s for removal", path)
-    }
-
-    if err = hcsshim.DeactivateLayer(di, layerID); err != nil {
-        log.G(ctx).WithError(err).Warnf("failed to deactivate layer %s for removal", path)
-    }
-
-    removePath := filepath.Join(parentPath, fmt.Sprintf("%s-removing", layerID))
-    if err = os.Rename(path, removePath); err != nil {
-        log.G(ctx).WithError(err).Warnf("failed to rename container layer %s for removal", path)
-        removePath = path
-    }
-
-    if err = hcsshim.DestroyLayer(di, removePath); err != nil {
-        log.G(ctx).WithError(err).Errorf("failed to remove container layer %s", removePath)
-        return err
-    }
-
-    return nil
-}
-
 func newProcessConfig(processSpec *specs.Process, pset *pipeSet) *hcsshim.ProcessConfig {
     conf := &hcsshim.ProcessConfig{
         EmulateConsole: pset.src.Terminal,
@@ -1,54 +0,0 @@
-// +build windows
-
-package windows
-
-// TODO: remove this file (i.e. meta.go) once we have a snapshotter
-
-import (
-    "github.com/boltdb/bolt"
-    "github.com/containerd/containerd/errdefs"
-    "github.com/pkg/errors"
-)
-
-func newLayerFolderStore(tx *bolt.Tx) *layerFolderStore {
-    return &layerFolderStore{tx}
-}
-
-type layerFolderStore struct {
-    tx *bolt.Tx
-}
-
-func (s *layerFolderStore) Create(id, layer string) error {
-    bkt, err := s.tx.CreateBucketIfNotExists([]byte(pluginID))
-    if err != nil {
-        return errors.Wrapf(err, "failed to create bucket %s", pluginID)
-    }
-    err = bkt.Put([]byte(id), []byte(layer))
-    if err != nil {
-        return errors.Wrapf(err, "failed to store entry %s:%s", id, layer)
-    }
-
-    return nil
-}
-
-func (s *layerFolderStore) Get(id string) (string, error) {
-    bkt := s.tx.Bucket([]byte(pluginID))
-    if bkt == nil {
-        return "", errors.Wrapf(errdefs.ErrNotFound, "bucket %s", pluginID)
-    }
-
-    return string(bkt.Get([]byte(id))), nil
-}
-
-func (s *layerFolderStore) Delete(id string) error {
-    bkt := s.tx.Bucket([]byte(pluginID))
-    if bkt == nil {
-        return errors.Wrapf(errdefs.ErrNotFound, "bucket %s", pluginID)
-    }
-
-    if err := bkt.Delete([]byte(id)); err != nil {
-        return errors.Wrapf(err, "failed to delete entry %s", id)
-    }
-
-    return nil
-}
@@ -10,13 +10,11 @@ import (
     "time"
 
     "github.com/Microsoft/hcsshim"
-    "github.com/boltdb/bolt"
     eventstypes "github.com/containerd/containerd/api/events"
-    containerdtypes "github.com/containerd/containerd/api/types"
     "github.com/containerd/containerd/errdefs"
     "github.com/containerd/containerd/events"
     "github.com/containerd/containerd/log"
-    "github.com/containerd/containerd/metadata"
+    "github.com/containerd/containerd/mount"
    "github.com/containerd/containerd/namespaces"
     "github.com/containerd/containerd/plugin"
     "github.com/containerd/containerd/runtime"
@@ -55,12 +53,6 @@ func New(ic *plugin.InitContext) (interface{}, error) {
     if err := os.MkdirAll(ic.Root, 0700); err != nil {
         return nil, errors.Wrapf(err, "could not create state directory at %s", ic.Root)
     }
-
-    m, err := ic.Get(plugin.MetadataPlugin)
-    if err != nil {
-        return nil, err
-    }
 
     r := &windowsRuntime{
         root:    ic.Root,
         pidPool: newPidPool(),
@@ -70,7 +62,6 @@ func New(ic *plugin.InitContext) (interface{}, error) {
         // TODO(mlaventure): windows needs a stat monitor
         monitor: nil,
         tasks:   runtime.NewTaskList(),
-        db:      m.(*metadata.DB),
     }
 
     // Load our existing containers and kill/delete them. We don't support
@@ -91,7 +82,6 @@ type windowsRuntime struct {
 
     monitor runtime.TaskMonitor
     tasks   *runtime.TaskList
-    db      *metadata.DB
 }
 
 func (r *windowsRuntime) ID() string {
@@ -124,8 +114,14 @@ func (r *windowsRuntime) Create(ctx context.Context, id string, opts runtime.Cre
     if createOpts.TerminateDuration == 0 {
         createOpts.TerminateDuration = defaultTerminateDuration
     }
+    spec.Windows.LayerFolders = append(spec.Windows.LayerFolders, opts.Rootfs[0].Source)
+    parentLayerPaths, err := opts.Rootfs[0].GetParentPaths()
+    if err != nil {
+        return nil, err
+    }
+    spec.Windows.LayerFolders = append(spec.Windows.LayerFolders, parentLayerPaths...)
 
-    return r.newTask(ctx, namespace, id, spec, opts.IO, createOpts)
+    return r.newTask(ctx, namespace, id, opts.Rootfs, spec, opts.IO, createOpts)
 }
 
 func (r *windowsRuntime) Get(ctx context.Context, id string) (runtime.Task, error) {
@@ -209,14 +205,19 @@ func (r *windowsRuntime) Delete(ctx context.Context, t runtime.Task) (*runtime.E
         ns, _ := namespaces.Namespace(ctx)
         serviceCtx := log.WithLogger(context.Background(), log.GetLogger(ctx))
         serviceCtx = namespaces.WithNamespace(serviceCtx, ns)
-        r.serviceTask(serviceCtx, ns, wt.id+"_servicing", wt.spec)
+        r.serviceTask(serviceCtx, ns, wt.id+"_servicing", wt.rootfs, wt.spec)
+    }
+
+    if err := mount.UnmountAll(wt.rootfs[0].Source, 0); err != nil {
+        log.G(ctx).WithError(err).WithField("path", wt.rootfs[0].Source).
+            Warn("failed to unmount rootfs on failure")
     }
 
     // We were never started, return failure
     return rtExit, nil
 }
 
-func (r *windowsRuntime) newTask(ctx context.Context, namespace, id string, spec *specs.Spec, io runtime.IO, createOpts *hcsshimtypes.CreateOptions) (*task, error) {
+func (r *windowsRuntime) newTask(ctx context.Context, namespace, id string, rootfs []mount.Mount, spec *specs.Spec, io runtime.IO, createOpts *hcsshimtypes.CreateOptions) (*task, error) {
     var (
         err  error
         pset *pipeSet
@@ -241,6 +242,18 @@ func (r *windowsRuntime) newTask(ctx context.Context, namespace, id string, spec
         }
     }()
 
+    if err := mount.All(rootfs, ""); err != nil {
+        return nil, errors.Wrap(err, "failed to mount rootfs")
+    }
+    defer func() {
+        if err != nil {
+            if err := mount.UnmountAll(rootfs[0].Source, 0); err != nil {
+                log.G(ctx).WithError(err).WithField("path", rootfs[0].Source).
+                    Warn("failed to unmount rootfs on failure")
+            }
+        }
+    }()
+
     var (
         conf *hcsshim.ContainerConfig
         nsid = namespace + "-" + id
@@ -248,31 +261,6 @@ func (r *windowsRuntime) newTask(ctx context.Context, namespace, id string, spec
     if conf, err = newWindowsContainerConfig(ctx, hcsshimOwner, nsid, spec); err != nil {
         return nil, err
     }
-    defer func() {
-        if err != nil {
-            removeLayer(ctx, conf.LayerFolderPath)
-        }
-    }()
-
-    // TODO: remove this once we have a windows snapshotter
-    // Store the LayerFolder in the db so we can clean it if we die
-    if err = r.db.Update(func(tx *bolt.Tx) error {
-        s := newLayerFolderStore(tx)
-        return s.Create(nsid, conf.LayerFolderPath)
-    }); err != nil {
-        return nil, err
-    }
-    defer func() {
-        if err != nil {
-            if dbErr := r.db.Update(func(tx *bolt.Tx) error {
-                s := newLayerFolderStore(tx)
-                return s.Delete(nsid)
-            }); dbErr != nil {
-                log.G(ctx).WithField("id", id).
-                    Error("failed to remove key from metadata")
-            }
-        }
-    }()
 
     ctr, err := hcsshim.CreateContainer(nsid, conf)
     if err != nil {
@@ -301,6 +289,7 @@ func (r *windowsRuntime) newTask(ctx context.Context, namespace, id string, spec
         hyperV:            spec.Windows.HyperV != nil,
         publisher:         r.publisher,
         rwLayer:           conf.LayerFolderPath,
+        rootfs:            rootfs,
         pidPool:           r.pidPool,
         hcsContainer:      ctr,
         terminateDuration: createOpts.TerminateDuration,
@@ -312,14 +301,6 @@ func (r *windowsRuntime) newTask(ctx context.Context, namespace, id string, spec
     }
     r.tasks.Add(ctx, t)
 
-    var rootfs []*containerdtypes.Mount
-    for _, l := range append([]string{t.rwLayer}, spec.Windows.LayerFolders...) {
-        rootfs = append(rootfs, &containerdtypes.Mount{
-            Type:   "windows-layer",
-            Source: l,
-        })
-    }
-
     r.publisher.Publish(ctx,
         runtime.TaskCreateEventTopic,
         &eventstypes.TaskCreate{
@@ -330,8 +311,8 @@ func (r *windowsRuntime) newTask(ctx context.Context, namespace, id string, spec
             Stderr:   io.Stderr,
             Terminal: io.Terminal,
         },
         Pid: t.pid,
-        Rootfs: rootfs,
+        //???Rootfs: rootfs,
         // TODO: what should be in Bundle for windows?
     })
 
@@ -360,34 +341,10 @@ func (r *windowsRuntime) cleanup(ctx context.Context) {
             container.Wait()
         }
         container.Close()
-
-        // TODO: remove this once we have a windows snapshotter
-        var layerFolderPath string
-        if err := r.db.View(func(tx *bolt.Tx) error {
-            s := newLayerFolderStore(tx)
-            l, e := s.Get(p.ID)
-            if err == nil {
-                layerFolderPath = l
-            }
-            return e
-        }); err == nil && layerFolderPath != "" {
-            removeLayer(ctx, layerFolderPath)
-            if dbErr := r.db.Update(func(tx *bolt.Tx) error {
-                s := newLayerFolderStore(tx)
-                return s.Delete(p.ID)
-            }); dbErr != nil {
-                log.G(ctx).WithField("id", p.ID).
-                    Error("failed to remove key from metadata")
-            }
-        } else {
-            log.G(ctx).WithField("id", p.ID).
-                Debug("key not found in metadata, R/W layer may be leaked")
-        }
     }
 }
 
-func (r *windowsRuntime) serviceTask(ctx context.Context, namespace, id string, spec *specs.Spec) {
+func (r *windowsRuntime) serviceTask(ctx context.Context, namespace, id string, rootfs []mount.Mount, spec *specs.Spec) {
     var (
         err error
         t   *task
|
|||||||
}
|
}
|
||||||
)
|
)
|
||||||
|
|
||||||
t, err = r.newTask(ctx, namespace, id, spec, io, createOpts)
|
t, err = r.newTask(ctx, namespace, id, rootfs, spec, io, createOpts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.G(ctx).WithError(err).WithField("id", id).
|
log.G(ctx).WithError(err).WithField("id", id).
|
||||||
Warn("failed to created servicing task")
|
Warn("failed to created servicing task")
|
||||||
|
@@ -11,6 +11,7 @@ import (
     eventstypes "github.com/containerd/containerd/api/events"
     "github.com/containerd/containerd/errdefs"
     "github.com/containerd/containerd/events"
+    "github.com/containerd/containerd/mount"
     "github.com/containerd/containerd/runtime"
     "github.com/containerd/containerd/windows/hcsshimtypes"
     "github.com/containerd/typeurl"
|
|||||||
|
|
||||||
publisher events.Publisher
|
publisher events.Publisher
|
||||||
rwLayer string
|
rwLayer string
|
||||||
|
rootfs []mount.Mount
|
||||||
|
|
||||||
pidPool *pidPool
|
pidPool *pidPool
|
||||||
hcsContainer hcsshim.Container
|
hcsContainer hcsshim.Container
|
||||||
@ -406,6 +408,5 @@ func (t *task) cleanup() {
|
|||||||
for _, p := range t.processes {
|
for _, p := range t.processes {
|
||||||
t.removeProcessNL(p.id)
|
t.removeProcessNL(p.id)
|
||||||
}
|
}
|
||||||
removeLayer(context.Background(), t.rwLayer)
|
|
||||||
t.Unlock()
|
t.Unlock()
|
||||||
}
|
}
|
||||||