containerd-shim: Do not remount root MS_SLAVE
Mounting as MS_SLAVE here breaks use cases which want to use rootPropagation=shared in order to expose mounts to the host (and to other containers binding the same subtree). Mounting as e.g. MS_SHARED would be pointless in this context, so simply drop the remount. Having done this, we also need to arrange to clean up the mounts manually on delete, so do so. Note that runc will also set up root as required by rootPropagation, defaulting to MS_PRIVATE.

Fixes #1132.

Signed-off-by: Ian Campbell <ian.campbell@docker.com>
commit 8b365117a2
parent 0b3e572b85
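For context, the call this commit removes is the recursive slave remount shown in the sketch below (it is the deleted setupRoot from the diff). Remounting "/" with MS_SLAVE|MS_REC makes every mount point in the shim's namespace a slave: mounts propagate in from the host but the container's own mounts never propagate back out, which is exactly what rootPropagation=shared needs to avoid. Since runc already applies whatever propagation the OCI spec requests (MS_PRIVATE by default), the shim-side remount adds nothing.

package main

import "golang.org/x/sys/unix"

// setupRoot is the behaviour being removed: a recursive slave remount
// of the shim's root. Slave propagation receives mount events from the
// host but never propagates the container's mounts back, defeating
// rootPropagation=shared.
func setupRoot() error {
	return unix.Mount("", "/", "", unix.MS_SLAVE|unix.MS_REC, "")
}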
@@ -67,9 +67,6 @@ func main() {
 	if err != nil {
 		return err
 	}
-	if err := setupRoot(); err != nil {
-		return err
-	}
 	path, err := os.Getwd()
 	if err != nil {
 		return err
@@ -10,7 +10,6 @@ import (
 	"google.golang.org/grpc/credentials"

 	"golang.org/x/net/context"
-	"golang.org/x/sys/unix"

 	"github.com/containerd/containerd/reaper"
 	"github.com/containerd/containerd/sys"
@@ -33,11 +32,6 @@ func setupSignals() (chan os.Signal, error) {
 	return signals, nil
 }

-// setupRoot sets up the root as the shim is started in its own mount namespace
-func setupRoot() error {
-	return unix.Mount("", "/", "", unix.MS_SLAVE|unix.MS_REC, "")
-}
-
 func newServer() *grpc.Server {
 	return grpc.NewServer(grpc.Creds(NewUnixSocketCredentils(0, 0)))
 }
@@ -23,11 +23,6 @@ func setupSignals() (chan os.Signal, error) {
 	return signals, nil
 }

-// setupRoot is a no op except on Linux
-func setupRoot() error {
-	return nil
-}
-
 func newServer() *grpc.Server {
 	return grpc.NewServer()
 }
@@ -50,6 +50,8 @@ type initProcess struct {
 	closers      []io.Closer
 	stdin        io.Closer
 	stdio        stdio
+	rootfs       string
+	nrRootMounts int // Number of rootfs overmounts
 }

 func newInitProcess(context context.Context, path, namespace string, r *shimapi.CreateTaskRequest) (*initProcess, error) {
@@ -64,15 +66,27 @@ func newInitProcess(context context.Context, path, namespace string, r *shimapi.
 		}
 		options = *v.(*runcopts.CreateOptions)
 	}
+
+	rootfs := filepath.Join(path, "rootfs")
+	// count the number of successful mounts so we can undo
+	// what was actually done rather than what should have been
+	// done.
+	nrRootMounts := 0
 	for _, rm := range r.Rootfs {
 		m := &mount.Mount{
 			Type:    rm.Type,
 			Source:  rm.Source,
 			Options: rm.Options,
 		}
-		if err := m.Mount(filepath.Join(path, "rootfs")); err != nil {
+		if err := m.Mount(rootfs); err != nil {
 			return nil, errors.Wrapf(err, "failed to mount rootfs component %v", m)
 		}
+		nrRootMounts++
+	}
+	cleanupMounts := func() {
+		if err2 := mount.UnmountN(rootfs, 0, nrRootMounts); err2 != nil {
+			log.G(context).WithError(err2).Warn("Failed to cleanup rootfs mount")
+		}
 	}
 	runtime := &runc.Runc{
 		Command: r.Runtime,
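The counting in this hunk is the heart of the cleanup story: every rootfs component is mounted onto the same directory, so successful mounts stack on a single mount point, and only the mounts that actually happened are counted. A minimal standalone sketch of the same pattern follows; mountStack, layers, and target are hypothetical stand-ins for the loop over r.Rootfs above, not part of the commit.

package main

import (
	"github.com/containerd/containerd/mount"
	"github.com/pkg/errors"
)

// mountStack overlays each component onto the same target directory and
// returns a cleanup func that undoes only the mounts which actually
// succeeded, mirroring the nrRootMounts/cleanupMounts pattern above.
func mountStack(layers []mount.Mount, target string) (func() error, error) {
	nr := 0
	for _, m := range layers {
		if err := m.Mount(target); err != nil {
			// unwind the partial stack before reporting the failure
			mount.UnmountN(target, 0, nr)
			return nil, errors.Wrapf(err, "failed to mount rootfs component %v", m)
		}
		nr++ // count only mounts that actually happened
	}
	return func() error { return mount.UnmountN(target, 0, nr) }, nil
}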
@@ -91,6 +105,8 @@ func newInitProcess(context context.Context, path, namespace string, r *shimapi.
 			stderr:   r.Stderr,
 			terminal: r.Terminal,
 		},
+		rootfs:       rootfs,
+		nrRootMounts: nrRootMounts,
 	}
 	var (
 		err error
@@ -99,12 +115,14 @@ func newInitProcess(context context.Context, path, namespace string, r *shimapi.
 	)
 	if r.Terminal {
 		if socket, err = runc.NewConsoleSocket(filepath.Join(path, "pty.sock")); err != nil {
+			cleanupMounts()
 			return nil, errors.Wrap(err, "failed to create OCI runtime console socket")
 		}
 		defer os.Remove(socket.Path())
 	} else {
 		// TODO: get uid/gid
 		if io, err = runc.NewPipeIO(0, 0); err != nil {
+			cleanupMounts()
 			return nil, errors.Wrap(err, "failed to create OCI runtime io pipes")
 		}
 		p.io = io
@@ -124,6 +142,7 @@ func newInitProcess(context context.Context, path, namespace string, r *shimapi.
 			NoSubreaper: true,
 		}
 		if _, err := p.runtime.Restore(context, r.ID, r.Bundle, opts); err != nil {
+			cleanupMounts()
 			return nil, p.runtimeError(err, "OCI runtime restore failed")
 		}
 	} else {
@@ -137,6 +156,7 @@ func newInitProcess(context context.Context, path, namespace string, r *shimapi.
 			opts.ConsoleSocket = socket
 		}
 		if err := p.runtime.Create(context, r.ID, r.Bundle, opts); err != nil {
+			cleanupMounts()
 			return nil, p.runtimeError(err, "OCI runtime create failed")
 		}
 	}
@@ -152,14 +172,17 @@ func newInitProcess(context context.Context, path, namespace string, r *shimapi.
 	if socket != nil {
 		console, err := socket.ReceiveMaster()
 		if err != nil {
+			cleanupMounts()
 			return nil, errors.Wrap(err, "failed to retrieve console master")
 		}
 		p.console = console
 		if err := copyConsole(context, console, r.Stdin, r.Stdout, r.Stderr, &p.WaitGroup, &copyWaitGroup); err != nil {
+			cleanupMounts()
 			return nil, errors.Wrap(err, "failed to start console copy")
 		}
 	} else {
 		if err := copyPipes(context, io, r.Stdin, r.Stdout, r.Stderr, &p.WaitGroup, &copyWaitGroup); err != nil {
+			cleanupMounts()
 			return nil, errors.Wrap(err, "failed to start io pipe copy")
 		}
 	}
@@ -167,6 +190,7 @@ func newInitProcess(context context.Context, path, namespace string, r *shimapi.
 	copyWaitGroup.Wait()
 	pid, err := runc.ReadPidFile(pidFile)
 	if err != nil {
+		cleanupMounts()
 		return nil, errors.Wrap(err, "failed to retrieve OCI runtime container pid")
 	}
 	p.pid = pid
@@ -229,7 +253,16 @@ func (p *initProcess) Delete(context context.Context) error {
 		}
 		p.io.Close()
 	}
-	return p.runtimeError(err, "OCI runtime delete failed")
+	err = p.runtimeError(err, "OCI runtime delete failed")
+
+	if err2 := mount.UnmountN(p.rootfs, 0, p.nrRootMounts); err2 != nil {
+		log.G(context).WithError(err2).Warn("Failed to cleanup rootfs mount")
+		if err == nil {
+			err = errors.Wrap(err2, "Failed rootfs umount")
+		}
+	}
+
+	return err
 }

 func (p *initProcess) Resize(ws console.WinSize) error {
@@ -22,3 +22,19 @@ func MountAll(mounts []Mount, target string) error {
 	}
 	return nil
 }
+
+// UnmountN tries to unmount the given mount point nr times, which is
+// useful for undoing a stack of mounts on the same mount
+// point. Returns the first error encountered, but always attempts the
+// full nr umounts.
+func UnmountN(mount string, flags, nr int) error {
+	var err error
+	for i := 0; i < nr; i++ {
+		if err2 := Unmount(mount, flags); err2 != nil {
+			if err == nil {
+				err = err2
+			}
+		}
+	}
+	return err
+}
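A hedged usage sketch of the new helper: flags of 0 means a plain umount, and nr is the depth of the stack on that mount point (in the shim, the nrRootMounts counted during create). The target path here is hypothetical.

package main

import (
	"log"

	"github.com/containerd/containerd/mount"
)

func main() {
	// Peel three stacked mounts off one mount point; UnmountN keeps
	// going through all nr attempts and reports the first error.
	if err := mount.UnmountN("/tmp/rootfs", 0, 3); err != nil {
		log.Printf("rootfs cleanup: %v", err)
	}
}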