Allow checkpointing and restoring a container with a console

runc already supports this case, so we just need to run it with the
proper options.

Signed-off-by: Andrei Vagin <avagin@virtuozzo.com>
Author: Andrei Vagin <avagin@virtuozzo.com>
Date:   2018-04-24 20:10:25 +03:00
commit 60daa414db
parent 0846d6f8e9
2 changed files with 40 additions and 19 deletions
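
At the runc level, restoring with a console works like creating with one: the caller passes a console socket, runc restores the checkpoint and sends the pty master back over that socket, and the caller picks it up with ReceiveMaster. Below is a minimal sketch of that handshake using the go-runc API the diff relies on (runc.NewTempConsoleSocket, RestoreOpts.ConsoleSocket, Socket.ReceiveMaster); the restoreWithConsole helper, the bare runc.Runc value, and its id/bundle arguments are illustrative only, not containerd's actual shim code.

package restoresketch

import (
	"context"

	"github.com/containerd/go-runc"
)

// restoreWithConsole sketches the console-socket handshake that this
// commit wires into the restore path (names here are hypothetical).
func restoreWithConsole(ctx context.Context, id, bundle string) error {
	r := &runc.Runc{} // wrapper around the runc binary, default settings

	// Create a temporary socket that runc will send the pty master to.
	socket, err := runc.NewTempConsoleSocket()
	if err != nil {
		return err
	}
	defer socket.Close()

	// Point the restore options at the socket and run the restore.
	opts := &runc.RestoreOpts{}
	opts.ConsoleSocket = socket
	if _, err := r.Restore(ctx, id, bundle, opts); err != nil {
		return err
	}

	// Receive the master end of the restored container's terminal.
	master, err := socket.ReceiveMaster()
	if err != nil {
		return err
	}
	defer master.Close()
	// ...copy I/O between master and the task's stdio from here on...
	return nil
}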


@@ -69,25 +69,24 @@ func HandleConsoleResize(ctx gocontext.Context, task resizer, con console.Console) error {
 // NewTask creates a new task
 func NewTask(ctx gocontext.Context, client *containerd.Client, container containerd.Container, checkpoint string, tty, nullIO bool, ioOpts []cio.Opt, opts ...containerd.NewTaskOpts) (containerd.Task, error) {
 	stdio := cio.NewCreator(append([]cio.Opt{cio.WithStdio}, ioOpts...)...)
-	if checkpoint == "" {
-		ioCreator := stdio
+	if checkpoint != "" {
+		im, err := client.GetImage(ctx, checkpoint)
+		if err != nil {
+			return nil, err
+		}
+		opts = append(opts, containerd.WithTaskCheckpoint(im))
+	}
+	ioCreator := stdio
+	if tty {
+		ioCreator = cio.NewCreator(append([]cio.Opt{cio.WithStdio, cio.WithTerminal}, ioOpts...)...)
+	}
+	if nullIO {
 		if tty {
-			ioCreator = cio.NewCreator(append([]cio.Opt{cio.WithStdio, cio.WithTerminal}, ioOpts...)...)
+			return nil, errors.New("tty and null-io cannot be used together")
 		}
-		if nullIO {
-			if tty {
-				return nil, errors.New("tty and null-io cannot be used together")
-			}
-			ioCreator = cio.NullIO
-		}
-		return container.NewTask(ctx, ioCreator, opts...)
+		ioCreator = cio.NullIO
 	}
-	im, err := client.GetImage(ctx, checkpoint)
-	if err != nil {
-		return nil, err
-	}
-	opts = append(opts, containerd.WithTaskCheckpoint(im))
-	return container.NewTask(ctx, stdio, opts...)
+	return container.NewTask(ctx, ioCreator, opts...)
 }
 
 func getNewTaskOpts(context *cli.Context) []containerd.NewTaskOpts {
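
With the ioCreator selection no longer skipped on the checkpoint path, ctr can start a restored task with a terminal attached. A rough usage sketch, written as if it sat in the same package as NewTask; the startFromCheckpoint helper and the checkpoint image reference are illustrative only.

// startFromCheckpoint restores a task from a checkpoint image and gives it
// a terminal (tty=true, nullIO=false), which the old code path could not do.
func startFromCheckpoint(ctx gocontext.Context, client *containerd.Client, container containerd.Container) (containerd.Task, error) {
	task, err := NewTask(ctx, client, container, "registry.example.com/checkpoints/app:v1", true, false, nil)
	if err != nil {
		return nil, err
	}
	if err := task.Start(ctx); err != nil {
		return nil, err
	}
	return task, nil
}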


@@ -194,10 +194,23 @@ func (s *createdCheckpointState) Start(ctx context.Context) error {
 	s.p.mu.Lock()
 	defer s.p.mu.Unlock()
 	p := s.p
+	sio := p.stdio
+
+	var (
+		err    error
+		socket *runc.Socket
+	)
+	if sio.Terminal {
+		if socket, err = runc.NewTempConsoleSocket(); err != nil {
+			return errors.Wrap(err, "failed to create OCI runtime console socket")
+		}
+		defer socket.Close()
+		s.opts.ConsoleSocket = socket
+	}
+
 	if _, err := s.p.runtime.Restore(ctx, p.id, p.bundle, s.opts); err != nil {
 		return p.runtimeError(err, "OCI runtime restore failed")
 	}
-	sio := p.stdio
 	if sio.Stdin != "" {
 		sc, err := fifo.OpenFifo(ctx, sio.Stdin, syscall.O_WRONLY|syscall.O_NONBLOCK, 0)
 		if err != nil {
@@ -207,7 +220,17 @@ func (s *createdCheckpointState) Start(ctx context.Context) error {
 		p.closers = append(p.closers, sc)
 	}
 	var copyWaitGroup sync.WaitGroup
-	if !sio.IsNull() {
+	if socket != nil {
+		console, err := socket.ReceiveMaster()
+		if err != nil {
+			return errors.Wrap(err, "failed to retrieve console master")
+		}
+		console, err = p.platform.CopyConsole(ctx, console, sio.Stdin, sio.Stdout, sio.Stderr, &p.wg, &copyWaitGroup)
+		if err != nil {
+			return errors.Wrap(err, "failed to start console copy")
+		}
+		p.console = console
+	} else if !sio.IsNull() {
 		if err := copyPipes(ctx, p.io, sio.Stdin, sio.Stdout, sio.Stderr, &p.wg, &copyWaitGroup); err != nil {
 			return errors.Wrap(err, "failed to start io pipe copy")
 		}
@@ -219,7 +242,6 @@ func (s *createdCheckpointState) Start(ctx context.Context) error {
 		return errors.Wrap(err, "failed to retrieve OCI runtime container pid")
 	}
 	p.pid = pid
-
 	return s.transition("running")
 }
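
On the shim side, the received master is handed to p.platform.CopyConsole, which pumps bytes between the restored container's pty and the task's stdio FIFOs. Below is a simplified stand-in for that copy step, using the containerd console and fifo packages the diff relies on; copyConsole and its parameters are illustrative, not the real platform implementation.

package consolecopy

import (
	"context"
	"io"
	"sync"
	"syscall"

	"github.com/containerd/console"
	"github.com/containerd/fifo"
)

// copyConsole wires the restored container's pty master to the task's
// stdin/stdout FIFOs, roughly what platform.CopyConsole does on Linux.
func copyConsole(ctx context.Context, master console.Console, stdin, stdout string, wg *sync.WaitGroup) error {
	if stdin != "" {
		in, err := fifo.OpenFifo(ctx, stdin, syscall.O_RDONLY, 0)
		if err != nil {
			return err
		}
		go io.Copy(master, in) // client input -> container pty
	}
	out, err := fifo.OpenFifo(ctx, stdout, syscall.O_WRONLY, 0)
	if err != nil {
		return err
	}
	wg.Add(1)
	go func() {
		defer wg.Done()
		io.Copy(out, master) // container pty output -> stdout FIFO
		out.Close()
	}()
	return nil
}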