containerd/cmd/ctr/run.go
Stephen J Day 193abed96e
content: unify provider and ingester
The split between provider and ingester was a long-standing division
reflecting the client-side use cases. For the most part, we were
differentiating these for the algorithms that operate on them, but it made
instantiation and use of the types challenging. On the server side, this
distinction is generally less important. This change unifies these types,
and in the process we get a few benefits.
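
Concretely, the unified type can be thought of as the composition of the
two previous interfaces. The sketch below is illustrative only; the exact
method set and signatures in the tree may differ:

    // Illustrative sketch, not the exact interface: a Provider reads
    // content by digest, an Ingester writes new content by ref.
    type Provider interface {
        Reader(ctx context.Context, dgst digest.Digest) (io.ReadCloser, error)
    }

    type Ingester interface {
        Writer(ctx context.Context, ref string) (io.WriteCloser, error)
    }

    // Store is the unified type: one value now serves both use cases.
    type Store interface {
        Provider
        Ingester
    }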

The first is that we now access the content store entirely over GRPC.
This was the initial intent, and we have now satisfied this goal
completely. There are a few issues around listing content and getting
status, but we resolve these with simple streaming and regexp filters.
More can probably be done to polish this, but the result is clean.
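
For example, a client might enumerate in-flight ingests by streaming
statuses filtered with a regular expression. The request and field names
below are assumptions for illustration, not the exact generated API:

    // Hypothetical streaming status call with a regexp filter.
    stream, err := client.Status(ctx, &contentapi.StatusRequest{
        Regexp: "^default/.*", // only refs matching this pattern
    })
    if err != nil {
        return err
    }
    for {
        status, err := stream.Recv()
        if err != nil {
            break // io.EOF terminates the stream
        }
        fmt.Printf("%s: %d/%d\n", status.Ref, status.Offset, status.Total)
    }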

Several other content-oriented methods were polished in the process of
unification. We have now properly separated out the `Abort` method to
cancel ongoing or stalled ingest processes. We have also replaced the
`Active` method with a single status method.
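
On the Go side, the reworked ingest-management surface looks roughly like
the following; the interface name and exact signatures are assumptions
for illustration:

    // Approximate shape of the reworked ingest management methods.
    type IngestManager interface {
        // Status reports progress for a single ongoing ingest by ref.
        Status(ctx context.Context, ref string) (Status, error)
        // Abort cancels an ongoing or stalled ingest.
        Abort(ctx context.Context, ref string) error
    }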

The transition went extremely smoothly. Once the clients were updated to
use the new methods, everything worked as expected on the first compile.

Signed-off-by: Stephen J Day <stephen.day@docker.com>
2017-05-10 17:05:53 -07:00


package main

import (
    gocontext "context"
    "encoding/json"
    "fmt"
    "os"
    "runtime"

    "github.com/Sirupsen/logrus"
    "github.com/containerd/console"
    "github.com/containerd/containerd/api/services/execution"
    rootfsapi "github.com/containerd/containerd/api/services/rootfs"
    "github.com/containerd/containerd/images"
    "github.com/opencontainers/image-spec/identity"
    ocispec "github.com/opencontainers/image-spec/specs-go/v1"
    "github.com/pkg/errors"
    "github.com/urfave/cli"
    "google.golang.org/grpc"
    "google.golang.org/grpc/codes"
)

var runCommand = cli.Command{
    Name:      "run",
    Usage:     "run a container",
    ArgsUsage: "IMAGE [COMMAND] [ARG...]",
    Flags: []cli.Flag{
        cli.StringFlag{
            Name:  "id",
            Usage: "id of the container",
        },
        cli.BoolFlag{
            Name:  "tty,t",
            Usage: "allocate a TTY for the container",
        },
        cli.StringFlag{
            Name:  "rootfs",
            Usage: "path to rootfs",
        },
        cli.StringFlag{
            Name:  "runtime",
            Usage: "runtime name (linux, windows, vmware-linux)",
            Value: "linux",
        },
        cli.StringFlag{
            Name:  "runtime-config",
            Usage: "set the OCI config file for the container",
        },
        cli.BoolFlag{
            Name:  "readonly",
            Usage: "set the container's filesystem as readonly",
        },
    },
    Action: func(context *cli.Context) error {
        var (
            err         error
            resp        *rootfsapi.MountResponse
            imageConfig ocispec.Image
            ctx         = gocontext.Background()
            id          = context.String("id")
        )
        if id == "" {
            return errors.New("container id must be provided")
        }
        containers, err := getExecutionService(context)
        if err != nil {
            return err
        }
        tmpDir, err := getTempDir(id)
        if err != nil {
            return err
        }
        defer os.RemoveAll(tmpDir)
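        // Subscribe to the event stream up front so the container's exit
        // event cannot be missed between start and wait.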
        events, err := containers.Events(ctx, &execution.EventsRequest{})
        if err != nil {
            return err
        }
        content, err := getContentStore(context)
        if err != nil {
            return err
        }
        rootfsClient, err := getRootFSService(context)
        if err != nil {
            return err
        }
        imageStore, err := getImageStore(context)
        if err != nil {
            return errors.Wrap(err, "failed resolving image store")
        }
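        // Resolve the image and prepare a snapshot-backed rootfs via the
        // rootfs service, unless an explicit rootfs path was given (or we
        // are on Windows, which has no snapshotter yet).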
        if runtime.GOOS != "windows" && context.String("rootfs") == "" {
            ref := context.Args().First()
            image, err := imageStore.Get(ctx, ref)
            if err != nil {
                return errors.Wrapf(err, "could not resolve %q", ref)
            }
            // let's close out our db and tx so we don't hold the lock whilst running.
            diffIDs, err := image.RootFS(ctx, content)
            if err != nil {
                return err
            }
            if _, err := rootfsClient.Prepare(gocontext.TODO(), &rootfsapi.PrepareRequest{
                Name:    id,
                ChainID: identity.ChainID(diffIDs),
            }); err != nil {
                if grpc.Code(err) != codes.AlreadyExists {
                    return err
                }
            }
            resp, err = rootfsClient.Mounts(gocontext.TODO(), &rootfsapi.MountsRequest{
                Name: id,
            })
            if err != nil {
                return err
            }
            ic, err := image.Config(ctx, content)
            if err != nil {
                return err
            }
            switch ic.MediaType {
            case ocispec.MediaTypeImageConfig, images.MediaTypeDockerSchema2Config:
                r, err := content.Reader(ctx, ic.Digest)
                if err != nil {
                    return err
                }
                if err := json.NewDecoder(r).Decode(&imageConfig); err != nil {
                    r.Close()
                    return err
                }
                r.Close()
            default:
                return fmt.Errorf("unknown image config media type %s", ic.MediaType)
            }
        } else {
            // TODO: get the image / rootfs through the API once windows has a snapshotter
        }
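        // Build the create request from the image config, the CLI flags,
        // and the temporary state directory.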
        create, err := newCreateRequest(context, &imageConfig.Config, id, tmpDir, context.String("rootfs"))
        if err != nil {
            return err
        }
        if resp != nil {
            create.Rootfs = resp.Mounts
        }
        var con console.Console
        if create.Terminal {
            con = console.Current()
            defer con.Reset()
            if err := con.SetRaw(); err != nil {
                return err
            }
        }
        fwg, err := prepareStdio(create.Stdin, create.Stdout, create.Stderr, create.Terminal)
        if err != nil {
            return err
        }
        response, err := containers.Create(ctx, create)
        if err != nil {
            return err
        }
        if create.Terminal {
            if err := handleConsoleResize(ctx, containers, response.ID, response.Pid, con); err != nil {
                logrus.WithError(err).Error("console resize")
            }
        } else {
            sigc := forwardAllSignals(containers, id)
            defer stopCatch(sigc)
        }
        if _, err := containers.Start(ctx, &execution.StartRequest{
            ID: response.ID,
        }); err != nil {
            return err
        }
        // Ensure we read all io only if container started successfully.
        defer fwg.Wait()
        status, err := waitContainer(events, response.ID, response.Pid)
        if err != nil {
            return err
        }
        if _, err := containers.Delete(ctx, &execution.DeleteRequest{
            ID: response.ID,
        }); err != nil {
            return err
        }
        if status != 0 {
            return cli.NewExitError("", int(status))
        }
        return nil
    },
}