
The split between provider and ingester was a long-standing division reflecting client-side use cases. For the most part, we were differentiating these for the algorithms that operate on them, but that made instantiation and use of the types challenging. On the server side, the distinction is generally less important. This change unifies the two types, and in the process we gain a few benefits.

The first is that the content store is now accessed entirely over GRPC. This was the initial intent, and that goal is now fully satisfied. There were a few issues around listing content and getting status, but we resolve them with simple streaming and regexp filters. More can probably be done to polish this, but the result is clean.

Several other content-oriented methods were polished in the process of unification. We have now properly separated out the `Abort` method to cancel ongoing or stalled ingest processes, and we have replaced the `Active` method with a single `Status` method. The transition went extremely smoothly: once the clients were updated to use the new methods, everything worked as expected on the first compile.

Signed-off-by: Stephen J Day <stephen.day@docker.com>
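To make the new surface concrete, here is a minimal sketch of how a client might cancel stalled ingests through the unified store. It assumes the `Status(ctx, filter)` and `Abort(ctx, ref)` signatures implied by the description above and by the pull command below; the `abortStalled` helper, the `maxIdle` parameter, and the `content.Store` typing are illustrative, not part of this change.

// Sketch only: a hypothetical helper built on the unified store described
// above. Status(ctx, "") mirrors the call used by the pull command below;
// the Abort signature and the content.Store type are assumptions.
package main

import (
	"context"
	"time"

	"github.com/containerd/containerd/content"
)

// abortStalled lists active ingests through the unified store and aborts any
// that have not been updated within maxIdle.
func abortStalled(ctx context.Context, cs content.Store, maxIdle time.Duration) error {
	active, err := cs.Status(ctx, "") // an empty filter matches all active ingests
	if err != nil {
		return err
	}
	for _, st := range active {
		if time.Since(st.UpdatedAt) > maxIdle {
			if err := cs.Abort(ctx, st.Ref); err != nil {
				return err
			}
		}
	}
	return nil
}

The empty filter mirrors the cs.Status(ctx, "") call in the progress loop of the pull command below, which lists every active ingest.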
227 lines · 5.2 KiB · Go
package main

import (
	"context"
	"encoding/json"
	"os"
	"text/tabwriter"
	"time"

	rootfsapi "github.com/containerd/containerd/api/services/rootfs"
	"github.com/containerd/containerd/content"
	"github.com/containerd/containerd/images"
	"github.com/containerd/containerd/log"
	"github.com/containerd/containerd/progress"
	"github.com/containerd/containerd/remotes"
	rootfsservice "github.com/containerd/containerd/services/rootfs"
	"github.com/opencontainers/image-spec/identity"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
	"github.com/urfave/cli"
	"golang.org/x/sync/errgroup"
)

var pullCommand = cli.Command{
	Name:      "pull",
	Usage:     "pull an image from a remote",
	ArgsUsage: "[flags] <ref>",
	Description: `Fetch and prepare an image for use in containerd.

After pulling an image, it should be ready to use the same reference in a run
command. As part of this process, we do the following:

1. Fetch all resources into containerd.
2. Prepare the snapshot filesystem with the pulled resources.
3. Register metadata for the image.
`,
	Flags: registryFlags,
	Action: func(clicontext *cli.Context) error {
		var (
			ref = clicontext.Args().First()
		)

		ctx, cancel := appContext()
		defer cancel()

		cs, err := resolveContentStore(clicontext)
		if err != nil {
			return err
		}

		imageStore, err := resolveImageStore(clicontext)
		if err != nil {
			return err
		}

		resolver, err := getResolver(ctx, clicontext)
		if err != nil {
			return err
		}
		ongoing := newJobs()

		eg, ctx := errgroup.WithContext(ctx)

		var resolvedImageName string
		resolved := make(chan struct{})
		eg.Go(func() error {
			ongoing.add(ref)
			name, desc, fetcher, err := resolver.Resolve(ctx, ref)
			if err != nil {
				log.G(ctx).WithError(err).Error("failed to resolve")
				return err
			}
			log.G(ctx).WithField("image", name).Debug("fetching")
			resolvedImageName = name
			close(resolved)

			eg.Go(func() error {
				return imageStore.Put(ctx, name, desc)
			})

			return images.Dispatch(ctx,
				images.Handlers(images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
					ongoing.add(remotes.MakeRefKey(ctx, desc))
					return nil, nil
				}),
					remotes.FetchHandler(cs, fetcher),
					images.ChildrenHandler(cs)),
				desc)

		})

		errs := make(chan error)
		go func() {
			defer close(errs)
			errs <- eg.Wait()
		}()

		defer func() {
			// we need new ctx here
			ctx, cancel := appContext()
			defer cancel()
			// TODO(stevvooe): This section unpacks the layers and resolves the
			// root filesystem chainid for the image. For now, we just print
			// it, but we should keep track of this in the metadata storage.

			image, err := imageStore.Get(ctx, resolvedImageName)
			if err != nil {
				log.G(ctx).Fatal(err)
			}

			p, err := content.ReadBlob(ctx, cs, image.Target.Digest)
			if err != nil {
				log.G(ctx).Fatal(err)
			}

			var manifest ocispec.Manifest
			if err := json.Unmarshal(p, &manifest); err != nil {
				log.G(ctx).Fatal(err)
			}

			conn, err := connectGRPC(clicontext)
			if err != nil {
				log.G(ctx).Fatal(err)
			}
			rootfs := rootfsservice.NewUnpackerFromClient(rootfsapi.NewRootFSClient(conn))

			log.G(ctx).Info("unpacking rootfs")
			chainID, err := rootfs.Unpack(ctx, manifest.Layers)
			if err != nil {
				log.G(ctx).Fatal(err)
			}

			diffIDs, err := image.RootFS(ctx, cs)
			if err != nil {
				log.G(ctx).WithError(err).Fatal("failed resolving rootfs")
			}

			expectedChainID := identity.ChainID(diffIDs)
			if expectedChainID != chainID {
				log.G(ctx).Fatal("rootfs service did not match chainid")
			}
		}()

		var (
			ticker = time.NewTicker(100 * time.Millisecond)
			fw     = progress.NewWriter(os.Stdout)
			start  = time.Now()
			done   bool
		)
		defer ticker.Stop()

		for {
			select {
			case <-ticker.C:
				fw.Flush()

				tw := tabwriter.NewWriter(fw, 1, 8, 1, ' ', 0)
				js := ongoing.jobs()
				statuses := map[string]statusInfo{}

				activeSeen := map[string]struct{}{}
				if !done {
					active, err := cs.Status(ctx, "")
					if err != nil {
						log.G(ctx).WithError(err).Error("active check failed")
						continue
					}
					// update status of active entries!
					for _, active := range active {
						statuses[active.Ref] = statusInfo{
							Ref:       active.Ref,
							Status:    "downloading",
							Offset:    active.Offset,
							Total:     active.Total,
							StartedAt: active.StartedAt,
							UpdatedAt: active.UpdatedAt,
						}
						activeSeen[active.Ref] = struct{}{}
					}
				}

				// now, update the items in jobs that are not in active
				for _, j := range js {
					if _, ok := activeSeen[j]; ok {
						continue
					}
					status := "done"

					if j == ref {
						select {
						case <-resolved:
							status = "resolved"
						default:
							status = "resolving"
						}
					}

					statuses[j] = statusInfo{
						Ref:    j,
						Status: status, // for now!
					}
				}

				var ordered []statusInfo
				for _, j := range js {
					ordered = append(ordered, statuses[j])
				}

				display(tw, ordered, start)
				tw.Flush()

				if done {
					fw.Flush()
					return nil
				}
			case err := <-errs:
				if err != nil {
					return err
				}
				done = true
			case <-ctx.Done():
				done = true // allow ui to update once more
			}
		}

	},
}