diff --git a/client.go b/client.go
index f017337a5..06fd3d571 100644
--- a/client.go
+++ b/client.go
@@ -21,6 +21,7 @@ import (
 	"github.com/containerd/containerd/images"
 	"github.com/containerd/containerd/remotes"
 	"github.com/containerd/containerd/remotes/docker"
+	"github.com/containerd/containerd/remotes/docker/schema1"
 	contentservice "github.com/containerd/containerd/services/content"
 	"github.com/containerd/containerd/services/diff"
 	diffservice "github.com/containerd/containerd/services/diff"
@@ -234,6 +235,11 @@ type RemoteContext struct {
 	// These handlers always get called before any operation specific
 	// handlers.
 	BaseHandlers []images.Handler
+
+	// ConvertSchema1 is whether to convert Docker registry schema 1
+	// manifests. If this option is false then any image which resolves
+	// to schema 1 will return an error since schema 1 is not supported.
+	ConvertSchema1 bool
 }
 
 func defaultRemoteContext() *RemoteContext {
@@ -252,6 +258,14 @@ func WithPullUnpack(client *Client, c *RemoteContext) error {
 	return nil
 }
 
+// WithSchema1Conversion is used to convert Docker registry schema 1
+// manifests to oci manifests on pull. Without this option schema 1
+// manifests will return a not supported error.
+func WithSchema1Conversion(client *Client, c *RemoteContext) error {
+	c.ConvertSchema1 = true
+	return nil
+}
+
 // WithResolver specifies the resolver to use.
 func WithResolver(resolver remotes.Resolver) RemoteOpts {
 	return func(client *Client, c *RemoteContext) error {
@@ -286,13 +300,30 @@ func (c *Client) Pull(ctx context.Context, ref string, opts ...RemoteOpts) (Imag
 		return nil, err
 	}
 
-	handlers := append(pullCtx.BaseHandlers,
-		remotes.FetchHandler(store, fetcher),
-		images.ChildrenHandler(store),
+	var (
+		schema1Converter *schema1.Converter
+		handler          images.Handler
 	)
-	if err := images.Dispatch(ctx, images.Handlers(handlers...), desc); err != nil {
+	if desc.MediaType == images.MediaTypeDockerSchema1Manifest && pullCtx.ConvertSchema1 {
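+		// The schema 1 converter collects the manifest and its layer blobs
+		// while Dispatch runs; Convert is called after dispatch to write the
+		// OCI config and manifest and return the descriptor stored below.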
+		schema1Converter = schema1.NewConverter(store, fetcher)
+		handler = images.Handlers(append(pullCtx.BaseHandlers, schema1Converter)...)
+	} else {
+		handler = images.Handlers(append(pullCtx.BaseHandlers,
+			remotes.FetchHandler(store, fetcher),
+			images.ChildrenHandler(store))...,
+		)
+	}
+
+	if err := images.Dispatch(ctx, handler, desc); err != nil {
 		return nil, err
 	}
+	if schema1Converter != nil {
+		desc, err = schema1Converter.Convert(ctx)
+		if err != nil {
+			return nil, err
+		}
+	}
+
 	is := c.ImageService()
 	if err := is.Put(ctx, name, desc); err != nil {
 		return nil, err
diff --git a/cmd/dist/fetch.go b/cmd/dist/fetch.go
index 2d2abaa05..e541e575c 100644
--- a/cmd/dist/fetch.go
+++ b/cmd/dist/fetch.go
@@ -74,13 +74,15 @@ func fetch(ctx context.Context, ref string, clicontext *cli.Context) (containerd
 	}()
 
 	h := images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
-		ongoing.add(desc)
+		if desc.MediaType != images.MediaTypeDockerSchema1Manifest {
+			ongoing.add(desc)
+		}
 		return nil, nil
 	})
 
 	log.G(pctx).WithField("image", ref).Debug("fetching")
 
-	img, err := client.Pull(pctx, ref, containerd.WithResolver(resolver), containerd.WithImageHandler(h))
+	img, err := client.Pull(pctx, ref, containerd.WithResolver(resolver), containerd.WithImageHandler(h), containerd.WithSchema1Conversion)
 	stopProgress()
 	if err != nil {
 		return nil, err
@@ -268,7 +270,10 @@ func display(w io.Writer, statuses []statusInfo, start time.Time) {
 		total += status.Offset
 		switch status.Status {
 		case "downloading", "uploading":
-			bar := progress.Bar(float64(status.Offset) / float64(status.Total))
+			var bar progress.Bar
+			if status.Total > 0.0 {
+				bar = progress.Bar(float64(status.Offset) / float64(status.Total))
+			}
 			fmt.Fprintf(w, "%s:\t%s\t%40r\t%8.8s/%s\t\n",
 				status.Ref,
 				status.Status,
diff --git a/images/mediatypes.go b/images/mediatypes.go
index 7b75b8aff..676af4a3e 100644
--- a/images/mediatypes.go
+++ b/images/mediatypes.go
@@ -16,4 +16,6 @@ const (
 	MediaTypeContainerd1Resource         = "application/vnd.containerd.container.resource.tar"
 	MediaTypeContainerd1RW               = "application/vnd.containerd.container.rw.tar"
 	MediaTypeContainerd1CheckpointConfig = "application/vnd.containerd.container.checkpoint.config.v1+json"
+	// Legacy Docker schema1 manifest
+	MediaTypeDockerSchema1Manifest = "application/vnd.docker.distribution.manifest.v1+prettyjws"
 )
diff --git a/remotes/docker/fetcher.go b/remotes/docker/fetcher.go
index ad118fd1d..24c2a6a54 100644
--- a/remotes/docker/fetcher.go
+++ b/remotes/docker/fetcher.go
@@ -67,6 +67,7 @@ func getV2URLPaths(desc ocispec.Descriptor) ([]string, error) {
 
 	switch desc.MediaType {
 	case images.MediaTypeDockerSchema2Manifest, images.MediaTypeDockerSchema2ManifestList,
+		images.MediaTypeDockerSchema1Manifest,
 		ocispec.MediaTypeImageManifest, ocispec.MediaTypeImageIndex:
 		urls = append(urls, path.Join("manifests", desc.Digest.String()))
 	}
diff --git a/remotes/docker/schema1/converter.go b/remotes/docker/schema1/converter.go
new file mode 100644
index 000000000..097520c32
--- /dev/null
+++ b/remotes/docker/schema1/converter.go
@@ -0,0 +1,391 @@
+package schema1
+
+import (
+	"bytes"
+	"compress/gzip"
+	"context"
+	"encoding/base64"
+	"encoding/json"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"strings"
+	"sync"
+	"time"
+
+	"golang.org/x/sync/errgroup"
+
+	"github.com/containerd/containerd/content"
+	"github.com/containerd/containerd/images"
+	"github.com/containerd/containerd/log"
+	"github.com/containerd/containerd/remotes"
+	digest "github.com/opencontainers/go-digest"
+	specs "github.com/opencontainers/image-spec/specs-go"
+	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/pkg/errors"
+)
+
+var (
+	mediaTypeManifest = "application/vnd.docker.distribution.manifest.v1+json"
+)
+
+// Converter converts schema1 manifests to schema2 on fetch
+type Converter struct {
+	contentStore content.Store
+	fetcher      remotes.Fetcher
+
+	pulledManifest *manifest
+
+	mu      sync.Mutex
+	blobMap map[digest.Digest]digest.Digest
+}
+
+func NewConverter(contentStore content.Store, fetcher remotes.Fetcher) *Converter {
+	return &Converter{
+		contentStore: contentStore,
+		fetcher:      fetcher,
+		blobMap:      map[digest.Digest]digest.Digest{},
+	}
+}
+
+func (c *Converter) Handle(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
+	switch desc.MediaType {
+	case images.MediaTypeDockerSchema1Manifest:
+		if err := c.fetchManifest(ctx, desc); err != nil {
+			return nil, err
+		}
+
+		m := c.pulledManifest
+		if len(m.FSLayers) != len(m.History) {
+			return nil, errors.New("invalid schema 1 manifest, history and layer mismatch")
+		}
+		descs := make([]ocispec.Descriptor, 0, len(c.pulledManifest.FSLayers))
+
+		for i := range m.FSLayers {
+			var h v1History
+			if err := json.Unmarshal([]byte(m.History[i].V1Compatibility), &h); err != nil {
+				return nil, err
+			}
+			if !h.ThrowAway {
+				descs = append(descs, ocispec.Descriptor{
+					MediaType: images.MediaTypeDockerSchema2LayerGzip,
+					Digest:    c.pulledManifest.FSLayers[i].BlobSum,
+				})
+			}
+		}
+		// Reverse so the base layer comes first
+		for i := 0; i < len(descs)/2; i++ {
+			j := len(descs) - i - 1
+			if i != j {
+				descs[i], descs[j] = descs[j], descs[i]
+			}
+		}
+		return descs, nil
+	case images.MediaTypeDockerSchema2LayerGzip:
+		if c.pulledManifest == nil {
+			return nil, errors.New("manifest required for schema 1 blob pull")
+		}
+		return nil, c.fetchBlob(ctx, desc)
+	default:
+		return nil, fmt.Errorf("%v not supported for schema 1 manifests", desc.MediaType)
+	}
+}
+
+func (c *Converter) Convert(ctx context.Context) (ocispec.Descriptor, error) {
+	if c.pulledManifest == nil {
+		return ocispec.Descriptor{}, errors.New("missing schema 1 manifest for conversion")
+	}
+
+	img, err := convertSchema1Manifest(c.pulledManifest, c.blobMap)
+	if err != nil {
+		return ocispec.Descriptor{}, errors.Wrap(err, "schema 1 conversion failed")
+	}
+	b, err := json.Marshal(img)
+	if err != nil {
+		return ocispec.Descriptor{}, errors.Wrap(err, "failed to marshal image")
+	}
+
+	config := ocispec.Descriptor{
+		MediaType: ocispec.MediaTypeImageConfig,
+		Digest:    digest.Canonical.FromBytes(b),
+		Size:      int64(len(b)),
+	}
+
+	ref := remotes.MakeRefKey(ctx, config)
+	if err := content.WriteBlob(ctx, c.contentStore, ref, bytes.NewReader(b), config.Size, config.Digest); err != nil {
+		return ocispec.Descriptor{}, errors.Wrap(err, "failed to write config")
+	}
+
+	layers := make([]ocispec.Descriptor, 0)
+	for _, layer := range c.pulledManifest.FSLayers {
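+		// Layers are prepended so that the final list is base layer first,
+		// reversing the newest-first ordering of the schema 1 fsLayers.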
+		// TODO: Use rootfs mapping!
+		info, err := c.contentStore.Info(ctx, layer.BlobSum)
+		if err != nil {
+			if content.IsNotFound(err) {
+				continue
+			}
+			return ocispec.Descriptor{}, errors.Wrap(err, "failed to get blob info")
+		}
+
+		layers = append([]ocispec.Descriptor{{
+			MediaType: ocispec.MediaTypeImageLayerGzip,
+			Digest:    layer.BlobSum,
+			Size:      info.Size,
+		}}, layers...)
+	}
+
+	manifest := ocispec.Manifest{
+		Versioned: specs.Versioned{
+			SchemaVersion: 2,
+		},
+		Config: config,
+		Layers: layers,
+	}
+
+	b, err = json.Marshal(manifest)
+	if err != nil {
+		return ocispec.Descriptor{}, errors.Wrap(err, "failed to marshal manifest")
+	}
+
+	desc := ocispec.Descriptor{
+		MediaType: ocispec.MediaTypeImageManifest,
+		Digest:    digest.Canonical.FromBytes(b),
+		Size:      int64(len(b)),
+	}
+
+	ref = remotes.MakeRefKey(ctx, desc)
+	if err := content.WriteBlob(ctx, c.contentStore, ref, bytes.NewReader(b), desc.Size, desc.Digest); err != nil {
+		return ocispec.Descriptor{}, errors.Wrap(err, "failed to write manifest")
+	}
+
+	return desc, nil
+}
+
+func (c *Converter) fetchManifest(ctx context.Context, desc ocispec.Descriptor) error {
+	log.G(ctx).Debug("fetch schema 1")
+
+	rc, err := c.fetcher.Fetch(ctx, desc)
+	if err != nil {
+		return err
+	}
+
+	b, err := ioutil.ReadAll(rc)
+	rc.Close()
+	if err != nil {
+		return err
+	}
+
+	b, err = stripSignature(b)
+	if err != nil {
+		return err
+	}
+
+	var m manifest
+	if err := json.Unmarshal(b, &m); err != nil {
+		return err
+	}
+	c.pulledManifest = &m
+
+	return nil
+}
+
+func (c *Converter) fetchBlob(ctx context.Context, desc ocispec.Descriptor) error {
+	log.G(ctx).Debug("fetch blob")
+
+	ref := remotes.MakeRefKey(ctx, desc)
+
+	var diffID digest.Digest
+
+	cw, err := c.contentStore.Writer(ctx, ref, desc.Size, desc.Digest)
+	if err != nil {
+		if !content.IsExists(err) {
+			return err
+		}
+
+		// TODO: Check if blob -> diff id mapping already exists
+
+		r, err := c.contentStore.Reader(ctx, desc.Digest)
+		if err != nil {
+			return err
+		}
+		defer r.Close()
+
+		gr, err := gzip.NewReader(r)
+		if err != nil {
+			return err
+		}
+		defer gr.Close()
+
+		diffID, err = digest.Canonical.FromReader(gr)
+		if err != nil {
+			return err
+		}
+	} else {
+		defer cw.Close()
+
+		rc, err := c.fetcher.Fetch(ctx, desc)
+		if err != nil {
+			return err
+		}
+		defer rc.Close()
+
+		eg, _ := errgroup.WithContext(ctx)
+		pr, pw := io.Pipe()
+
+		eg.Go(func() error {
+			gr, err := gzip.NewReader(pr)
+			if err != nil {
+				pr.CloseWithError(err)
+				return err
+			}
+			defer gr.Close()
+
+			diffID, err = digest.Canonical.FromReader(gr)
+			pr.CloseWithError(err)
+			return err
+		})
+
+		eg.Go(func() error {
+			defer pw.Close()
+			return content.Copy(cw, io.TeeReader(rc, pw), desc.Size, desc.Digest)
+		})
+
+		if err := eg.Wait(); err != nil {
+			return err
+		}
+	}
+
+	c.mu.Lock()
+	c.blobMap[desc.Digest] = diffID
+	c.mu.Unlock()
+
+	return nil
+}
+
+type fsLayer struct {
+	BlobSum digest.Digest `json:"blobSum"`
+}
+
+type history struct {
+	V1Compatibility string `json:"v1Compatibility"`
+}
+
+type manifest struct {
+	FSLayers []fsLayer `json:"fsLayers"`
+	History  []history `json:"history"`
+}
+
+type v1History struct {
+	Author          string    `json:"author,omitempty"`
+	Created         time.Time `json:"created"`
+	Comment         string    `json:"comment,omitempty"`
+	ThrowAway       bool      `json:"throwaway,omitempty"`
+	ContainerConfig struct {
+		Cmd []string `json:"Cmd,omitempty"`
+	} `json:"container_config,omitempty"`
+}
+
+func convertSchema1Manifest(m *manifest, blobs map[digest.Digest]digest.Digest) (*ocispec.Image, error) {
+	if len(m.History) == 0 {
+		return nil, errors.New("no history")
+	}
+
+	var img ocispec.Image
+	if err := json.Unmarshal([]byte(m.History[0].V1Compatibility), &img); err != nil {
+		return nil, errors.Wrap(err, "failed to unmarshal image from schema 1 history")
+	}
+
+	diffIDs := make([]digest.Digest, 0, len(m.History))
+	img.History = make([]ocispec.History, len(m.History))
+	for i := range m.History {
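+		// Schema 1 history is listed newest first; entries are written into
+		// img.History back to front so that the result is oldest first.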
+		var h v1History
+		if err := json.Unmarshal([]byte(m.History[i].V1Compatibility), &h); err != nil {
+			return nil, errors.Wrap(err, "failed to unmarshal history")
+		}
+
+		img.History[len(img.History)-i-1] = ocispec.History{
+			Author:     h.Author,
+			Comment:    h.Comment,
+			Created:    &h.Created,
+			CreatedBy:  strings.Join(h.ContainerConfig.Cmd, " "),
+			EmptyLayer: h.ThrowAway,
+		}
+
+		if !h.ThrowAway {
+			diffID, ok := blobs[m.FSLayers[i].BlobSum]
+			if !ok {
+				return nil, errors.Errorf("no diff id for blob %s", m.FSLayers[i].BlobSum.String())
+			}
+
+			diffIDs = append(diffIDs, diffID)
+		}
+
+	}
+
+	// Reverse the diff ids so the base layer is first
+	for i := 0; i < len(diffIDs)/2; i++ {
+		j := len(diffIDs) - i - 1
+		if i != j {
+			diffIDs[i], diffIDs[j] = diffIDs[j], diffIDs[i]
+		}
+	}
+
+	img.RootFS = ocispec.RootFS{
+		Type:    "layers",
+		DiffIDs: diffIDs,
+	}
+
+	return &img, nil
+}
+
+type signature struct {
+	Signatures []jsParsedSignature `json:"signatures"`
+}
+
+type jsParsedSignature struct {
+	Protected string `json:"protected"`
+}
+
+type protectedBlock struct {
+	Length int    `json:"formatLength"`
+	Tail   string `json:"formatTail"`
+}
+
+// joseBase64UrlDecode decodes the given string using the standard base64 url
+// decoder but first adds the appropriate number of trailing '=' characters in
+// accordance with the jose specification.
+// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2
+func joseBase64UrlDecode(s string) ([]byte, error) {
+	switch len(s) % 4 {
+	case 0:
+	case 2:
+		s += "=="
+	case 3:
+		s += "="
+	default:
+		return nil, errors.New("illegal base64url string")
+	}
+	return base64.URLEncoding.DecodeString(s)
+}
+
+func stripSignature(b []byte) ([]byte, error) {
+	var sig signature
+	if err := json.Unmarshal(b, &sig); err != nil {
+		return nil, err
+	}
+	if len(sig.Signatures) == 0 {
+		return nil, errors.New("no signatures")
+	}
+	pb, err := joseBase64UrlDecode(sig.Signatures[0].Protected)
+	if err != nil {
+		return nil, errors.Wrapf(err, "could not decode %s", sig.Signatures[0].Protected)
+	}
+
+	var protected protectedBlock
+	if err := json.Unmarshal(pb, &protected); err != nil {
+		return nil, err
+	}
+
+	if protected.Length > len(b) {
+		return nil, errors.New("invalid protected length block")
+	}
+
+	tail, err := joseBase64UrlDecode(protected.Tail)
+	if err != nil {
+		return nil, errors.Wrap(err, "invalid tail base 64 value")
+	}
+
+	return append(b[:protected.Length], tail...), nil
+}
diff --git a/remotes/handlers.go b/remotes/handlers.go
index 2fe2a1c7f..a5d9baf73 100644
--- a/remotes/handlers.go
+++ b/remotes/handlers.go
@@ -25,7 +25,7 @@ func MakeRefKey(ctx context.Context, desc ocispec.Descriptor) string {
 		return "manifest-" + desc.Digest.String()
 	case images.MediaTypeDockerSchema2Layer, images.MediaTypeDockerSchema2LayerGzip:
 		return "layer-" + desc.Digest.String()
-	case "application/vnd.docker.container.image.v1+json":
+	case images.MediaTypeDockerSchema2Config, ocispec.MediaTypeImageConfig:
 		return "config-" + desc.Digest.String()
 	default:
 		log.G(ctx).Warnf("reference for unknown type: %s", desc.MediaType)
@@ -47,6 +47,8 @@ func FetchHandler(ingester content.Ingester, fetcher Fetcher) images.HandlerFunc
 	switch desc.MediaType {
 	case images.MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex:
 		return nil, fmt.Errorf("%v not yet supported", desc.MediaType)
+	case images.MediaTypeDockerSchema1Manifest:
+		return nil, fmt.Errorf("%v not supported", desc.MediaType)
 	default:
 		err := fetch(ctx, ingester, fetcher, desc)
 		return nil, err
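Usage sketch: a minimal example of how a caller might exercise the new option, assuming the containerd.New constructor and Close method exist as in the current client.go and that the default remote context supplies a resolver (otherwise pass containerd.WithResolver, as cmd/dist/fetch.go does above); the socket path and image reference are placeholders.

package main

import (
	"context"
	"log"

	"github.com/containerd/containerd"
)

func main() {
	// Placeholder address for the local containerd socket.
	client, err := containerd.New("/run/containerd/containerd.sock")
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// WithSchema1Conversion converts the registry schema 1 manifest into an
	// OCI manifest during the pull; without it the pull fails with a
	// "not supported" error.
	if _, err := client.Pull(context.Background(), "docker.io/library/busybox:1.0", containerd.WithSchema1Conversion); err != nil {
		log.Fatal(err)
	}
}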