Add option to compress blobs on import

Change the default back to leaving blobs uncompressed and add an
option to enable compression.

Signed-off-by: Derek McGowan <derek@mcgstyle.net>
Derek McGowan 2019-07-25 12:03:54 -07:00
parent 41e172352c
commit 02826345cf
GPG Key ID: F58C5D0A4405ACDB
3 changed files with 66 additions and 14 deletions


@@ -72,6 +72,10 @@ If foobar.tar contains an OCI ref named "latest" and anonymous ref "sha256:deadb
 			Name:  "no-unpack",
 			Usage: "skip unpacking the images, false by default",
 		},
+		cli.BoolFlag{
+			Name:  "compress-blobs",
+			Usage: "compress uncompressed blobs when creating manifest (Docker format only)",
+		},
 	}, commands.SnapshotterFlags...),
 	Action: func(context *cli.Context) error {
@@ -97,6 +101,10 @@ If foobar.tar contains an OCI ref named "latest" and anonymous ref "sha256:deadb
 			opts = append(opts, containerd.WithIndexName(idxName))
 		}
+		if context.Bool("compress-blobs") {
+			opts = append(opts, containerd.WithImportCompression())
+		}
 		opts = append(opts, containerd.WithAllPlatforms(context.Bool("all-platforms")))
 		client, ctx, cancel, err := commands.NewClient(context)
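
For reference, the flag wiring above boils down to a conditional client import option. A rough Go sketch of the equivalent call (the helper name, client, and tarball path are assumptions for illustration, not part of this diff):

import (
	"context"
	"os"

	"github.com/containerd/containerd"
)

// importTar mirrors the ctr wiring above: when compressBlobs is true,
// uncompressed layer blobs are gzipped while the tarball is imported.
func importTar(ctx context.Context, client *containerd.Client, path string, compressBlobs bool) error {
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	defer f.Close()

	var opts []containerd.ImportOpt
	if compressBlobs {
		opts = append(opts, containerd.WithImportCompression())
	}
	_, err = client.Import(ctx, f, opts...)
	return err
}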


@@ -38,6 +38,22 @@ import (
 	"github.com/pkg/errors"
 )
 
+type importOpts struct {
+	compress bool
+}
+
+// ImportOpt is an option for importing an OCI index
+type ImportOpt func(*importOpts) error
+
+// WithImportCompression compresses uncompressed layers on import.
+// This is used for import formats which do not include the manifest.
+func WithImportCompression() ImportOpt {
+	return func(io *importOpts) error {
+		io.compress = true
+		return nil
+	}
+}
+
 // ImportIndex imports an index from a tar archive image bundle
 // - implements Docker v1.1, v1.2 and OCI v1.
 // - prefers OCI v1 when provided
@@ -45,8 +61,7 @@ import (
 // - normalizes Docker references and adds as OCI ref name
 //   e.g. alpine:latest -> docker.io/library/alpine:latest
 // - existing OCI reference names are untouched
-// - TODO: support option to compress layers on ingest
-func ImportIndex(ctx context.Context, store content.Store, reader io.Reader) (ocispec.Descriptor, error) {
+func ImportIndex(ctx context.Context, store content.Store, reader io.Reader, opts ...ImportOpt) (ocispec.Descriptor, error) {
 	var (
 		tr = tar.NewReader(reader)
@@ -58,7 +73,15 @@ func ImportIndex(ctx context.Context, store content.Store, reader io.Reader) (oc
 		}
 		symlinks = make(map[string]string)
 		blobs    = make(map[string]ocispec.Descriptor)
+		iopts    importOpts
 	)
+
+	for _, o := range opts {
+		if err := o(&iopts); err != nil {
+			return ocispec.Descriptor{}, err
+		}
+	}
+
 	for {
 		hdr, err := tr.Next()
 		if err == io.EOF {
@@ -141,7 +164,7 @@ func ImportIndex(ctx context.Context, store content.Store, reader io.Reader) (oc
 		}
 		config.MediaType = images.MediaTypeDockerSchema2Config
-		layers, err := resolveLayers(ctx, store, mfst.Layers, blobs)
+		layers, err := resolveLayers(ctx, store, mfst.Layers, blobs, iopts.compress)
 		if err != nil {
 			return ocispec.Descriptor{}, errors.Wrap(err, "failed to resolve layers")
 		}
@@ -217,7 +240,7 @@ func onUntarBlob(ctx context.Context, r io.Reader, store content.Ingester, size
 	return dgstr.Digest(), nil
 }
 
-func resolveLayers(ctx context.Context, store content.Store, layerFiles []string, blobs map[string]ocispec.Descriptor) ([]ocispec.Descriptor, error) {
+func resolveLayers(ctx context.Context, store content.Store, layerFiles []string, blobs map[string]ocispec.Descriptor, compress bool) ([]ocispec.Descriptor, error) {
 	layers := make([]ocispec.Descriptor, len(layerFiles))
 	descs := map[digest.Digest]*ocispec.Descriptor{}
 	filters := []string{}
@@ -261,17 +284,23 @@ func resolveLayers(ctx context.Context, store content.Store, layerFiles []string
 			return nil, errors.Wrapf(err, "failed to detect compression for %q", layerFiles[i])
 		}
 		if s.GetCompression() == compression.Uncompressed {
-			ref := fmt.Sprintf("compress-blob-%s-%s", desc.Digest.Algorithm().String(), desc.Digest.Encoded())
-			labels := map[string]string{
-				"containerd.io/uncompressed": desc.Digest.String(),
-			}
-			layers[i], err = compressBlob(ctx, store, s, ref, content.WithLabels(labels))
-			if err != nil {
-				s.Close()
-				return nil, err
+			if compress {
+				ref := fmt.Sprintf("compress-blob-%s-%s", desc.Digest.Algorithm().String(), desc.Digest.Encoded())
+				labels := map[string]string{
+					"containerd.io/uncompressed": desc.Digest.String(),
+				}
+				layers[i], err = compressBlob(ctx, store, s, ref, content.WithLabels(labels))
+				if err != nil {
+					s.Close()
+					return nil, err
+				}
+				layers[i].MediaType = images.MediaTypeDockerSchema2LayerGzip
+			} else {
+				layers[i].MediaType = images.MediaTypeDockerSchema2Layer
 			}
+		} else {
+			layers[i].MediaType = images.MediaTypeDockerSchema2LayerGzip
 		}
-		layers[i].MediaType = images.MediaTypeDockerSchema2LayerGzip
 		s.Close()
 	}
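
A rough sketch of driving the extended ImportIndex API above directly, assuming a content store obtained elsewhere (for example from a containerd client); the wrapper name is hypothetical:

import (
	"context"
	"io"

	"github.com/containerd/containerd/content"
	"github.com/containerd/containerd/images/archive"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

// importCompressed asks the importer to gzip any uncompressed layer blob and
// record the original digest under the "containerd.io/uncompressed" label,
// as implemented in resolveLayers above.
func importCompressed(ctx context.Context, store content.Store, r io.Reader) (ocispec.Descriptor, error) {
	return archive.ImportIndex(ctx, store, r, archive.WithImportCompression())
}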


@@ -35,6 +35,7 @@ type importOpts struct {
 	imageRefT    func(string) string
 	dgstRefT     func(digest.Digest) string
 	allPlatforms bool
+	compress     bool
 }
 
 // ImportOpt allows the caller to specify import specific options
@@ -74,6 +75,15 @@ func WithAllPlatforms(allPlatforms bool) ImportOpt {
 	}
 }
 
+// WithImportCompression compresses uncompressed layers on import.
+// This is used for import formats which do not include the manifest.
+func WithImportCompression() ImportOpt {
+	return func(c *importOpts) error {
+		c.compress = true
+		return nil
+	}
+}
+
 // Import imports an image from a Tar stream using reader.
 // Caller needs to specify importer. Future version may use oci.v1 as the default.
 // Note that unreferrenced blobs may be imported to the content store as well.
@@ -91,7 +101,12 @@ func (c *Client) Import(ctx context.Context, reader io.Reader, opts ...ImportOpt
 	}
 	defer done(ctx)
 
-	index, err := archive.ImportIndex(ctx, c.ContentStore(), reader)
+	var aio []archive.ImportOpt
+	if iopts.compress {
+		aio = append(aio, archive.WithImportCompression())
+	}
+
+	index, err := archive.ImportIndex(ctx, c.ContentStore(), reader, aio...)
 	if err != nil {
 		return nil, err
 	}
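
End to end, a client using the new option might look roughly like the following; the socket path, namespace, and tarball name are assumptions for illustration:

package main

import (
	"context"
	"log"
	"os"

	"github.com/containerd/containerd"
	"github.com/containerd/containerd/namespaces"
)

func main() {
	client, err := containerd.New("/run/containerd/containerd.sock")
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	ctx := namespaces.WithNamespace(context.Background(), "default")

	f, err := os.Open("image.tar")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// With WithImportCompression, uncompressed blobs are gzipped on ingest;
	// without it they are now left as-is, the default this commit restores.
	imgs, err := client.Import(ctx, f, containerd.WithImportCompression())
	if err != nil {
		log.Fatal(err)
	}
	for _, img := range imgs {
		log.Printf("imported %s", img.Name)
	}
}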