
Now that we have most of the services required for containerd, common error-handling patterns have emerged across them. By defining a central `errdefs` package, we ensure that services map errors to and from grpc consistently and cleanly. One can decorate an error with as much context as necessary, using `pkg/errors`, and still have the error mapped correctly via grpc.

We make a few sacrifices. At this point, the common errors we use across the repository all map directly to grpc error codes. While this seems positively crazy, it actually works out quite well: the error conditions that were service-specific weren't strictly necessary, and the ones that were necessary now carry better context information. We lose the ability to add new codes, but this constraint may not be a bad thing. Effectively, as long as one uses the errors defined in `errdefs`, the error class will be mapped correctly across the grpc boundary and everything will be good. If you don't use those definitions, the error maps to "unknown" and only the error message is preserved.

Signed-off-by: Stephen J Day <stephen.day@docker.com>
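A minimal sketch of the round trip, assuming the package's grpc boundary helpers (`errdefs.ToGRPC` and `errdefs.FromGRPC`; the `lookup` helper here is hypothetical):

	// Decorate a well-known errdefs error with context via pkg/errors.
	func lookup(name string) error {
		return errors.Wrapf(errdefs.ErrNotFound, "thing %q", name)
	}

	// At the grpc boundary, the error class maps to codes.NotFound and
	// back, and the decorated message survives the trip.
	err := errdefs.FromGRPC(errdefs.ToGRPC(lookup("foo")))
	fmt.Println(errdefs.IsNotFound(err)) // true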
package remotes

import (
	"context"
	"fmt"
	"time"

	"github.com/Sirupsen/logrus"
	"github.com/containerd/containerd/content"
	"github.com/containerd/containerd/errdefs"
	"github.com/containerd/containerd/images"
	"github.com/containerd/containerd/log"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

// MakeRefKey returns a unique reference for the descriptor. This reference
// can be used to look up ongoing processes related to the descriptor. This
// function may look to the context to namespace the reference appropriately.
func MakeRefKey(ctx context.Context, desc ocispec.Descriptor) string {
	// TODO(stevvooe): Need better remote key selection here. Should be a
	// product of the context, which may include information about the ongoing
	// fetch process.
	switch desc.MediaType {
	case images.MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest:
		return "manifest-" + desc.Digest.String()
	case images.MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex:
		return "index-" + desc.Digest.String()
	case images.MediaTypeDockerSchema2Layer, images.MediaTypeDockerSchema2LayerGzip,
		ocispec.MediaTypeImageLayer, ocispec.MediaTypeImageLayerGzip,
		ocispec.MediaTypeImageLayerNonDistributable, ocispec.MediaTypeImageLayerNonDistributableGzip:
		return "layer-" + desc.Digest.String()
	case images.MediaTypeDockerSchema2Config, ocispec.MediaTypeImageConfig:
		return "config-" + desc.Digest.String()
	default:
		log.G(ctx).Warnf("reference for unknown type: %s", desc.MediaType)
		return "unknown-" + desc.Digest.String()
	}
}

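// As a hedged illustration, a manifest descriptor yields a key of the form
// "manifest-<digest>", which an ingester can use to resume or deduplicate an
// in-flight transfer:
//
//	ref := MakeRefKey(ctx, desc) // e.g. "manifest-sha256:..." for a manifest
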
// FetchHandler returns a handler that will fetch all content discovered in a
// call to Dispatch into the ingester. Use with ChildrenHandler to do a full
// recursive fetch.
func FetchHandler(ingester content.Ingester, fetcher Fetcher) images.HandlerFunc {
	return func(ctx context.Context, desc ocispec.Descriptor) (subdescs []ocispec.Descriptor, err error) {
		ctx = log.WithLogger(ctx, log.G(ctx).WithFields(logrus.Fields{
			"digest":    desc.Digest,
			"mediatype": desc.MediaType,
			"size":      desc.Size,
		}))

		switch desc.MediaType {
		case images.MediaTypeDockerSchema1Manifest:
			return nil, fmt.Errorf("%v not supported", desc.MediaType)
		default:
			err := fetch(ctx, ingester, fetcher, desc)
			return nil, err
		}
	}
}

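// A hedged usage sketch, composing FetchHandler with ChildrenHandler via
// images.Dispatch (Handlers, ChildrenHandler and Dispatch are from the images
// package; "store" is assumed to implement both content.Ingester and
// content.Provider):
//
//	handler := images.Handlers(
//		remotes.FetchHandler(store, fetcher),
//		images.ChildrenHandler(store),
//	)
//	if err := images.Dispatch(ctx, handler, desc); err != nil {
//		return err
//	}
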
func fetch(ctx context.Context, ingester content.Ingester, fetcher Fetcher, desc ocispec.Descriptor) error {
	log.G(ctx).Debug("fetch")

	var (
		ref   = MakeRefKey(ctx, desc)
		cw    content.Writer
		err   error
		retry = 16
	)
	for {
		cw, err = ingester.Writer(ctx, ref, desc.Size, desc.Digest)
		if err != nil {
			if errdefs.IsAlreadyExists(err) {
				return nil
			} else if !errdefs.IsUnavailable(err) {
				return err
			}

			// TODO: On first time locked is encountered, get status
			// of writer and abort if not updated recently.

			// The ref is held by another writer: back off
			// exponentially, from 16ms up to about 2s, then retry.
			select {
			case <-time.After(time.Millisecond * time.Duration(retry)):
				if retry < 2048 {
					retry = retry << 1
				}
				continue
			case <-ctx.Done():
				// Propagate lock error
				return err
			}
		}
		defer cw.Close()
		break
	}

	rc, err := fetcher.Fetch(ctx, desc)
	if err != nil {
		return err
	}
	defer rc.Close()

	return content.Copy(cw, rc, desc.Size, desc.Digest)
}

// PushHandler returns a handler that will push all content from the provider
// using a writer from the pusher.
func PushHandler(provider content.Provider, pusher Pusher) images.HandlerFunc {
	return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
		ctx = log.WithLogger(ctx, log.G(ctx).WithFields(logrus.Fields{
			"digest":    desc.Digest,
			"mediatype": desc.MediaType,
			"size":      desc.Size,
		}))

		err := push(ctx, provider, pusher, desc)
		return nil, err
	}
}

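// A hedged usage sketch from the caller's side ("pusher" is assumed to come
// from resolving the target reference, "store" from the local content store):
//
//	handler := remotes.PushHandler(store, pusher)
//	if _, err := handler(ctx, desc); err != nil {
//		return err
//	}
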
func push(ctx context.Context, provider content.Provider, pusher Pusher, desc ocispec.Descriptor) error {
	log.G(ctx).Debug("push")

	cw, err := pusher.Push(ctx, desc)
	if err != nil {
		if !errdefs.IsAlreadyExists(err) {
			return err
		}

		// Content is already present on the remote; nothing to do.
		return nil
	}
	defer cw.Close()

	rc, err := provider.Reader(ctx, desc.Digest)
	if err != nil {
		return err
	}
	defer rc.Close()

	return content.Copy(cw, rc, desc.Size, desc.Digest)
}