remotes/docker: implement seekable http requests
To support resumable download, the fetcher for a remote must implement `io.Seeker`. If implemented, the `content.Copy` function will detect the seeker and begin from where the download was terminated by a previous attempt.

Signed-off-by: Stephen J Day <stephen.day@docker.com>
parent a9308e174d · commit 682151b166
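
Concretely, the consumer-side resume path looks roughly like the sketch below (not part of the commit). It assumes the destination file already holds the bytes a previous attempt managed to write; `resumeCopy`, `dst`, and `rc` are illustrative names, and the `io.Seeker` type assertion mirrors what `seekReader` in the content package does in the first hunk below.

package resumedemo // illustrative sketch, not from the commit

import (
	"fmt"
	"io"
	"os"
)

// resumeCopy copies rc into dst, skipping whatever dst already contains when
// the remote reader supports seeking. The non-seekable fallback that
// content.Copy also handles is omitted here.
func resumeCopy(dst *os.File, rc io.ReadCloser) error {
	defer rc.Close()

	// Offset written by the previous, interrupted attempt.
	offset, err := dst.Seek(0, io.SeekEnd)
	if err != nil {
		return err
	}

	if seeker, ok := rc.(io.Seeker); ok && offset > 0 {
		if _, err := seeker.Seek(offset, io.SeekStart); err != nil {
			return fmt.Errorf("failed to seek to offset %v: %v", offset, err)
		}
	}

	_, err = io.Copy(dst, rc)
	return err
}
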
@@ -2,7 +2,6 @@ package content
 
 import (
 	"context"
-	"fmt"
 	"io"
 	"sync"
 
@@ -119,7 +118,7 @@ func seekReader(r io.Reader, offset, size int64) (io.Reader, error) {
	if ok {
		nn, err := seeker.Seek(offset, io.SeekStart)
		if nn != offset {
-			return nil, fmt.Errorf("failed to seek to offset %v", offset)
+			return nil, errors.Wrapf(err, "failed to seek to offset %v", offset)
		}

		if err != nil {

@@ -2,6 +2,7 @@ package docker
 
 import (
 	"context"
+	"fmt"
 	"io"
 	"net/http"
 	"path"
@@ -37,22 +38,55 @@ func (r dockerFetcher) Fetch(ctx context.Context, desc ocispec.Descriptor) (io.R
 		return nil, err
 	}
 
-	for _, u := range urls {
-		req, err := http.NewRequest(http.MethodGet, u, nil)
-		if err != nil {
-			return nil, err
-		}
-
-		req.Header.Set("Accept", strings.Join([]string{desc.MediaType, `*`}, ", "))
-		resp, err := r.doRequestWithRetries(ctx, req, nil)
-		if err != nil {
-			return nil, err
-		}
-
-		if resp.StatusCode > 299 {
-			resp.Body.Close()
-			if resp.StatusCode == http.StatusNotFound {
-				continue // try one of the other urls.
-			}
-			return nil, errors.Errorf("unexpected status code %v: %v", u, resp.Status)
-		}
+	return newHTTPReadSeeker(desc.Size, func(offset int64) (io.ReadCloser, error) {
+		for _, u := range urls {
+			rc, err := r.open(ctx, u, desc.MediaType, offset)
+			if err != nil {
+				if errdefs.IsNotFound(err) {
+					continue // try one of the other urls.
+				}
+
+				return nil, err
+			}
+
+			return rc, nil
+		}
+
+		return nil, errors.Wrapf(errdefs.ErrNotFound,
+			"could not fetch content descriptor %v (%v) from remote",
+			desc.Digest, desc.MediaType)
+
+	})
+}
+
+func (r dockerFetcher) open(ctx context.Context, u, mediatype string, offset int64) (io.ReadCloser, error) {
+	req, err := http.NewRequest(http.MethodGet, u, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	req.Header.Set("Accept", strings.Join([]string{mediatype, `*`}, ", "))
+
+	if offset > 0 {
+		// TODO(stevvooe): Only set this header in response to the
+		// "Accept-Ranges: bytes" header.
+		req.Header.Set("Range", fmt.Sprintf("bytes=%d-", offset))
+	}
+
+	resp, err := r.doRequestWithRetries(ctx, req, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	if resp.StatusCode > 299 {
+		// TODO(stevvooe): When doing a offset specific request, we should
+		// really distinguish between a 206 and a 200. In the case of 200, we
+		// can discard the bytes, hiding the seek behavior from the
+		// implementation.
+
+		resp.Body.Close()
+		if resp.StatusCode == http.StatusNotFound {
+			return nil, errors.Wrapf(errdefs.ErrNotFound, "content at %v not found", u)
+		}
+		return nil, errors.Errorf("unexpected status code %v: %v", u, resp.Status)
+	}
@@ -60,11 +94,6 @@ func (r dockerFetcher) Fetch(ctx context.Context, desc ocispec.Descriptor) (io.R
 	return resp.Body, nil
 }
 
-	return nil, errors.Wrapf(errdefs.ErrNotFound,
-		"could not fetch content descriptor %v (%v) from remote",
-		desc.Digest, desc.MediaType)
-}
-
 // getV2URLPaths generates the candidate urls paths for the object based on the
 // set of hints and the provided object id. URLs are returned in the order of
 // most to least likely succeed.

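As an aside, here is a standalone sketch (not from the commit) of the HTTP semantics the new open method relies on: sending `Range: bytes=N-` resumes at byte N only when the server answers 206 Partial Content, while a plain 200 means the range was ignored and the body restarts at byte 0 — the 206-vs-200 distinction the TODO above calls out. The `openAt` helper, its URL, and its parameters are illustrative only.

package rangedemo // illustrative sketch, not from the commit

import (
	"fmt"
	"io"
	"net/http"
)

// openAt issues a GET that asks the server to start the body at offset and
// reports whether the server actually honored the range request.
func openAt(url string, offset int64) (io.ReadCloser, bool, error) {
	req, err := http.NewRequest(http.MethodGet, url, nil)
	if err != nil {
		return nil, false, err
	}
	if offset > 0 {
		req.Header.Set("Range", fmt.Sprintf("bytes=%d-", offset))
	}

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, false, err
	}

	switch resp.StatusCode {
	case http.StatusPartialContent: // 206: body starts at offset
		return resp.Body, true, nil
	case http.StatusOK: // 200: range ignored; body starts at byte 0
		return resp.Body, false, nil
	default:
		resp.Body.Close()
		return nil, false, fmt.Errorf("unexpected status %v", resp.Status)
	}
}
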
remotes/docker/httpreadseeker.go (new file, 125 lines)

@@ -0,0 +1,125 @@
+package docker
+
+import (
+	"bytes"
+	"io"
+	"io/ioutil"
+
+	"github.com/containerd/containerd/errdefs"
+	"github.com/containerd/containerd/log"
+	"github.com/pkg/errors"
+)
+
+type httpReadSeeker struct {
+	size   int64
+	offset int64
+	rc     io.ReadCloser
+	open   func(offset int64) (io.ReadCloser, error)
+	closed bool
+}
+
+func newHTTPReadSeeker(size int64, open func(offset int64) (io.ReadCloser, error)) (io.ReadCloser, error) {
+	return &httpReadSeeker{
+		size: size,
+		open: open,
+	}, nil
+}
+
+func (hrs *httpReadSeeker) Read(p []byte) (n int, err error) {
+	if hrs.closed {
+		return 0, io.EOF
+	}
+
+	rd, err := hrs.reader()
+	if err != nil {
+		return 0, err
+	}
+
+	n, err = rd.Read(p)
+	hrs.offset += int64(n)
+	return
+}
+
+func (hrs *httpReadSeeker) Close() error {
+	if hrs.closed {
+		return nil
+	}
+	hrs.closed = true
+	if hrs.rc != nil {
+		return hrs.rc.Close()
+	}
+
+	return nil
+}
+
+func (hrs *httpReadSeeker) Seek(offset int64, whence int) (int64, error) {
+	if hrs.closed {
+		return 0, errors.Wrap(errdefs.ErrUnavailable, "Fetcher.Seek: closed")
+	}
+
+	abs := hrs.offset
+	switch whence {
+	case io.SeekStart:
+		abs = offset
+	case io.SeekCurrent:
+		abs += offset
+	case io.SeekEnd:
+		abs = hrs.size + offset
+	default:
+		return 0, errors.Wrap(errdefs.ErrInvalidArgument, "Fetcher.Seek: invalid whence")
+	}
+
+	if abs < 0 {
+		return 0, errors.Wrapf(errdefs.ErrInvalidArgument, "Fetcher.Seek: negative offset")
+	}
+
+	if abs != hrs.offset {
+		if hrs.rc != nil {
+			if err := hrs.rc.Close(); err != nil {
+				log.L.WithError(err).Errorf("Fetcher.Seek: failed to close ReadCloser")
+			}
+
+			hrs.rc = nil
+		}
+
+		hrs.offset = abs
+	}
+
+	return hrs.offset, nil
+}
+
+func (hrs *httpReadSeeker) reader() (io.Reader, error) {
+	if hrs.rc != nil {
+		return hrs.rc, nil
+	}
+
+	if hrs.offset < hrs.size {
+		// only try to reopen the body request if we are seeking to a value
+		// less than the actual size.
+		if hrs.open == nil {
+			return nil, errors.Wrapf(errdefs.ErrNotImplemented, "cannot open")
+		}
+
+		rc, err := hrs.open(hrs.offset)
+		if err != nil {
+			return nil, errors.Wrapf(err, "httpReaderSeeker: failed open")
+		}
+
+		if hrs.rc != nil {
+			if err := hrs.rc.Close(); err != nil {
+				log.L.WithError(err).Errorf("httpReadSeeker: failed to close ReadCloser")
+			}
+		}
+		hrs.rc = rc
+	} else {
+		// There is an edge case here where offset == size of the content. If
+		// we seek, we will probably get an error for content that cannot be
+		// sought (?). In that case, we should err on committing the content,
+		// as the length is already satisified but we just return the empty
+		// reader instead.
+
+		hrs.rc = ioutil.NopCloser(bytes.NewReader([]byte{}))
+	}
+
+	return hrs.rc, nil
+}

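To make the seek/reopen contract concrete, here is a test-style sketch that is not part of the commit and assumes it sits in the same docker package (it uses the unexported constructor). The stub opener serves an in-memory string in place of an HTTP body; a Seek simply records the new offset, and the next Read reopens from there.

package docker

import (
	"io"
	"io/ioutil"
	"strings"
	"testing"
)

// TestHTTPReadSeekerResume is illustrative only: the "remote" is an in-memory
// string, and the opener stands in for dockerFetcher.open.
func TestHTTPReadSeekerResume(t *testing.T) {
	const content = "0123456789"

	opens := 0
	rc, err := newHTTPReadSeeker(int64(len(content)), func(offset int64) (io.ReadCloser, error) {
		opens++
		// Serve the content starting at the requested offset, as a ranged
		// HTTP response would.
		return ioutil.NopCloser(strings.NewReader(content[offset:])), nil
	})
	if err != nil {
		t.Fatal(err)
	}
	defer rc.Close()

	// Pretend the first five bytes were already downloaded: seek past them.
	if _, err := rc.(io.Seeker).Seek(5, io.SeekStart); err != nil {
		t.Fatal(err)
	}

	rest, err := ioutil.ReadAll(rc)
	if err != nil {
		t.Fatal(err)
	}
	if string(rest) != "56789" || opens != 1 {
		t.Fatalf("unexpected resume result: %q after %d opens", rest, opens)
	}
}
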
@@ -298,7 +298,7 @@ func (r *dockerBase) authorize(req *http.Request) {
 
 func (r *dockerBase) doRequest(ctx context.Context, req *http.Request) (*http.Response, error) {
 	ctx = log.WithLogger(ctx, log.G(ctx).WithField("url", req.URL.String()))
-	log.G(ctx).WithField("request.headers", req.Header).WithField("request.method", req.Method).Debug("Do request")
+	log.G(ctx).WithField("request.headers", req.Header).WithField("request.method", req.Method).Debug("do request")
 	r.authorize(req)
 	resp, err := ctxhttp.Do(ctx, r.client, req)
 	if err != nil {