Move remotes to core/remotes
Signed-off-by: Derek McGowan <derek@mcg.dev>
This commit is contained in:
225
core/remotes/docker/auth/fetch.go
Normal file
225
core/remotes/docker/auth/fetch.go
Normal file
@@ -0,0 +1,225 @@
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package auth
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
remoteserrors "github.com/containerd/containerd/v2/core/remotes/errors"
|
||||
"github.com/containerd/containerd/v2/version"
|
||||
"github.com/containerd/log"
|
||||
)
|
||||
|
||||
var (
	// ErrNoToken is returned if a request is successful but the body does not
	// contain an authorization token. Callers typically treat this as an
	// authentication failure rather than a transport error.
	ErrNoToken = errors.New("authorization server did not include a token in the response")
)
|
||||
|
||||
// GenerateTokenOptions generates options for fetching a token based on a challenge
|
||||
func GenerateTokenOptions(ctx context.Context, host, username, secret string, c Challenge) (TokenOptions, error) {
|
||||
realm, ok := c.Parameters["realm"]
|
||||
if !ok {
|
||||
return TokenOptions{}, errors.New("no realm specified for token auth challenge")
|
||||
}
|
||||
|
||||
realmURL, err := url.Parse(realm)
|
||||
if err != nil {
|
||||
return TokenOptions{}, fmt.Errorf("invalid token auth challenge realm: %w", err)
|
||||
}
|
||||
|
||||
to := TokenOptions{
|
||||
Realm: realmURL.String(),
|
||||
Service: c.Parameters["service"],
|
||||
Username: username,
|
||||
Secret: secret,
|
||||
}
|
||||
|
||||
scope, ok := c.Parameters["scope"]
|
||||
if ok {
|
||||
to.Scopes = append(to.Scopes, strings.Split(scope, " ")...)
|
||||
} else {
|
||||
log.G(ctx).WithField("host", host).Debug("no scope specified for token auth challenge")
|
||||
}
|
||||
|
||||
return to, nil
|
||||
}
|
||||
|
||||
// TokenOptions are options for requesting a token
type TokenOptions struct {
	// Realm is the token server URL the token is requested from.
	Realm string
	// Service identifies the registry service to the token server.
	Service string
	// Scopes lists the resource scopes to request access for.
	Scopes []string
	// Username and Secret carry the credentials. In the OAuth POST flow an
	// empty Username causes Secret to be sent as a refresh token instead of
	// a password.
	Username string
	Secret   string

	// FetchRefreshToken enables fetching a refresh token (aka "identity token", "offline token") along with the bearer token.
	//
	// For HTTP GET mode (FetchToken), FetchRefreshToken sets `offline_token=true` in the request.
	// https://docs.docker.com/registry/spec/auth/token/#requesting-a-token
	//
	// For HTTP POST mode (FetchTokenWithOAuth), FetchRefreshToken sets `access_type=offline` in the request.
	// https://docs.docker.com/registry/spec/auth/oauth/#getting-a-token
	FetchRefreshToken bool
}
|
||||
|
||||
// OAuthTokenResponse is response from fetching token with a OAuth POST request
type OAuthTokenResponse struct {
	// AccessToken is the bearer token to use in Authorization headers.
	AccessToken string `json:"access_token"`
	// RefreshToken, when present, can be used to obtain new access tokens.
	RefreshToken string `json:"refresh_token"`
	// ExpiresIn is the token lifetime in seconds, per the registry token spec.
	ExpiresIn int `json:"expires_in"`
	// IssuedAt is the time the token was issued by the server.
	IssuedAt time.Time `json:"issued_at"`
	// Scope is the scope actually granted by the server.
	Scope string `json:"scope"`
}
|
||||
|
||||
// FetchTokenWithOAuth fetches a token using a POST request
|
||||
func FetchTokenWithOAuth(ctx context.Context, client *http.Client, headers http.Header, clientID string, to TokenOptions) (*OAuthTokenResponse, error) {
|
||||
form := url.Values{}
|
||||
if len(to.Scopes) > 0 {
|
||||
form.Set("scope", strings.Join(to.Scopes, " "))
|
||||
}
|
||||
form.Set("service", to.Service)
|
||||
form.Set("client_id", clientID)
|
||||
|
||||
if to.Username == "" {
|
||||
form.Set("grant_type", "refresh_token")
|
||||
form.Set("refresh_token", to.Secret)
|
||||
} else {
|
||||
form.Set("grant_type", "password")
|
||||
form.Set("username", to.Username)
|
||||
form.Set("password", to.Secret)
|
||||
}
|
||||
if to.FetchRefreshToken {
|
||||
form.Set("access_type", "offline")
|
||||
}
|
||||
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodPost, to.Realm, strings.NewReader(form.Encode()))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8")
|
||||
for k, v := range headers {
|
||||
req.Header[k] = append(req.Header[k], v...)
|
||||
}
|
||||
if len(req.Header.Get("User-Agent")) == 0 {
|
||||
req.Header.Set("User-Agent", "containerd/"+version.Version)
|
||||
}
|
||||
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode < 200 || resp.StatusCode >= 400 {
|
||||
return nil, remoteserrors.NewUnexpectedStatusErr(resp)
|
||||
}
|
||||
|
||||
decoder := json.NewDecoder(resp.Body)
|
||||
|
||||
var tr OAuthTokenResponse
|
||||
if err = decoder.Decode(&tr); err != nil {
|
||||
return nil, fmt.Errorf("unable to decode token response: %w", err)
|
||||
}
|
||||
|
||||
if tr.AccessToken == "" {
|
||||
return nil, ErrNoToken
|
||||
}
|
||||
|
||||
return &tr, nil
|
||||
}
|
||||
|
||||
// FetchTokenResponse is response from fetching token with GET request
type FetchTokenResponse struct {
	// Token is the bearer token; FetchToken canonicalizes AccessToken into
	// this field when both are present.
	Token string `json:"token"`
	// AccessToken is an alternate field some servers use instead of Token.
	AccessToken string `json:"access_token"`
	// ExpiresIn is the token lifetime in seconds, per the registry token spec.
	ExpiresIn int `json:"expires_in"`
	// IssuedAt is the time the token was issued by the server.
	IssuedAt time.Time `json:"issued_at"`
	// RefreshToken, when present, can be used to obtain new tokens.
	RefreshToken string `json:"refresh_token"`
}
|
||||
|
||||
// FetchToken fetches a token using a GET request
|
||||
func FetchToken(ctx context.Context, client *http.Client, headers http.Header, to TokenOptions) (*FetchTokenResponse, error) {
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodGet, to.Realm, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for k, v := range headers {
|
||||
req.Header[k] = append(req.Header[k], v...)
|
||||
}
|
||||
if len(req.Header.Get("User-Agent")) == 0 {
|
||||
req.Header.Set("User-Agent", "containerd/"+version.Version)
|
||||
}
|
||||
|
||||
reqParams := req.URL.Query()
|
||||
|
||||
if to.Service != "" {
|
||||
reqParams.Add("service", to.Service)
|
||||
}
|
||||
|
||||
for _, scope := range to.Scopes {
|
||||
reqParams.Add("scope", scope)
|
||||
}
|
||||
|
||||
if to.Secret != "" {
|
||||
req.SetBasicAuth(to.Username, to.Secret)
|
||||
}
|
||||
|
||||
if to.FetchRefreshToken {
|
||||
reqParams.Add("offline_token", "true")
|
||||
}
|
||||
|
||||
req.URL.RawQuery = reqParams.Encode()
|
||||
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode < 200 || resp.StatusCode >= 400 {
|
||||
return nil, remoteserrors.NewUnexpectedStatusErr(resp)
|
||||
}
|
||||
|
||||
decoder := json.NewDecoder(resp.Body)
|
||||
|
||||
var tr FetchTokenResponse
|
||||
if err = decoder.Decode(&tr); err != nil {
|
||||
return nil, fmt.Errorf("unable to decode token response: %w", err)
|
||||
}
|
||||
|
||||
// `access_token` is equivalent to `token` and if both are specified
|
||||
// the choice is undefined. Canonicalize `access_token` by sticking
|
||||
// things in `token`.
|
||||
if tr.AccessToken != "" {
|
||||
tr.Token = tr.AccessToken
|
||||
}
|
||||
|
||||
if tr.Token == "" {
|
||||
return nil, ErrNoToken
|
||||
}
|
||||
|
||||
return &tr, nil
|
||||
}
|
||||
114
core/remotes/docker/auth/fetch_test.go
Normal file
114
core/remotes/docker/auth/fetch_test.go
Normal file
@@ -0,0 +1,114 @@
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package auth
|
||||
|
||||
import (
|
||||
"context"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestGenerateTokenOptions(t *testing.T) {
|
||||
for _, tc := range []struct {
|
||||
name string
|
||||
realm string
|
||||
service string
|
||||
username string
|
||||
secret string
|
||||
scope string
|
||||
}{
|
||||
{
|
||||
name: "MultipleScopes",
|
||||
realm: "https://test-realm.com",
|
||||
service: "registry-service",
|
||||
username: "username",
|
||||
secret: "secret",
|
||||
scope: "repository:foo/bar:pull repository:foo/bar:pull,push",
|
||||
},
|
||||
{
|
||||
name: "SingleScope",
|
||||
realm: "https://test-realm.com",
|
||||
service: "registry-service",
|
||||
username: "username",
|
||||
secret: "secret",
|
||||
scope: "repository:foo/bar:pull",
|
||||
},
|
||||
{
|
||||
name: "NoScope",
|
||||
realm: "https://test-realm.com",
|
||||
service: "registry-service",
|
||||
username: "username",
|
||||
secret: "secret",
|
||||
},
|
||||
} {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
c := Challenge{
|
||||
Scheme: BearerAuth,
|
||||
Parameters: map[string]string{
|
||||
"realm": tc.realm,
|
||||
"service": tc.service,
|
||||
"scope": tc.scope,
|
||||
},
|
||||
}
|
||||
options, err := GenerateTokenOptions(context.Background(), "host", tc.username, tc.secret, c)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error %v", err)
|
||||
}
|
||||
|
||||
expected := TokenOptions{
|
||||
Realm: tc.realm,
|
||||
Service: tc.service,
|
||||
Scopes: strings.Split(tc.scope, " "),
|
||||
Username: tc.username,
|
||||
Secret: tc.secret,
|
||||
}
|
||||
if !reflect.DeepEqual(options, expected) {
|
||||
t.Fatalf("expected %v, but got %v", expected, options)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
t.Run("MissingRealm", func(t *testing.T) {
|
||||
c := Challenge{
|
||||
Scheme: BearerAuth,
|
||||
Parameters: map[string]string{
|
||||
"service": "service",
|
||||
"scope": "repository:foo/bar:pull,push",
|
||||
},
|
||||
}
|
||||
_, err := GenerateTokenOptions(context.Background(), "host", "username", "secret", c)
|
||||
if err == nil {
|
||||
t.Fatal("expected an err and got nil")
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("RealmParseError", func(t *testing.T) {
|
||||
c := Challenge{
|
||||
Scheme: BearerAuth,
|
||||
Parameters: map[string]string{
|
||||
"realm": "127.0.0.1:8080",
|
||||
"service": "service",
|
||||
"scope": "repository:foo/bar:pull,push",
|
||||
},
|
||||
}
|
||||
_, err := GenerateTokenOptions(context.Background(), "host", "username", "secret", c)
|
||||
if err == nil {
|
||||
t.Fatal("expected an err and got nil")
|
||||
}
|
||||
})
|
||||
}
|
||||
200
core/remotes/docker/auth/parse.go
Normal file
200
core/remotes/docker/auth/parse.go
Normal file
@@ -0,0 +1,200 @@
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package auth
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"sort"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// AuthenticationScheme defines scheme of the authentication method
|
||||
type AuthenticationScheme byte
|
||||
|
||||
const (
|
||||
// BasicAuth is scheme for Basic HTTP Authentication RFC 7617
|
||||
BasicAuth AuthenticationScheme = 1 << iota
|
||||
// DigestAuth is scheme for HTTP Digest Access Authentication RFC 7616
|
||||
DigestAuth
|
||||
// BearerAuth is scheme for OAuth 2.0 Bearer Tokens RFC 6750
|
||||
BearerAuth
|
||||
)
|
||||
|
||||
// Challenge carries information from a WWW-Authenticate response header.
|
||||
// See RFC 2617.
|
||||
type Challenge struct {
|
||||
// scheme is the auth-scheme according to RFC 2617
|
||||
Scheme AuthenticationScheme
|
||||
|
||||
// parameters are the auth-params according to RFC 2617
|
||||
Parameters map[string]string
|
||||
}
|
||||
|
||||
type byScheme []Challenge
|
||||
|
||||
func (bs byScheme) Len() int { return len(bs) }
|
||||
func (bs byScheme) Swap(i, j int) { bs[i], bs[j] = bs[j], bs[i] }
|
||||
|
||||
// Less sorts in priority order: token > digest > basic
|
||||
func (bs byScheme) Less(i, j int) bool { return bs[i].Scheme > bs[j].Scheme }
|
||||
|
||||
// Octet types from RFC 2616.
type octetType byte

// octetTypes classifies every byte value as token and/or space characters,
// populated once by init below and consulted by skipSpace/expectToken.
var octetTypes [256]octetType

const (
	isToken octetType = 1 << iota
	isSpace
)

// init populates octetTypes from the RFC 2616 grammar quoted below.
func init() {
	// OCTET      = <any 8-bit sequence of data>
	// CHAR       = <any US-ASCII character (octets 0 - 127)>
	// CTL        = <any US-ASCII control character (octets 0 - 31) and DEL (127)>
	// CR         = <US-ASCII CR, carriage return (13)>
	// LF         = <US-ASCII LF, linefeed (10)>
	// SP         = <US-ASCII SP, space (32)>
	// HT         = <US-ASCII HT, horizontal-tab (9)>
	// <">        = <US-ASCII double-quote mark (34)>
	// CRLF       = CR LF
	// LWS        = [CRLF] 1*( SP | HT )
	// TEXT       = <any OCTET except CTLs, but including LWS>
	// separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <">
	//              | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT
	// token      = 1*<any CHAR except CTLs or separators>
	// qdtext     = <any TEXT except <">>

	for c := 0; c < 256; c++ {
		var t octetType
		isCtl := c <= 31 || c == 127
		// c starts at 0 and only grows, so "0 <= c" is always true;
		// CHAR is simply any octet <= 127.
		isChar := c <= 127
		isSeparator := strings.ContainsRune(" \t\"(),/:;<=>?@[]\\{}", rune(c))
		if strings.ContainsRune(" \t\r\n", rune(c)) {
			t |= isSpace
		}
		if isChar && !isCtl && !isSeparator {
			t |= isToken
		}
		octetTypes[c] = t
	}
}
|
||||
|
||||
// ParseAuthHeader parses challenges from WWW-Authenticate header
|
||||
func ParseAuthHeader(header http.Header) []Challenge {
|
||||
challenges := []Challenge{}
|
||||
for _, h := range header[http.CanonicalHeaderKey("WWW-Authenticate")] {
|
||||
v, p := parseValueAndParams(h)
|
||||
var s AuthenticationScheme
|
||||
switch v {
|
||||
case "basic":
|
||||
s = BasicAuth
|
||||
case "digest":
|
||||
s = DigestAuth
|
||||
case "bearer":
|
||||
s = BearerAuth
|
||||
default:
|
||||
continue
|
||||
}
|
||||
challenges = append(challenges, Challenge{Scheme: s, Parameters: p})
|
||||
}
|
||||
sort.Stable(byScheme(challenges))
|
||||
return challenges
|
||||
}
|
||||
|
||||
func parseValueAndParams(header string) (value string, params map[string]string) {
|
||||
params = make(map[string]string)
|
||||
value, s := expectToken(header)
|
||||
if value == "" {
|
||||
return
|
||||
}
|
||||
value = strings.ToLower(value)
|
||||
for {
|
||||
var pkey string
|
||||
pkey, s = expectToken(skipSpace(s))
|
||||
if pkey == "" {
|
||||
return
|
||||
}
|
||||
if !strings.HasPrefix(s, "=") {
|
||||
return
|
||||
}
|
||||
var pvalue string
|
||||
pvalue, s = expectTokenOrQuoted(s[1:])
|
||||
pkey = strings.ToLower(pkey)
|
||||
params[pkey] = pvalue
|
||||
s = skipSpace(s)
|
||||
if !strings.HasPrefix(s, ",") {
|
||||
return
|
||||
}
|
||||
s = s[1:]
|
||||
}
|
||||
}
|
||||
|
||||
func skipSpace(s string) (rest string) {
|
||||
i := 0
|
||||
for ; i < len(s); i++ {
|
||||
if octetTypes[s[i]]&isSpace == 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
return s[i:]
|
||||
}
|
||||
|
||||
func expectToken(s string) (token, rest string) {
|
||||
i := 0
|
||||
for ; i < len(s); i++ {
|
||||
if octetTypes[s[i]]&isToken == 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
return s[:i], s[i:]
|
||||
}
|
||||
|
||||
// expectTokenOrQuoted parses either a bare token or a quoted-string at the
// start of s, returning the (unescaped) value and the remaining input.
// An unterminated quoted-string yields ("", "").
func expectTokenOrQuoted(s string) (value string, rest string) {
	if !strings.HasPrefix(s, "\"") {
		return expectToken(s)
	}
	s = s[1:]
	for i := 0; i < len(s); i++ {
		switch s[i] {
		case '"':
			// Fast path: no backslash escapes before the closing quote.
			return s[:i], s[i+1:]
		case '\\':
			// Slow path: copy the unescaped prefix, then unescape the rest
			// byte by byte. p is at most len(s)-1 since every escape pair
			// collapses to a single byte.
			p := make([]byte, len(s)-1)
			j := copy(p, s[:i])
			escape := true
			for i = i + 1; i < len(s); i++ {
				b := s[i]
				switch {
				case escape:
					// Previous byte was a backslash: take b literally.
					escape = false
					p[j] = b
					j++
				case b == '\\':
					escape = true
				case b == '"':
					// Closing quote: return the unescaped value.
					return string(p[:j]), s[i+1:]
				default:
					p[j] = b
					j++
				}
			}
			// Ran off the end without a closing quote.
			return "", ""
		}
	}
	// No closing quote (and no escapes) found.
	return "", ""
}
|
||||
95
core/remotes/docker/auth/parse_test.go
Normal file
95
core/remotes/docker/auth/parse_test.go
Normal file
@@ -0,0 +1,95 @@
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package auth
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestParseAuthHeaderBearer(t *testing.T) {
|
||||
headerTemplate := `Bearer realm="%s",service="%s",scope="%s"`
|
||||
|
||||
for _, tc := range []struct {
|
||||
name string
|
||||
realm string
|
||||
service string
|
||||
scope string
|
||||
}{
|
||||
{
|
||||
name: "SingleScope",
|
||||
realm: "https://auth.docker.io/token",
|
||||
service: "registry.docker.io",
|
||||
scope: "repository:foo/bar:pull,push",
|
||||
},
|
||||
{
|
||||
name: "MultipleScopes",
|
||||
realm: "https://auth.docker.io/token",
|
||||
service: "registry.docker.io",
|
||||
scope: "repository:foo/bar:pull,push repository:foo/baz:pull repository:foo/foo:push",
|
||||
},
|
||||
} {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
expected := []Challenge{
|
||||
{
|
||||
Scheme: BearerAuth,
|
||||
Parameters: map[string]string{
|
||||
"realm": tc.realm,
|
||||
"service": tc.service,
|
||||
"scope": tc.scope,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
hdr := http.Header{
|
||||
http.CanonicalHeaderKey("WWW-Authenticate"): []string{fmt.Sprintf(
|
||||
headerTemplate, tc.realm, tc.service, tc.scope,
|
||||
)},
|
||||
}
|
||||
actual := ParseAuthHeader(hdr)
|
||||
if !reflect.DeepEqual(expected, actual) {
|
||||
t.Fatalf("expected %v, but got %v", expected, actual)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseAuthHeader(t *testing.T) {
|
||||
v := `Bearer realm="https://auth.example.io/token",empty="",service="registry.example.io",scope="repository:library/hello-world:pull,push"`
|
||||
h := http.Header{http.CanonicalHeaderKey("WWW-Authenticate"): []string{v}}
|
||||
challenge := ParseAuthHeader(h)
|
||||
|
||||
actual, ok := challenge[0].Parameters["empty"]
|
||||
assert.True(t, ok)
|
||||
assert.Equal(t, "", actual)
|
||||
|
||||
actual, ok = challenge[0].Parameters["service"]
|
||||
assert.True(t, ok)
|
||||
assert.Equal(t, "registry.example.io", actual)
|
||||
}
|
||||
|
||||
func FuzzParseAuthHeader(f *testing.F) {
|
||||
f.Add(`Bearer realm="https://example.com/token",service="example.com",scope="repository:foo/bar:pull,push"`)
|
||||
f.Fuzz(func(t *testing.T, v string) {
|
||||
h := http.Header{http.CanonicalHeaderKey("WWW-Authenticate"): []string{v}}
|
||||
_ = ParseAuthHeader(h)
|
||||
})
|
||||
}
|
||||
361
core/remotes/docker/authorizer.go
Normal file
361
core/remotes/docker/authorizer.go
Normal file
@@ -0,0 +1,361 @@
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package docker
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/containerd/containerd/v2/core/remotes/docker/auth"
|
||||
remoteerrors "github.com/containerd/containerd/v2/core/remotes/errors"
|
||||
"github.com/containerd/containerd/v2/errdefs"
|
||||
"github.com/containerd/log"
|
||||
)
|
||||
|
||||
type dockerAuthorizer struct {
|
||||
credentials func(string) (string, string, error)
|
||||
|
||||
client *http.Client
|
||||
header http.Header
|
||||
mu sync.RWMutex
|
||||
|
||||
// indexed by host name
|
||||
handlers map[string]*authHandler
|
||||
|
||||
onFetchRefreshToken OnFetchRefreshToken
|
||||
}
|
||||
|
||||
type authorizerConfig struct {
|
||||
credentials func(string) (string, string, error)
|
||||
client *http.Client
|
||||
header http.Header
|
||||
onFetchRefreshToken OnFetchRefreshToken
|
||||
}
|
||||
|
||||
// AuthorizerOpt configures an authorizer
|
||||
type AuthorizerOpt func(*authorizerConfig)
|
||||
|
||||
// WithAuthClient provides the HTTP client for the authorizer
|
||||
func WithAuthClient(client *http.Client) AuthorizerOpt {
|
||||
return func(opt *authorizerConfig) {
|
||||
opt.client = client
|
||||
}
|
||||
}
|
||||
|
||||
// WithAuthCreds provides a credential function to the authorizer
|
||||
func WithAuthCreds(creds func(string) (string, string, error)) AuthorizerOpt {
|
||||
return func(opt *authorizerConfig) {
|
||||
opt.credentials = creds
|
||||
}
|
||||
}
|
||||
|
||||
// WithAuthHeader provides HTTP headers for authorization
|
||||
func WithAuthHeader(hdr http.Header) AuthorizerOpt {
|
||||
return func(opt *authorizerConfig) {
|
||||
opt.header = hdr
|
||||
}
|
||||
}
|
||||
|
||||
// OnFetchRefreshToken is called on fetching request token.
|
||||
type OnFetchRefreshToken func(ctx context.Context, refreshToken string, req *http.Request)
|
||||
|
||||
// WithFetchRefreshToken enables fetching "refresh token" (aka "identity token", "offline token").
|
||||
func WithFetchRefreshToken(f OnFetchRefreshToken) AuthorizerOpt {
|
||||
return func(opt *authorizerConfig) {
|
||||
opt.onFetchRefreshToken = f
|
||||
}
|
||||
}
|
||||
|
||||
// NewDockerAuthorizer creates an authorizer using Docker's registry
|
||||
// authentication spec.
|
||||
// See https://docs.docker.com/registry/spec/auth/
|
||||
func NewDockerAuthorizer(opts ...AuthorizerOpt) Authorizer {
|
||||
var ao authorizerConfig
|
||||
for _, opt := range opts {
|
||||
opt(&ao)
|
||||
}
|
||||
|
||||
if ao.client == nil {
|
||||
ao.client = http.DefaultClient
|
||||
}
|
||||
|
||||
return &dockerAuthorizer{
|
||||
credentials: ao.credentials,
|
||||
client: ao.client,
|
||||
header: ao.header,
|
||||
handlers: make(map[string]*authHandler),
|
||||
onFetchRefreshToken: ao.onFetchRefreshToken,
|
||||
}
|
||||
}
|
||||
|
||||
// Authorize handles auth request.
|
||||
func (a *dockerAuthorizer) Authorize(ctx context.Context, req *http.Request) error {
|
||||
// skip if there is no auth handler
|
||||
ah := a.getAuthHandler(req.URL.Host)
|
||||
if ah == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
auth, refreshToken, err := ah.authorize(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
req.Header.Set("Authorization", auth)
|
||||
|
||||
if refreshToken != "" {
|
||||
a.mu.RLock()
|
||||
onFetchRefreshToken := a.onFetchRefreshToken
|
||||
a.mu.RUnlock()
|
||||
if onFetchRefreshToken != nil {
|
||||
onFetchRefreshToken(ctx, refreshToken, req)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (a *dockerAuthorizer) getAuthHandler(host string) *authHandler {
|
||||
a.mu.Lock()
|
||||
defer a.mu.Unlock()
|
||||
|
||||
return a.handlers[host]
|
||||
}
|
||||
|
||||
func (a *dockerAuthorizer) AddResponses(ctx context.Context, responses []*http.Response) error {
|
||||
last := responses[len(responses)-1]
|
||||
host := last.Request.URL.Host
|
||||
|
||||
a.mu.Lock()
|
||||
defer a.mu.Unlock()
|
||||
for _, c := range auth.ParseAuthHeader(last.Header) {
|
||||
if c.Scheme == auth.BearerAuth {
|
||||
if retry, err := invalidAuthorization(ctx, c, responses); err != nil {
|
||||
delete(a.handlers, host)
|
||||
return err
|
||||
} else if retry {
|
||||
delete(a.handlers, host)
|
||||
}
|
||||
|
||||
// reuse existing handler
|
||||
//
|
||||
// assume that one registry will return the common
|
||||
// challenge information, including realm and service.
|
||||
// and the resource scope is only different part
|
||||
// which can be provided by each request.
|
||||
if _, ok := a.handlers[host]; ok {
|
||||
return nil
|
||||
}
|
||||
|
||||
var username, secret string
|
||||
if a.credentials != nil {
|
||||
var err error
|
||||
username, secret, err = a.credentials(host)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
common, err := auth.GenerateTokenOptions(ctx, host, username, secret, c)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
common.FetchRefreshToken = a.onFetchRefreshToken != nil
|
||||
|
||||
a.handlers[host] = newAuthHandler(a.client, a.header, c.Scheme, common)
|
||||
return nil
|
||||
} else if c.Scheme == auth.BasicAuth && a.credentials != nil {
|
||||
username, secret, err := a.credentials(host)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if username == "" || secret == "" {
|
||||
return fmt.Errorf("%w: no basic auth credentials", ErrInvalidAuthorization)
|
||||
}
|
||||
|
||||
a.handlers[host] = newAuthHandler(a.client, a.header, c.Scheme, auth.TokenOptions{
|
||||
Username: username,
|
||||
Secret: secret,
|
||||
})
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return fmt.Errorf("failed to find supported auth scheme: %w", errdefs.ErrNotImplemented)
|
||||
}
|
||||
|
||||
// authResult is used to control limit rate.
|
||||
type authResult struct {
|
||||
sync.WaitGroup
|
||||
token string
|
||||
refreshToken string
|
||||
err error
|
||||
}
|
||||
|
||||
// authHandler is used to handle auth request per registry server.
|
||||
type authHandler struct {
|
||||
sync.Mutex
|
||||
|
||||
header http.Header
|
||||
|
||||
client *http.Client
|
||||
|
||||
// only support basic and bearer schemes
|
||||
scheme auth.AuthenticationScheme
|
||||
|
||||
// common contains common challenge answer
|
||||
common auth.TokenOptions
|
||||
|
||||
// scopedTokens caches token indexed by scopes, which used in
|
||||
// bearer auth case
|
||||
scopedTokens map[string]*authResult
|
||||
}
|
||||
|
||||
func newAuthHandler(client *http.Client, hdr http.Header, scheme auth.AuthenticationScheme, opts auth.TokenOptions) *authHandler {
|
||||
return &authHandler{
|
||||
header: hdr,
|
||||
client: client,
|
||||
scheme: scheme,
|
||||
common: opts,
|
||||
scopedTokens: map[string]*authResult{},
|
||||
}
|
||||
}
|
||||
|
||||
func (ah *authHandler) authorize(ctx context.Context) (string, string, error) {
|
||||
switch ah.scheme {
|
||||
case auth.BasicAuth:
|
||||
return ah.doBasicAuth(ctx)
|
||||
case auth.BearerAuth:
|
||||
return ah.doBearerAuth(ctx)
|
||||
default:
|
||||
return "", "", fmt.Errorf("failed to find supported auth scheme: %s: %w", string(ah.scheme), errdefs.ErrNotImplemented)
|
||||
}
|
||||
}
|
||||
|
||||
func (ah *authHandler) doBasicAuth(ctx context.Context) (string, string, error) {
|
||||
username, secret := ah.common.Username, ah.common.Secret
|
||||
|
||||
if username == "" || secret == "" {
|
||||
return "", "", fmt.Errorf("failed to handle basic auth because missing username or secret")
|
||||
}
|
||||
|
||||
auth := base64.StdEncoding.EncodeToString([]byte(username + ":" + secret))
|
||||
return fmt.Sprintf("Basic %s", auth), "", nil
|
||||
}
|
||||
|
||||
// doBearerAuth fetches a bearer token for the scopes attached to ctx,
// deduplicating concurrent requests for the same scope set: the first
// caller performs the fetch while later callers wait on the shared
// authResult and reuse its outcome.
//
// NOTE(review): the first deferred function wraps token in "Bearer %s"
// unconditionally, even when err is non-nil (token is then "Bearer ");
// callers must check err before using the header value.
func (ah *authHandler) doBearerAuth(ctx context.Context) (token, refreshToken string, err error) {
	// copy common tokenOptions
	to := ah.common

	to.Scopes = GetTokenScopes(ctx, to.Scopes)

	// Docs: https://docs.docker.com/registry/spec/auth/scope
	scoped := strings.Join(to.Scopes, " ")

	// If another goroutine is already fetching a token for this exact scope
	// set, wait for its result instead of issuing a duplicate request.
	ah.Lock()
	if r, exist := ah.scopedTokens[scoped]; exist {
		ah.Unlock()
		r.Wait()
		return r.token, r.refreshToken, r.err
	}

	// only one fetch token job
	r := new(authResult)
	r.Add(1)
	ah.scopedTokens[scoped] = r
	ah.Unlock()

	// Publish the outcome (and release waiters) whichever return path is taken.
	defer func() {
		token = fmt.Sprintf("Bearer %s", token)
		r.token, r.refreshToken, r.err = token, refreshToken, err
		r.Done()
	}()

	// fetch token for the resource scope
	if to.Secret != "" {
		defer func() {
			if err != nil {
				err = fmt.Errorf("failed to fetch oauth token: %w", err)
			}
		}()
		// credential information is provided, use oauth POST endpoint
		// TODO: Allow setting client_id
		resp, err := auth.FetchTokenWithOAuth(ctx, ah.client, ah.header, "containerd-client", to)
		if err != nil {
			var errStatus remoteerrors.ErrUnexpectedStatus
			if errors.As(err, &errStatus) {
				// Registries without support for POST may return 404 for POST /v2/token.
				// As of September 2017, GCR is known to return 404.
				// As of February 2018, JFrog Artifactory is known to return 401.
				// As of January 2022, ACR is known to return 400.
				if (errStatus.StatusCode == 405 && to.Username != "") || errStatus.StatusCode == 404 || errStatus.StatusCode == 401 || errStatus.StatusCode == 400 {
					// Fall back to the GET token endpoint.
					resp, err := auth.FetchToken(ctx, ah.client, ah.header, to)
					if err != nil {
						return "", "", err
					}
					return resp.Token, resp.RefreshToken, nil
				}
				log.G(ctx).WithFields(log.Fields{
					"status": errStatus.Status,
					"body":   string(errStatus.Body),
				}).Debugf("token request failed")
			}
			return "", "", err
		}
		return resp.AccessToken, resp.RefreshToken, nil
	}
	// do request anonymously
	resp, err := auth.FetchToken(ctx, ah.client, ah.header, to)
	if err != nil {
		return "", "", fmt.Errorf("failed to fetch anonymous token: %w", err)
	}
	return resp.Token, resp.RefreshToken, nil
}
|
||||
|
||||
func invalidAuthorization(ctx context.Context, c auth.Challenge, responses []*http.Response) (retry bool, _ error) {
|
||||
errStr := c.Parameters["error"]
|
||||
if errStr == "" {
|
||||
return retry, nil
|
||||
}
|
||||
|
||||
n := len(responses)
|
||||
if n == 1 || (n > 1 && !sameRequest(responses[n-2].Request, responses[n-1].Request)) {
|
||||
limitedErr := errStr
|
||||
errLenghLimit := 64
|
||||
if len(limitedErr) > errLenghLimit {
|
||||
limitedErr = limitedErr[:errLenghLimit] + "..."
|
||||
}
|
||||
log.G(ctx).WithField("error", limitedErr).Debug("authorization error using bearer token, retrying")
|
||||
return true, nil
|
||||
}
|
||||
|
||||
return retry, fmt.Errorf("server message: %s: %w", errStr, ErrInvalidAuthorization)
|
||||
}
|
||||
|
||||
func sameRequest(r1, r2 *http.Request) bool {
|
||||
if r1.Method != r2.Method {
|
||||
return false
|
||||
}
|
||||
if *r1.URL != *r2.URL {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
42
core/remotes/docker/config/config_unix.go
Normal file
42
core/remotes/docker/config/config_unix.go
Normal file
@@ -0,0 +1,42 @@
|
||||
//go:build !windows
|
||||
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package config
|
||||
|
||||
import (
|
||||
"crypto/x509"
|
||||
"path/filepath"
|
||||
)
|
||||
|
||||
func hostPaths(root, host string) (hosts []string) {
|
||||
ch := hostDirectory(host)
|
||||
if ch != host {
|
||||
hosts = append(hosts, filepath.Join(root, ch))
|
||||
}
|
||||
|
||||
hosts = append(hosts,
|
||||
filepath.Join(root, host),
|
||||
filepath.Join(root, "_default"),
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// rootSystemPool returns the system CA certificate pool, used as the base
// pool when per-host CA files are configured (non-Windows builds).
func rootSystemPool() (*x509.CertPool, error) {
	return x509.SystemCertPool()
}
|
||||
41
core/remotes/docker/config/config_windows.go
Normal file
41
core/remotes/docker/config/config_windows.go
Normal file
@@ -0,0 +1,41 @@
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package config
|
||||
|
||||
import (
|
||||
"crypto/x509"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
)
|
||||
|
||||
func hostPaths(root, host string) (hosts []string) {
|
||||
ch := hostDirectory(host)
|
||||
if ch != host {
|
||||
hosts = append(hosts, filepath.Join(root, strings.Replace(ch, ":", "", -1)))
|
||||
}
|
||||
|
||||
hosts = append(hosts,
|
||||
filepath.Join(root, strings.Replace(host, ":", "", -1)),
|
||||
filepath.Join(root, "_default"),
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// rootSystemPool returns a fresh, empty certificate pool. Windows builds
// cannot load the system roots into an x509.CertPool, so per-host CA
// configuration starts from an empty pool here.
func rootSystemPool() (*x509.CertPool, error) {
	pool := x509.NewCertPool()
	return pool, nil
}
|
||||
44
core/remotes/docker/config/docker_fuzzer_internal.go
Normal file
44
core/remotes/docker/config/docker_fuzzer_internal.go
Normal file
@@ -0,0 +1,44 @@
|
||||
//go:build gofuzz
|
||||
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package config
|
||||
|
||||
import (
|
||||
"os"
|
||||
|
||||
fuzz "github.com/AdaLogics/go-fuzz-headers"
|
||||
)
|
||||
|
||||
func FuzzParseHostsFile(data []byte) int {
|
||||
f := fuzz.NewConsumer(data)
|
||||
dir, err := os.MkdirTemp("", "fuzz-")
|
||||
if err != nil {
|
||||
return 0
|
||||
}
|
||||
err = f.CreateFiles(dir)
|
||||
if err != nil {
|
||||
return 0
|
||||
}
|
||||
defer os.RemoveAll(dir)
|
||||
b, err := f.GetBytes()
|
||||
if err != nil {
|
||||
return 0
|
||||
}
|
||||
_, _ = parseHostsFile(dir, b)
|
||||
return 1
|
||||
}
|
||||
617
core/remotes/docker/config/hosts.go
Normal file
617
core/remotes/docker/config/hosts.go
Normal file
@@ -0,0 +1,617 @@
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Package config contains utilities for helping configure the Docker resolver
|
||||
package config
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/containerd/containerd/v2/core/remotes/docker"
|
||||
"github.com/containerd/containerd/v2/errdefs"
|
||||
"github.com/containerd/log"
|
||||
"github.com/pelletier/go-toml/v2"
|
||||
tomlu "github.com/pelletier/go-toml/v2/unstable"
|
||||
)
|
||||
|
||||
// UpdateClientFunc is a function that lets you to amend http Client behavior used by registry clients.
type UpdateClientFunc func(client *http.Client) error

// hostConfig is the parsed internal form of a single registry host
// endpoint: its scheme/host/path plus TLS, header and capability settings.
type hostConfig struct {
	scheme string
	host   string
	path   string

	// capabilities restrict which operations (pull/resolve/push) may use
	// this endpoint.
	capabilities docker.HostCapabilities

	// caCerts are paths to PEM CA certificate files.
	caCerts []string
	// clientPairs are [cert, key] file-path pairs; the key entry may be
	// empty when the cert file contains both public and private keys.
	clientPairs [][2]string
	// skipVerify, when set, disables server certificate verification.
	skipVerify *bool

	// header is sent with every request to this host.
	header http.Header

	// TODO: Add credential configuration (domain alias, username)
}

// HostOptions is used to configure registry hosts
type HostOptions struct {
	// HostDir returns the host-configuration directory for a host, or a
	// not-found error when none exists.
	HostDir func(string) (string, error)
	// Credentials returns the username/secret pair for a host.
	Credentials func(host string) (string, string, error)
	// DefaultTLS is the TLS configuration applied when a host has no
	// explicit TLS settings of its own.
	DefaultTLS *tls.Config
	// DefaultScheme is used for endpoints that do not specify a scheme.
	DefaultScheme string
	// UpdateClient will be called after creating http.Client object, so clients can provide extra configuration
	UpdateClient   UpdateClientFunc
	AuthorizerOpts []docker.AuthorizerOpt
}
|
||||
|
||||
// ConfigureHosts creates a registry hosts function from the provided
// host creation options. The host directory can read hosts.toml or
// certificate files laid out in the Docker specific layout.
// If a `HostDir` function is not required, defaults are used.
func ConfigureHosts(ctx context.Context, options HostOptions) docker.RegistryHosts {
	return func(host string) ([]docker.RegistryHost, error) {
		var hosts []hostConfig
		if options.HostDir != nil {
			dir, err := options.HostDir(host)
			if err != nil && !errdefs.IsNotFound(err) {
				return nil, err
			}
			if dir != "" {
				log.G(ctx).WithField("dir", dir).Debug("loading host directory")
				hosts, err = loadHostDir(ctx, dir)
				if err != nil {
					return nil, err
				}
			}
		}

		// If hosts was not set, add a default host
		// NOTE: Check nil here and not empty, the host may be
		// intentionally configured to not have any endpoints
		if hosts == nil {
			hosts = make([]hostConfig, 1)
		}
		// The last entry acts as the "default" endpoint: when its host is
		// still empty (no explicit server in hosts.toml), fill it in from
		// the requested host, applying docker.io / localhost conventions.
		if len(hosts) > 0 && hosts[len(hosts)-1].host == "" {
			if host == "docker.io" {
				hosts[len(hosts)-1].scheme = "https"
				hosts[len(hosts)-1].host = "registry-1.docker.io"
			} else if docker.IsLocalhost(host) {
				hosts[len(hosts)-1].host = host
				if options.DefaultScheme == "" {
					_, port, _ := net.SplitHostPort(host)
					if port == "" || port == "443" {
						// If port is default or 443, only use https
						hosts[len(hosts)-1].scheme = "https"
					} else {
						// HTTP fallback logic will be used when protocol is ambiguous
						hosts[len(hosts)-1].scheme = "http"
					}

					// When port is 80, protocol is not ambiguous
					if port != "80" {
						// Skipping TLS verification for localhost
						var skipVerify = true
						hosts[len(hosts)-1].skipVerify = &skipVerify
					}
				} else {
					hosts[len(hosts)-1].scheme = options.DefaultScheme
				}
			} else {
				hosts[len(hosts)-1].host = host
				if options.DefaultScheme != "" {
					hosts[len(hosts)-1].scheme = options.DefaultScheme
				} else {
					hosts[len(hosts)-1].scheme = "https"
				}
			}
			hosts[len(hosts)-1].path = "/v2"
			hosts[len(hosts)-1].capabilities = docker.HostCapabilityPull | docker.HostCapabilityResolve | docker.HostCapabilityPush
		}

		// tlsConfigured indicates that TLS was configured and HTTP endpoints should
		// attempt to use the TLS configuration before falling back to HTTP
		var tlsConfigured bool

		var defaultTLSConfig *tls.Config
		if options.DefaultTLS != nil {
			tlsConfigured = true
			defaultTLSConfig = options.DefaultTLS
		} else {
			defaultTLSConfig = &tls.Config{}
		}

		defaultTransport := &http.Transport{
			Proxy: http.ProxyFromEnvironment,
			DialContext: (&net.Dialer{
				Timeout:       30 * time.Second,
				KeepAlive:     30 * time.Second,
				FallbackDelay: 300 * time.Millisecond,
			}).DialContext,
			MaxIdleConns:          10,
			IdleConnTimeout:       30 * time.Second,
			TLSHandshakeTimeout:   10 * time.Second,
			TLSClientConfig:       defaultTLSConfig,
			ExpectContinueTimeout: 5 * time.Second,
		}

		client := &http.Client{
			Transport: defaultTransport,
		}
		if options.UpdateClient != nil {
			if err := options.UpdateClient(client); err != nil {
				return nil, err
			}
		}

		// Shared authorizer for all hosts that have no per-host TLS client;
		// hosts with dedicated clients get their own authorizer below.
		authOpts := []docker.AuthorizerOpt{docker.WithAuthClient(client)}
		if options.Credentials != nil {
			authOpts = append(authOpts, docker.WithAuthCreds(options.Credentials))
		}
		authOpts = append(authOpts, options.AuthorizerOpts...)
		authorizer := docker.NewDockerAuthorizer(authOpts...)

		rhosts := make([]docker.RegistryHost, len(hosts))
		for i, host := range hosts {
			// Allow setting for each host as well
			explicitTLS := tlsConfigured

			// Hosts with their own CA certs, client pairs or skip-verify
			// get a cloned transport and a dedicated client/authorizer so
			// the shared client's TLS config is never mutated.
			if host.caCerts != nil || host.clientPairs != nil || host.skipVerify != nil {
				explicitTLS = true
				tr := defaultTransport.Clone()
				tlsConfig := tr.TLSClientConfig
				if host.skipVerify != nil {
					tlsConfig.InsecureSkipVerify = *host.skipVerify
				}
				if host.caCerts != nil {
					if tlsConfig.RootCAs == nil {
						rootPool, err := rootSystemPool()
						if err != nil {
							return nil, fmt.Errorf("unable to initialize cert pool: %w", err)
						}
						tlsConfig.RootCAs = rootPool
					}
					for _, f := range host.caCerts {
						data, err := os.ReadFile(f)
						if err != nil {
							return nil, fmt.Errorf("unable to read CA cert %q: %w", f, err)
						}
						if !tlsConfig.RootCAs.AppendCertsFromPEM(data) {
							return nil, fmt.Errorf("unable to load CA cert %q", f)
						}
					}
				}

				for _, pair := range host.clientPairs {
					certPEMBlock, err := os.ReadFile(pair[0])
					if err != nil {
						return nil, fmt.Errorf("unable to read CERT file %q: %w", pair[0], err)
					}
					var keyPEMBlock []byte
					if pair[1] != "" {
						keyPEMBlock, err = os.ReadFile(pair[1])
						if err != nil {
							return nil, fmt.Errorf("unable to read CERT file %q: %w", pair[1], err)
						}
					} else {
						// Load key block from same PEM file
						keyPEMBlock = certPEMBlock
					}
					cert, err := tls.X509KeyPair(certPEMBlock, keyPEMBlock)
					if err != nil {
						return nil, fmt.Errorf("failed to load X509 key pair: %w", err)
					}

					tlsConfig.Certificates = append(tlsConfig.Certificates, cert)
				}

				// Shallow copy of the shared client, swapping in the
				// per-host transport.
				c := *client
				c.Transport = tr
				if options.UpdateClient != nil {
					if err := options.UpdateClient(&c); err != nil {
						return nil, err
					}
				}

				rhosts[i].Client = &c
				rhosts[i].Authorizer = docker.NewDockerAuthorizer(append(authOpts, docker.WithAuthClient(&c))...)
			} else {
				rhosts[i].Client = client
				rhosts[i].Authorizer = authorizer
			}

			// When TLS has been configured for the operation or host and
			// the protocol from the port number is ambiguous, use the
			// docker.HTTPFallback roundtripper to catch TLS errors and re-attempt the
			// request as http. This allows preference for https when configured but
			// also catches TLS errors early enough in the request to avoid sending
			// the request twice or consuming the request body.
			if host.scheme == "http" && explicitTLS {
				_, port, _ := net.SplitHostPort(host.host)
				if port != "" && port != "80" {
					log.G(ctx).WithField("host", host.host).Info("host will try HTTPS first since it is configured for HTTP with a TLS configuration, consider changing host to HTTPS or removing unused TLS configuration")
					host.scheme = "https"
					rhosts[i].Client.Transport = docker.HTTPFallback{RoundTripper: rhosts[i].Client.Transport}
				}
			}

			rhosts[i].Scheme = host.scheme
			rhosts[i].Host = host.host
			rhosts[i].Path = host.path
			rhosts[i].Capabilities = host.capabilities
			rhosts[i].Header = host.header
		}

		return rhosts, nil
	}
}
|
||||
|
||||
// HostDirFromRoot returns a function which finds a host directory
|
||||
// based at the given root.
|
||||
func HostDirFromRoot(root string) func(string) (string, error) {
|
||||
return func(host string) (string, error) {
|
||||
for _, p := range hostPaths(root, host) {
|
||||
if _, err := os.Stat(p); err == nil {
|
||||
return p, nil
|
||||
} else if !os.IsNotExist(err) {
|
||||
return "", err
|
||||
}
|
||||
}
|
||||
return "", errdefs.ErrNotFound
|
||||
}
|
||||
}
|
||||
|
||||
// hostDirectory converts ":port" to "_port_" in directory names, e.g.
// "registry:5000" becomes "registry_5000_". Hosts without a port (or
// with a leading colon) are returned unchanged.
func hostDirectory(host string) string {
	if idx := strings.LastIndex(host, ":"); idx > 0 {
		return host[:idx] + "_" + host[idx+1:] + "_"
	}
	return host
}
|
||||
|
||||
func loadHostDir(ctx context.Context, hostsDir string) ([]hostConfig, error) {
|
||||
b, err := os.ReadFile(filepath.Join(hostsDir, "hosts.toml"))
|
||||
if err != nil && !os.IsNotExist(err) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(b) == 0 {
|
||||
// If hosts.toml does not exist, fallback to checking for
|
||||
// certificate files based on Docker's certificate file
|
||||
// pattern (".crt", ".cert", ".key" files)
|
||||
return loadCertFiles(ctx, hostsDir)
|
||||
}
|
||||
|
||||
hosts, err := parseHostsFile(hostsDir, b)
|
||||
if err != nil {
|
||||
log.G(ctx).WithError(err).Error("failed to decode hosts.toml")
|
||||
// Fallback to checking certificate files
|
||||
return loadCertFiles(ctx, hostsDir)
|
||||
}
|
||||
|
||||
return hosts, nil
|
||||
}
|
||||
|
||||
// hostFileConfig is the TOML representation of a single host entry in a
// hosts.toml file (either the top-level server entry or a [host."..."] table).
type hostFileConfig struct {
	// Capabilities determine what operations a host is
	// capable of performing. Allowed values
	//  - pull
	//  - resolve
	//  - push
	Capabilities []string `toml:"capabilities"`

	// CACert are the public key certificates for TLS
	// Accepted types
	//  - string - Single file with certificate(s)
	//  - []string - Multiple files with certificates
	CACert interface{} `toml:"ca"`

	// Client keypair(s) for TLS with client authentication
	// Accepted types
	//  - string - Single file with public and private keys
	//  - []string - Multiple files with public and private keys
	//  - [][2]string - Multiple keypairs with public and private keys in separate files
	Client interface{} `toml:"client"`

	// SkipVerify skips verification of the server's certificate chain
	// and host name. This should only be used for testing or in
	// combination with other methods of verifying connections.
	SkipVerify *bool `toml:"skip_verify"`

	// Header are additional headers to send to the server
	Header map[string]interface{} `toml:"header"`

	// OverridePath indicates the API root endpoint is defined in the URL
	// path rather than by the API specification.
	// This may be used with non-compliant OCI registries to override the
	// API root endpoint.
	OverridePath bool `toml:"override_path"`

	// TODO: Credentials: helper? name? username? alternate domain? token?
}
|
||||
|
||||
func parseHostsFile(baseDir string, b []byte) ([]hostConfig, error) {
|
||||
orderedHosts, err := getSortedHosts(b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
c := struct {
|
||||
hostFileConfig
|
||||
// Server specifies the default server. When `host` is
|
||||
// also specified, those hosts are tried first.
|
||||
Server string `toml:"server"`
|
||||
// HostConfigs store the per-host configuration
|
||||
HostConfigs map[string]hostFileConfig `toml:"host"`
|
||||
}{}
|
||||
|
||||
var (
|
||||
hosts []hostConfig
|
||||
)
|
||||
|
||||
if err := toml.Unmarshal(b, &c); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Parse hosts array
|
||||
for _, host := range orderedHosts {
|
||||
config := c.HostConfigs[host]
|
||||
|
||||
parsed, err := parseHostConfig(host, baseDir, config)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
hosts = append(hosts, parsed)
|
||||
}
|
||||
|
||||
// Parse root host config and append it as the last element
|
||||
parsed, err := parseHostConfig(c.Server, baseDir, c.hostFileConfig)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
hosts = append(hosts, parsed)
|
||||
|
||||
return hosts, nil
|
||||
}
|
||||
|
||||
// parseHostConfig converts one TOML host entry into the internal hostConfig
// form. server may be empty, in which case scheme/host/path are left unset
// (the caller fills in defaults). Relative certificate paths are resolved
// against baseDir.
func parseHostConfig(server string, baseDir string, config hostFileConfig) (hostConfig, error) {
	var (
		result = hostConfig{}
		err    error
	)

	if server != "" {
		// Bare host names default to https; "/v2" is appended to the path
		// unless override_path opts out of the OCI API layout.
		if !strings.HasPrefix(server, "http") {
			server = "https://" + server
		}
		u, err := url.Parse(server)
		if err != nil {
			return hostConfig{}, fmt.Errorf("unable to parse server %v: %w", server, err)
		}
		result.scheme = u.Scheme
		result.host = u.Host
		if len(u.Path) > 0 {
			u.Path = path.Clean(u.Path)
			if !strings.HasSuffix(u.Path, "/v2") && !config.OverridePath {
				u.Path = u.Path + "/v2"
			}
		} else if !config.OverridePath {
			u.Path = "/v2"
		}
		result.path = u.Path
	}

	result.skipVerify = config.SkipVerify

	// Capabilities default to all (pull|resolve|push) when unspecified.
	if len(config.Capabilities) > 0 {
		for _, c := range config.Capabilities {
			switch strings.ToLower(c) {
			case "pull":
				result.capabilities |= docker.HostCapabilityPull
			case "resolve":
				result.capabilities |= docker.HostCapabilityResolve
			case "push":
				result.capabilities |= docker.HostCapabilityPush
			default:
				return hostConfig{}, fmt.Errorf("unknown capability %v", c)
			}
		}
	} else {
		result.capabilities = docker.HostCapabilityPull | docker.HostCapabilityResolve | docker.HostCapabilityPush
	}

	// "ca" accepts a single path or a list of paths (see hostFileConfig).
	if config.CACert != nil {
		switch cert := config.CACert.(type) {
		case string:
			result.caCerts = []string{makeAbsPath(cert, baseDir)}
		case []interface{}:
			result.caCerts, err = makeStringSlice(cert, func(p string) string {
				return makeAbsPath(p, baseDir)
			})
			if err != nil {
				return hostConfig{}, err
			}
		default:
			return hostConfig{}, fmt.Errorf("invalid type %v for \"ca\"", cert)
		}
	}

	// "client" accepts a single path, a list of paths, or a list of
	// [cert, key] pairs (see hostFileConfig).
	if config.Client != nil {
		switch client := config.Client.(type) {
		case string:
			result.clientPairs = [][2]string{{makeAbsPath(client, baseDir), ""}}
		case []interface{}:
			// []string or [][2]string
			for _, pairs := range client {
				switch p := pairs.(type) {
				case string:
					result.clientPairs = append(result.clientPairs, [2]string{makeAbsPath(p, baseDir), ""})
				case []interface{}:
					slice, err := makeStringSlice(p, func(s string) string {
						return makeAbsPath(s, baseDir)
					})
					if err != nil {
						return hostConfig{}, err
					}
					if len(slice) != 2 {
						return hostConfig{}, fmt.Errorf("invalid pair %v for \"client\"", p)
					}

					var pair [2]string
					copy(pair[:], slice)
					result.clientPairs = append(result.clientPairs, pair)
				default:
					return hostConfig{}, fmt.Errorf("invalid type %T for \"client\"", p)
				}
			}
		default:
			return hostConfig{}, fmt.Errorf("invalid type %v for \"client\"", client)
		}
	}

	// Header values may be a single string or a list of strings.
	if config.Header != nil {
		header := http.Header{}
		for key, ty := range config.Header {
			switch value := ty.(type) {
			case string:
				header[key] = []string{value}
			case []interface{}:
				header[key], err = makeStringSlice(value, nil)
				if err != nil {
					return hostConfig{}, err
				}
			default:
				return hostConfig{}, fmt.Errorf("invalid type %v for header %q", ty, key)
			}
		}
		result.header = header
	}

	return result, nil
}
|
||||
|
||||
// getSortedHosts returns the list of hosts in the order are they defined in the file.
|
||||
func getSortedHosts(b []byte) ([]string, error) {
|
||||
var hostsInOrder []string
|
||||
|
||||
// Use toml unstable package for directly parsing toml
|
||||
// See https://github.com/pelletier/go-toml/discussions/801#discussioncomment-7083586
|
||||
p := tomlu.Parser{}
|
||||
p.Reset(b)
|
||||
|
||||
var host string
|
||||
// iterate over all top level expressions
|
||||
for p.NextExpression() {
|
||||
e := p.Expression()
|
||||
|
||||
if e.Kind != tomlu.Table {
|
||||
continue
|
||||
}
|
||||
|
||||
// Let's look at the key. It's an iterator over the multiple dotted parts of the key.
|
||||
var parts []string
|
||||
for it := e.Key(); it.Next(); {
|
||||
parts = append(parts, string(it.Node().Data))
|
||||
}
|
||||
|
||||
// only consider keys that look like `hosts.XXX`
|
||||
// and skip subtables such as `hosts.XXX.header`
|
||||
if len(parts) < 2 || parts[0] != "host" || parts[1] == host {
|
||||
continue
|
||||
}
|
||||
|
||||
host = parts[1]
|
||||
hostsInOrder = append(hostsInOrder, host)
|
||||
}
|
||||
|
||||
return hostsInOrder, nil
|
||||
}
|
||||
|
||||
// makeStringSlice is a helper func to convert from []interface{} to []string.
// Additionally an optional cb func may be passed to perform string mapping.
// A non-string element yields an error.
func makeStringSlice(slice []interface{}, cb func(string) string) ([]string, error) {
	out := make([]string, len(slice))
	for i, value := range slice {
		str, ok := value.(string)
		if !ok {
			return nil, fmt.Errorf("unable to cast %v to string", value)
		}

		if cb == nil {
			out[i] = str
			continue
		}
		out[i] = cb(str)
	}
	return out, nil
}
|
||||
|
||||
// makeAbsPath resolves p against base unless p is already absolute.
func makeAbsPath(p string, base string) string {
	if !filepath.IsAbs(p) {
		return filepath.Join(base, p)
	}
	return p
}
}
|
||||
|
||||
// loadCertsDir loads certs from certsDir like "/etc/docker/certs.d" .
|
||||
// Compatible with Docker file layout
|
||||
// - files ending with ".crt" are treated as CA certificate files
|
||||
// - files ending with ".cert" are treated as client certificates, and
|
||||
// files with the same name but ending with ".key" are treated as the
|
||||
// corresponding private key.
|
||||
// NOTE: If a ".key" file is missing, this function will just return
|
||||
// the ".cert", which may contain the private key. If the ".cert" file
|
||||
// does not contain the private key, the caller should detect and error.
|
||||
func loadCertFiles(ctx context.Context, certsDir string) ([]hostConfig, error) {
|
||||
fs, err := os.ReadDir(certsDir)
|
||||
if err != nil && !os.IsNotExist(err) {
|
||||
return nil, err
|
||||
}
|
||||
hosts := make([]hostConfig, 1)
|
||||
for _, f := range fs {
|
||||
if f.IsDir() {
|
||||
continue
|
||||
}
|
||||
if strings.HasSuffix(f.Name(), ".crt") {
|
||||
hosts[0].caCerts = append(hosts[0].caCerts, filepath.Join(certsDir, f.Name()))
|
||||
}
|
||||
if strings.HasSuffix(f.Name(), ".cert") {
|
||||
var pair [2]string
|
||||
certFile := f.Name()
|
||||
pair[0] = filepath.Join(certsDir, certFile)
|
||||
// Check if key also exists
|
||||
keyFile := filepath.Join(certsDir, certFile[:len(certFile)-5]+".key")
|
||||
if _, err := os.Stat(keyFile); err == nil {
|
||||
pair[1] = keyFile
|
||||
} else if !os.IsNotExist(err) {
|
||||
return nil, err
|
||||
}
|
||||
hosts[0].clientPairs = append(hosts[0].clientPairs, pair)
|
||||
}
|
||||
}
|
||||
return hosts, nil
|
||||
}
|
||||
609
core/remotes/docker/config/hosts_test.go
Normal file
609
core/remotes/docker/config/hosts_test.go
Normal file
@@ -0,0 +1,609 @@
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package config
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/containerd/containerd/v2/core/remotes/docker"
|
||||
"github.com/containerd/log/logtest"
|
||||
)
|
||||
|
||||
// allCaps is the capability mask granting pull, resolve and push.
const allCaps = docker.HostCapabilityPull | docker.HostCapabilityResolve | docker.HostCapabilityPush
|
||||
|
||||
// TestDefaultHosts verifies that ConfigureHosts with empty options applies
// the built-in defaults, in particular the docker.io -> registry-1.docker.io
// mapping with https, /v2 and all capabilities.
func TestDefaultHosts(t *testing.T) {
	ctx := logtest.WithT(context.Background(), t)
	resolve := ConfigureHosts(ctx, HostOptions{})

	for _, tc := range []struct {
		host     string
		expected []docker.RegistryHost
	}{
		{
			host: "docker.io",
			expected: []docker.RegistryHost{
				{
					Scheme:       "https",
					Host:         "registry-1.docker.io",
					Path:         "/v2",
					Capabilities: allCaps,
				},
			},
		},
	} {
		hosts, err := resolve(tc.host)
		if err != nil {
			t.Errorf("[%s] resolve failed: %v", tc.host, err)
			continue
		}
		if len(hosts) != len(tc.expected) {
			t.Errorf("[%s] unexpected number of hosts %d, expected %d", tc.host, len(hosts), len(tc.expected))
			continue
		}
		for j := range hosts {
			if !compareRegistryHost(hosts[j], tc.expected[j]) {
				t.Errorf("[%s] [%d] unexpected host %v, expected %v", tc.host, j, hosts[j], tc.expected[j])
				break
			}
		}
	}
}
|
||||
|
||||
// TestParseHostFile parses a representative hosts.toml document covering
// every supported field shape (single/multiple CA files, client pair
// variants, skip_verify, headers, override_path) and checks the decoded
// hostConfigs match, with the root server entry appearing last.
func TestParseHostFile(t *testing.T) {
	const testtoml = `
server = "https://test-default.registry"
ca = "/etc/path/default"
[header]
x-custom-1 = "custom header"

[host."https://mirror.registry"]
capabilities = ["pull"]
ca = "/etc/certs/mirror.pem"
skip_verify = false
[host."https://mirror.registry".header]
x-custom-2 = ["value1", "value2"]

[host."https://mirror-bak.registry/us"]
capabilities = ["pull"]
skip_verify = true

[host."http://mirror.registry"]
capabilities = ["pull"]

[host."https://test-1.registry"]
capabilities = ["pull", "resolve", "push"]
ca = ["/etc/certs/test-1-ca.pem", "/etc/certs/special.pem"]
client = [["/etc/certs/client.cert", "/etc/certs/client.key"],["/etc/certs/client.pem", ""]]

[host."https://test-2.registry"]
client = "/etc/certs/client.pem"

[host."https://test-3.registry"]
client = ["/etc/certs/client-1.pem", "/etc/certs/client-2.pem"]

[host."https://noncompliantmirror.registry/v2/namespaceprefix"]
capabilities = ["pull"]
override_path = true

[host."https://noprefixnoncompliant.registry"]
override_path = true

[host."https://onlyheader.registry".header]
x-custom-1 = "justaheader"
`
	var tb, fb = true, false
	// expected is in file order; the default (server) entry is last.
	expected := []hostConfig{
		{
			scheme:       "https",
			host:         "mirror.registry",
			path:         "/v2",
			capabilities: docker.HostCapabilityPull,
			caCerts:      []string{filepath.FromSlash("/etc/certs/mirror.pem")},
			skipVerify:   &fb,
			header:       http.Header{"x-custom-2": {"value1", "value2"}},
		},
		{
			scheme:       "https",
			host:         "mirror-bak.registry",
			path:         "/us/v2",
			capabilities: docker.HostCapabilityPull,
			skipVerify:   &tb,
		},
		{
			scheme:       "http",
			host:         "mirror.registry",
			path:         "/v2",
			capabilities: docker.HostCapabilityPull,
		},
		{
			scheme:       "https",
			host:         "test-1.registry",
			path:         "/v2",
			capabilities: allCaps,
			caCerts:      []string{filepath.FromSlash("/etc/certs/test-1-ca.pem"), filepath.FromSlash("/etc/certs/special.pem")},
			clientPairs: [][2]string{
				{filepath.FromSlash("/etc/certs/client.cert"), filepath.FromSlash("/etc/certs/client.key")},
				{filepath.FromSlash("/etc/certs/client.pem"), ""},
			},
		},
		{
			scheme:       "https",
			host:         "test-2.registry",
			path:         "/v2",
			capabilities: allCaps,
			clientPairs: [][2]string{
				{filepath.FromSlash("/etc/certs/client.pem")},
			},
		},
		{
			scheme:       "https",
			host:         "test-3.registry",
			path:         "/v2",
			capabilities: allCaps,
			clientPairs: [][2]string{
				{filepath.FromSlash("/etc/certs/client-1.pem")},
				{filepath.FromSlash("/etc/certs/client-2.pem")},
			},
		},
		{
			scheme:       "https",
			host:         "noncompliantmirror.registry",
			path:         "/v2/namespaceprefix",
			capabilities: docker.HostCapabilityPull,
		},
		{
			scheme:       "https",
			host:         "noprefixnoncompliant.registry",
			capabilities: allCaps,
		},
		{
			scheme:       "https",
			host:         "onlyheader.registry",
			path:         "/v2",
			capabilities: allCaps,
			header:       http.Header{"x-custom-1": {"justaheader"}},
		},
		{
			scheme:       "https",
			host:         "test-default.registry",
			path:         "/v2",
			capabilities: allCaps,
			caCerts:      []string{filepath.FromSlash("/etc/path/default")},
			header:       http.Header{"x-custom-1": {"custom header"}},
		},
	}
	hosts, err := parseHostsFile("", []byte(testtoml))
	if err != nil {
		t.Fatal(err)
	}

	// Dump both sides on failure for easier diffing.
	defer func() {
		if t.Failed() {
			t.Log("HostConfigs...\nActual:\n" + printHostConfig(hosts) + "Expected:\n" + printHostConfig(expected))
		}
	}()

	if len(hosts) != len(expected) {
		t.Fatalf("Unexpected number of hosts %d, expected %d", len(hosts), len(expected))
	}

	for i := range hosts {
		if !compareHostConfig(hosts[i], expected[i]) {
			t.Fatalf("Mismatch at host %d", i)
		}
	}
}
|
||||
|
||||
func TestLoadCertFiles(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
|
||||
type testCase struct {
|
||||
input hostConfig
|
||||
}
|
||||
cases := map[string]testCase{
|
||||
"crt only": {
|
||||
input: hostConfig{host: "testing.io", caCerts: []string{filepath.Join(dir, "testing.io", "ca.crt")}},
|
||||
},
|
||||
"crt and cert pair": {
|
||||
input: hostConfig{
|
||||
host: "testing.io",
|
||||
caCerts: []string{filepath.Join(dir, "testing.io", "ca.crt")},
|
||||
clientPairs: [][2]string{
|
||||
{
|
||||
filepath.Join(dir, "testing.io", "client.cert"),
|
||||
filepath.Join(dir, "testing.io", "client.key"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"cert pair only": {
|
||||
input: hostConfig{
|
||||
host: "testing.io",
|
||||
clientPairs: [][2]string{
|
||||
{
|
||||
filepath.Join(dir, "testing.io", "client.cert"),
|
||||
filepath.Join(dir, "testing.io", "client.key"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for name, tc := range cases {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
|
||||
hostDir := filepath.Join(dir, tc.input.host)
|
||||
if err := os.MkdirAll(hostDir, 0700); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(hostDir)
|
||||
|
||||
for _, f := range tc.input.caCerts {
|
||||
if err := os.WriteFile(f, testKey, 0600); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
for _, pair := range tc.input.clientPairs {
|
||||
if err := os.WriteFile(pair[0], testKey, 0600); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := os.WriteFile(pair[1], testKey, 0600); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
configs, err := loadHostDir(context.Background(), hostDir)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(configs) != 1 {
|
||||
t.Fatalf("\nexpected:\n%+v\ngot:\n%+v", tc.input, configs)
|
||||
}
|
||||
|
||||
cfg := configs[0]
|
||||
cfg.host = tc.input.host
|
||||
|
||||
if !compareHostConfig(cfg, tc.input) {
|
||||
t.Errorf("\nexpected:\n%+v:\n\ngot:\n%+v", tc.input, cfg)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestHTTPFallback checks which scheme ConfigureHosts resolves for a given
// host/HostOptions combination, and whether the resulting client transport
// is wrapped with docker.HTTPFallback (detected below via a type assertion
// on the transport).
func TestHTTPFallback(t *testing.T) {
	for _, tc := range []struct {
		host           string
		opts           HostOptions
		expectedScheme string
		usesFallback   bool
	}{
		// Localhost with a non-standard port: https is attempted first but
		// the fallback stays enabled, even with DefaultScheme "http".
		{
			host: "localhost:8080",
			opts: HostOptions{
				DefaultScheme: "http",
				DefaultTLS: &tls.Config{
					InsecureSkipVerify: true,
				},
			},
			expectedScheme: "https",
			usesFallback:   true,
		},
		{
			host: "localhost:8080",
			opts: HostOptions{
				DefaultScheme: "https",
				DefaultTLS: &tls.Config{
					InsecureSkipVerify: true,
				},
			},
			expectedScheme: "https",
			usesFallback:   false,
		},
		{
			host:           "localhost:8080",
			opts:           HostOptions{},
			expectedScheme: "https",
			usesFallback:   true,
		},
		// Well-known ports (80/443) pin the scheme with no fallback.
		{
			host:           "localhost:80",
			opts:           HostOptions{},
			expectedScheme: "http",
			usesFallback:   false,
		},
		{
			host:           "localhost:443",
			opts:           HostOptions{},
			expectedScheme: "https",
			usesFallback:   false,
		},
		{
			host: "localhost:80",
			opts: HostOptions{
				DefaultScheme: "http",
				DefaultTLS: &tls.Config{
					InsecureSkipVerify: true,
				},
			},
			expectedScheme: "http",
			usesFallback:   false,
		},
		// Localhost without a port: the configured default scheme wins and
		// no fallback is used.
		{
			host: "localhost",
			opts: HostOptions{
				DefaultScheme: "http",
				DefaultTLS: &tls.Config{
					InsecureSkipVerify: true,
				},
			},
			expectedScheme: "http",
			usesFallback:   false,
		},
		{
			host: "localhost",
			opts: HostOptions{
				DefaultScheme: "https",
				DefaultTLS: &tls.Config{
					InsecureSkipVerify: true,
				},
			},
			expectedScheme: "https",
			usesFallback:   false,
		},
		{
			host:           "localhost",
			opts:           HostOptions{},
			expectedScheme: "https",
			usesFallback:   false,
		},
		{
			host:           "localhost:5000",
			opts:           HostOptions{},
			expectedScheme: "https",
			usesFallback:   true,
		},
		// Non-local hosts default to https with no fallback.
		{
			host:           "example.com",
			opts:           HostOptions{},
			expectedScheme: "https",
			usesFallback:   false,
		},

		{
			host: "example.com",
			opts: HostOptions{
				DefaultScheme: "http",
				DefaultTLS: &tls.Config{
					InsecureSkipVerify: true,
				},
			},
			expectedScheme: "http",
			usesFallback:   false,
		},
		// Non-local host with a custom port and DefaultScheme "http":
		// https is preferred but falls back.
		{
			host: "example.com:5000",
			opts: HostOptions{
				DefaultScheme: "http",
				DefaultTLS: &tls.Config{
					InsecureSkipVerify: true,
				},
			},
			expectedScheme: "https",
			usesFallback:   true,
		},
		{
			host:           "example.com:5000",
			opts:           HostOptions{},
			expectedScheme: "https",
			usesFallback:   false,
		},
		{
			host: "example2.com",
			opts: HostOptions{
				DefaultScheme: "http",
			},
			expectedScheme: "http",
			usesFallback:   false,
		},
		// Loopback IPs (v4 and v6) behave like localhost: the fallback is
		// only enabled together with a non-standard port.
		{
			host:           "127.0.0.254:5000",
			opts:           HostOptions{},
			expectedScheme: "https",
			usesFallback:   true,
		},
		{
			host:           "127.0.0.254",
			opts:           HostOptions{},
			expectedScheme: "https",
			usesFallback:   false,
		},
		{
			host:           "[::1]:5000",
			opts:           HostOptions{},
			expectedScheme: "https",
			usesFallback:   true,
		},
		{
			host:           "::1",
			opts:           HostOptions{},
			expectedScheme: "https",
			usesFallback:   false,
		},
	} {
		testName := tc.host
		if tc.opts.DefaultScheme != "" {
			testName = testName + "-default-" + tc.opts.DefaultScheme
		}
		t.Run(testName, func(t *testing.T) {
			ctx := logtest.WithT(context.TODO(), t)
			hosts := ConfigureHosts(ctx, tc.opts)
			testHosts, err := hosts(tc.host)
			if err != nil {
				t.Fatal(err)
			}
			if len(testHosts) != 1 {
				t.Fatalf("expected a single host for localhost config, got %d hosts", len(testHosts))
			}
			if testHosts[0].Scheme != tc.expectedScheme {
				t.Fatalf("expected %s scheme for localhost with tls config, got %q", tc.expectedScheme, testHosts[0].Scheme)
			}
			// Fallback presence is observable as the concrete transport type.
			_, ok := testHosts[0].Client.Transport.(docker.HTTPFallback)
			if tc.usesFallback && !ok {
				t.Fatal("expected http fallback configured for defaulted localhost endpoint")
			} else if ok && !tc.usesFallback {
				t.Fatal("expected no http fallback configured for defaulted localhost endpoint")
			}
		})
	}
}
|
||||
|
||||
func compareRegistryHost(j, k docker.RegistryHost) bool {
|
||||
if j.Scheme != k.Scheme {
|
||||
return false
|
||||
}
|
||||
if j.Host != k.Host {
|
||||
return false
|
||||
}
|
||||
if j.Path != k.Path {
|
||||
return false
|
||||
}
|
||||
if j.Capabilities != k.Capabilities {
|
||||
return false
|
||||
}
|
||||
// Not comparing TLS configs or authorizations
|
||||
return true
|
||||
}
|
||||
|
||||
func compareHostConfig(j, k hostConfig) bool {
|
||||
if j.scheme != k.scheme {
|
||||
return false
|
||||
}
|
||||
if j.host != k.host {
|
||||
return false
|
||||
}
|
||||
if j.path != k.path {
|
||||
return false
|
||||
}
|
||||
if j.capabilities != k.capabilities {
|
||||
return false
|
||||
}
|
||||
|
||||
if len(j.caCerts) != len(k.caCerts) {
|
||||
return false
|
||||
}
|
||||
for i := range j.caCerts {
|
||||
if j.caCerts[i] != k.caCerts[i] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
if len(j.clientPairs) != len(k.clientPairs) {
|
||||
return false
|
||||
}
|
||||
for i := range j.clientPairs {
|
||||
if j.clientPairs[i][0] != k.clientPairs[i][0] {
|
||||
return false
|
||||
}
|
||||
if j.clientPairs[i][1] != k.clientPairs[i][1] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
if j.skipVerify != nil && k.skipVerify != nil {
|
||||
if *j.skipVerify != *k.skipVerify {
|
||||
return false
|
||||
}
|
||||
} else if j.skipVerify != nil || k.skipVerify != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
if len(j.header) != len(k.header) {
|
||||
return false
|
||||
}
|
||||
for key := range j.header {
|
||||
if len(j.header[key]) != len(k.header[key]) {
|
||||
return false
|
||||
}
|
||||
for i := range j.header[key] {
|
||||
if j.header[key][i] != k.header[key][i] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
func printHostConfig(hc []hostConfig) string {
|
||||
b := bytes.NewBuffer(nil)
|
||||
for i := range hc {
|
||||
fmt.Fprintf(b, "\t[%d]\tscheme: %q\n", i, hc[i].scheme)
|
||||
fmt.Fprintf(b, "\t\thost: %q\n", hc[i].host)
|
||||
fmt.Fprintf(b, "\t\tpath: %q\n", hc[i].path)
|
||||
fmt.Fprintf(b, "\t\tcaps: %03b\n", hc[i].capabilities)
|
||||
fmt.Fprintf(b, "\t\tca: %#v\n", hc[i].caCerts)
|
||||
fmt.Fprintf(b, "\t\tclients: %#v\n", hc[i].clientPairs)
|
||||
if hc[i].skipVerify == nil {
|
||||
fmt.Fprintf(b, "\t\tskip-verify: %v\n", hc[i].skipVerify)
|
||||
} else {
|
||||
fmt.Fprintf(b, "\t\tskip-verify: %t\n", *hc[i].skipVerify)
|
||||
}
|
||||
fmt.Fprintf(b, "\t\theader: %#v\n", hc[i].header)
|
||||
}
|
||||
return b.String()
|
||||
}
|
||||
|
||||
var (
	// testKey is a throwaway RSA private key used only as placeholder file
	// content when the tests above write certificate and key files to disk;
	// it is never parsed or used for actual cryptography.
	testKey = []byte(`-----BEGIN PRIVATE KEY-----
MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQDa+zvPgFXwra4S
0DzEWRgZHxVTDG1sJsnN/jOaHCNpRyABGVW5kdei9WFWv3dpiELI+guQMjdUL++w
M68bs6cXKW+1nW6u5uWuGwklOwkoKoeHkkn/vHef7ybk+5qdk6AYY0DKQsrBBOvj
f0WAnG+1xi8VIOEBmce0/47MexOiuILVkjokgdmDCOc8ShkT6/EJTCsI1wDew/4G
9IiRzw2xSM0ZATAtEC3HEBRLJGWZQtuKlLCuzJ+erOWUcg2cjnSgR3PmaAXE//5g
SoeqEbtTo1satf9AR4VvreIAI8m0eyo8ABMLTkZovEFcUUHetL63hdqItjCeRfrQ
zK4LMRFbAgMBAAECggEBAJtP6UHo0gtcA8SQMSlJz4+xvhwjClDUyfjyPIMnRe5b
ZdWhtG1jhT+tLhaqwfT1kfidcCobk6aAQU4FukK5jt8cooB7Yo9mcKylvDzNvFbi
ozGCjj113JpwsnNiCG2O0NO7Qa6y5L810GCQWik3yvtvzuD7atsJyN0VDKD3Ahw7
1X8z76grZFlhVMCTAA3vAJ2y2p3sd+TGC/PIhnsvChwxEorGCnMj93mBaUI7zZRY
EZhlk4ZvC9sUvlVUuYC+wAHjasgN9s3AzsOBSx+Xt3NaXQHzhL0mVo/vu/pjjFBs
WBLR1PBoIfveTJPOp+Hrr4cuCK0NuX9sWlWPYLl5A2ECgYEA5fq3n4PhbJ2BuTS5
AVgOmjRpk1eogb6aSY+cx7Mr++ADF9EYXc5tgKoUsDeeiiyK2lv6IKavoTWT1kdd
shiclyEzp2CxG5GtbC/g2XHiBLepgo1fjfev3btCmIeGVBjglOx4F3gEsRygrAID
zcz94m2I+uqLT8hvWnccIqScglkCgYEA88H2ji4Nvx6TmqCLcER0vNDVoxxDfgGb
iohvenD2jmmdTnezTddsgECAI8L0BPNS/0vBCduTjs5BqhKbIfQvuK5CANMUcxuQ
twWH8kPvTYJVgsmWP6sSXSz3PohWC5EA9xACExGtyN6d7sLUCV0SBhjlcgMvGuDM
lP6NjyyWctMCgYBKdfGr+QQsqZaNw48+6ybXMK8aIKCTWYYU2SW21sEf7PizZmTQ
Qnzb0rWeFHQFYsSWTH9gwPdOZ8107GheuG9C02IpCDpvpawTwjC31pKKWnjMpz9P
9OkBDpdSUVbhtahJL4L2fkpumck/x+s5X+y3uiVGsFfovgmnrbbzVH7ECQKBgQCC
MYs7DaYR+obkA/P2FtozL2esIyB5YOpu58iDIWrPTeHTU2PVo8Y0Cj9m2m3zZvNh
oFiOp1T85XV1HVL2o7IJdimSvyshAAwfdTjTUS2zvHVn0bwKbZj1Y1r7b15l9yEI
1OgGv16O9zhrmmweRDOoRgvnBYRXWtJqkjuRyULiOQKBgQC/lSYigV32Eb8Eg1pv
7OcPWv4qV4880lRE0MXuQ4VFa4+pqvdziYFYQD4jDYJ4IX9l//bsobL0j7z0P0Gk
wDFti9bRwRoO1ntqoA8n2pDLlLRGl0dyjB6fHzp27oqtyf1HRlHiow7Gqx5b5JOk
tycYKwA3DuaSyqPe6MthLneq8w==
-----END PRIVATE KEY-----
`)
)
|
||||
85
core/remotes/docker/converter.go
Normal file
85
core/remotes/docker/converter.go
Normal file
@@ -0,0 +1,85 @@
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package docker
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
|
||||
"github.com/containerd/containerd/v2/core/content"
|
||||
"github.com/containerd/containerd/v2/core/images"
|
||||
"github.com/containerd/containerd/v2/core/remotes"
|
||||
"github.com/containerd/log"
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
)
|
||||
|
||||
// LegacyConfigMediaType should be replaced by OCI image spec.
//
// More detail: docker/distribution#1622
const LegacyConfigMediaType = "application/octet-stream"

// ConvertManifest changes application/octet-stream to schema2 config media type if need.
//
// NOTE:
// 1. original manifest will be deleted by next gc round.
// 2. don't cover manifest list.
func ConvertManifest(ctx context.Context, store content.Store, desc ocispec.Descriptor) (ocispec.Descriptor, error) {
	// Only single image manifests are handled; anything else (including
	// manifest lists/indexes) is returned unchanged.
	if !images.IsManifestType(desc.MediaType) {
		log.G(ctx).Warnf("do nothing for media type: %s", desc.MediaType)
		return desc, nil
	}

	// read manifest data
	mb, err := content.ReadBlob(ctx, store, desc)
	if err != nil {
		return ocispec.Descriptor{}, fmt.Errorf("failed to read index data: %w", err)
	}

	var manifest ocispec.Manifest
	if err := json.Unmarshal(mb, &manifest); err != nil {
		return ocispec.Descriptor{}, fmt.Errorf("failed to unmarshal data into manifest: %w", err)
	}

	// check config media type; manifests whose config already has a proper
	// media type need no conversion.
	if manifest.Config.MediaType != LegacyConfigMediaType {
		return desc, nil
	}

	manifest.Config.MediaType = images.MediaTypeDockerSchema2Config
	data, err := json.MarshalIndent(manifest, "", "   ")
	if err != nil {
		return ocispec.Descriptor{}, fmt.Errorf("failed to marshal manifest: %w", err)
	}

	// update manifest with gc labels; the descriptor must be re-digested
	// because the serialized bytes changed.
	desc.Digest = digest.Canonical.FromBytes(data)
	desc.Size = int64(len(data))

	// Label the new manifest blob with gc references to the config and
	// every layer so garbage collection keeps them alive.
	labels := map[string]string{}
	for i, c := range append([]ocispec.Descriptor{manifest.Config}, manifest.Layers...) {
		labels[fmt.Sprintf("containerd.io/gc.ref.content.%d", i)] = c.Digest.String()
	}

	ref := remotes.MakeRefKey(ctx, desc)
	if err := content.WriteBlob(ctx, store, ref, bytes.NewReader(data), desc, content.WithLabels(labels)); err != nil {
		return ocispec.Descriptor{}, fmt.Errorf("failed to update content: %w", err)
	}
	return desc, nil
}
|
||||
54
core/remotes/docker/converter_fuzz.go
Normal file
54
core/remotes/docker/converter_fuzz.go
Normal file
@@ -0,0 +1,54 @@
|
||||
//go:build gofuzz
|
||||
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package docker
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
|
||||
fuzz "github.com/AdaLogics/go-fuzz-headers"
|
||||
"github.com/containerd/containerd/v2/plugins/content/local"
|
||||
"github.com/containerd/log"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
)
|
||||
|
||||
func FuzzConvertManifest(data []byte) int {
|
||||
ctx := context.Background()
|
||||
|
||||
// Do not log the message below
|
||||
// level=warning msg="do nothing for media type: ..."
|
||||
log.G(ctx).Logger.SetLevel(log.PanicLevel)
|
||||
|
||||
f := fuzz.NewConsumer(data)
|
||||
desc := ocispec.Descriptor{}
|
||||
err := f.GenerateStruct(&desc)
|
||||
if err != nil {
|
||||
return 0
|
||||
}
|
||||
tmpdir, err := os.MkdirTemp("", "fuzzing-")
|
||||
if err != nil {
|
||||
return 0
|
||||
}
|
||||
cs, err := local.NewStore(tmpdir)
|
||||
if err != nil {
|
||||
return 0
|
||||
}
|
||||
_, _ = ConvertManifest(ctx, cs, desc)
|
||||
return 1
|
||||
}
|
||||
283
core/remotes/docker/errcode.go
Normal file
283
core/remotes/docker/errcode.go
Normal file
@@ -0,0 +1,283 @@
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package docker
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// ErrorCoder is the base interface for ErrorCode and Error allowing
|
||||
// users of each to just call ErrorCode to get the real ID of each
|
||||
type ErrorCoder interface {
|
||||
ErrorCode() ErrorCode
|
||||
}
|
||||
|
||||
// ErrorCode represents the error type. The errors are serialized via strings
|
||||
// and the integer format may change and should *never* be exported.
|
||||
type ErrorCode int
|
||||
|
||||
var _ error = ErrorCode(0)
|
||||
|
||||
// ErrorCode just returns itself
|
||||
func (ec ErrorCode) ErrorCode() ErrorCode {
|
||||
return ec
|
||||
}
|
||||
|
||||
// Error returns the ID/Value
|
||||
func (ec ErrorCode) Error() string {
|
||||
// NOTE(stevvooe): Cannot use message here since it may have unpopulated args.
|
||||
return strings.ToLower(strings.Replace(ec.String(), "_", " ", -1))
|
||||
}
|
||||
|
||||
// Descriptor returns the descriptor for the error code.
|
||||
func (ec ErrorCode) Descriptor() ErrorDescriptor {
|
||||
d, ok := errorCodeToDescriptors[ec]
|
||||
|
||||
if !ok {
|
||||
return ErrorCodeUnknown.Descriptor()
|
||||
}
|
||||
|
||||
return d
|
||||
}
|
||||
|
||||
// String returns the canonical identifier for this error code.
|
||||
func (ec ErrorCode) String() string {
|
||||
return ec.Descriptor().Value
|
||||
}
|
||||
|
||||
// Message returned the human-readable error message for this error code.
|
||||
func (ec ErrorCode) Message() string {
|
||||
return ec.Descriptor().Message
|
||||
}
|
||||
|
||||
// MarshalText encodes the receiver into UTF-8-encoded text and returns the
|
||||
// result.
|
||||
func (ec ErrorCode) MarshalText() (text []byte, err error) {
|
||||
return []byte(ec.String()), nil
|
||||
}
|
||||
|
||||
// UnmarshalText decodes the form generated by MarshalText.
|
||||
func (ec *ErrorCode) UnmarshalText(text []byte) error {
|
||||
desc, ok := idToDescriptors[string(text)]
|
||||
|
||||
if !ok {
|
||||
desc = ErrorCodeUnknown.Descriptor()
|
||||
}
|
||||
|
||||
*ec = desc.Code
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// WithMessage creates a new Error struct based on the passed-in info and
|
||||
// overrides the Message property.
|
||||
func (ec ErrorCode) WithMessage(message string) Error {
|
||||
return Error{
|
||||
Code: ec,
|
||||
Message: message,
|
||||
}
|
||||
}
|
||||
|
||||
// WithDetail creates a new Error struct based on the passed-in info and
|
||||
// set the Detail property appropriately
|
||||
func (ec ErrorCode) WithDetail(detail interface{}) Error {
|
||||
return Error{
|
||||
Code: ec,
|
||||
Message: ec.Message(),
|
||||
}.WithDetail(detail)
|
||||
}
|
||||
|
||||
// WithArgs creates a new Error struct and sets the Args slice
|
||||
func (ec ErrorCode) WithArgs(args ...interface{}) Error {
|
||||
return Error{
|
||||
Code: ec,
|
||||
Message: ec.Message(),
|
||||
}.WithArgs(args...)
|
||||
}
|
||||
|
||||
// Error provides a wrapper around ErrorCode with extra Details provided.
type Error struct {
	Code    ErrorCode   `json:"code"`
	Message string      `json:"message"`
	Detail  interface{} `json:"detail,omitempty"`

	// TODO(duglin): See if we need an "args" property so we can do the
	// variable substitution right before showing the message to the user
}

var _ error = Error{}

// ErrorCode returns the ID/Value of this Error
func (e Error) ErrorCode() ErrorCode {
	return e.Code
}

// Error returns a human readable representation of the error.
func (e Error) Error() string {
	return fmt.Sprintf("%s: %s", e.Code.Error(), e.Message)
}

// WithDetail will return a new Error, based on the current one, but with
// some Detail info added
func (e Error) WithDetail(detail interface{}) Error {
	return Error{
		Code:    e.Code,
		Message: e.Message,
		Detail:  detail,
	}
}

// WithArgs uses the passed-in list of interface{} as the substitution
// variables in the Error's Message string, but returns a new Error
func (e Error) WithArgs(args ...interface{}) Error {
	// The format string is the registered descriptor's Message (via
	// e.Code.Message()), not e.Message, so args substitute into the
	// canonical template.
	return Error{
		Code:    e.Code,
		Message: fmt.Sprintf(e.Code.Message(), args...),
		Detail:  e.Detail,
	}
}
|
||||
|
||||
// ErrorDescriptor provides relevant information about a given error code.
type ErrorDescriptor struct {
	// Code is the error code that this descriptor describes.
	Code ErrorCode

	// Value provides a unique, string key, often captilized with
	// underscores, to identify the error code. This value is used as the
	// keyed value when serializing api errors.
	Value string

	// Message is a short, human readable description of the error condition
	// included in API responses.
	Message string

	// Description provides a complete account of the errors purpose, suitable
	// for use in documentation.
	Description string

	// HTTPStatusCode provides the http status code that is associated with
	// this error condition.
	HTTPStatusCode int
}
|
||||
|
||||
// ParseErrorCode returns the value by the string error code.
|
||||
// `ErrorCodeUnknown` will be returned if the error is not known.
|
||||
func ParseErrorCode(value string) ErrorCode {
|
||||
ed, ok := idToDescriptors[value]
|
||||
if ok {
|
||||
return ed.Code
|
||||
}
|
||||
|
||||
return ErrorCodeUnknown
|
||||
}
|
||||
|
||||
// Errors provides the envelope for multiple errors and a few sugar methods
// for use within the application.
type Errors []error

var _ error = Errors{}

// Error renders the collection: "<nil>" when empty, the single error's text
// when there is exactly one, and an "errors:" header followed by one error
// per line otherwise.
func (errs Errors) Error() string {
	switch len(errs) {
	case 0:
		return "<nil>"
	case 1:
		return errs[0].Error()
	default:
		// Build in one pass with strings.Builder instead of quadratic
		// string concatenation.
		var b strings.Builder
		b.WriteString("errors:\n")
		for _, err := range errs {
			b.WriteString(err.Error())
			b.WriteByte('\n')
		}
		return b.String()
	}
}

// Len returns the current number of errors.
func (errs Errors) Len() int {
	return len(errs)
}
|
||||
|
||||
// MarshalJSON converts slice of error, ErrorCode or Error into a
// slice of Error - then serializes
func (errs Errors) MarshalJSON() ([]byte, error) {
	var tmpErrs struct {
		Errors []Error `json:"errors,omitempty"`
	}

	for _, daErr := range errs {
		var err Error

		// Normalize every element to an Error value: bare ErrorCodes are
		// expanded, Errors pass through, and anything else is wrapped in
		// ErrorCodeUnknown with the original error as Detail.
		switch daErr := daErr.(type) {
		case ErrorCode:
			err = daErr.WithDetail(nil)
		case Error:
			err = daErr
		default:
			err = ErrorCodeUnknown.WithDetail(daErr)

		}

		// If the Error struct was setup and they forgot to set the
		// Message field (meaning its "") then grab it from the ErrCode
		msg := err.Message
		if msg == "" {
			msg = err.Code.Message()
		}

		tmpErrs.Errors = append(tmpErrs.Errors, Error{
			Code:    err.Code,
			Message: msg,
			Detail:  err.Detail,
		})
	}

	return json.Marshal(tmpErrs)
}

// UnmarshalJSON deserializes []Error and then converts it into slice of
// Error or ErrorCode
func (errs *Errors) UnmarshalJSON(data []byte) error {
	var tmpErrs struct {
		Errors []Error
	}

	if err := json.Unmarshal(data, &tmpErrs); err != nil {
		return err
	}

	var newErrs Errors
	for _, daErr := range tmpErrs.Errors {
		// If Message is empty or exactly matches the Code's message string
		// then just use the Code, no need for a full Error struct
		if daErr.Detail == nil && (daErr.Message == "" || daErr.Message == daErr.Code.Message()) {
			// Error's w/o details get converted to ErrorCode
			newErrs = append(newErrs, daErr.Code)
		} else {
			// Error's w/ details are untouched
			newErrs = append(newErrs, Error{
				Code:    daErr.Code,
				Message: daErr.Message,
				Detail:  daErr.Detail,
			})
		}
	}

	*errs = newErrs
	return nil
}
|
||||
154
core/remotes/docker/errdesc.go
Normal file
154
core/remotes/docker/errdesc.go
Normal file
@@ -0,0 +1,154 @@
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package docker
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"sort"
|
||||
"sync"
|
||||
)
|
||||
|
||||
var (
	// Registration indexes populated by Register: by code, by string
	// value, and by group name.
	errorCodeToDescriptors = map[ErrorCode]ErrorDescriptor{}
	idToDescriptors        = map[string]ErrorDescriptor{}
	groupToDescriptors     = map[string][]ErrorDescriptor{}
)

// The base "errcode" group of error codes shared by all registries.
var (
	// ErrorCodeUnknown is a generic error that can be used as a last
	// resort if there is no situation-specific error message that can be used
	ErrorCodeUnknown = Register("errcode", ErrorDescriptor{
		Value:   "UNKNOWN",
		Message: "unknown error",
		Description: `Generic error returned when the error does not have an
			API classification.`,
		HTTPStatusCode: http.StatusInternalServerError,
	})

	// ErrorCodeUnsupported is returned when an operation is not supported.
	ErrorCodeUnsupported = Register("errcode", ErrorDescriptor{
		Value:   "UNSUPPORTED",
		Message: "The operation is unsupported.",
		Description: `The operation was unsupported due to a missing
		implementation or invalid set of parameters.`,
		HTTPStatusCode: http.StatusMethodNotAllowed,
	})

	// ErrorCodeUnauthorized is returned if a request requires
	// authentication.
	ErrorCodeUnauthorized = Register("errcode", ErrorDescriptor{
		Value:   "UNAUTHORIZED",
		Message: "authentication required",
		Description: `The access controller was unable to authenticate
		the client. Often this will be accompanied by a
		Www-Authenticate HTTP response header indicating how to
		authenticate.`,
		HTTPStatusCode: http.StatusUnauthorized,
	})

	// ErrorCodeDenied is returned if a client does not have sufficient
	// permission to perform an action.
	ErrorCodeDenied = Register("errcode", ErrorDescriptor{
		Value:   "DENIED",
		Message: "requested access to the resource is denied",
		Description: `The access controller denied access for the
		operation on a resource.`,
		HTTPStatusCode: http.StatusForbidden,
	})

	// ErrorCodeUnavailable provides a common error to report unavailability
	// of a service or endpoint.
	ErrorCodeUnavailable = Register("errcode", ErrorDescriptor{
		Value:          "UNAVAILABLE",
		Message:        "service unavailable",
		Description:    "Returned when a service is not available",
		HTTPStatusCode: http.StatusServiceUnavailable,
	})

	// ErrorCodeTooManyRequests is returned if a client attempts too many
	// times to contact a service endpoint.
	ErrorCodeTooManyRequests = Register("errcode", ErrorDescriptor{
		Value:   "TOOMANYREQUESTS",
		Message: "too many requests",
		Description: `Returned when a client attempts to contact a
		service too many times`,
		HTTPStatusCode: http.StatusTooManyRequests,
	})
)
|
||||
|
||||
// nextCode is the integer assigned to the next registered ErrorCode;
// codes start at 1000 and increment once per registration.
var nextCode = 1000

// registerLock serializes access to nextCode and the registration maps.
var registerLock sync.Mutex

// Register will make the passed-in error known to the environment and
// return a new ErrorCode
func Register(group string, descriptor ErrorDescriptor) ErrorCode {
	registerLock.Lock()
	defer registerLock.Unlock()

	descriptor.Code = ErrorCode(nextCode)

	// Duplicate registration of either the string value or the numeric
	// code is a programmer error, hence panic rather than an error return.
	if _, ok := idToDescriptors[descriptor.Value]; ok {
		panic(fmt.Sprintf("ErrorValue %q is already registered", descriptor.Value))
	}
	if _, ok := errorCodeToDescriptors[descriptor.Code]; ok {
		panic(fmt.Sprintf("ErrorCode %v is already registered", descriptor.Code))
	}

	groupToDescriptors[group] = append(groupToDescriptors[group], descriptor)
	errorCodeToDescriptors[descriptor.Code] = descriptor
	idToDescriptors[descriptor.Value] = descriptor

	nextCode++
	return descriptor.Code
}
|
||||
|
||||
// byValue implements sort.Interface, ordering ErrorDescriptors lexically by
// their Value string.
type byValue []ErrorDescriptor

func (a byValue) Len() int           { return len(a) }
func (a byValue) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
func (a byValue) Less(i, j int) bool { return a[i].Value < a[j].Value }
|
||||
|
||||
// GetGroupNames returns the list of Error group names that are registered
|
||||
func GetGroupNames() []string {
|
||||
keys := []string{}
|
||||
|
||||
for k := range groupToDescriptors {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
sort.Strings(keys)
|
||||
return keys
|
||||
}
|
||||
|
||||
// GetErrorCodeGroup returns the named group of error descriptors
|
||||
func GetErrorCodeGroup(name string) []ErrorDescriptor {
|
||||
desc := groupToDescriptors[name]
|
||||
sort.Sort(byValue(desc))
|
||||
return desc
|
||||
}
|
||||
|
||||
// GetErrorAllDescriptors returns a slice of all ErrorDescriptors that are
|
||||
// registered, irrespective of what group they're in
|
||||
func GetErrorAllDescriptors() []ErrorDescriptor {
|
||||
result := []ErrorDescriptor{}
|
||||
|
||||
for _, group := range GetGroupNames() {
|
||||
result = append(result, GetErrorCodeGroup(group)...)
|
||||
}
|
||||
sort.Sort(byValue(result))
|
||||
return result
|
||||
}
|
||||
355
core/remotes/docker/fetcher.go
Normal file
355
core/remotes/docker/fetcher.go
Normal file
@@ -0,0 +1,355 @@
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package docker
|
||||
|
||||
import (
|
||||
"compress/flate"
|
||||
"compress/gzip"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
|
||||
"github.com/containerd/containerd/v2/core/images"
|
||||
"github.com/containerd/containerd/v2/core/remotes"
|
||||
"github.com/containerd/containerd/v2/errdefs"
|
||||
"github.com/containerd/log"
|
||||
"github.com/klauspost/compress/zstd"
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
)
|
||||
|
||||
// dockerFetcher retrieves registry content (manifests and blobs) over HTTP
// for the repository described by the embedded dockerBase. See Fetch and
// FetchByDigest for the endpoint-selection logic.
type dockerFetcher struct {
	*dockerBase
}
||||
// Fetch returns a reader for the content described by desc. The returned
// reader lazily (re)opens the underlying HTTP request via newHTTPReadSeeker,
// trying sources in this order: the descriptor's external URLs, then the
// registry "manifests" endpoints (for manifest/index media types), and
// finally the "blobs" endpoints — iterating over all configured hosts and
// returning the first error encountered if every host fails.
func (r dockerFetcher) Fetch(ctx context.Context, desc ocispec.Descriptor) (io.ReadCloser, error) {
	ctx = log.WithLogger(ctx, log.G(ctx).WithField("digest", desc.Digest))

	hosts := r.filterHosts(HostCapabilityPull)
	if len(hosts) == 0 {
		return nil, fmt.Errorf("no pull hosts: %w", errdefs.ErrNotFound)
	}

	ctx, err := ContextWithRepositoryScope(ctx, r.refspec, false)
	if err != nil {
		return nil, err
	}

	// The open callback is re-invoked with a new offset whenever the
	// returned reader needs to resume after a dropped connection.
	return newHTTPReadSeeker(desc.Size, func(offset int64) (io.ReadCloser, error) {
		// firstly try fetch via external urls
		for _, us := range desc.URLs {
			u, err := url.Parse(us)
			if err != nil {
				log.G(ctx).WithError(err).Debugf("failed to parse %q", us)
				continue
			}
			if u.Scheme != "http" && u.Scheme != "https" {
				log.G(ctx).Debug("non-http(s) alternative url is unsupported")
				continue
			}
			ctx = log.WithLogger(ctx, log.G(ctx).WithField("url", u))
			log.G(ctx).Info("request")

			// Try this first, parse it
			// External URLs bypass the configured hosts entirely: build a
			// one-off RegistryHost from the URL itself.
			host := RegistryHost{
				Client:       http.DefaultClient,
				Host:         u.Host,
				Scheme:       u.Scheme,
				Path:         u.Path,
				Capabilities: HostCapabilityPull,
			}
			req := r.request(host, http.MethodGet)
			// Strip namespace from base
			req.path = u.Path
			if u.RawQuery != "" {
				req.path = req.path + "?" + u.RawQuery
			}

			rc, err := r.open(ctx, req, desc.MediaType, offset)
			if err != nil {
				if errdefs.IsNotFound(err) {
					continue // try one of the other urls.
				}

				return nil, err
			}

			return rc, nil
		}

		// Try manifests endpoints for manifests types
		if images.IsManifestType(desc.MediaType) || images.IsIndexType(desc.MediaType) ||
			desc.MediaType == images.MediaTypeDockerSchema1Manifest {

			var firstErr error
			for _, host := range r.hosts {
				req := r.request(host, http.MethodGet, "manifests", desc.Digest.String())
				if err := req.addNamespace(r.refspec.Hostname()); err != nil {
					return nil, err
				}

				rc, err := r.open(ctx, req, desc.MediaType, offset)
				if err != nil {
					// Store the error for referencing later
					if firstErr == nil {
						firstErr = err
					}
					continue // try another host
				}

				return rc, nil
			}

			return nil, firstErr
		}

		// Finally use blobs endpoints
		var firstErr error
		for _, host := range r.hosts {
			req := r.request(host, http.MethodGet, "blobs", desc.Digest.String())
			if err := req.addNamespace(r.refspec.Hostname()); err != nil {
				return nil, err
			}

			rc, err := r.open(ctx, req, desc.MediaType, offset)
			if err != nil {
				// Store the error for referencing later
				if firstErr == nil {
					firstErr = err
				}
				continue // try another host
			}

			return rc, nil
		}

		// Translate a not-found from every host into a descriptive error
		// that still satisfies errdefs.IsNotFound for callers.
		if errdefs.IsNotFound(firstErr) {
			firstErr = fmt.Errorf("could not fetch content descriptor %v (%v) from remote: %w",
				desc.Digest, desc.MediaType, errdefs.ErrNotFound,
			)
		}

		return nil, firstErr

	})
}
||||
// createGetReq probes the path ps on host with a HEAD request (to learn the
// content length and confirm existence) and, on success, returns a matching
// GET request plus the Content-Length reported by the HEAD response.
// Any HEAD status above 299 is treated as failure.
func (r dockerFetcher) createGetReq(ctx context.Context, host RegistryHost, mediatype string, ps ...string) (*request, int64, error) {
	headReq := r.request(host, http.MethodHead, ps...)
	if err := headReq.addNamespace(r.refspec.Hostname()); err != nil {
		return nil, 0, err
	}

	// Advertise the requested media type, but always accept anything so
	// registries that ignore Accept still respond.
	if mediatype == "" {
		headReq.header.Set("Accept", "*/*")
	} else {
		headReq.header.Set("Accept", strings.Join([]string{mediatype, `*/*`}, ", "))
	}

	headResp, err := headReq.doWithRetries(ctx, nil)
	if err != nil {
		return nil, 0, err
	}
	// HEAD bodies are empty but must still be closed to release the connection.
	if headResp.Body != nil {
		headResp.Body.Close()
	}
	if headResp.StatusCode > 299 {
		return nil, 0, fmt.Errorf("unexpected HEAD status code %v: %s", headReq.String(), headResp.Status)
	}

	getReq := r.request(host, http.MethodGet, ps...)
	if err := getReq.addNamespace(r.refspec.Hostname()); err != nil {
		return nil, 0, err
	}
	return getReq, headResp.ContentLength, nil
}
||||
// FetchByDigest fetches content by digest when no full descriptor is
// available. It probes the "blobs" endpoint on every host first and falls
// back to "manifests"; the size comes from the HEAD probe in createGetReq.
// The returned descriptor always carries the generic
// "application/octet-stream" media type, since the real type is unknown.
func (r dockerFetcher) FetchByDigest(ctx context.Context, dgst digest.Digest, opts ...remotes.FetchByDigestOpts) (io.ReadCloser, ocispec.Descriptor, error) {
	var desc ocispec.Descriptor
	ctx = log.WithLogger(ctx, log.G(ctx).WithField("digest", dgst))
	var config remotes.FetchByDigestConfig
	for _, o := range opts {
		if err := o(ctx, &config); err != nil {
			return nil, desc, err
		}
	}

	hosts := r.filterHosts(HostCapabilityPull)
	if len(hosts) == 0 {
		return nil, desc, fmt.Errorf("no pull hosts: %w", errdefs.ErrNotFound)
	}

	ctx, err := ContextWithRepositoryScope(ctx, r.refspec, false)
	if err != nil {
		return nil, desc, err
	}

	var (
		getReq   *request
		sz       int64
		firstErr error
	)

	// Probe the blobs endpoint on each host; the first successful HEAD wins.
	for _, host := range r.hosts {
		getReq, sz, err = r.createGetReq(ctx, host, config.Mediatype, "blobs", dgst.String())
		if err == nil {
			break
		}
		// Store the error for referencing later
		if firstErr == nil {
			firstErr = err
		}
	}

	if getReq == nil {
		// Fall back to the "manifests" endpoint
		for _, host := range r.hosts {
			getReq, sz, err = r.createGetReq(ctx, host, config.Mediatype, "manifests", dgst.String())
			if err == nil {
				break
			}
			// Store the error for referencing later
			if firstErr == nil {
				firstErr = err
			}
		}
	}

	if getReq == nil {
		// Normalize the failure: keep not-found semantics when every host
		// reported not found, and guard against a nil firstErr.
		if errdefs.IsNotFound(firstErr) {
			firstErr = fmt.Errorf("could not fetch content %v from remote: %w", dgst, errdefs.ErrNotFound)
		}
		if firstErr == nil {
			firstErr = fmt.Errorf("could not fetch content %v from remote: (unknown)", dgst)
		}
		return nil, desc, firstErr
	}

	// The reader re-opens getReq at the requested offset when resuming.
	seeker, err := newHTTPReadSeeker(sz, func(offset int64) (io.ReadCloser, error) {
		return r.open(ctx, getReq, config.Mediatype, offset)
	})
	if err != nil {
		return nil, desc, err
	}

	desc = ocispec.Descriptor{
		MediaType: "application/octet-stream",
		Digest:    dgst,
		Size:      sz,
	}
	return seeker, desc, nil
}
||||
// open performs req and returns the (possibly decompressed) response body,
// positioned at offset. It sets Accept/Accept-Encoding headers, issues a
// Range request when offset > 0, validates or emulates the range (by
// discarding up to offset when the server ignored it), and unwraps any
// zstd/gzip/deflate Content-Encoding layers in reverse application order.
// Registry error bodies are decoded into the returned error when present.
func (r dockerFetcher) open(ctx context.Context, req *request, mediatype string, offset int64) (_ io.ReadCloser, retErr error) {
	if mediatype == "" {
		req.header.Set("Accept", "*/*")
	} else {
		req.header.Set("Accept", strings.Join([]string{mediatype, `*/*`}, ", "))
	}
	req.header.Set("Accept-Encoding", "zstd;q=1.0, gzip;q=0.8, deflate;q=0.5")

	if offset > 0 {
		// Note: "Accept-Ranges: bytes" cannot be trusted as some endpoints
		// will return the header without supporting the range. The content
		// range must always be checked.
		req.header.Set("Range", fmt.Sprintf("bytes=%d-", offset))
	}

	resp, err := req.doWithRetries(ctx, nil)
	if err != nil {
		return nil, err
	}
	// Close the body on every error path below; on success the caller owns it.
	defer func() {
		if retErr != nil {
			resp.Body.Close()
		}
	}()

	if resp.StatusCode > 299 {
		// TODO(stevvooe): When doing a offset specific request, we should
		// really distinguish between a 206 and a 200. In the case of 200, we
		// can discard the bytes, hiding the seek behavior from the
		// implementation.

		if resp.StatusCode == http.StatusNotFound {
			return nil, fmt.Errorf("content at %v not found: %w", req.String(), errdefs.ErrNotFound)
		}
		// Prefer the registry's structured error body when one is present.
		var registryErr Errors
		if err := json.NewDecoder(resp.Body).Decode(&registryErr); err != nil || registryErr.Len() < 1 {
			return nil, fmt.Errorf("unexpected status code %v: %v", req.String(), resp.Status)
		}
		return nil, fmt.Errorf("unexpected status code %v: %s - Server message: %s", req.String(), resp.Status, registryErr.Error())
	}
	if offset > 0 {
		cr := resp.Header.Get("content-range")
		if cr != "" {
			// A Content-Range that does not start at the requested offset
			// cannot be repaired here — fail rather than corrupt the stream.
			if !strings.HasPrefix(cr, fmt.Sprintf("bytes %d-", offset)) {
				return nil, fmt.Errorf("unhandled content range in response: %v", cr)

			}
		} else {
			// TODO: Should any cases where use of content range
			// without the proper header be considered?
			// 206 responses?

			// Discard up to offset
			// Could use buffer pool here but this case should be rare
			n, err := io.Copy(io.Discard, io.LimitReader(resp.Body, offset))
			if err != nil {
				return nil, fmt.Errorf("failed to discard to offset: %w", err)
			}
			if n != offset {
				return nil, errors.New("unable to discard to offset")
			}

		}
	}

	body := resp.Body
	// Content-Encoding may list several codings applied in order; undo them
	// in reverse (last applied is unwrapped first).
	encoding := strings.FieldsFunc(resp.Header.Get("Content-Encoding"), func(r rune) bool {
		return r == ' ' || r == '\t' || r == ','
	})
	for i := len(encoding) - 1; i >= 0; i-- {
		algorithm := strings.ToLower(encoding[i])
		switch algorithm {
		case "zstd":
			r, err := zstd.NewReader(body)
			if err != nil {
				return nil, err
			}
			body = r.IOReadCloser()
		case "gzip":
			body, err = gzip.NewReader(body)
			if err != nil {
				return nil, err
			}
		case "deflate":
			body = flate.NewReader(body)
		case "identity", "":
			// no content-encoding applied, use raw body
		default:
			return nil, errors.New("unsupported Content-Encoding algorithm: " + algorithm)
		}
	}

	return body, nil
}
||||
75
core/remotes/docker/fetcher_fuzz.go
Normal file
75
core/remotes/docker/fetcher_fuzz.go
Normal file
@@ -0,0 +1,75 @@
|
||||
//go:build gofuzz
|
||||
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package docker
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"net/url"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// FuzzFetcher feeds arbitrary bytes through dockerFetcher.open against a
// local test server that echoes the input, and panics if the number of bytes
// read back differs from the input length. Return values follow go-fuzz
// conventions: -1 skips the input, 0 means uninteresting, 1 means processed.
func FuzzFetcher(data []byte) int {
	dataLen := len(data)
	if dataLen == 0 {
		return -1
	}

	s := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
		// Advertise a full-range response matching the fuzz payload.
		rw.Header().Set("content-range", fmt.Sprintf("bytes %d-%d/%d", 0, dataLen-1, dataLen))
		rw.Header().Set("content-length", strconv.Itoa(dataLen))
		rw.Write(data)
	}))
	defer s.Close()

	u, err := url.Parse(s.URL)
	if err != nil {
		return 0
	}

	f := dockerFetcher{&dockerBase{
		repository: "nonempty",
	}}
	host := RegistryHost{
		Client: s.Client(),
		Host:   u.Host,
		Scheme: u.Scheme,
		Path:   u.Path,
	}

	ctx := context.Background()
	req := f.request(host, http.MethodGet)
	rc, err := f.open(ctx, req, "", 0)
	if err != nil {
		return 0
	}
	b, err := io.ReadAll(rc)
	if err != nil {
		return 0
	}

	expected := data
	// Only the length is compared; a mismatch indicates open/read corrupted
	// or truncated the stream.
	if len(b) != len(expected) {
		panic("len of request is not equal to len of expected but should be")
	}
	return 1
}
||||
363
core/remotes/docker/fetcher_test.go
Normal file
363
core/remotes/docker/fetcher_test.go
Normal file
@@ -0,0 +1,363 @@
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package docker
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"compress/flate"
|
||||
"compress/gzip"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"math/rand"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"net/url"
|
||||
"strconv"
|
||||
"testing"
|
||||
|
||||
"github.com/klauspost/compress/zstd"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
// TestFetcherOpen exercises dockerFetcher.open's offset handling against a
// local server. The shared `start` variable controls whether the server
// honors ranges (sends Content-Range from `start`) or ignores them (start ==
// 0), letting the test cover both the validated-range path and the
// discard-to-offset fallback, plus a mismatched-range error case.
func TestFetcherOpen(t *testing.T) {
	content := make([]byte, 128)
	rand.New(rand.NewSource(1)).Read(content)
	start := 0

	s := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
		if start > 0 {
			rw.Header().Set("content-range", fmt.Sprintf("bytes %d-127/128", start))
		}
		rw.Header().Set("content-length", strconv.Itoa(len(content[start:])))
		_, _ = rw.Write(content[start:])
	}))
	defer s.Close()

	u, err := url.Parse(s.URL)
	if err != nil {
		t.Fatal(err)
	}

	f := dockerFetcher{&dockerBase{
		repository: "nonempty",
	}}

	host := RegistryHost{
		Client: s.Client(),
		Host:   u.Host,
		Scheme: u.Scheme,
		Path:   u.Path,
	}

	ctx := context.Background()

	req := f.request(host, http.MethodGet)

	// checkReader opens at offset o and verifies the bytes match content[o:].
	checkReader := func(o int64) {
		t.Helper()

		rc, err := f.open(ctx, req, "", o)
		if err != nil {
			t.Fatalf("failed to open: %+v", err)
		}
		b, err := io.ReadAll(rc)
		if err != nil {
			t.Fatal(err)
		}
		expected := content[o:]
		if len(b) != len(expected) {
			t.Errorf("unexpected length %d, expected %d", len(b), len(expected))
			return
		}
		for i, c := range expected {
			if b[i] != c {
				t.Errorf("unexpected byte %x at %d, expected %x", b[i], i, c)
				return
			}
		}

	}

	checkReader(0)

	// Test server ignores content range
	checkReader(25)

	// Use content range on server
	start = 20
	checkReader(20)

	// Check returning just last byte and no bytes
	start = 127
	checkReader(127)
	start = 128
	checkReader(128)

	// Check that server returning a different content range
	// then requested errors
	start = 30
	_, err = f.open(ctx, req, "", 20)
	if err == nil {
		t.Fatal("expected error opening with invalid server response")
	}
}
||||
|
||||
func TestContentEncoding(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
zstdEncode := func(in []byte) []byte {
|
||||
var b bytes.Buffer
|
||||
zw, err := zstd.NewWriter(&b)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
_, err = zw.Write(in)
|
||||
if err != nil {
|
||||
t.Fatal()
|
||||
}
|
||||
err = zw.Close()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
return b.Bytes()
|
||||
}
|
||||
gzipEncode := func(in []byte) []byte {
|
||||
var b bytes.Buffer
|
||||
gw := gzip.NewWriter(&b)
|
||||
_, err := gw.Write(in)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
err = gw.Close()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
return b.Bytes()
|
||||
}
|
||||
flateEncode := func(in []byte) []byte {
|
||||
var b bytes.Buffer
|
||||
dw, err := flate.NewWriter(&b, -1)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
_, err = dw.Write(in)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
err = dw.Close()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
return b.Bytes()
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
encodingFuncs []func([]byte) []byte
|
||||
encodingHeader string
|
||||
}{
|
||||
{
|
||||
encodingFuncs: []func([]byte) []byte{},
|
||||
encodingHeader: "",
|
||||
},
|
||||
{
|
||||
encodingFuncs: []func([]byte) []byte{zstdEncode},
|
||||
encodingHeader: "zstd",
|
||||
},
|
||||
{
|
||||
encodingFuncs: []func([]byte) []byte{gzipEncode},
|
||||
encodingHeader: "gzip",
|
||||
},
|
||||
{
|
||||
encodingFuncs: []func([]byte) []byte{flateEncode},
|
||||
encodingHeader: "deflate",
|
||||
},
|
||||
{
|
||||
encodingFuncs: []func([]byte) []byte{zstdEncode, gzipEncode},
|
||||
encodingHeader: "zstd,gzip",
|
||||
},
|
||||
{
|
||||
encodingFuncs: []func([]byte) []byte{gzipEncode, flateEncode},
|
||||
encodingHeader: "gzip,deflate",
|
||||
},
|
||||
{
|
||||
encodingFuncs: []func([]byte) []byte{gzipEncode, zstdEncode},
|
||||
encodingHeader: "gzip,zstd",
|
||||
},
|
||||
{
|
||||
encodingFuncs: []func([]byte) []byte{gzipEncode, zstdEncode, flateEncode},
|
||||
encodingHeader: "gzip,zstd,deflate",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
tc := tc
|
||||
t.Run(tc.encodingHeader, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
content := make([]byte, 128)
|
||||
rand.New(rand.NewSource(1)).Read(content)
|
||||
|
||||
s := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
|
||||
compressedContent := content
|
||||
for _, enc := range tc.encodingFuncs {
|
||||
compressedContent = enc(compressedContent)
|
||||
}
|
||||
rw.Header().Set("content-length", fmt.Sprintf("%d", len(compressedContent)))
|
||||
rw.Header().Set("Content-Encoding", tc.encodingHeader)
|
||||
rw.Write(compressedContent)
|
||||
}))
|
||||
defer s.Close()
|
||||
|
||||
u, err := url.Parse(s.URL)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
f := dockerFetcher{&dockerBase{
|
||||
repository: "nonempty",
|
||||
}}
|
||||
|
||||
host := RegistryHost{
|
||||
Client: s.Client(),
|
||||
Host: u.Host,
|
||||
Scheme: u.Scheme,
|
||||
Path: u.Path,
|
||||
}
|
||||
|
||||
req := f.request(host, http.MethodGet)
|
||||
|
||||
rc, err := f.open(context.Background(), req, "", 0)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to open for encoding %s: %+v", tc.encodingHeader, err)
|
||||
}
|
||||
b, err := io.ReadAll(rc)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
expected := content
|
||||
if len(b) != len(expected) {
|
||||
t.Errorf("unexpected length %d, expected %d", len(b), len(expected))
|
||||
return
|
||||
}
|
||||
for i, c := range expected {
|
||||
if b[i] != c {
|
||||
t.Errorf("unexpected byte %x at %d, expected %x", b[i], i, c)
|
||||
return
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// New set of tests to test new error cases
|
||||
func TestDockerFetcherOpen(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
mockedStatus int
|
||||
mockedErr error
|
||||
want io.ReadCloser
|
||||
wantErr bool
|
||||
wantServerMessageError bool
|
||||
wantPlainError bool
|
||||
retries int
|
||||
}{
|
||||
{
|
||||
name: "should return status and error.message if it exists if the registry request fails",
|
||||
mockedStatus: 500,
|
||||
mockedErr: Errors{Error{
|
||||
Code: ErrorCodeUnknown,
|
||||
Message: "Test Error",
|
||||
}},
|
||||
want: nil,
|
||||
wantErr: true,
|
||||
wantServerMessageError: true,
|
||||
},
|
||||
{
|
||||
name: "should return just status if the registry request fails and does not return a docker error",
|
||||
mockedStatus: 500,
|
||||
mockedErr: fmt.Errorf("Non-docker error"),
|
||||
want: nil,
|
||||
wantErr: true,
|
||||
wantPlainError: true,
|
||||
}, {
|
||||
name: "should return StatusRequestTimeout after 5 retries",
|
||||
mockedStatus: http.StatusRequestTimeout,
|
||||
mockedErr: fmt.Errorf(http.StatusText(http.StatusRequestTimeout)),
|
||||
want: nil,
|
||||
wantErr: true,
|
||||
wantPlainError: true,
|
||||
retries: 5,
|
||||
}, {
|
||||
name: "should return StatusTooManyRequests after 5 retries",
|
||||
mockedStatus: http.StatusTooManyRequests,
|
||||
mockedErr: fmt.Errorf(http.StatusText(http.StatusTooManyRequests)),
|
||||
want: nil,
|
||||
wantErr: true,
|
||||
wantPlainError: true,
|
||||
retries: 5,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
|
||||
s := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
|
||||
if tt.retries > 0 {
|
||||
tt.retries--
|
||||
}
|
||||
rw.WriteHeader(tt.mockedStatus)
|
||||
bytes, _ := json.Marshal(tt.mockedErr)
|
||||
rw.Write(bytes)
|
||||
}))
|
||||
defer s.Close()
|
||||
|
||||
u, err := url.Parse(s.URL)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
f := dockerFetcher{&dockerBase{
|
||||
repository: "ns",
|
||||
}}
|
||||
|
||||
host := RegistryHost{
|
||||
Client: s.Client(),
|
||||
Host: u.Host,
|
||||
Scheme: u.Scheme,
|
||||
Path: u.Path,
|
||||
}
|
||||
|
||||
req := f.request(host, http.MethodGet)
|
||||
|
||||
got, err := f.open(context.TODO(), req, "", 0)
|
||||
assert.Equal(t, tt.wantErr, err != nil)
|
||||
assert.Equal(t, tt.want, got)
|
||||
assert.Equal(t, tt.retries, 0)
|
||||
if tt.wantErr {
|
||||
var expectedError error
|
||||
if tt.wantServerMessageError {
|
||||
expectedError = fmt.Errorf("unexpected status code %v/ns: %v %s - Server message: %s", s.URL, tt.mockedStatus, http.StatusText(tt.mockedStatus), tt.mockedErr.Error())
|
||||
} else if tt.wantPlainError {
|
||||
expectedError = fmt.Errorf("unexpected status code %v/ns: %v %s", s.URL, tt.mockedStatus, http.StatusText(tt.mockedStatus))
|
||||
}
|
||||
assert.Equal(t, expectedError.Error(), err.Error())
|
||||
|
||||
}
|
||||
|
||||
})
|
||||
}
|
||||
}
|
||||
149
core/remotes/docker/handler.go
Normal file
149
core/remotes/docker/handler.go
Normal file
@@ -0,0 +1,149 @@
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package docker
|
||||
|
||||
import (
	"context"
	"fmt"
	"net/url"
	"sort"
	"strings"

	"github.com/containerd/containerd/v2/core/content"
	"github.com/containerd/containerd/v2/core/images"
	"github.com/containerd/containerd/v2/labels"
	"github.com/containerd/containerd/v2/reference"
	"github.com/containerd/log"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)
||||
|
||||
// AppendDistributionSourceLabel updates the label of blob with distribution source.
//
// It parses ref into a source host and repository, and returns an images
// handler that, for each descriptor it visits, merges the repository into the
// blob's per-source distribution label (via appendDistributionSourceLabel)
// and persists it with manager.Update. The handler never returns children.
func AppendDistributionSourceLabel(manager content.Manager, ref string) (images.HandlerFunc, error) {
	refspec, err := reference.Parse(ref)
	if err != nil {
		return nil, err
	}

	// The dummy scheme lets url.Parse split the locator into host and path.
	u, err := url.Parse("dummy://" + refspec.Locator)
	if err != nil {
		return nil, err
	}

	source, repo := u.Hostname(), strings.TrimPrefix(u.Path, "/")
	return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
		info, err := manager.Info(ctx, desc.Digest)
		if err != nil {
			return nil, err
		}

		key := distributionSourceLabelKey(source)

		originLabel := ""
		if info.Labels != nil {
			originLabel = info.Labels[key]
		}
		value := appendDistributionSourceLabel(originLabel, repo)

		// The repo name has been limited under 256 and the distribution
		// label might hit the limitation of label size, when blob data
		// is used as the very, very common layer.
		if err := labels.Validate(key, value); err != nil {
			log.G(ctx).Warnf("skip to append distribution label: %s", err)
			return nil, nil
		}

		info = content.Info{
			Digest: desc.Digest,
			Labels: map[string]string{
				key: value,
			},
		}
		// Restrict the update to this label so other metadata is untouched.
		_, err = manager.Update(ctx, info, fmt.Sprintf("labels.%s", key))
		return nil, err
	}, nil
}
||||
|
||||
// appendDistributionSourceLabel merges repo into the comma-separated
// repository list originLabel and returns the union as a sorted,
// de-duplicated, comma-joined string. Empty entries — including an empty
// repo argument — are dropped. This replaces a hand-rolled insertion sort
// that marked duplicates with an empty-string sentinel; the output is
// identical.
func appendDistributionSourceLabel(originLabel, repo string) string {
	seen := map[string]struct{}{}
	merged := []string{}

	// add records r once, skipping empties and duplicates.
	add := func(r string) {
		if r == "" {
			return
		}
		if _, ok := seen[r]; ok {
			return
		}
		seen[r] = struct{}{}
		merged = append(merged, r)
	}

	if originLabel != "" {
		for _, r := range strings.Split(originLabel, ",") {
			add(r)
		}
	}
	add(repo)

	sort.Strings(merged)
	return strings.Join(merged, ",")
}
||||
|
||||
// distributionSourceLabelKey returns the per-source distribution label key,
// formed by appending the source host to the shared label prefix.
func distributionSourceLabelKey(source string) string {
	return labels.LabelDistributionSource + "." + source
}
||||
|
||||
// selectRepositoryMountCandidate will select the repo which has longest
// common prefix components as the candidate.
//
// The candidate pool is the comma-separated repository list stored under the
// refspec's source-host distribution label in sources; the target repository
// itself is excluded. On a tie the later entry wins (the comparison is >=).
// An empty string is returned when there is no usable label or candidate.
func selectRepositoryMountCandidate(refspec reference.Spec, sources map[string]string) string {
	u, err := url.Parse("dummy://" + refspec.Locator)
	if err != nil {
		// NOTE: basically, it won't be error here
		return ""
	}

	source, target := u.Hostname(), strings.TrimPrefix(u.Path, "/")
	repoLabel, ok := sources[distributionSourceLabelKey(source)]
	if !ok || repoLabel == "" {
		return ""
	}

	n, match := 0, ""
	components := strings.Split(target, "/")
	for _, repo := range strings.Split(repoLabel, ",") {
		// the target repo is not a candidate
		if repo == target {
			continue
		}

		// >= keeps the last best match, so later entries win ties.
		if l := commonPrefixComponents(components, repo); l >= n {
			n, match = l, repo
		}
	}
	return match
}
||||
|
||||
// commonPrefixComponents reports how many leading slash-separated path
// components the target shares with components.
func commonPrefixComponents(components []string, target string) int {
	targetComponents := strings.Split(target, "/")

	// Advance while both sequences agree, stopping at the first mismatch
	// or when either runs out of components.
	n := 0
	for n < len(components) && n < len(targetComponents) && components[n] == targetComponents[n] {
		n++
	}
	return n
}
||||
133
core/remotes/docker/handler_test.go
Normal file
133
core/remotes/docker/handler_test.go
Normal file
@@ -0,0 +1,133 @@
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package docker
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/containerd/containerd/v2/labels"
|
||||
"github.com/containerd/containerd/v2/reference"
|
||||
)
|
||||
|
||||
// TestAppendDistributionLabel verifies that appendDistributionSourceLabel
// merges the new repo into the origin label, de-duplicates, sorts, and
// drops empty entries.
func TestAppendDistributionLabel(t *testing.T) {
	for _, tc := range []struct {
		originLabel string
		repo        string
		expected    string
	}{
		{
			originLabel: "",
			repo:        "",
			expected:    "",
		},
		{
			originLabel: "",
			repo:        "library/busybox",
			expected:    "library/busybox",
		},
		{
			originLabel: "library/busybox",
			repo:        "library/busybox",
			expected:    "library/busybox",
		},
		// remove the duplicate one in origin
		{
			originLabel: "library/busybox,library/redis,library/busybox",
			repo:        "library/alpine",
			expected:    "library/alpine,library/busybox,library/redis",
		},
		// remove the empty repo
		{
			originLabel: "library/busybox,library/redis,library/busybox",
			repo:        "",
			expected:    "library/busybox,library/redis",
		},
		{
			originLabel: "library/busybox,library/redis,library/busybox",
			repo:        "library/redis",
			expected:    "library/busybox,library/redis",
		},
	} {
		if got := appendDistributionSourceLabel(tc.originLabel, tc.repo); !reflect.DeepEqual(got, tc.expected) {
			t.Fatalf("expected %v, but got %v", tc.expected, got)
		}
	}
}
||||
|
||||
// TestDistributionSourceLabelKey checks the label key is the shared prefix
// joined with the source host by a dot.
func TestDistributionSourceLabelKey(t *testing.T) {
	expected := labels.LabelDistributionSource + ".testsource"
	if got := distributionSourceLabelKey("testsource"); !reflect.DeepEqual(got, expected) {
		t.Fatalf("expected %v, but got %v", expected, got)
	}
}
|
||||
// TestCommonPrefixComponents covers partial, zero, and full component
// overlap between a component slice and a slash-separated target.
func TestCommonPrefixComponents(t *testing.T) {
	for _, tc := range []struct {
		components []string
		target     string
		expected   int
	}{
		{
			components: []string{"foo"},
			target:     "foo/bar",
			expected:   1,
		},
		{
			components: []string{"bar"},
			target:     "foo/bar",
			expected:   0,
		},
		{
			components: []string{"foo", "bar"},
			target:     "foo/bar",
			expected:   2,
		},
	} {
		if got := commonPrefixComponents(tc.components, tc.target); !reflect.DeepEqual(got, tc.expected) {
			t.Fatalf("expected %v, but got %v", tc.expected, got)
		}
	}
}
||||
|
||||
// TestSelectRepositoryMountCandidate verifies candidate selection from the
// source-host label: the target repo is excluded and, among equally good
// matches, the last listed entry wins.
func TestSelectRepositoryMountCandidate(t *testing.T) {
	for _, tc := range []struct {
		refspec  reference.Spec
		source   map[string]string
		expected string
	}{
		{
			refspec:  reference.Spec{},
			source:   map[string]string{"": ""},
			expected: "",
		},
		{
			refspec:  reference.Spec{Locator: "user@host/path"},
			source:   map[string]string{labels.LabelDistributionSource + ".host": "foo,path,bar"},
			expected: "bar",
		},
		{
			refspec:  reference.Spec{Locator: "user@host/path"},
			source:   map[string]string{labels.LabelDistributionSource + ".host": "foo,bar,path"},
			expected: "bar",
		},
	} {
		if got := selectRepositoryMountCandidate(tc.refspec, tc.source); !reflect.DeepEqual(got, tc.expected) {
			t.Fatalf("expected %v, but got %v", tc.expected, got)
		}
	}
}
||||
178
core/remotes/docker/httpreadseeker.go
Normal file
178
core/remotes/docker/httpreadseeker.go
Normal file
@@ -0,0 +1,178 @@
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package docker
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
"github.com/containerd/containerd/v2/errdefs"
|
||||
"github.com/containerd/log"
|
||||
)
|
||||
|
||||
// maxRetry bounds the number of consecutive reconnect attempts Read makes
// when the connection drops without any bytes having been read.
const maxRetry = 3

// httpReadSeeker adapts an HTTP response body into a reader that can also
// seek, by re-opening the request at a new offset via the open callback.
type httpReadSeeker struct {
	size   int64 // total content size, or -1 when unknown
	offset int64 // current read position within the content
	rc     io.ReadCloser
	open   func(offset int64) (io.ReadCloser, error) // re-opens the body at offset
	closed bool

	// errsWithNoProgress counts consecutive zero-byte reads that ended in
	// io.ErrUnexpectedEOF; reset whenever a read makes progress.
	errsWithNoProgress int
}
|
||||
|
||||
func newHTTPReadSeeker(size int64, open func(offset int64) (io.ReadCloser, error)) (io.ReadCloser, error) {
|
||||
return &httpReadSeeker{
|
||||
size: size,
|
||||
open: open,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Read implements io.Reader. On io.ErrUnexpectedEOF it drops the current
// body and tries to reconnect at the current offset, giving up only after
// maxRetry consecutive attempts that made no progress. On io.EOF the body is
// closed eagerly so close-based progress monitors observe completion.
func (hrs *httpReadSeeker) Read(p []byte) (n int, err error) {
	if hrs.closed {
		return 0, io.EOF
	}

	rd, err := hrs.reader()
	if err != nil {
		return 0, err
	}

	n, err = rd.Read(p)
	hrs.offset += int64(n)
	if n > 0 || err == nil {
		// Any progress (or clean read) resets the no-progress retry counter.
		hrs.errsWithNoProgress = 0
	}
	if err == io.ErrUnexpectedEOF {
		// connection closed unexpectedly. try reconnecting.
		if n == 0 {
			hrs.errsWithNoProgress++
			if hrs.errsWithNoProgress > maxRetry {
				return // too many retries for this offset with no progress
			}
		}
		if hrs.rc != nil {
			if clsErr := hrs.rc.Close(); clsErr != nil {
				log.L.WithError(clsErr).Error("httpReadSeeker: failed to close ReadCloser")
			}
			hrs.rc = nil
		}
		// Reopen at the current offset; if that succeeds, swallow the error
		// so the caller simply issues another Read.
		if _, err2 := hrs.reader(); err2 == nil {
			return n, nil
		}
	} else if err == io.EOF {
		// The CRI's imagePullProgressTimeout relies on responseBody.Close to
		// update the process monitor's status. If the err is io.EOF, close
		// the connection since there is no more available data.
		if hrs.rc != nil {
			if clsErr := hrs.rc.Close(); clsErr != nil {
				log.L.WithError(clsErr).Error("httpReadSeeker: failed to close ReadCloser after io.EOF")
			}
			hrs.rc = nil
		}
	}
	return
}
|
||||
|
||||
func (hrs *httpReadSeeker) Close() error {
|
||||
if hrs.closed {
|
||||
return nil
|
||||
}
|
||||
hrs.closed = true
|
||||
if hrs.rc != nil {
|
||||
return hrs.rc.Close()
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (hrs *httpReadSeeker) Seek(offset int64, whence int) (int64, error) {
|
||||
if hrs.closed {
|
||||
return 0, fmt.Errorf("Fetcher.Seek: closed: %w", errdefs.ErrUnavailable)
|
||||
}
|
||||
|
||||
abs := hrs.offset
|
||||
switch whence {
|
||||
case io.SeekStart:
|
||||
abs = offset
|
||||
case io.SeekCurrent:
|
||||
abs += offset
|
||||
case io.SeekEnd:
|
||||
if hrs.size == -1 {
|
||||
return 0, fmt.Errorf("Fetcher.Seek: unknown size, cannot seek from end: %w", errdefs.ErrUnavailable)
|
||||
}
|
||||
abs = hrs.size + offset
|
||||
default:
|
||||
return 0, fmt.Errorf("Fetcher.Seek: invalid whence: %w", errdefs.ErrInvalidArgument)
|
||||
}
|
||||
|
||||
if abs < 0 {
|
||||
return 0, fmt.Errorf("Fetcher.Seek: negative offset: %w", errdefs.ErrInvalidArgument)
|
||||
}
|
||||
|
||||
if abs != hrs.offset {
|
||||
if hrs.rc != nil {
|
||||
if err := hrs.rc.Close(); err != nil {
|
||||
log.L.WithError(err).Error("Fetcher.Seek: failed to close ReadCloser")
|
||||
}
|
||||
|
||||
hrs.rc = nil
|
||||
}
|
||||
|
||||
hrs.offset = abs
|
||||
}
|
||||
|
||||
return hrs.offset, nil
|
||||
}
|
||||
|
||||
// reader returns the current response body, opening one at the current
// offset if none is active. When the offset has reached a known size, an
// empty reader is returned instead of issuing another request.
func (hrs *httpReadSeeker) reader() (io.Reader, error) {
	if hrs.rc != nil {
		return hrs.rc, nil
	}

	if hrs.size == -1 || hrs.offset < hrs.size {
		// only try to reopen the body request if we are seeking to a value
		// less than the actual size.
		if hrs.open == nil {
			return nil, fmt.Errorf("cannot open: %w", errdefs.ErrNotImplemented)
		}

		rc, err := hrs.open(hrs.offset)
		if err != nil {
			return nil, fmt.Errorf("httpReadSeeker: failed open: %w", err)
		}

		if hrs.rc != nil {
			if err := hrs.rc.Close(); err != nil {
				log.L.WithError(err).Error("httpReadSeeker: failed to close ReadCloser")
			}
		}
		hrs.rc = rc
	} else {
		// There is an edge case here where offset == size of the content. If
		// we seek, we will probably get an error for content that cannot be
		// sought (?). In that case, we should err on committing the content,
		// as the length is already satisfied but we just return the empty
		// reader instead.

		hrs.rc = io.NopCloser(bytes.NewReader([]byte{}))
	}

	return hrs.rc, nil
}
|
||||
541
core/remotes/docker/pusher.go
Normal file
541
core/remotes/docker/pusher.go
Normal file
@@ -0,0 +1,541 @@
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package docker
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"path"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/containerd/containerd/v2/core/content"
|
||||
"github.com/containerd/containerd/v2/core/images"
|
||||
"github.com/containerd/containerd/v2/core/remotes"
|
||||
remoteserrors "github.com/containerd/containerd/v2/core/remotes/errors"
|
||||
"github.com/containerd/containerd/v2/errdefs"
|
||||
"github.com/containerd/log"
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
)
|
||||
|
||||
// dockerPusher pushes content to a registry over the Docker/OCI
// distribution protocol.
type dockerPusher struct {
	*dockerBase
	// object is the reference portion (tag and/or digest) used to build
	// manifest paths; see getManifestPath.
	object string

	// TODO: namespace tracker
	// tracker records per-ref upload status, shared with returned writers.
	tracker StatusTracker
}
|
||||
|
||||
// Writer implements Ingester API of content store. This allows the client
|
||||
// to receive ErrUnavailable when there is already an on-going upload.
|
||||
// Note that the tracker MUST implement StatusTrackLocker interface to avoid
|
||||
// race condition on StatusTracker.
|
||||
func (p dockerPusher) Writer(ctx context.Context, opts ...content.WriterOpt) (content.Writer, error) {
|
||||
var wOpts content.WriterOpts
|
||||
for _, opt := range opts {
|
||||
if err := opt(&wOpts); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if wOpts.Ref == "" {
|
||||
return nil, fmt.Errorf("ref must not be empty: %w", errdefs.ErrInvalidArgument)
|
||||
}
|
||||
return p.push(ctx, wOpts.Desc, wOpts.Ref, true)
|
||||
}
|
||||
|
||||
func (p dockerPusher) Push(ctx context.Context, desc ocispec.Descriptor) (content.Writer, error) {
|
||||
return p.push(ctx, desc, remotes.MakeRefKey(ctx, desc), false)
|
||||
}
|
||||
|
||||
// push prepares an upload of desc and returns a content.Writer feeding it.
// It HEADs the registry first to short-circuit already-present content
// (ErrAlreadyExists), then either prepares a manifest PUT or starts a blob
// upload session (attempting a cross-repository mount when the descriptor's
// annotations name a candidate source repo), and finally spawns a goroutine
// that performs the actual request, fed through a pipe by the returned
// pushWriter. When unavailableOnFail is true and another push of ref is
// already tracked, ErrUnavailable is returned.
func (p dockerPusher) push(ctx context.Context, desc ocispec.Descriptor, ref string, unavailableOnFail bool) (content.Writer, error) {
	// Serialize pushes of the same ref when the tracker supports locking.
	if l, ok := p.tracker.(StatusTrackLocker); ok {
		l.Lock(ref)
		defer l.Unlock(ref)
	}
	ctx, err := ContextWithRepositoryScope(ctx, p.refspec, true)
	if err != nil {
		return nil, err
	}
	status, err := p.tracker.GetStatus(ref)
	if err == nil {
		if status.Committed && status.Offset == status.Total {
			return nil, fmt.Errorf("ref %v: %w", ref, errdefs.ErrAlreadyExists)
		}
		if unavailableOnFail && status.ErrClosed == nil {
			// Another push of this ref is happening elsewhere. The rest of function
			// will continue only when `errdefs.IsNotFound(err) == true` (i.e. there
			// is no actively-tracked ref already).
			return nil, fmt.Errorf("push is on-going: %w", errdefs.ErrUnavailable)
		}
		// TODO: Handle incomplete status
	} else if !errdefs.IsNotFound(err) {
		return nil, fmt.Errorf("failed to get status: %w", err)
	}

	hosts := p.filterHosts(HostCapabilityPush)
	if len(hosts) == 0 {
		return nil, fmt.Errorf("no push hosts: %w", errdefs.ErrNotFound)
	}

	var (
		isManifest bool
		existCheck []string
		host       = hosts[0]
	)

	// Manifests and indexes are addressed under manifests/<ref>; everything
	// else is a blob addressed by digest.
	if images.IsManifestType(desc.MediaType) || images.IsIndexType(desc.MediaType) {
		isManifest = true
		existCheck = getManifestPath(p.object, desc.Digest)
	} else {
		existCheck = []string{"blobs", desc.Digest.String()}
	}

	// Existence check: HEAD the content before uploading.
	req := p.request(host, http.MethodHead, existCheck...)
	req.header.Set("Accept", strings.Join([]string{desc.MediaType, `*/*`}, ", "))

	log.G(ctx).WithField("url", req.String()).Debugf("checking and pushing to")

	resp, err := req.doWithRetries(ctx, nil)
	if err != nil {
		if !errors.Is(err, ErrInvalidAuthorization) {
			return nil, err
		}
		log.G(ctx).WithError(err).Debugf("Unable to check existence, continuing with push")
	} else {
		if resp.StatusCode == http.StatusOK {
			var exists bool
			if isManifest && existCheck[1] != desc.Digest.String() {
				// Tag-addressed manifest: only treat as existing when the
				// registry reports the same digest for that tag.
				dgstHeader := digest.Digest(resp.Header.Get("Docker-Content-Digest"))
				if dgstHeader == desc.Digest {
					exists = true
				}
			} else {
				exists = true
			}

			if exists {
				p.tracker.SetStatus(ref, Status{
					Committed: true,
					PushStatus: PushStatus{
						Exists: true,
					},
					Status: content.Status{
						Ref: ref,
						// TODO: Set updated time?
					},
				})
				resp.Body.Close()
				return nil, fmt.Errorf("content %v on remote: %w", desc.Digest, errdefs.ErrAlreadyExists)
			}
		} else if resp.StatusCode != http.StatusNotFound {
			err := remoteserrors.NewUnexpectedStatusErr(resp)
			log.G(ctx).WithField("resp", resp).WithField("body", string(err.(remoteserrors.ErrUnexpectedStatus).Body)).Debug("unexpected response")
			resp.Body.Close()
			return nil, err
		}
		resp.Body.Close()
	}

	if isManifest {
		putPath := getManifestPath(p.object, desc.Digest)
		req = p.request(host, http.MethodPut, putPath...)
		req.header.Add("Content-Type", desc.MediaType)
	} else {
		// Start upload request
		req = p.request(host, http.MethodPost, "blobs", "uploads/")

		mountedFrom := ""
		var resp *http.Response
		if fromRepo := selectRepositoryMountCandidate(p.refspec, desc.Annotations); fromRepo != "" {
			preq := requestWithMountFrom(req, desc.Digest.String(), fromRepo)
			pctx := ContextWithAppendPullRepositoryScope(ctx, fromRepo)

			// NOTE: the fromRepo might be private repo and
			// auth service still can grant token without error.
			// but the post request will fail because of 401.
			//
			// for the private repo, we should remove mount-from
			// query and send the request again.
			resp, err = preq.doWithRetries(pctx, nil)
			if err != nil {
				return nil, err
			}

			switch resp.StatusCode {
			case http.StatusUnauthorized:
				log.G(ctx).Debugf("failed to mount from repository %s", fromRepo)

				resp.Body.Close()
				resp = nil
			case http.StatusCreated:
				mountedFrom = path.Join(p.refspec.Hostname(), fromRepo)
			}
		}

		// No mount attempt, or it was rejected: start a plain upload session.
		if resp == nil {
			resp, err = req.doWithRetries(ctx, nil)
			if err != nil {
				if errors.Is(err, ErrInvalidAuthorization) {
					return nil, fmt.Errorf("push access denied, repository does not exist or may require authorization: %w", err)
				}
				return nil, err
			}
		}
		defer resp.Body.Close()

		switch resp.StatusCode {
		case http.StatusOK, http.StatusAccepted, http.StatusNoContent:
		case http.StatusCreated:
			// Mount succeeded (or content already present): nothing to upload.
			p.tracker.SetStatus(ref, Status{
				Committed: true,
				PushStatus: PushStatus{
					MountedFrom: mountedFrom,
				},
				Status: content.Status{
					Ref:    ref,
					Total:  desc.Size,
					Offset: desc.Size,
				},
			})
			return nil, fmt.Errorf("content %v on remote: %w", desc.Digest, errdefs.ErrAlreadyExists)
		default:
			err := remoteserrors.NewUnexpectedStatusErr(resp)
			log.G(ctx).WithField("resp", resp).WithField("body", string(err.(remoteserrors.ErrUnexpectedStatus).Body)).Debug("unexpected response")
			return nil, err
		}

		var (
			location = resp.Header.Get("Location")
			lurl     *url.URL
			lhost    = host
		)
		// Support paths without host in location
		if strings.HasPrefix(location, "/") {
			lurl, err = url.Parse(lhost.Scheme + "://" + lhost.Host + location)
			if err != nil {
				return nil, fmt.Errorf("unable to parse location %v: %w", location, err)
			}
		} else {
			if !strings.Contains(location, "://") {
				location = lhost.Scheme + "://" + location
			}
			lurl, err = url.Parse(location)
			if err != nil {
				return nil, fmt.Errorf("unable to parse location %v: %w", location, err)
			}

			if lurl.Host != lhost.Host || lhost.Scheme != lurl.Scheme {
				lhost.Scheme = lurl.Scheme
				lhost.Host = lurl.Host

				// Check if different than what was requested, accounting for fallback in the transport layer
				requested := resp.Request.URL
				if requested.Host != lhost.Host || requested.Scheme != lhost.Scheme {
					// Strip authorizer if change to host or scheme
					lhost.Authorizer = nil
					log.G(ctx).WithField("host", lhost.Host).WithField("scheme", lhost.Scheme).Debug("upload changed destination, authorizer removed")
				}
			}
		}
		q := lurl.Query()
		q.Add("digest", desc.Digest.String())

		req = p.request(lhost, http.MethodPut)
		req.header.Set("Content-Type", "application/octet-stream")
		req.path = lurl.Path + "?" + q.Encode()
	}
	p.tracker.SetStatus(ref, Status{
		Status: content.Status{
			Ref:       ref,
			Total:     desc.Size,
			Expected:  desc.Digest,
			StartedAt: time.Now(),
		},
	})

	// TODO: Support chunked upload

	pushw := newPushWriter(p.dockerBase, ref, desc.Digest, p.tracker, isManifest)

	// The request body is a pipe; the returned writer feeds its write end
	// and the goroutine below consumes the read end.
	req.body = func() (io.ReadCloser, error) {
		pr, pw := io.Pipe()
		pushw.setPipe(pw)
		return io.NopCloser(pr), nil
	}
	req.size = desc.Size

	go func() {
		resp, err := req.doWithRetries(ctx, nil)
		if err != nil {
			pushw.setError(err)
			pushw.Close()
			return
		}

		switch resp.StatusCode {
		case http.StatusOK, http.StatusCreated, http.StatusNoContent:
		default:
			err := remoteserrors.NewUnexpectedStatusErr(resp)
			log.G(ctx).WithField("resp", resp).WithField("body", string(err.(remoteserrors.ErrUnexpectedStatus).Body)).Debug("unexpected response")
			pushw.setError(err)
			pushw.Close()
		}
		pushw.setResponse(resp)
	}()

	return pushw, nil
}
|
||||
|
||||
func getManifestPath(object string, dgst digest.Digest) []string {
|
||||
if i := strings.IndexByte(object, '@'); i >= 0 {
|
||||
if object[i+1:] != dgst.String() {
|
||||
// use digest, not tag
|
||||
object = ""
|
||||
} else {
|
||||
// strip @<digest> for registry path to make tag
|
||||
object = object[:i]
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
if object == "" {
|
||||
return []string{"manifests", dgst.String()}
|
||||
}
|
||||
|
||||
return []string{"manifests", object}
|
||||
}
|
||||
|
||||
// pushWriter is a content.Writer that streams data into the HTTP upload
// request started by dockerPusher.push. Pipes, the final response, and
// request errors are exchanged with the request goroutine over channels.
type pushWriter struct {
	base *dockerBase
	ref  string

	// pipe is the write end feeding the in-flight request body; replaced
	// via pipeC when the request is retried.
	pipe *io.PipeWriter

	pipeC     chan *io.PipeWriter
	respC     chan *http.Response
	closeOnce sync.Once // guards close(pipeC) in Close
	errC      chan error

	isManifest bool

	expected digest.Digest
	tracker  StatusTracker
}
|
||||
|
||||
func newPushWriter(db *dockerBase, ref string, expected digest.Digest, tracker StatusTracker, isManifest bool) *pushWriter {
|
||||
// Initialize and create response
|
||||
return &pushWriter{
|
||||
base: db,
|
||||
ref: ref,
|
||||
expected: expected,
|
||||
tracker: tracker,
|
||||
pipeC: make(chan *io.PipeWriter, 1),
|
||||
respC: make(chan *http.Response, 1),
|
||||
errC: make(chan error, 1),
|
||||
isManifest: isManifest,
|
||||
}
|
||||
}
|
||||
|
||||
// setPipe hands the writer a new pipe to write the request body into.
func (pw *pushWriter) setPipe(p *io.PipeWriter) {
	pw.pipeC <- p
}

// setError records the terminal error from the upload request goroutine.
func (pw *pushWriter) setError(err error) {
	pw.errC <- err
}

// setResponse delivers the final HTTP response for Commit to validate.
func (pw *pushWriter) setResponse(resp *http.Response) {
	pw.respC <- resp
}
|
||||
|
||||
// Write streams p into the current request-body pipe. If the request
// goroutine has installed a replacement pipe (after a retried request), the
// stale pipe is closed with content.ErrReset and ErrReset is returned so
// the caller restarts the upload from offset 0.
func (pw *pushWriter) Write(p []byte) (n int, err error) {
	status, err := pw.tracker.GetStatus(pw.ref)
	if err != nil {
		return n, err
	}

	if pw.pipe == nil {
		// First write: block until the request goroutine provides a pipe.
		p, ok := <-pw.pipeC
		if !ok {
			return 0, io.ErrClosedPipe
		}
		pw.pipe = p
	} else {
		// Non-blocking check for a replacement pipe from a retried request.
		select {
		case p, ok := <-pw.pipeC:
			if !ok {
				return 0, io.ErrClosedPipe
			}
			pw.pipe.CloseWithError(content.ErrReset)
			pw.pipe = p

			// If content has already been written, the bytes
			// cannot be written and the caller must reset
			status.Offset = 0
			status.UpdatedAt = time.Now()
			pw.tracker.SetStatus(pw.ref, status)
			return 0, content.ErrReset
		default:
		}
	}

	n, err = pw.pipe.Write(p)
	if errors.Is(err, io.ErrClosedPipe) {
		// if the pipe is closed, we might have the original error on the error
		// channel - so we should try and get it
		select {
		case err2 := <-pw.errC:
			err = err2
		default:
		}
	}
	status.Offset += int64(n)
	status.UpdatedAt = time.Now()
	pw.tracker.SetStatus(pw.ref, status)
	return
}
|
||||
|
||||
// Close implements io.Closer. It closes pipeC exactly once (safe to call
// Close repeatedly), records an uncommitted upload as closed-with-error so
// a subsequent Write can retry it, and closes the active pipe if any.
func (pw *pushWriter) Close() error {
	// Ensure pipeC is closed but handle `Close()` being
	// called multiple times without panicking
	pw.closeOnce.Do(func() {
		close(pw.pipeC)
	})
	if pw.pipe != nil {
		status, err := pw.tracker.GetStatus(pw.ref)
		if err == nil && !status.Committed {
			// Closing an incomplete writer. Record this as an error so that following write can retry it.
			status.ErrClosed = errors.New("closed incomplete writer")
			pw.tracker.SetStatus(pw.ref, status)
		}
		return pw.pipe.Close()
	}
	return nil
}
|
||||
|
||||
func (pw *pushWriter) Status() (content.Status, error) {
|
||||
status, err := pw.tracker.GetStatus(pw.ref)
|
||||
if err != nil {
|
||||
return content.Status{}, err
|
||||
}
|
||||
return status.Status, nil
|
||||
|
||||
}
|
||||
|
||||
// Digest returns the digest this writer expects to commit.
func (pw *pushWriter) Digest() digest.Digest {
	// TODO: Get rid of this function?
	return pw.expected
}
|
||||
|
||||
// Commit finalizes the upload: it closes the pipe, waits for the request
// goroutine's response (or its error, or a replacement pipe signalling that
// the request was retried — returned as content.ErrReset), then validates
// the status code, total size, and the Docker-Content-Digest header against
// the expected digest before marking the tracked status committed.
func (pw *pushWriter) Commit(ctx context.Context, size int64, expected digest.Digest, opts ...content.Opt) error {
	// Check whether read has already thrown an error
	if _, err := pw.pipe.Write([]byte{}); err != nil && !errors.Is(err, io.ErrClosedPipe) {
		return fmt.Errorf("pipe error before commit: %w", err)
	}

	if err := pw.pipe.Close(); err != nil {
		return err
	}
	// TODO: timeout waiting for response
	var resp *http.Response
	select {
	case err := <-pw.errC:
		return err
	case resp = <-pw.respC:
		defer resp.Body.Close()
	case p, ok := <-pw.pipeC:
		// check whether the pipe has changed in the commit, because sometimes Write
		// can complete successfully, but the pipe may have changed. In that case, the
		// content needs to be reset.
		if !ok {
			return io.ErrClosedPipe
		}
		pw.pipe.CloseWithError(content.ErrReset)
		pw.pipe = p

		// If content has already been written, the bytes
		// cannot be written again and the caller must reset
		status, err := pw.tracker.GetStatus(pw.ref)
		if err != nil {
			return err
		}
		status.Offset = 0
		status.UpdatedAt = time.Now()
		pw.tracker.SetStatus(pw.ref, status)
		return content.ErrReset
	}

	// 201 is specified return status, some registries return
	// 200, 202 or 204.
	switch resp.StatusCode {
	case http.StatusOK, http.StatusCreated, http.StatusNoContent, http.StatusAccepted:
	default:
		return remoteserrors.NewUnexpectedStatusErr(resp)
	}

	status, err := pw.tracker.GetStatus(pw.ref)
	if err != nil {
		return fmt.Errorf("failed to get status: %w", err)
	}

	if size > 0 && size != status.Offset {
		return fmt.Errorf("unexpected size %d, expected %d", status.Offset, size)
	}

	if expected == "" {
		expected = status.Expected
	}

	actual, err := digest.Parse(resp.Header.Get("Docker-Content-Digest"))
	if err != nil {
		return fmt.Errorf("invalid content digest in response: %w", err)
	}

	if actual != expected {
		return fmt.Errorf("got digest %s, expected %s", actual, expected)
	}

	status.Committed = true
	status.UpdatedAt = time.Now()
	pw.tracker.SetStatus(pw.ref, status)

	return nil
}
|
||||
|
||||
// Truncate is unsupported for remote uploads and always returns an error.
func (pw *pushWriter) Truncate(size int64) error {
	// TODO: if blob close request and start new request at offset
	// TODO: always error on manifest
	return errors.New("cannot truncate remote upload")
}
|
||||
|
||||
func requestWithMountFrom(req *request, mount, from string) *request {
|
||||
creq := *req
|
||||
|
||||
sep := "?"
|
||||
if strings.Contains(creq.path, sep) {
|
||||
sep = "&"
|
||||
}
|
||||
|
||||
creq.path = creq.path + sep + "mount=" + mount + "&from=" + from
|
||||
|
||||
return &creq
|
||||
}
|
||||
519
core/remotes/docker/pusher_test.go
Normal file
519
core/remotes/docker/pusher_test.go
Normal file
@@ -0,0 +1,519 @@
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package docker
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"net/url"
|
||||
"reflect"
|
||||
"regexp"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/containerd/containerd/v2/core/content"
|
||||
"github.com/containerd/containerd/v2/core/remotes"
|
||||
"github.com/containerd/containerd/v2/errdefs"
|
||||
"github.com/containerd/containerd/v2/reference"
|
||||
"github.com/containerd/log/logtest"
|
||||
"github.com/opencontainers/go-digest"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestGetManifestPath(t *testing.T) {
|
||||
for _, tc := range []struct {
|
||||
object string
|
||||
dgst digest.Digest
|
||||
expected []string
|
||||
}{
|
||||
{
|
||||
object: "foo",
|
||||
dgst: "bar",
|
||||
expected: []string{"manifests", "foo"},
|
||||
},
|
||||
{
|
||||
object: "foo@bar",
|
||||
dgst: "bar",
|
||||
expected: []string{"manifests", "foo"},
|
||||
},
|
||||
{
|
||||
object: "foo@bar",
|
||||
dgst: "foobar",
|
||||
expected: []string{"manifests", "foobar"},
|
||||
},
|
||||
} {
|
||||
if got := getManifestPath(tc.object, tc.dgst); !reflect.DeepEqual(got, tc.expected) {
|
||||
t.Fatalf("expected %v, but got %v", tc.expected, got)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestPusherErrClosedRetry tests if retrying work when error occurred on close.
func TestPusherErrClosedRetry(t *testing.T) {
	ctx := context.Background()

	p, reg, _, done := samplePusher(t)
	defer done()

	layerContent := []byte("test")
	// First attempt: the mock registry rejects the upload.
	reg.uploadable = false
	if err := tryUpload(ctx, t, p, layerContent); err == nil {
		t.Errorf("upload should fail but succeeded")
	}

	// retry
	reg.uploadable = true
	if err := tryUpload(ctx, t, p, layerContent); err != nil {
		t.Errorf("upload should succeed but got %v", err)
	}
}
|
||||
|
||||
// TestPusherHTTPFallback verifies that a pusher configured for https can
// still upload to an http-only registry via the HTTPFallback transport,
// with basic-auth credentials applied to the fallback host.
func TestPusherHTTPFallback(t *testing.T) {
	ctx := logtest.WithT(context.Background(), t)

	p, reg, _, done := samplePusher(t)
	defer done()

	reg.uploadable = true
	reg.username = "testuser"
	reg.secret = "testsecret"
	reg.locationPrefix = p.hosts[0].Scheme + "://" + p.hosts[0].Host

	// Force https on the host so the fallback path is exercised.
	p.hosts[0].Scheme = "https"
	client := p.hosts[0].Client
	if client == nil {
		clientC := *http.DefaultClient
		client = &clientC
	}
	if client.Transport == nil {
		client.Transport = http.DefaultTransport
	}
	client.Transport = HTTPFallback{client.Transport}
	p.hosts[0].Client = client
	phost := p.hosts[0].Host
	// Only supply credentials for the test registry's host.
	p.hosts[0].Authorizer = NewDockerAuthorizer(WithAuthCreds(func(host string) (string, string, error) {
		if host == phost {
			return "testuser", "testsecret", nil
		}
		return "", "", nil
	}))

	layerContent := []byte("test")
	if err := tryUpload(ctx, t, p, layerContent); err != nil {
		t.Errorf("upload failed: %v", err)
	}
}
|
||||
|
||||
// TestPusherErrReset tests the push method if the request needs to be retried
// i.e when ErrReset occurs
func TestPusherErrReset(t *testing.T) {
	p, reg, _, done := samplePusher(t)
	defer done()

	p.object = "latest@sha256:55d31f3af94c797b65b310569803cacc1c9f4a34bf61afcdc8138f89345c8308"

	reg.uploadable = true
	// The first PUT is answered with 408 so the client's retry replaces the
	// body pipe; subsequent PUTs fall through to the default handler.
	reg.putHandlerFunc = func() func(w http.ResponseWriter, r *http.Request) bool {
		// sets whether the request should timeout so that a reset error can occur and
		// request will be retried
		shouldTimeout := true
		return func(w http.ResponseWriter, r *http.Request) bool {
			if shouldTimeout {
				shouldTimeout = !shouldTimeout
				w.WriteHeader(http.StatusRequestTimeout)
				return true
			}
			return false
		}
	}()

	ct := []byte("manifest-content")

	desc := ocispec.Descriptor{
		MediaType: ocispec.MediaTypeImageManifest,
		Digest:    digest.FromBytes(ct),
		Size:      int64(len(ct)),
	}

	w, err := p.push(context.Background(), desc, remotes.MakeRefKey(context.Background(), desc), false)
	assert.NoError(t, err)

	// first push should fail with ErrReset
	_, err = w.Write(ct)
	assert.NoError(t, err)
	err = w.Commit(context.Background(), desc.Size, desc.Digest)
	assert.Equal(t, content.ErrReset, err)

	// second push should succeed
	_, err = w.Write(ct)
	assert.NoError(t, err)
	err = w.Commit(context.Background(), desc.Size, desc.Digest)
	assert.NoError(t, err)
}
|
||||
|
||||
func tryUpload(ctx context.Context, t *testing.T, p dockerPusher, layerContent []byte) error {
|
||||
desc := ocispec.Descriptor{
|
||||
MediaType: ocispec.MediaTypeImageLayerGzip,
|
||||
Digest: digest.FromBytes(layerContent),
|
||||
Size: int64(len(layerContent)),
|
||||
}
|
||||
cw, err := p.Writer(ctx, content.WithRef("test-1"), content.WithDescriptor(desc))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer cw.Close()
|
||||
if _, err := cw.Write(layerContent); err != nil {
|
||||
return err
|
||||
}
|
||||
return cw.Commit(ctx, 0, "")
|
||||
}
|
||||
|
||||
// samplePusher constructs a dockerPusher wired to an in-process mock
// registry, returning the pusher, the registry, its status tracker, and a
// cleanup function that shuts the test server down.
func samplePusher(t *testing.T) (dockerPusher, *uploadableMockRegistry, StatusTrackLocker, func()) {
	reg := &uploadableMockRegistry{
		availableContents: make([]string, 0),
	}
	s := httptest.NewServer(reg)
	u, err := url.Parse(s.URL)
	if err != nil {
		t.Fatal(err)
	}
	tracker := NewInMemoryTracker()
	return dockerPusher{
		dockerBase: &dockerBase{
			refspec: reference.Spec{
				Locator: "example.com/samplerepository:latest",
			},
			repository: "samplerepository",
			hosts: []RegistryHost{
				{
					Client:       s.Client(),
					Host:         u.Host,
					Scheme:       u.Scheme,
					Path:         u.Path,
					Capabilities: HostCapabilityPush | HostCapabilityResolve,
				},
			},
		},
		object:  "latest",
		tracker: tracker,
	}, reg, tracker, s.Close
}
|
||||
|
||||
// manifestRegexp matches /<repo>/manifests/<reference> request paths.
var manifestRegexp = regexp.MustCompile(`/([a-z0-9]+)/manifests/(.*)`)

// blobUploadRegexp matches /<repo>/blobs/uploads/<session> request paths.
var blobUploadRegexp = regexp.MustCompile(`/([a-z0-9]+)/blobs/uploads/(.*)`)

// uploadableMockRegistry provides minimal registry APIs which are enough to serve requests from dockerPusher.
type uploadableMockRegistry struct {
	availableContents []string // digests of content accepted so far
	uploadable        bool     // when false, uploads are directed to a failing location
	putHandlerFunc    func(w http.ResponseWriter, r *http.Request) bool
	locationPrefix    string // prefix for the upload Location header
	username          string // expected basic-auth user when secret is set
	secret            string // when non-empty, basic auth is enforced on every request
}
|
||||
|
||||
// ServeHTTP enforces basic auth when a secret is configured, lets
// putHandlerFunc intercept PUT requests, and otherwise delegates to the
// default handler.
func (u *uploadableMockRegistry) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	if u.secret != "" {
		user, pass, ok := r.BasicAuth()
		if !ok || user != u.username || pass != u.secret {
			w.Header().Add("WWW-Authenticate", "basic realm=test")
			w.WriteHeader(http.StatusUnauthorized)
			return
		}
	}
	if r.Method == http.MethodPut && u.putHandlerFunc != nil {
		// if true return the response without calling default handler
		if u.putHandlerFunc(w, r) {
			return
		}
	}
	u.defaultHandler(w, r)
}
|
||||
|
||||
func (u *uploadableMockRegistry) defaultHandler(w http.ResponseWriter, r *http.Request) {
|
||||
if r.Method == http.MethodPost {
|
||||
if matches := blobUploadRegexp.FindStringSubmatch(r.URL.Path); len(matches) != 0 {
|
||||
if u.uploadable {
|
||||
w.Header().Set("Location", u.locationPrefix+"/upload")
|
||||
} else {
|
||||
w.Header().Set("Location", u.locationPrefix+"/cannotupload")
|
||||
}
|
||||
|
||||
dgstr := digest.Canonical.Digester()
|
||||
|
||||
if _, err := io.Copy(dgstr.Hash(), r.Body); err != nil {
|
||||
w.WriteHeader(http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
query := r.URL.Query()
|
||||
if query.Has("mount") && query.Get("from") == "always-mount" {
|
||||
w.Header().Set("Docker-Content-Digest", dgstr.Digest().String())
|
||||
w.WriteHeader(http.StatusCreated)
|
||||
return
|
||||
}
|
||||
|
||||
u.availableContents = append(u.availableContents, dgstr.Digest().String())
|
||||
w.WriteHeader(http.StatusAccepted)
|
||||
return
|
||||
}
|
||||
} else if r.Method == http.MethodPut {
|
||||
mfstMatches := manifestRegexp.FindStringSubmatch(r.URL.Path)
|
||||
if len(mfstMatches) != 0 || strings.HasPrefix(r.URL.Path, "/upload") {
|
||||
dgstr := digest.Canonical.Digester()
|
||||
if _, err := io.Copy(dgstr.Hash(), r.Body); err != nil {
|
||||
w.WriteHeader(http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
u.availableContents = append(u.availableContents, dgstr.Digest().String())
|
||||
w.Header().Set("Docker-Content-Digest", dgstr.Digest().String())
|
||||
w.WriteHeader(http.StatusCreated)
|
||||
return
|
||||
} else if r.URL.Path == "/cannotupload" {
|
||||
w.WriteHeader(http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
} else if r.Method == http.MethodHead {
|
||||
var content string
|
||||
// check for both manifest and blob paths
|
||||
if manifestMatch := manifestRegexp.FindStringSubmatch(r.URL.Path); len(manifestMatch) == 3 {
|
||||
content = manifestMatch[2]
|
||||
} else if blobMatch := blobUploadRegexp.FindStringSubmatch(r.URL.Path); len(blobMatch) == 3 {
|
||||
content = blobMatch[2]
|
||||
}
|
||||
// if content is not found or if the path is not manifest or blob
|
||||
// we return 404
|
||||
if u.isContentAlreadyExist(content) {
|
||||
w.WriteHeader(http.StatusOK)
|
||||
} else {
|
||||
w.WriteHeader(http.StatusNotFound)
|
||||
}
|
||||
return
|
||||
}
|
||||
fmt.Println(r)
|
||||
w.WriteHeader(http.StatusNotFound)
|
||||
}
|
||||
|
||||
// checks if the content is already present in the registry
|
||||
func (u *uploadableMockRegistry) isContentAlreadyExist(c string) bool {
|
||||
for _, ct := range u.availableContents {
|
||||
if ct == c {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Test_dockerPusher_push exercises dockerPusher.push against the mock
// registry: fresh manifest push, duplicate push (ErrAlreadyExists), blob
// push, and cross-repo mount success/failure paths, verifying both the
// returned error and the status recorded in the tracker.
func Test_dockerPusher_push(t *testing.T) {

	p, reg, tracker, done := samplePusher(t)
	defer done()

	reg.uploadable = true

	manifestContent := []byte("manifest-content")
	manifestContentDigest := digest.FromBytes(manifestContent)
	layerContent := []byte("layer-content")
	layerContentDigest := digest.FromBytes(layerContent)

	// using a random object here
	baseObject := "latest@sha256:55d31f3af94c797b65b310569803cacc1c9f4a34bf61afcdc8138f89345c8308"

	type args struct {
		content           []byte
		mediatype         string
		ref               string
		unavailableOnFail bool
		annotations       map[string]string
	}
	tests := []struct {
		name             string
		dp               dockerPusher
		dockerBaseObject string
		args             args
		// checkerFunc inspects the pushWriter's response/error channels
		// after content has been written; only used when wantErr is nil.
		checkerFunc func(writer *pushWriter) bool
		wantErr     error
		wantStatus  *PushStatus
	}{
		{
			name:             "when a manifest is pushed",
			dp:               p,
			dockerBaseObject: baseObject,
			args: args{
				content:           manifestContent,
				mediatype:         ocispec.MediaTypeImageManifest,
				ref:               fmt.Sprintf("manifest-%s", manifestContentDigest.String()),
				unavailableOnFail: false,
			},
			checkerFunc: func(writer *pushWriter) bool {
				select {
				case resp := <-writer.respC:
					// 201 should be the response code when uploading a new manifest
					return resp.StatusCode == http.StatusCreated
				case <-writer.errC:
					return false
				}
			},
			wantErr: nil,
		},
		{
			// Re-pushing the same manifest: HEAD finds it and push errors out.
			name:             "trying to push content that already exists",
			dp:               p,
			dockerBaseObject: baseObject,
			args: args{
				content:           manifestContent,
				mediatype:         ocispec.MediaTypeImageManifest,
				ref:               fmt.Sprintf("manifest-%s", manifestContentDigest.String()),
				unavailableOnFail: false,
			},
			wantErr: fmt.Errorf("content %v on remote: %w", digest.FromBytes(manifestContent), errdefs.ErrAlreadyExists),
			wantStatus: &PushStatus{
				Exists:      true,
				MountedFrom: "",
			},
		},
		{
			name: "success cross-repo mount a blob layer",
			dp:   p,
			// Not needed to set the base object as it is used to generate path only in case of manifests
			// dockerBaseObject:
			args: args{
				content:           layerContent,
				mediatype:         ocispec.MediaTypeImageLayer,
				ref:               fmt.Sprintf("layer2-%s", layerContentDigest.String()),
				unavailableOnFail: false,
				annotations: map[string]string{
					distributionSourceLabelKey("example.com"): "always-mount",
				},
			},
			checkerFunc: func(writer *pushWriter) bool {
				select {
				case resp := <-writer.respC:
					// 201 should be the response code when uploading a new blob
					return resp.StatusCode == http.StatusCreated
				case <-writer.errC:
					return false
				}
			},
			wantErr: fmt.Errorf("content %v on remote: %w", digest.FromBytes(layerContent), errdefs.ErrAlreadyExists),
			wantStatus: &PushStatus{
				MountedFrom: "example.com/always-mount",
				Exists:      false,
			},
		},
		{
			// "never-mount" makes the registry decline the mount, so push
			// falls back to a regular upload and succeeds.
			name: "failed to cross-repo mount a blob layer",
			dp:   p,
			// Not needed to set the base object as it is used to generate path only in case of manifests
			// dockerBaseObject:
			args: args{
				content:           layerContent,
				mediatype:         ocispec.MediaTypeImageLayer,
				ref:               fmt.Sprintf("layer3-%s", layerContentDigest.String()),
				unavailableOnFail: false,
				annotations: map[string]string{
					distributionSourceLabelKey("example.com"): "never-mount",
				},
			},
			checkerFunc: func(writer *pushWriter) bool {
				select {
				case resp := <-writer.respC:
					// 201 should be the response code when uploading a new blob
					return resp.StatusCode == http.StatusCreated
				case <-writer.errC:
					return false
				}
			},
			wantErr: nil,
			wantStatus: &PushStatus{
				MountedFrom: "",
				Exists:      false,
			},
		},
		{
			name: "trying to push a blob layer",
			dp:   p,
			// Not needed to set the base object as it is used to generate path only in case of manifests
			// dockerBaseObject:
			args: args{
				content:           layerContent,
				mediatype:         ocispec.MediaTypeImageLayer,
				ref:               fmt.Sprintf("layer-%s", layerContentDigest.String()),
				unavailableOnFail: false,
			},
			checkerFunc: func(writer *pushWriter) bool {
				select {
				case resp := <-writer.respC:
					// 201 should be the response code when uploading a new blob
					return resp.StatusCode == http.StatusCreated
				case <-writer.errC:
					return false
				}
			},
			wantErr: nil,
			wantStatus: &PushStatus{
				MountedFrom: "",
				Exists:      false,
			},
		},
	}
	for _, test := range tests {
		test := test
		t.Run(test.name, func(t *testing.T) {
			desc := ocispec.Descriptor{
				MediaType:   test.args.mediatype,
				Digest:      digest.FromBytes(test.args.content),
				Size:        int64(len(test.args.content)),
				Annotations: test.args.annotations,
			}

			test.dp.object = test.dockerBaseObject

			got, err := test.dp.push(context.Background(), desc, test.args.ref, test.args.unavailableOnFail)

			assert.Equal(t, test.wantErr, err)

			if test.wantStatus != nil {
				status, err := tracker.GetStatus(test.args.ref)
				assert.NoError(t, err)
				assert.Equal(t, *test.wantStatus, status.PushStatus)
			}

			// if an error is expected, further comparisons are not required.
			if test.wantErr != nil {
				return
			}

			// write the content to the writer, this will be done when a Read() is called on the body of the request
			got.Write(test.args.content)

			pw, ok := got.(*pushWriter)
			if !ok {
				assert.Errorf(t, errors.New("unable to cast content.Writer to pushWriter"), "got %v instead of pushwriter", got)
			}

			// test whether a proper response has been received after the push operation
			assert.True(t, test.checkerFunc(pw))

		})
	}
}
|
||||
244
core/remotes/docker/registry.go
Normal file
244
core/remotes/docker/registry.go
Normal file
@@ -0,0 +1,244 @@
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package docker
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"net"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
// HostCapabilities represent the capabilities of the registry
// host. This also represents the set of operations for which
// the registry host may be trusted to perform.
//
// For example pushing is a capability which should only be
// performed on an upstream source, not a mirror.
// Resolving (the process of converting a name into a digest)
// must be considered a trusted operation and only done by
// a host which is trusted (or more preferably by secure process
// which can prove the provenance of the mapping). A public
// mirror should never be trusted to do a resolve action.
//
// | Registry Type    | Pull | Resolve | Push |
// |------------------|------|---------|------|
// | Public Registry  | yes  | yes     | yes  |
// | Private Registry | yes  | yes     | yes  |
// | Public Mirror    | yes  | no      | no   |
// | Private Mirror   | yes  | yes     | no   |
type HostCapabilities uint8

const (
	// HostCapabilityPull represents the capability to fetch manifests
	// and blobs by digest
	HostCapabilityPull HostCapabilities = 1 << iota

	// HostCapabilityResolve represents the capability to fetch manifests
	// by name
	HostCapabilityResolve

	// HostCapabilityPush represents the capability to push blobs and
	// manifests
	HostCapabilityPush

	// Reserved for future capabilities (i.e. search, catalog, remove)
)

// Has reports whether every capability bit in t is also set in c.
func (c HostCapabilities) Has(t HostCapabilities) bool {
	// t &^ c clears the bits of t that c provides; zero means c covers t.
	return t&^c == 0
}
|
||||
|
||||
// RegistryHost represents a complete configuration for a registry
// host, representing the capabilities, authorizations, connection
// configuration, and location.
type RegistryHost struct {
	Client       *http.Client     // HTTP client used for requests to this host
	Authorizer   Authorizer       // supplies credentials/tokens for requests
	Host         string           // host[:port] of the registry endpoint
	Scheme       string           // "https" or "http"
	Path         string           // base path of the registry API (e.g. "/v2")
	Capabilities HostCapabilities // operations this host is trusted to perform
	Header       http.Header      // extra headers added to each request
}
|
||||
|
||||
func (h RegistryHost) isProxy(refhost string) bool {
|
||||
if refhost != h.Host {
|
||||
if refhost != "docker.io" || h.Host != "registry-1.docker.io" {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// RegistryHosts fetches the registry hosts for a given namespace,
|
||||
// provided by the host component of an distribution image reference.
|
||||
type RegistryHosts func(string) ([]RegistryHost, error)
|
||||
|
||||
// Registries joins multiple registry configuration functions, using the same
|
||||
// order as provided within the arguments. When an empty registry configuration
|
||||
// is returned with a nil error, the next function will be called.
|
||||
// NOTE: This function will not join configurations, as soon as a non-empty
|
||||
// configuration is returned from a configuration function, it will be returned
|
||||
// to the caller.
|
||||
func Registries(registries ...RegistryHosts) RegistryHosts {
|
||||
return func(host string) ([]RegistryHost, error) {
|
||||
for _, registry := range registries {
|
||||
config, err := registry(host)
|
||||
if err != nil {
|
||||
return config, err
|
||||
}
|
||||
if len(config) > 0 {
|
||||
return config, nil
|
||||
}
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
}
|
||||
|
||||
// registryOpts holds the defaults applied by ConfigureDefaultRegistries,
// populated through RegistryOpt functions.
type registryOpts struct {
	authorizer Authorizer                   // default authorizer for configured hosts
	plainHTTP  func(string) (bool, error)   // matcher deciding which hosts use plain http
	host       func(string) (string, error) // optional hostname translator
	client     *http.Client                 // default HTTP client for configured hosts
}

// RegistryOpt defines a registry default option
type RegistryOpt func(*registryOpts)

// WithPlainHTTP configures registries to use plaintext http scheme
// for the provided host match function.
func WithPlainHTTP(f func(string) (bool, error)) RegistryOpt {
	return func(opts *registryOpts) {
		opts.plainHTTP = f
	}
}

// WithAuthorizer configures the default authorizer for a registry
func WithAuthorizer(a Authorizer) RegistryOpt {
	return func(opts *registryOpts) {
		opts.authorizer = a
	}
}

// WithHostTranslator defines the default translator to use for registry hosts
func WithHostTranslator(h func(string) (string, error)) RegistryOpt {
	return func(opts *registryOpts) {
		opts.host = h
	}
}

// WithClient configures the default http client for a registry
func WithClient(c *http.Client) RegistryOpt {
	return func(opts *registryOpts) {
		opts.client = c
	}
}
|
||||
|
||||
// ConfigureDefaultRegistries is used to create a default configuration for
|
||||
// registries. For more advanced configurations or per-domain setups,
|
||||
// the RegistryHosts interface should be used directly.
|
||||
// NOTE: This function will always return a non-empty value or error
|
||||
func ConfigureDefaultRegistries(ropts ...RegistryOpt) RegistryHosts {
|
||||
var opts registryOpts
|
||||
for _, opt := range ropts {
|
||||
opt(&opts)
|
||||
}
|
||||
|
||||
return func(host string) ([]RegistryHost, error) {
|
||||
config := RegistryHost{
|
||||
Client: opts.client,
|
||||
Authorizer: opts.authorizer,
|
||||
Host: host,
|
||||
Scheme: "https",
|
||||
Path: "/v2",
|
||||
Capabilities: HostCapabilityPull | HostCapabilityResolve | HostCapabilityPush,
|
||||
}
|
||||
|
||||
if config.Client == nil {
|
||||
config.Client = http.DefaultClient
|
||||
}
|
||||
|
||||
if opts.plainHTTP != nil {
|
||||
match, err := opts.plainHTTP(host)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if match {
|
||||
config.Scheme = "http"
|
||||
}
|
||||
}
|
||||
|
||||
if opts.host != nil {
|
||||
var err error
|
||||
config.Host, err = opts.host(config.Host)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else if host == "docker.io" {
|
||||
config.Host = "registry-1.docker.io"
|
||||
}
|
||||
|
||||
return []RegistryHost{config}, nil
|
||||
}
|
||||
}
|
||||
|
||||
// MatchAllHosts is a host match function which is always true.
// Useful as a WithPlainHTTP matcher to force plain http for every host.
func MatchAllHosts(string) (bool, error) {
	return true, nil
}
|
||||
|
||||
// MatchLocalhost is a host match function which returns true for
// localhost.
//
// Note: this does not handle matching of ip addresses in octal,
// decimal or hex form.
func MatchLocalhost(host string) (bool, error) {
	// IPv6 loopback, bare or bracketed, is accepted up front since it
	// carries no port.
	if host == "::1" || host == "[::1]" {
		return true, nil
	}

	h, p, err := net.SplitHostPort(host)
	switch {
	case err != nil:
		// Only a "missing port in address" error is acceptable — the host
		// string need not carry a port. Any other AddrError (e.g. too many
		// colons) is propagated.
		missingPort := &net.AddrError{
			Err:  "missing port in address",
			Addr: host,
		}
		if err.Error() != missingPort.Error() {
			return false, err
		}
		h = host // host string without any port specified
	case len(p) == 0:
		// A trailing colon with no port is malformed.
		return false, errors.New("invalid host name format")
	}

	// use ipv4 dotted decimal for further checking
	if h == "localhost" {
		h = "127.0.0.1"
	}

	// ParseIP yields nil for invalid input; IsLoopback on nil is false,
	// so invalid addresses simply fail to match.
	return net.ParseIP(h).IsLoopback(), nil
}
|
||||
81
core/remotes/docker/registry_test.go
Normal file
81
core/remotes/docker/registry_test.go
Normal file
@@ -0,0 +1,81 @@
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package docker
|
||||
|
||||
import "testing"
|
||||
|
||||
// TestHasCapability verifies HostCapabilities.Has across single and combined
// capability bits, including sets that are strict subsets or supersets.
func TestHasCapability(t *testing.T) {
	var (
		pull = HostCapabilityPull
		rslv = HostCapabilityResolve
		push = HostCapabilityPush
		all  = pull | rslv | push
	)
	for i, tc := range []struct {
		c HostCapabilities // capability set under test
		t HostCapabilities // capabilities being asked about
		e bool             // expected result of c.Has(t)
	}{
		{all, pull, true},
		{all, pull | rslv, true},
		{all, pull | push, true},
		{all, all, true},
		{pull, all, false},
		{pull, push, false},
		{rslv, pull, false},
		{pull | rslv, push, false},
		{pull | rslv, rslv, true},
	} {
		if a := tc.c.Has(tc.t); a != tc.e {
			t.Fatalf("%d: failed, expected %t, got %t", i, tc.e, a)
		}
	}
}
|
||||
|
||||
// TestMatchLocalhost table-tests MatchLocalhost with loopback and
// non-loopback hosts, with and without ports, including malformed inputs
// that must not match (and must not panic).
func TestMatchLocalhost(t *testing.T) {
	for _, tc := range []struct {
		host  string
		match bool
	}{
		{"", false},
		{"127.1.1.1", true},
		{"127.0.0.1", true},
		{"127.256.0.1", false}, // test MatchLocalhost does not panic on invalid ip
		{"127.23.34.52", true},
		{"127.0.0.1:5000", true},
		{"registry.org", false},
		{"126.example.com", false},
		{"localhost", true},
		{"localhost:5000", true},
		{"[127:0:0:1]", false},
		{"[::1]", true},
		{"[::1]:", false},     // invalid ip
		{"127.0.1.1:", false}, // invalid ip
		{"[::1]:5000", true},
		{"::1", true},
	} {
		actual, _ := MatchLocalhost(tc.host)
		if actual != tc.match {
			if tc.match {
				t.Logf("Expected match for %s", tc.host)
			} else {
				t.Logf("Unexpected match for %s", tc.host)
			}
			t.Fail()
		}
	}
}
|
||||
739
core/remotes/docker/resolver.go
Normal file
739
core/remotes/docker/resolver.go
Normal file
@@ -0,0 +1,739 @@
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package docker
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"path"
|
||||
"strings"
|
||||
|
||||
"github.com/containerd/containerd/v2/core/images"
|
||||
"github.com/containerd/containerd/v2/core/remotes"
|
||||
"github.com/containerd/containerd/v2/core/remotes/docker/schema1" //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
|
||||
remoteerrors "github.com/containerd/containerd/v2/core/remotes/errors"
|
||||
"github.com/containerd/containerd/v2/errdefs"
|
||||
"github.com/containerd/containerd/v2/reference"
|
||||
"github.com/containerd/containerd/v2/tracing"
|
||||
"github.com/containerd/containerd/v2/version"
|
||||
"github.com/containerd/log"
|
||||
"github.com/opencontainers/go-digest"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
)
|
||||
|
||||
var (
	// ErrInvalidAuthorization is used when credentials are passed to a server but
	// those credentials are rejected.
	ErrInvalidAuthorization = errors.New("authorization failed")

	// MaxManifestSize represents the largest size accepted from a registry
	// during resolution. Larger manifests may be accepted using a
	// resolution method other than the registry.
	//
	// NOTE: The max supported layers by some runtimes is 128 and individual
	// layers will not contribute more than 256 bytes, making a
	// reasonable limit for a large image manifests of 32K bytes.
	// 4M bytes represents a much larger upper bound for images which may
	// contain large annotations or be non-images. A proper manifest
	// design puts large metadata in subobjects, as is consistent the
	// intent of the manifest design.
	//
	// NOTE(review): 4 * 1048 * 1048 is ~4.39MB, not 4MiB — "1048" looks
	// like a typo for 1024. Left as-is because shrinking the limit could
	// reject manifests previously accepted; confirm before changing.
	MaxManifestSize int64 = 4 * 1048 * 1048
)
|
||||
|
||||
// Authorizer is used to authorize HTTP requests based on 401 HTTP responses.
// An Authorizer is responsible for caching tokens or credentials used by
// requests.
//
// Implementations must be safe for concurrent use, as a single Authorizer
// may be shared across hosts and requests.
type Authorizer interface {
	// Authorize sets the appropriate `Authorization` header on the given
	// request.
	//
	// If no authorization is found for the request, the request remains
	// unmodified. It may also add an `Authorization` header as
	//  "bearer <some bearer token>"
	//  "basic <base64 encoded credentials>"
	//
	// It may return remotes/errors.ErrUnexpectedStatus, which for example,
	// can be used by the caller to find out the status code returned by the registry.
	Authorize(context.Context, *http.Request) error

	// AddResponses adds a 401 response for the authorizer to consider when
	// authorizing requests. The last response should be unauthorized and
	// the previous requests are used to consider redirects and retries
	// that may have led to the 401.
	//
	// If response is not handled, returns `ErrNotImplemented`
	AddResponses(context.Context, []*http.Response) error
}
|
||||
|
||||
// ResolverOptions are used to configured a new Docker register resolver
type ResolverOptions struct {
	// Hosts returns registry host configurations for a namespace.
	Hosts RegistryHosts

	// Headers are the HTTP request header fields sent by the resolver
	Headers http.Header

	// Tracker is used to track uploads to the registry. This is used
	// since the registry does not have upload tracking and the existing
	// mechanism for getting blob upload status is expensive.
	Tracker StatusTracker

	// Authorizer is used to authorize registry requests
	//
	// Deprecated: use Hosts.
	Authorizer Authorizer

	// Credentials provides username and secret given a host.
	// If username is empty but a secret is given, that secret
	// is interpreted as a long lived token.
	//
	// Deprecated: use Hosts.
	Credentials func(string) (string, string, error)

	// Host provides the hostname given a namespace.
	//
	// Deprecated: use Hosts.
	Host func(string) (string, error)

	// PlainHTTP specifies to use plain http and not https
	//
	// Deprecated: use Hosts.
	PlainHTTP bool

	// Client is the http client to used when making registry requests
	//
	// Deprecated: use Hosts.
	Client *http.Client
}
|
||||
|
||||
// DefaultHost is the default host function. It maps the "docker.io"
// namespace to its canonical registry endpoint and returns every other
// namespace unchanged.
func DefaultHost(ns string) (string, error) {
	if ns != "docker.io" {
		return ns, nil
	}
	return "registry-1.docker.io", nil
}
|
||||
|
||||
// dockerResolver implements remotes.Resolver against Docker/OCI
// distribution registries.
type dockerResolver struct {
	hosts         RegistryHosts // per-namespace registry host configuration
	header        http.Header   // headers attached to every request
	resolveHeader http.Header   // Accept headers used during manifest resolution
	tracker       StatusTracker // tracks upload status for pushes
}
|
||||
|
||||
// NewResolver returns a new resolver to a Docker registry
//
// Defaults are applied in order: an in-memory tracker, a containerd
// User-Agent, Accept headers covering all supported manifest types, and —
// when no Hosts function is supplied — a host configuration assembled from
// the deprecated Authorizer/Credentials/Host/PlainHTTP/Client fields.
func NewResolver(options ResolverOptions) remotes.Resolver {
	if options.Tracker == nil {
		options.Tracker = NewInMemoryTracker()
	}

	if options.Headers == nil {
		options.Headers = make(http.Header)
	} else {
		// make a copy of the headers to avoid race due to concurrent map write
		options.Headers = options.Headers.Clone()
	}
	if _, ok := options.Headers["User-Agent"]; !ok {
		options.Headers.Set("User-Agent", "containerd/"+version.Version)
	}

	resolveHeader := http.Header{}
	if _, ok := options.Headers["Accept"]; !ok {
		// set headers for all the types we support for resolution.
		resolveHeader.Set("Accept", strings.Join([]string{
			images.MediaTypeDockerSchema2Manifest,
			images.MediaTypeDockerSchema2ManifestList,
			ocispec.MediaTypeImageManifest,
			ocispec.MediaTypeImageIndex, "*/*",
		}, ", "))
	} else {
		// Caller-supplied Accept applies only to resolution; remove it from
		// the general headers so fetches are not restricted by it.
		resolveHeader["Accept"] = options.Headers["Accept"]
		delete(options.Headers, "Accept")
	}

	if options.Hosts == nil {
		// Build a default Hosts function from the deprecated options.
		opts := []RegistryOpt{}
		if options.Host != nil {
			opts = append(opts, WithHostTranslator(options.Host))
		}

		if options.Authorizer == nil {
			options.Authorizer = NewDockerAuthorizer(
				WithAuthClient(options.Client),
				WithAuthHeader(options.Headers),
				WithAuthCreds(options.Credentials))
		}
		opts = append(opts, WithAuthorizer(options.Authorizer))

		if options.Client != nil {
			opts = append(opts, WithClient(options.Client))
		}
		if options.PlainHTTP {
			opts = append(opts, WithPlainHTTP(MatchAllHosts))
		} else {
			// Only localhost connections default to plain http.
			opts = append(opts, WithPlainHTTP(MatchLocalhost))
		}
		options.Hosts = ConfigureDefaultRegistries(opts...)
	}
	return &dockerResolver{
		hosts:         options.Hosts,
		header:        options.Headers,
		resolveHeader: resolveHeader,
		tracker:       options.Tracker,
	}
}
|
||||
|
||||
func getManifestMediaType(resp *http.Response) string {
|
||||
// Strip encoding data (manifests should always be ascii JSON)
|
||||
contentType := resp.Header.Get("Content-Type")
|
||||
if sp := strings.IndexByte(contentType, ';'); sp != -1 {
|
||||
contentType = contentType[0:sp]
|
||||
}
|
||||
|
||||
// As of Apr 30 2019 the registry.access.redhat.com registry does not specify
|
||||
// the content type of any data but uses schema1 manifests.
|
||||
if contentType == "text/plain" {
|
||||
contentType = images.MediaTypeDockerSchema1Manifest
|
||||
}
|
||||
return contentType
|
||||
}
|
||||
|
||||
type countingReader struct {
|
||||
reader io.Reader
|
||||
bytesRead int64
|
||||
}
|
||||
|
||||
func (r *countingReader) Read(p []byte) (int, error) {
|
||||
n, err := r.reader.Read(p)
|
||||
r.bytesRead += int64(n)
|
||||
return n, err
|
||||
}
|
||||
|
||||
var _ remotes.Resolver = &dockerResolver{}
|
||||
|
||||
func (r *dockerResolver) Resolve(ctx context.Context, ref string) (string, ocispec.Descriptor, error) {
|
||||
base, err := r.resolveDockerBase(ref)
|
||||
if err != nil {
|
||||
return "", ocispec.Descriptor{}, err
|
||||
}
|
||||
refspec := base.refspec
|
||||
if refspec.Object == "" {
|
||||
return "", ocispec.Descriptor{}, reference.ErrObjectRequired
|
||||
}
|
||||
|
||||
var (
|
||||
paths [][]string
|
||||
dgst = refspec.Digest()
|
||||
caps = HostCapabilityPull
|
||||
)
|
||||
|
||||
if dgst != "" {
|
||||
if err := dgst.Validate(); err != nil {
|
||||
// need to fail here, since we can't actually resolve the invalid
|
||||
// digest.
|
||||
return "", ocispec.Descriptor{}, err
|
||||
}
|
||||
|
||||
// turns out, we have a valid digest, make a url.
|
||||
paths = append(paths, []string{"manifests", dgst.String()})
|
||||
|
||||
// fallback to blobs on not found.
|
||||
paths = append(paths, []string{"blobs", dgst.String()})
|
||||
} else {
|
||||
// Add
|
||||
paths = append(paths, []string{"manifests", refspec.Object})
|
||||
caps |= HostCapabilityResolve
|
||||
}
|
||||
|
||||
hosts := base.filterHosts(caps)
|
||||
if len(hosts) == 0 {
|
||||
return "", ocispec.Descriptor{}, fmt.Errorf("no resolve hosts: %w", errdefs.ErrNotFound)
|
||||
}
|
||||
|
||||
ctx, err = ContextWithRepositoryScope(ctx, refspec, false)
|
||||
if err != nil {
|
||||
return "", ocispec.Descriptor{}, err
|
||||
}
|
||||
|
||||
var (
|
||||
// firstErr is the most relevant error encountered during resolution.
|
||||
// We use this to determine the error to return, making sure that the
|
||||
// error created furthest through the resolution process is returned.
|
||||
firstErr error
|
||||
firstErrPriority int
|
||||
)
|
||||
for _, u := range paths {
|
||||
for _, host := range hosts {
|
||||
ctx := log.WithLogger(ctx, log.G(ctx).WithField("host", host.Host))
|
||||
|
||||
req := base.request(host, http.MethodHead, u...)
|
||||
if err := req.addNamespace(base.refspec.Hostname()); err != nil {
|
||||
return "", ocispec.Descriptor{}, err
|
||||
}
|
||||
|
||||
for key, value := range r.resolveHeader {
|
||||
req.header[key] = append(req.header[key], value...)
|
||||
}
|
||||
|
||||
log.G(ctx).Debug("resolving")
|
||||
resp, err := req.doWithRetries(ctx, nil)
|
||||
if err != nil {
|
||||
if errors.Is(err, ErrInvalidAuthorization) {
|
||||
err = fmt.Errorf("pull access denied, repository does not exist or may require authorization: %w", err)
|
||||
}
|
||||
if firstErrPriority < 1 {
|
||||
firstErr = err
|
||||
firstErrPriority = 1
|
||||
}
|
||||
log.G(ctx).WithError(err).Info("trying next host")
|
||||
continue // try another host
|
||||
}
|
||||
resp.Body.Close() // don't care about body contents.
|
||||
|
||||
if resp.StatusCode > 299 {
|
||||
if resp.StatusCode == http.StatusNotFound {
|
||||
if firstErrPriority < 2 {
|
||||
firstErr = fmt.Errorf("%s: %w", ref, errdefs.ErrNotFound)
|
||||
firstErrPriority = 2
|
||||
}
|
||||
log.G(ctx).Info("trying next host - response was http.StatusNotFound")
|
||||
continue
|
||||
}
|
||||
if resp.StatusCode > 399 {
|
||||
if firstErrPriority < 3 {
|
||||
firstErr = remoteerrors.NewUnexpectedStatusErr(resp)
|
||||
firstErrPriority = 3
|
||||
}
|
||||
log.G(ctx).Infof("trying next host - response was %s", resp.Status)
|
||||
continue // try another host
|
||||
}
|
||||
return "", ocispec.Descriptor{}, remoteerrors.NewUnexpectedStatusErr(resp)
|
||||
}
|
||||
size := resp.ContentLength
|
||||
contentType := getManifestMediaType(resp)
|
||||
|
||||
// if no digest was provided, then only a resolve
|
||||
// trusted registry was contacted, in this case use
|
||||
// the digest header (or content from GET)
|
||||
if dgst == "" {
|
||||
// this is the only point at which we trust the registry. we use the
|
||||
// content headers to assemble a descriptor for the name. when this becomes
|
||||
// more robust, we mostly get this information from a secure trust store.
|
||||
dgstHeader := digest.Digest(resp.Header.Get("Docker-Content-Digest"))
|
||||
|
||||
if dgstHeader != "" && size != -1 {
|
||||
if err := dgstHeader.Validate(); err != nil {
|
||||
return "", ocispec.Descriptor{}, fmt.Errorf("%q in header not a valid digest: %w", dgstHeader, err)
|
||||
}
|
||||
dgst = dgstHeader
|
||||
}
|
||||
}
|
||||
if dgst == "" || size == -1 {
|
||||
log.G(ctx).Debug("no Docker-Content-Digest header, fetching manifest instead")
|
||||
|
||||
req = base.request(host, http.MethodGet, u...)
|
||||
if err := req.addNamespace(base.refspec.Hostname()); err != nil {
|
||||
return "", ocispec.Descriptor{}, err
|
||||
}
|
||||
|
||||
for key, value := range r.resolveHeader {
|
||||
req.header[key] = append(req.header[key], value...)
|
||||
}
|
||||
|
||||
resp, err := req.doWithRetries(ctx, nil)
|
||||
if err != nil {
|
||||
return "", ocispec.Descriptor{}, err
|
||||
}
|
||||
|
||||
bodyReader := countingReader{reader: resp.Body}
|
||||
|
||||
contentType = getManifestMediaType(resp)
|
||||
err = func() error {
|
||||
defer resp.Body.Close()
|
||||
if dgst != "" {
|
||||
_, err = io.Copy(io.Discard, &bodyReader)
|
||||
return err
|
||||
}
|
||||
|
||||
if contentType == images.MediaTypeDockerSchema1Manifest {
|
||||
b, err := schema1.ReadStripSignature(&bodyReader)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
dgst = digest.FromBytes(b)
|
||||
return nil
|
||||
}
|
||||
|
||||
dgst, err = digest.FromReader(&bodyReader)
|
||||
return err
|
||||
}()
|
||||
if err != nil {
|
||||
return "", ocispec.Descriptor{}, err
|
||||
}
|
||||
size = bodyReader.bytesRead
|
||||
}
|
||||
// Prevent resolving to excessively large manifests
|
||||
if size > MaxManifestSize {
|
||||
if firstErrPriority < 4 {
|
||||
firstErr = fmt.Errorf("rejecting %d byte manifest for %s: %w", size, ref, errdefs.ErrNotFound)
|
||||
firstErrPriority = 4
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
desc := ocispec.Descriptor{
|
||||
Digest: dgst,
|
||||
MediaType: contentType,
|
||||
Size: size,
|
||||
}
|
||||
|
||||
log.G(ctx).WithField("desc.digest", desc.Digest).Debug("resolved")
|
||||
return ref, desc, nil
|
||||
}
|
||||
}
|
||||
|
||||
// If above loop terminates without return or error, then no registries
|
||||
// were provided.
|
||||
if firstErr == nil {
|
||||
firstErr = fmt.Errorf("%s: %w", ref, errdefs.ErrNotFound)
|
||||
}
|
||||
|
||||
return "", ocispec.Descriptor{}, firstErr
|
||||
}
|
||||
|
||||
func (r *dockerResolver) Fetcher(ctx context.Context, ref string) (remotes.Fetcher, error) {
|
||||
base, err := r.resolveDockerBase(ref)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return dockerFetcher{
|
||||
dockerBase: base,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (r *dockerResolver) Pusher(ctx context.Context, ref string) (remotes.Pusher, error) {
|
||||
base, err := r.resolveDockerBase(ref)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return dockerPusher{
|
||||
dockerBase: base,
|
||||
object: base.refspec.Object,
|
||||
tracker: r.tracker,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (r *dockerResolver) resolveDockerBase(ref string) (*dockerBase, error) {
|
||||
refspec, err := reference.Parse(ref)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return r.base(refspec)
|
||||
}
|
||||
|
||||
// dockerBase bundles the state needed to issue registry requests for a
// single parsed reference: the candidate hosts, the repository path on
// those hosts, and the base headers applied to every request.
type dockerBase struct {
	refspec    reference.Spec // the parsed reference being operated on
	repository string         // repository path with the registry host prefix stripped
	hosts      []RegistryHost // candidate hosts, tried in configured order
	header     http.Header    // resolver-level headers merged into each request
}
|
||||
|
||||
func (r *dockerResolver) base(refspec reference.Spec) (*dockerBase, error) {
|
||||
host := refspec.Hostname()
|
||||
hosts, err := r.hosts(host)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &dockerBase{
|
||||
refspec: refspec,
|
||||
repository: strings.TrimPrefix(refspec.Locator, host+"/"),
|
||||
hosts: hosts,
|
||||
header: r.header,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (r *dockerBase) filterHosts(caps HostCapabilities) (hosts []RegistryHost) {
|
||||
for _, host := range r.hosts {
|
||||
if host.Capabilities.Has(caps) {
|
||||
hosts = append(hosts, host)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (r *dockerBase) request(host RegistryHost, method string, ps ...string) *request {
|
||||
header := r.header.Clone()
|
||||
if header == nil {
|
||||
header = http.Header{}
|
||||
}
|
||||
|
||||
for key, value := range host.Header {
|
||||
header[key] = append(header[key], value...)
|
||||
}
|
||||
parts := append([]string{"/", host.Path, r.repository}, ps...)
|
||||
p := path.Join(parts...)
|
||||
// Join strips trailing slash, re-add ending "/" if included
|
||||
if len(parts) > 0 && strings.HasSuffix(parts[len(parts)-1], "/") {
|
||||
p = p + "/"
|
||||
}
|
||||
return &request{
|
||||
method: method,
|
||||
path: p,
|
||||
header: header,
|
||||
host: host,
|
||||
}
|
||||
}
|
||||
|
||||
func (r *request) authorize(ctx context.Context, req *http.Request) error {
|
||||
// Check if has header for host
|
||||
if r.host.Authorizer != nil {
|
||||
if err := r.host.Authorizer.Authorize(ctx, req); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *request) addNamespace(ns string) (err error) {
|
||||
if !r.host.isProxy(ns) {
|
||||
return nil
|
||||
}
|
||||
var q url.Values
|
||||
// Parse query
|
||||
if i := strings.IndexByte(r.path, '?'); i > 0 {
|
||||
r.path = r.path[:i+1]
|
||||
q, err = url.ParseQuery(r.path[i+1:])
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
} else {
|
||||
r.path = r.path + "?"
|
||||
q = url.Values{}
|
||||
}
|
||||
q.Add("ns", ns)
|
||||
|
||||
r.path = r.path + q.Encode()
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// request describes one HTTP request to a registry host. It holds enough
// state to be re-issued for retries and redirects: the body is supplied as
// a factory function so a fresh reader can be created per attempt.
type request struct {
	method string
	path   string // path plus optional query string, relative to host
	header http.Header
	host   RegistryHost
	body   func() (io.ReadCloser, error) // re-creatable request body; nil for body-less requests
	size   int64                         // content length when known; <=0 leaves ContentLength unset
}
|
||||
|
||||
// do executes the request once against its host: it builds an *http.Request,
// applies authorization, and dispatches it through the host's client (or a
// default one) with redirect re-authorization and tracing attached.
func (r *request) do(ctx context.Context) (*http.Response, error) {
	u := r.host.Scheme + "://" + r.host.Host + r.path
	req, err := http.NewRequestWithContext(ctx, r.method, u, nil)
	if err != nil {
		return nil, err
	}
	if r.header == nil {
		req.Header = http.Header{}
	} else {
		req.Header = r.header.Clone() // headers need to be copied to avoid concurrent map access
	}
	if r.body != nil {
		body, err := r.body()
		if err != nil {
			return nil, err
		}
		req.Body = body
		// GetBody lets the transport re-create the body on redirects/retries.
		req.GetBody = r.body
		if r.size > 0 {
			req.ContentLength = r.size
		}
	}

	ctx = log.WithLogger(ctx, log.G(ctx).WithField("url", u))
	log.G(ctx).WithFields(requestFields(req)).Debug("do request")
	if err := r.authorize(ctx, req); err != nil {
		return nil, fmt.Errorf("failed to authorize: %w", err)
	}

	// Shallow-copy the host's client so CheckRedirect can be set without
	// mutating a client that may be shared across hosts.
	client := &http.Client{}
	if r.host.Client != nil {
		*client = *r.host.Client
	}
	if client.CheckRedirect == nil {
		client.CheckRedirect = func(req *http.Request, via []*http.Request) error {
			if len(via) >= 10 {
				return errors.New("stopped after 10 redirects")
			}
			// Redirect targets may need fresh authorization (e.g. blob
			// storage on a different host).
			if err := r.authorize(ctx, req); err != nil {
				return fmt.Errorf("failed to authorize redirect: %w", err)
			}
			return nil
		}
	}

	tracing.UpdateHTTPClient(client, tracing.Name("remotes.docker.resolver", "HTTPRequest"))

	resp, err := client.Do(req)
	if err != nil {
		return nil, fmt.Errorf("failed to do request: %w", err)
	}
	log.G(ctx).WithFields(responseFields(resp)).Debug("fetch response received")
	return resp, nil
}
|
||||
|
||||
func (r *request) doWithRetries(ctx context.Context, responses []*http.Response) (*http.Response, error) {
|
||||
resp, err := r.do(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
responses = append(responses, resp)
|
||||
retry, err := r.retryRequest(ctx, responses)
|
||||
if err != nil {
|
||||
resp.Body.Close()
|
||||
return nil, err
|
||||
}
|
||||
if retry {
|
||||
resp.Body.Close()
|
||||
return r.doWithRetries(ctx, responses)
|
||||
}
|
||||
return resp, err
|
||||
}
|
||||
|
||||
// retryRequest decides whether the request should be re-issued based on the
// most recent response. It returns true for: 401 when the authorizer can add
// credentials, 405 on manifest HEADs (downgraded to GET), and 408/429
// throttling. At most 5 prior attempts are allowed.
func (r *request) retryRequest(ctx context.Context, responses []*http.Response) (bool, error) {
	if len(responses) > 5 {
		return false, nil
	}
	last := responses[len(responses)-1]
	switch last.StatusCode {
	case http.StatusUnauthorized:
		log.G(ctx).WithField("header", last.Header.Get("WWW-Authenticate")).Debug("Unauthorized")
		if r.host.Authorizer != nil {
			// Retry only if the authorizer could use the challenge;
			// NotImplemented means it simply doesn't handle this scheme.
			if err := r.host.Authorizer.AddResponses(ctx, responses); err == nil {
				return true, nil
			} else if !errdefs.IsNotImplemented(err) {
				return false, err
			}
		}

		return false, nil
	case http.StatusMethodNotAllowed:
		// Support registries which have not properly implemented the HEAD method for
		// manifests endpoint
		if r.method == http.MethodHead && strings.Contains(r.path, "/manifests/") {
			r.method = http.MethodGet
			return true, nil
		}
	case http.StatusRequestTimeout, http.StatusTooManyRequests:
		return true, nil
	}

	// TODO: Handle 50x errors accounting for attempt history
	return false, nil
}
|
||||
|
||||
func (r *request) String() string {
|
||||
return r.host.Scheme + "://" + r.host.Host + r.path
|
||||
}
|
||||
|
||||
func requestFields(req *http.Request) log.Fields {
|
||||
fields := map[string]interface{}{
|
||||
"request.method": req.Method,
|
||||
}
|
||||
for k, vals := range req.Header {
|
||||
k = strings.ToLower(k)
|
||||
if k == "authorization" {
|
||||
continue
|
||||
}
|
||||
for i, v := range vals {
|
||||
field := "request.header." + k
|
||||
if i > 0 {
|
||||
field = fmt.Sprintf("%s.%d", field, i)
|
||||
}
|
||||
fields[field] = v
|
||||
}
|
||||
}
|
||||
|
||||
return fields
|
||||
}
|
||||
|
||||
func responseFields(resp *http.Response) log.Fields {
|
||||
fields := map[string]interface{}{
|
||||
"response.status": resp.Status,
|
||||
}
|
||||
for k, vals := range resp.Header {
|
||||
k = strings.ToLower(k)
|
||||
for i, v := range vals {
|
||||
field := "response.header." + k
|
||||
if i > 0 {
|
||||
field = fmt.Sprintf("%s.%d", field, i)
|
||||
}
|
||||
fields[field] = v
|
||||
}
|
||||
}
|
||||
|
||||
return fields
|
||||
}
|
||||
|
||||
// IsLocalhost checks if the registry host is local: either the literal name
// "localhost" or any loopback IP, with an optional port.
func IsLocalhost(host string) bool {
	h, _, err := net.SplitHostPort(host)
	if err != nil {
		// No port component; use the input as-is.
		h = host
	}

	if h == "localhost" {
		return true
	}

	// ParseIP returns nil for non-IP names; IsLoopback on nil is false.
	return net.ParseIP(h).IsLoopback()
}
|
||||
|
||||
// HTTPFallback is an http.RoundTripper which allows fallback from https to http
// for registry endpoints with configurations for both http and TLS, such as
// defaulted localhost endpoints.
//
// It wraps an inner RoundTripper and retries over plain HTTP only when the
// TLS handshake reveals the server spoke HTTP (see RoundTrip).
type HTTPFallback struct {
	http.RoundTripper
}
|
||||
|
||||
func (f HTTPFallback) RoundTrip(r *http.Request) (*http.Response, error) {
|
||||
resp, err := f.RoundTripper.RoundTrip(r)
|
||||
var tlsErr tls.RecordHeaderError
|
||||
if errors.As(err, &tlsErr) && string(tlsErr.RecordHeader[:]) == "HTTP/" {
|
||||
// server gave HTTP response to HTTPS client
|
||||
plainHTTPUrl := *r.URL
|
||||
plainHTTPUrl.Scheme = "http"
|
||||
|
||||
plainHTTPRequest := *r
|
||||
plainHTTPRequest.URL = &plainHTTPUrl
|
||||
|
||||
return f.RoundTripper.RoundTrip(&plainHTTPRequest)
|
||||
}
|
||||
|
||||
return resp, err
|
||||
}
|
||||
1012
core/remotes/docker/resolver_test.go
Normal file
1012
core/remotes/docker/resolver_test.go
Normal file
File diff suppressed because it is too large
Load Diff
608
core/remotes/docker/schema1/converter.go
Normal file
608
core/remotes/docker/schema1/converter.go
Normal file
@@ -0,0 +1,608 @@
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Package schema1 provides a converter to fetch an image formatted in Docker Image Manifest v2, Schema 1.
|
||||
//
|
||||
// Deprecated: use images formatted in Docker Image Manifest v2, Schema 2, or OCI Image Spec v1.
|
||||
package schema1
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/containerd/containerd/v2/archive/compression"
|
||||
"github.com/containerd/containerd/v2/core/content"
|
||||
"github.com/containerd/containerd/v2/core/images"
|
||||
"github.com/containerd/containerd/v2/core/remotes"
|
||||
"github.com/containerd/containerd/v2/errdefs"
|
||||
"github.com/containerd/containerd/v2/labels"
|
||||
"github.com/containerd/log"
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
specs "github.com/opencontainers/image-spec/specs-go"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"golang.org/x/sync/errgroup"
|
||||
)
|
||||
|
||||
const (
	// manifestSizeLimit caps how much of a schema1 manifest is read,
	// guarding against maliciously large documents.
	manifestSizeLimit = 8e6 // 8MB

	// labelDockerSchema1EmptyLayer is a content-store label recording
	// whether a converted layer blob is known to be empty.
	labelDockerSchema1EmptyLayer = "containerd.io/docker.schema1.empty-layer"
)
|
||||
|
||||
// blobState captures what conversion has learned about a fetched blob:
// its uncompressed digest and whether the layer is empty.
type blobState struct {
	diffID digest.Digest
	empty  bool
}

// Converter converts schema1 manifests to schema2 on fetch
type Converter struct {
	contentStore content.Store
	fetcher      remotes.Fetcher

	// pulledManifest is populated by fetchManifest and must be set
	// before any layer blobs can be converted.
	pulledManifest *manifest

	// mu guards blobMap and layerBlobs, which are updated from
	// concurrent blob fetches.
	mu         sync.Mutex
	blobMap    map[digest.Digest]blobState
	layerBlobs map[digest.Digest]ocispec.Descriptor
}
|
||||
|
||||
// NewConverter returns a new converter
|
||||
func NewConverter(contentStore content.Store, fetcher remotes.Fetcher) *Converter {
|
||||
return &Converter{
|
||||
contentStore: contentStore,
|
||||
fetcher: fetcher,
|
||||
blobMap: map[digest.Digest]blobState{},
|
||||
layerBlobs: map[digest.Digest]ocispec.Descriptor{},
|
||||
}
|
||||
}
|
||||
|
||||
// Handle fetching descriptors for a docker media type.
//
// For a schema1 manifest it fetches and records the manifest, then returns
// descriptors (size -1, digest = blobSum) for each non-empty layer so the
// dispatcher will call back for the blobs. For a layer blob it fetches and
// converts the blob. Any other media type is rejected.
func (c *Converter) Handle(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
	switch desc.MediaType {
	case images.MediaTypeDockerSchema1Manifest:
		if err := c.fetchManifest(ctx, desc); err != nil {
			return nil, err
		}

		m := c.pulledManifest
		// fsLayers and history must pair up one-to-one.
		if len(m.FSLayers) != len(m.History) {
			return nil, errors.New("invalid schema 1 manifest, history and layer mismatch")
		}
		descs := make([]ocispec.Descriptor, 0, len(c.pulledManifest.FSLayers))

		for i := range m.FSLayers {
			// Skip blobSums already seen (duplicate layers are common
			// in schema1 manifests).
			if _, ok := c.blobMap[c.pulledManifest.FSLayers[i].BlobSum]; !ok {
				empty, err := isEmptyLayer([]byte(m.History[i].V1Compatibility))
				if err != nil {
					return nil, err
				}

				// Do not attempt to download a known empty blob
				if !empty {
					// Prepend: schema1 lists layers newest-first, so
					// building the result in reverse restores oldest-first.
					descs = append([]ocispec.Descriptor{
						{
							MediaType: images.MediaTypeDockerSchema2LayerGzip,
							Digest:    c.pulledManifest.FSLayers[i].BlobSum,
							Size:      -1,
						},
					}, descs...)
				}
				c.blobMap[c.pulledManifest.FSLayers[i].BlobSum] = blobState{
					empty: empty,
				}
			}
		}
		return descs, nil
	case images.MediaTypeDockerSchema2LayerGzip:
		if c.pulledManifest == nil {
			return nil, errors.New("manifest required for schema 1 blob pull")
		}
		return nil, c.fetchBlob(ctx, desc)
	default:
		return nil, fmt.Errorf("%v not support for schema 1 manifests", desc.MediaType)
	}
}
|
||||
|
||||
// ConvertOptions provides options on converting a docker schema1 manifest.
type ConvertOptions struct {
	// ManifestMediaType specifies the media type of the manifest OCI descriptor.
	ManifestMediaType string

	// ConfigMediaType specifies the media type of the manifest config OCI
	// descriptor.
	ConfigMediaType string
}

// ConvertOpt allows configuring a convert operation.
type ConvertOpt func(context.Context, *ConvertOptions) error
|
||||
|
||||
// UseDockerSchema2 is used to indicate that a schema1 manifest should be
|
||||
// converted into the media types for a docker schema2 manifest.
|
||||
func UseDockerSchema2() ConvertOpt {
|
||||
return func(ctx context.Context, o *ConvertOptions) error {
|
||||
o.ManifestMediaType = images.MediaTypeDockerSchema2Manifest
|
||||
o.ConfigMediaType = images.MediaTypeDockerSchema2Config
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// Convert a docker manifest to an OCI descriptor.
//
// All layer blobs must already have been fetched via Handle. Convert builds
// the image config from the schema1 history, assembles a schema2/OCI
// manifest from the converted layer descriptors, writes both into the
// content store (with GC labels linking the manifest to its children), and
// returns the manifest descriptor.
func (c *Converter) Convert(ctx context.Context, opts ...ConvertOpt) (ocispec.Descriptor, error) {
	co := ConvertOptions{
		ManifestMediaType: ocispec.MediaTypeImageManifest,
		ConfigMediaType:   ocispec.MediaTypeImageConfig,
	}
	for _, opt := range opts {
		if err := opt(ctx, &co); err != nil {
			return ocispec.Descriptor{}, err
		}
	}

	history, diffIDs, err := c.schema1ManifestHistory()
	if err != nil {
		return ocispec.Descriptor{}, fmt.Errorf("schema 1 conversion failed: %w", err)
	}

	// History[0] is the most recent entry; its v1Compatibility JSON is the
	// closest schema1 equivalent of an image config.
	var img ocispec.Image
	if err := json.Unmarshal([]byte(c.pulledManifest.History[0].V1Compatibility), &img); err != nil {
		return ocispec.Descriptor{}, fmt.Errorf("failed to unmarshal image from schema 1 history: %w", err)
	}

	img.History = history
	img.RootFS = ocispec.RootFS{
		Type:    "layers",
		DiffIDs: diffIDs,
	}

	b, err := json.MarshalIndent(img, "", " ")
	if err != nil {
		return ocispec.Descriptor{}, fmt.Errorf("failed to marshal image: %w", err)
	}

	config := ocispec.Descriptor{
		MediaType: co.ConfigMediaType,
		Digest:    digest.Canonical.FromBytes(b),
		Size:      int64(len(b)),
	}

	// Order layers by diffID (oldest first), matching the config's RootFS.
	layers := make([]ocispec.Descriptor, len(diffIDs))
	for i, diffID := range diffIDs {
		layers[i] = c.layerBlobs[diffID]
	}

	manifest := ocispec.Manifest{
		Versioned: specs.Versioned{
			SchemaVersion: 2,
		},
		Config: config,
		Layers: layers,
	}

	mb, err := json.MarshalIndent(manifest, "", " ")
	if err != nil {
		return ocispec.Descriptor{}, fmt.Errorf("failed to marshal image: %w", err)
	}

	desc := ocispec.Descriptor{
		MediaType: co.ManifestMediaType,
		Digest:    digest.Canonical.FromBytes(mb),
		Size:      int64(len(mb)),
	}

	// GC labels keep the config and layers referenced by the manifest.
	labels := map[string]string{}
	labels["containerd.io/gc.ref.content.0"] = manifest.Config.Digest.String()
	for i, ch := range manifest.Layers {
		labels[fmt.Sprintf("containerd.io/gc.ref.content.%d", i+1)] = ch.Digest.String()
	}

	ref := remotes.MakeRefKey(ctx, desc)
	if err := content.WriteBlob(ctx, c.contentStore, ref, bytes.NewReader(mb), desc, content.WithLabels(labels)); err != nil {
		return ocispec.Descriptor{}, fmt.Errorf("failed to write image manifest: %w", err)
	}

	ref = remotes.MakeRefKey(ctx, config)
	if err := content.WriteBlob(ctx, c.contentStore, ref, bytes.NewReader(b), config); err != nil {
		return ocispec.Descriptor{}, fmt.Errorf("failed to write image config: %w", err)
	}

	return desc, nil
}
|
||||
|
||||
// ReadStripSignature reads in a schema1 manifest and returns a byte array
|
||||
// with the "signatures" field stripped
|
||||
func ReadStripSignature(schema1Blob io.Reader) ([]byte, error) {
|
||||
b, err := io.ReadAll(io.LimitReader(schema1Blob, manifestSizeLimit)) // limit to 8MB
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return stripSignature(b)
|
||||
}
|
||||
|
||||
// fetchManifest downloads the schema1 manifest described by desc, strips its
// JWS signatures, validates that it really is a schema1 document, and stores
// it on the converter for later blob conversion.
func (c *Converter) fetchManifest(ctx context.Context, desc ocispec.Descriptor) error {
	log.G(ctx).Debug("fetch schema 1")

	rc, err := c.fetcher.Fetch(ctx, desc)
	if err != nil {
		return err
	}

	b, err := ReadStripSignature(rc)
	rc.Close()
	if err != nil {
		return err
	}

	var m manifest
	if err := json.Unmarshal(b, &m); err != nil {
		return err
	}
	// "layers"/"manifests" keys belong to schema2/OCI documents; their
	// presence means the registry returned the wrong manifest type.
	if len(m.Manifests) != 0 || len(m.Layers) != 0 {
		return errors.New("converter: expected schema1 document but found extra keys")
	}
	c.pulledManifest = &m

	return nil
}
|
||||
|
||||
// fetchBlob downloads (or re-reads) one layer blob, ingests it into the
// content store, computes its uncompressed digest and emptiness in a single
// streaming pass, and records the result in blobMap/layerBlobs plus content
// labels so later conversions can reuse it.
func (c *Converter) fetchBlob(ctx context.Context, desc ocispec.Descriptor) error {
	log.G(ctx).Debug("fetch blob")

	var (
		ref            = remotes.MakeRefKey(ctx, desc)
		calc           = newBlobStateCalculator()
		compressMethod = compression.Gzip
	)

	// size may be unknown, set to zero for content ingest
	ingestDesc := desc
	if ingestDesc.Size == -1 {
		ingestDesc.Size = 0
	}

	cw, err := content.OpenWriter(ctx, c.contentStore, content.WithRef(ref), content.WithDescriptor(ingestDesc))
	if err != nil {
		if !errdefs.IsAlreadyExists(err) {
			return err
		}

		// Blob already present: try to recover its state from labels
		// before falling back to re-reading it from the store.
		reuse, err := c.reuseLabelBlobState(ctx, desc)
		if err != nil {
			return err
		}

		if reuse {
			return nil
		}

		ra, err := c.contentStore.ReaderAt(ctx, desc)
		if err != nil {
			return err
		}
		defer ra.Close()

		r, err := compression.DecompressStream(content.NewReader(ra))
		if err != nil {
			return err
		}

		compressMethod = r.GetCompression()
		_, err = io.Copy(calc, r)
		r.Close()
		if err != nil {
			return err
		}
	} else {
		defer cw.Close()

		rc, err := c.fetcher.Fetch(ctx, desc)
		if err != nil {
			return err
		}
		defer rc.Close()

		eg, _ := errgroup.WithContext(ctx)
		pr, pw := io.Pipe()

		// One goroutine decompresses the stream to compute the diffID and
		// emptiness; the other tees the raw bytes into the content store.
		eg.Go(func() error {
			r, err := compression.DecompressStream(pr)
			if err != nil {
				return err
			}

			compressMethod = r.GetCompression()
			_, err = io.Copy(calc, r)
			r.Close()
			pr.CloseWithError(err)
			return err
		})

		eg.Go(func() error {
			defer pw.Close()

			return content.Copy(ctx, cw, io.TeeReader(rc, pw), ingestDesc.Size, ingestDesc.Digest)
		})

		if err := eg.Wait(); err != nil {
			return err
		}
	}

	// Fill in the true size now that the blob is in the store.
	if desc.Size == -1 {
		info, err := c.contentStore.Info(ctx, desc.Digest)
		if err != nil {
			return fmt.Errorf("failed to get blob info: %w", err)
		}
		desc.Size = info.Size
	}

	if compressMethod == compression.Uncompressed {
		log.G(ctx).WithField("id", desc.Digest).Debugf("changed media type for uncompressed schema1 layer blob")
		desc.MediaType = images.MediaTypeDockerSchema2Layer
	}

	state := calc.State()

	// Persist diffID and emptiness as labels so future conversions can
	// skip the decompress pass (see reuseLabelBlobState).
	cinfo := content.Info{
		Digest: desc.Digest,
		Labels: map[string]string{
			labels.LabelUncompressed:     state.diffID.String(),
			labelDockerSchema1EmptyLayer: strconv.FormatBool(state.empty),
		},
	}

	if _, err := c.contentStore.Update(ctx, cinfo, "labels."+labels.LabelUncompressed, fmt.Sprintf("labels.%s", labelDockerSchema1EmptyLayer)); err != nil {
		return fmt.Errorf("failed to update uncompressed label: %w", err)
	}

	c.mu.Lock()
	c.blobMap[desc.Digest] = state
	c.layerBlobs[state.diffID] = desc
	c.mu.Unlock()

	return nil
}
|
||||
|
||||
func (c *Converter) reuseLabelBlobState(ctx context.Context, desc ocispec.Descriptor) (bool, error) {
|
||||
cinfo, err := c.contentStore.Info(ctx, desc.Digest)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("failed to get blob info: %w", err)
|
||||
}
|
||||
desc.Size = cinfo.Size
|
||||
|
||||
diffID, ok := cinfo.Labels[labels.LabelUncompressed]
|
||||
if !ok {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
emptyVal, ok := cinfo.Labels[labelDockerSchema1EmptyLayer]
|
||||
if !ok {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
isEmpty, err := strconv.ParseBool(emptyVal)
|
||||
if err != nil {
|
||||
log.G(ctx).WithField("id", desc.Digest).Warnf("failed to parse bool from label %s: %v", labelDockerSchema1EmptyLayer, isEmpty)
|
||||
return false, nil
|
||||
}
|
||||
|
||||
bState := blobState{empty: isEmpty}
|
||||
|
||||
if bState.diffID, err = digest.Parse(diffID); err != nil {
|
||||
log.G(ctx).WithField("id", desc.Digest).Warnf("failed to parse digest from label %s: %v", labels.LabelUncompressed, diffID)
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// NOTE: there is no need to read header to get compression method
|
||||
// because there are only two kinds of methods.
|
||||
if bState.diffID == desc.Digest {
|
||||
desc.MediaType = images.MediaTypeDockerSchema2Layer
|
||||
} else {
|
||||
desc.MediaType = images.MediaTypeDockerSchema2LayerGzip
|
||||
}
|
||||
|
||||
c.mu.Lock()
|
||||
c.blobMap[desc.Digest] = bState
|
||||
c.layerBlobs[bState.diffID] = desc
|
||||
c.mu.Unlock()
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// schema1ManifestHistory converts the pulled schema1 history entries into
// OCI history records plus the ordered list of non-empty layer diffIDs.
// Schema1 lists entries newest-first, so both outputs are built in reverse
// to produce the oldest-first ordering OCI expects.
func (c *Converter) schema1ManifestHistory() ([]ocispec.History, []digest.Digest, error) {
	if c.pulledManifest == nil {
		return nil, nil, errors.New("missing schema 1 manifest for conversion")
	}
	m := *c.pulledManifest

	if len(m.History) == 0 {
		return nil, nil, errors.New("no history")
	}

	history := make([]ocispec.History, len(m.History))
	diffIDs := []digest.Digest{}
	for i := range m.History {
		var h v1History
		if err := json.Unmarshal([]byte(m.History[i].V1Compatibility), &h); err != nil {
			return nil, nil, fmt.Errorf("failed to unmarshal history: %w", err)
		}

		blobSum := m.FSLayers[i].BlobSum

		// blobMap was populated by Handle/fetchBlob; a missing entry
		// yields the zero blobState (non-empty, empty diffID).
		state := c.blobMap[blobSum]

		// Reverse index: entry i (newest-first) lands at the mirrored
		// position (oldest-first).
		history[len(history)-i-1] = ocispec.History{
			Author:     h.Author,
			Comment:    h.Comment,
			Created:    &h.Created,
			CreatedBy:  strings.Join(h.ContainerConfig.Cmd, " "),
			EmptyLayer: state.empty,
		}

		if !state.empty {
			// Prepend to reverse into oldest-first order.
			diffIDs = append([]digest.Digest{state.diffID}, diffIDs...)
		}
	}

	return history, diffIDs, nil
}
|
||||
|
||||
// fsLayer is one entry of a schema1 manifest's "fsLayers" array.
type fsLayer struct {
	BlobSum digest.Digest `json:"blobSum"`
}

// history is one entry of a schema1 manifest's "history" array; the
// v1Compatibility value is itself a JSON document (see v1History).
type history struct {
	V1Compatibility string `json:"v1Compatibility"`
}

// manifest models the schema1 fields needed for conversion. The Layers and
// Manifests fields exist only to detect schema2/OCI documents mistakenly
// returned by a registry (see fetchManifest).
type manifest struct {
	FSLayers  []fsLayer       `json:"fsLayers"`
	History   []history       `json:"history"`
	Layers    json.RawMessage `json:"layers,omitempty"`    // OCI manifest
	Manifests json.RawMessage `json:"manifests,omitempty"` // OCI index
}
|
||||
|
||||
// v1History models the JSON embedded in a schema1 history entry's
// v1Compatibility field, limited to the fields conversion needs.
type v1History struct {
	Author          string    `json:"author,omitempty"`
	Created         time.Time `json:"created"`
	Comment         string    `json:"comment,omitempty"`
	ThrowAway       *bool     `json:"throwaway,omitempty"`
	Size            *int      `json:"Size,omitempty"` // used before ThrowAway field
	ContainerConfig struct {
		Cmd []string `json:"Cmd,omitempty"`
	} `json:"container_config,omitempty"`
}
|
||||
|
||||
// isEmptyLayer returns whether the v1 compatibility history describes an
|
||||
// empty layer. A return value of true indicates the layer is empty,
|
||||
// however false does not indicate non-empty.
|
||||
func isEmptyLayer(compatHistory []byte) (bool, error) {
|
||||
var h v1History
|
||||
if err := json.Unmarshal(compatHistory, &h); err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
if h.ThrowAway != nil {
|
||||
return *h.ThrowAway, nil
|
||||
}
|
||||
if h.Size != nil {
|
||||
return *h.Size == 0, nil
|
||||
}
|
||||
|
||||
// If no `Size` or `throwaway` field is given, then
|
||||
// it cannot be determined whether the layer is empty
|
||||
// from the history, return false
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// signature models the JWS "signatures" envelope attached to a signed
// schema1 manifest.
type signature struct {
	Signatures []jsParsedSignature `json:"signatures"`
}

// jsParsedSignature carries the base64url-encoded protected header of one
// signature; it records how to reconstruct the original payload.
type jsParsedSignature struct {
	Protected string `json:"protected"`
}

// protectedBlock is the decoded protected header: the length of the payload
// prefix kept verbatim and the base64url-encoded tail to re-append.
type protectedBlock struct {
	Length int    `json:"formatLength"`
	Tail   string `json:"formatTail"`
}
|
||||
|
||||
// joseBase64UrlDecode decodes the given string using the standard base64 url
// decoder but first adds the appropriate number of trailing '=' characters in
// accordance with the jose specification.
// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2
func joseBase64UrlDecode(s string) ([]byte, error) {
	rem := len(s) % 4
	if rem == 1 {
		// A length of 1 mod 4 can never be valid base64.
		return nil, errors.New("illegal base64url string")
	}
	if rem > 0 {
		s += strings.Repeat("=", 4-rem)
	}
	return base64.URLEncoding.DecodeString(s)
}
|
||||
|
||||
func stripSignature(b []byte) ([]byte, error) {
|
||||
var sig signature
|
||||
if err := json.Unmarshal(b, &sig); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(sig.Signatures) == 0 {
|
||||
return nil, errors.New("no signatures")
|
||||
}
|
||||
pb, err := joseBase64UrlDecode(sig.Signatures[0].Protected)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not decode %s: %w", sig.Signatures[0].Protected, err)
|
||||
}
|
||||
|
||||
var protected protectedBlock
|
||||
if err := json.Unmarshal(pb, &protected); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if protected.Length > len(b) {
|
||||
return nil, errors.New("invalid protected length block")
|
||||
}
|
||||
|
||||
tail, err := joseBase64UrlDecode(protected.Tail)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid tail base 64 value: %w", err)
|
||||
}
|
||||
|
||||
return append(b[:protected.Length], tail...), nil
|
||||
}
|
||||
|
||||
// blobStateCalculator is an io.Writer that, in a single streaming pass over
// uncompressed layer bytes, computes the layer's digest (diffID) and whether
// every byte written was zero (an "empty" layer).
type blobStateCalculator struct {
	empty    bool // true until a non-zero byte is observed
	digester digest.Digester
}
|
||||
|
||||
func newBlobStateCalculator() *blobStateCalculator {
|
||||
return &blobStateCalculator{
|
||||
empty: true,
|
||||
digester: digest.Canonical.Digester(),
|
||||
}
|
||||
}
|
||||
|
||||
func (c *blobStateCalculator) Write(p []byte) (int, error) {
|
||||
if c.empty {
|
||||
for _, b := range p {
|
||||
if b != 0x00 {
|
||||
c.empty = false
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
return c.digester.Hash().Write(p)
|
||||
}
|
||||
|
||||
func (c *blobStateCalculator) State() blobState {
|
||||
return blobState{
|
||||
empty: c.empty,
|
||||
diffID: c.digester.Digest(),
|
||||
}
|
||||
}
|
||||
101
core/remotes/docker/scope.go
Normal file
101
core/remotes/docker/scope.go
Normal file
@@ -0,0 +1,101 @@
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package docker
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/containerd/containerd/v2/reference"
|
||||
)
|
||||
|
||||
// RepositoryScope returns a repository scope string such as "repository:foo/bar:pull"
|
||||
// for "host/foo/bar:baz".
|
||||
// When push is true, both pull and push are added to the scope.
|
||||
func RepositoryScope(refspec reference.Spec, push bool) (string, error) {
|
||||
u, err := url.Parse("dummy://" + refspec.Locator)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
s := "repository:" + strings.TrimPrefix(u.Path, "/") + ":pull"
|
||||
if push {
|
||||
s += ",push"
|
||||
}
|
||||
return s, nil
|
||||
}
|
||||
|
||||
// tokenScopesKey is used for the key for context.WithValue().
// value: []string (e.g. {"registry:foo/bar:pull"})
type tokenScopesKey struct{}

// ContextWithRepositoryScope returns a child context carrying the repository
// scope derived from refspec (e.g. "repository:foo/bar:pull"), in addition to
// any scopes already stored under tokenScopesKey{}.
// It fails only when the refspec locator cannot be parsed by RepositoryScope.
func ContextWithRepositoryScope(ctx context.Context, refspec reference.Spec, push bool) (context.Context, error) {
	s, err := RepositoryScope(refspec, push)
	if err != nil {
		return nil, err
	}
	return WithScope(ctx, s), nil
}
|
||||
|
||||
// WithScope appends a custom registry auth scope to the context.
|
||||
func WithScope(ctx context.Context, scope string) context.Context {
|
||||
var scopes []string
|
||||
if v := ctx.Value(tokenScopesKey{}); v != nil {
|
||||
scopes = v.([]string)
|
||||
scopes = append(scopes, scope)
|
||||
} else {
|
||||
scopes = []string{scope}
|
||||
}
|
||||
return context.WithValue(ctx, tokenScopesKey{}, scopes)
|
||||
}
|
||||
|
||||
// ContextWithAppendPullRepositoryScope is used to append repository pull
// scope into existing scopes indexed by the tokenScopesKey{}.
// Unlike ContextWithRepositoryScope it takes the repository name directly and
// cannot fail.
func ContextWithAppendPullRepositoryScope(ctx context.Context, repo string) context.Context {
	return WithScope(ctx, fmt.Sprintf("repository:%s:pull", repo))
}
|
||||
|
||||
// GetTokenScopes returns deduplicated and sorted scopes from ctx.Value(tokenScopesKey{}) and common scopes.
|
||||
func GetTokenScopes(ctx context.Context, common []string) []string {
|
||||
scopes := []string{}
|
||||
if x := ctx.Value(tokenScopesKey{}); x != nil {
|
||||
scopes = append(scopes, x.([]string)...)
|
||||
}
|
||||
|
||||
scopes = append(scopes, common...)
|
||||
sort.Strings(scopes)
|
||||
|
||||
if len(scopes) == 0 {
|
||||
return scopes
|
||||
}
|
||||
|
||||
l := 0
|
||||
for idx := 1; idx < len(scopes); idx++ {
|
||||
// Note: this comparison is unaware of the scope grammar (https://docs.docker.com/registry/spec/auth/scope/)
|
||||
// So, "repository:foo/bar:pull,push" != "repository:foo/bar:push,pull", although semantically they are equal.
|
||||
if scopes[l] == scopes[idx] {
|
||||
continue
|
||||
}
|
||||
|
||||
l++
|
||||
scopes[l] = scopes[idx]
|
||||
}
|
||||
return scopes[:l+1]
|
||||
}
|
||||
110
core/remotes/docker/scope_test.go
Normal file
110
core/remotes/docker/scope_test.go
Normal file
@@ -0,0 +1,110 @@
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package docker
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/containerd/containerd/v2/reference"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestRepositoryScope(t *testing.T) {
|
||||
testCases := []struct {
|
||||
refspec reference.Spec
|
||||
push bool
|
||||
expected string
|
||||
}{
|
||||
{
|
||||
refspec: reference.Spec{
|
||||
Locator: "host/foo/bar",
|
||||
Object: "ignored",
|
||||
},
|
||||
push: false,
|
||||
expected: "repository:foo/bar:pull",
|
||||
},
|
||||
{
|
||||
refspec: reference.Spec{
|
||||
Locator: "host:4242/foo/bar",
|
||||
Object: "ignored",
|
||||
},
|
||||
push: true,
|
||||
expected: "repository:foo/bar:pull,push",
|
||||
},
|
||||
}
|
||||
for _, x := range testCases {
|
||||
t.Run(x.refspec.String(), func(t *testing.T) {
|
||||
actual, err := RepositoryScope(x.refspec, x.push)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, x.expected, actual)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestGetTokenScopes verifies merging of context-carried scopes with common
// scopes: the result is sorted, deduplicated by exact string match, and always
// a non-nil slice.
func TestGetTokenScopes(t *testing.T) {
	testCases := []struct {
		scopesInCtx  []string
		commonScopes []string
		expected     []string
	}{
		// No scopes anywhere still yields an empty (non-nil) slice.
		{
			scopesInCtx:  []string{},
			commonScopes: []string{},
			expected:     []string{},
		},
		{
			scopesInCtx:  []string{},
			commonScopes: []string{"repository:foo/bar:pull"},
			expected:     []string{"repository:foo/bar:pull"},
		},
		{
			scopesInCtx:  []string{"repository:foo/bar:pull,push"},
			commonScopes: []string{},
			expected:     []string{"repository:foo/bar:pull,push"},
		},
		// Exact duplicates across the two sources collapse to one entry.
		{
			scopesInCtx:  []string{"repository:foo/bar:pull"},
			commonScopes: []string{"repository:foo/bar:pull"},
			expected:     []string{"repository:foo/bar:pull"},
		},
		// Dedup is textual only: "pull" and "pull,push" are distinct scopes.
		{
			scopesInCtx:  []string{"repository:foo/bar:pull"},
			commonScopes: []string{"repository:foo/bar:pull,push"},
			expected:     []string{"repository:foo/bar:pull", "repository:foo/bar:pull,push"},
		},
		{
			scopesInCtx:  []string{"repository:foo/bar:pull"},
			commonScopes: []string{"repository:foo/bar:pull,push", "repository:foo/bar:pull"},
			expected:     []string{"repository:foo/bar:pull", "repository:foo/bar:pull,push"},
		},
	}
	for _, tc := range testCases {
		ctx := context.WithValue(context.TODO(), tokenScopesKey{}, tc.scopesInCtx)
		actual := GetTokenScopes(ctx, tc.commonScopes)
		assert.Equal(t, tc.expected, actual)
	}
}
|
||||
|
||||
func TestCustomScope(t *testing.T) {
|
||||
scope := "whatever:foo/bar:pull"
|
||||
ctx := WithScope(context.Background(), scope)
|
||||
ctx = ContextWithAppendPullRepositoryScope(ctx, "foo/bar")
|
||||
|
||||
scopes := GetTokenScopes(ctx, []string{})
|
||||
assert.Equal(t, []string{"repository:foo/bar:pull", scope}, scopes)
|
||||
}
|
||||
101
core/remotes/docker/status.go
Normal file
101
core/remotes/docker/status.go
Normal file
@@ -0,0 +1,101 @@
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package docker
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
"github.com/containerd/containerd/v2/core/content"
|
||||
"github.com/containerd/containerd/v2/errdefs"
|
||||
"github.com/moby/locker"
|
||||
)
|
||||
|
||||
// Status of a content operation
type Status struct {
	content.Status

	// Committed indicates whether the data has been successfully committed
	// to the content store.
	Committed bool

	// ErrClosed contains error encountered on close.
	ErrClosed error

	// UploadUUID is used by the Docker registry to reference blob uploads
	UploadUUID string

	// PushStatus contains status related to push.
	PushStatus
}

// PushStatus describes push-specific details of a content operation.
type PushStatus struct {
	// MountedFrom is the source content was cross-repo mounted from (empty if no cross-repo mount was performed).
	MountedFrom string

	// Exists indicates whether content already exists in the repository and wasn't uploaded.
	Exists bool
}

// StatusTracker to track status of operations
type StatusTracker interface {
	// GetStatus returns the status stored for the given ref, or an error
	// wrapping errdefs.ErrNotFound when nothing has been recorded.
	GetStatus(string) (Status, error)
	// SetStatus stores status under ref, replacing any previous value.
	SetStatus(string, Status)
}

// StatusTrackLocker to track status of operations with lock
type StatusTrackLocker interface {
	StatusTracker
	// Lock acquires an exclusive, per-ref named lock.
	Lock(string)
	// Unlock releases the named lock for ref.
	Unlock(string)
}
|
||||
|
||||
type memoryStatusTracker struct {
|
||||
statuses map[string]Status
|
||||
m sync.Mutex
|
||||
locker *locker.Locker
|
||||
}
|
||||
|
||||
// NewInMemoryTracker returns a StatusTracker that tracks content status in-memory
|
||||
func NewInMemoryTracker() StatusTrackLocker {
|
||||
return &memoryStatusTracker{
|
||||
statuses: map[string]Status{},
|
||||
locker: locker.New(),
|
||||
}
|
||||
}
|
||||
|
||||
func (t *memoryStatusTracker) GetStatus(ref string) (Status, error) {
|
||||
t.m.Lock()
|
||||
defer t.m.Unlock()
|
||||
status, ok := t.statuses[ref]
|
||||
if !ok {
|
||||
return Status{}, fmt.Errorf("status for ref %v: %w", ref, errdefs.ErrNotFound)
|
||||
}
|
||||
return status, nil
|
||||
}
|
||||
|
||||
func (t *memoryStatusTracker) SetStatus(ref string, status Status) {
|
||||
t.m.Lock()
|
||||
t.statuses[ref] = status
|
||||
t.m.Unlock()
|
||||
}
|
||||
|
||||
func (t *memoryStatusTracker) Lock(ref string) {
|
||||
t.locker.Lock(ref)
|
||||
}
|
||||
|
||||
func (t *memoryStatusTracker) Unlock(ref string) {
|
||||
t.locker.Unlock(ref)
|
||||
}
|
||||
55
core/remotes/errors/errors.go
Normal file
55
core/remotes/errors/errors.go
Normal file
@@ -0,0 +1,55 @@
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package errors
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
var _ error = ErrUnexpectedStatus{}
|
||||
|
||||
// ErrUnexpectedStatus is returned if a registry API request returned with unexpected HTTP status
|
||||
type ErrUnexpectedStatus struct {
|
||||
Status string
|
||||
StatusCode int
|
||||
Body []byte
|
||||
RequestURL, RequestMethod string
|
||||
}
|
||||
|
||||
func (e ErrUnexpectedStatus) Error() string {
|
||||
return fmt.Sprintf("unexpected status from %s request to %s: %s", e.RequestMethod, e.RequestURL, e.Status)
|
||||
}
|
||||
|
||||
// NewUnexpectedStatusErr creates an ErrUnexpectedStatus from HTTP response
|
||||
func NewUnexpectedStatusErr(resp *http.Response) error {
|
||||
var b []byte
|
||||
if resp.Body != nil {
|
||||
b, _ = io.ReadAll(io.LimitReader(resp.Body, 64000)) // 64KB
|
||||
}
|
||||
err := ErrUnexpectedStatus{
|
||||
Body: b,
|
||||
Status: resp.Status,
|
||||
StatusCode: resp.StatusCode,
|
||||
RequestMethod: resp.Request.Method,
|
||||
}
|
||||
if resp.Request.URL != nil {
|
||||
err.RequestURL = resp.Request.URL.String()
|
||||
}
|
||||
return err
|
||||
}
|
||||
394
core/remotes/handlers.go
Normal file
394
core/remotes/handlers.go
Normal file
@@ -0,0 +1,394 @@
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package remotes
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/containerd/containerd/v2/core/content"
|
||||
"github.com/containerd/containerd/v2/core/images"
|
||||
"github.com/containerd/containerd/v2/errdefs"
|
||||
"github.com/containerd/containerd/v2/labels"
|
||||
"github.com/containerd/containerd/v2/platforms"
|
||||
"github.com/containerd/log"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"golang.org/x/sync/semaphore"
|
||||
)
|
||||
|
||||
type refKeyPrefix struct{}
|
||||
|
||||
// WithMediaTypeKeyPrefix adds a custom key prefix for a media type which is used when storing
|
||||
// data in the content store from the FetchHandler.
|
||||
//
|
||||
// Used in `MakeRefKey` to determine what the key prefix should be.
|
||||
func WithMediaTypeKeyPrefix(ctx context.Context, mediaType, prefix string) context.Context {
|
||||
var values map[string]string
|
||||
if v := ctx.Value(refKeyPrefix{}); v != nil {
|
||||
values = v.(map[string]string)
|
||||
} else {
|
||||
values = make(map[string]string)
|
||||
}
|
||||
|
||||
values[mediaType] = prefix
|
||||
return context.WithValue(ctx, refKeyPrefix{}, values)
|
||||
}
|
||||
|
||||
// MakeRefKey returns a unique reference for the descriptor. This reference can be
|
||||
// used to lookup ongoing processes related to the descriptor. This function
|
||||
// may look to the context to namespace the reference appropriately.
|
||||
func MakeRefKey(ctx context.Context, desc ocispec.Descriptor) string {
|
||||
key := desc.Digest.String()
|
||||
if desc.Annotations != nil {
|
||||
if name, ok := desc.Annotations[ocispec.AnnotationRefName]; ok {
|
||||
key = fmt.Sprintf("%s@%s", name, desc.Digest.String())
|
||||
}
|
||||
}
|
||||
|
||||
if v := ctx.Value(refKeyPrefix{}); v != nil {
|
||||
values := v.(map[string]string)
|
||||
if prefix := values[desc.MediaType]; prefix != "" {
|
||||
return prefix + "-" + key
|
||||
}
|
||||
}
|
||||
|
||||
switch {
|
||||
case images.IsManifestType(desc.MediaType):
|
||||
return "manifest-" + key
|
||||
case images.IsIndexType(desc.MediaType):
|
||||
return "index-" + key
|
||||
case images.IsLayerType(desc.MediaType):
|
||||
return "layer-" + key
|
||||
case images.IsKnownConfig(desc.MediaType):
|
||||
return "config-" + key
|
||||
default:
|
||||
log.G(ctx).Warnf("reference for unknown type: %s", desc.MediaType)
|
||||
return "unknown-" + key
|
||||
}
|
||||
}
|
||||
|
||||
// FetchHandler returns a handler that will fetch all content into the ingester
|
||||
// discovered in a call to Dispatch. Use with ChildrenHandler to do a full
|
||||
// recursive fetch.
|
||||
func FetchHandler(ingester content.Ingester, fetcher Fetcher) images.HandlerFunc {
|
||||
return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
|
||||
ctx = log.WithLogger(ctx, log.G(ctx).WithFields(log.Fields{
|
||||
"digest": desc.Digest,
|
||||
"mediatype": desc.MediaType,
|
||||
"size": desc.Size,
|
||||
}))
|
||||
|
||||
if desc.MediaType == images.MediaTypeDockerSchema1Manifest {
|
||||
return nil, fmt.Errorf("%v not supported", desc.MediaType)
|
||||
}
|
||||
err := Fetch(ctx, ingester, fetcher, desc)
|
||||
if errdefs.IsAlreadyExists(err) {
|
||||
return nil, nil
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
// Fetch fetches the given digest into the provided ingester.
//
// The ingest ref is derived via MakeRefKey, so a previously interrupted fetch
// of the same descriptor resumes from the writer's recorded offset. When the
// writer already holds the complete content, the commit's "already exists"
// error (if any) is propagated to the caller rather than swallowed.
func Fetch(ctx context.Context, ingester content.Ingester, fetcher Fetcher, desc ocispec.Descriptor) error {
	log.G(ctx).Debug("fetch")

	cw, err := content.OpenWriter(ctx, ingester, content.WithRef(MakeRefKey(ctx, desc)), content.WithDescriptor(desc))
	if err != nil {
		return err
	}
	defer cw.Close()

	ws, err := cw.Status()
	if err != nil {
		return err
	}

	if desc.Size == 0 {
		// most likely a poorly configured registry/web front end which responded with no
		// Content-Length header; unable (not to mention useless) to commit a 0-length entry
		// into the content store. Error out here otherwise the error sent back is confusing
		return fmt.Errorf("unable to fetch descriptor (%s) which reports content size of zero: %w", desc.Digest, errdefs.ErrInvalidArgument)
	}
	if ws.Offset == desc.Size {
		// If writer is already complete, commit and return
		err := cw.Commit(ctx, desc.Size, desc.Digest)
		if err != nil && !errdefs.IsAlreadyExists(err) {
			return fmt.Errorf("failed commit on ref %q: %w", ws.Ref, err)
		}
		// NOTE: an "already exists" error is intentionally returned here;
		// callers such as FetchHandler filter it out.
		return err
	}

	// Fast path: the descriptor embeds its full payload, so no remote fetch
	// is needed.
	if desc.Size == int64(len(desc.Data)) {
		return content.Copy(ctx, cw, bytes.NewReader(desc.Data), desc.Size, desc.Digest)
	}

	rc, err := fetcher.Fetch(ctx, desc)
	if err != nil {
		return err
	}
	defer rc.Close()

	return content.Copy(ctx, cw, rc, desc.Size, desc.Digest)
}
|
||||
|
||||
// PushHandler returns a handler that will push all content from the provider
|
||||
// using a writer from the pusher.
|
||||
func PushHandler(pusher Pusher, provider content.Provider) images.HandlerFunc {
|
||||
return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
|
||||
ctx = log.WithLogger(ctx, log.G(ctx).WithFields(log.Fields{
|
||||
"digest": desc.Digest,
|
||||
"mediatype": desc.MediaType,
|
||||
"size": desc.Size,
|
||||
}))
|
||||
|
||||
err := push(ctx, provider, pusher, desc)
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
func push(ctx context.Context, provider content.Provider, pusher Pusher, desc ocispec.Descriptor) error {
|
||||
log.G(ctx).Debug("push")
|
||||
|
||||
var (
|
||||
cw content.Writer
|
||||
err error
|
||||
)
|
||||
if cs, ok := pusher.(content.Ingester); ok {
|
||||
cw, err = content.OpenWriter(ctx, cs, content.WithRef(MakeRefKey(ctx, desc)), content.WithDescriptor(desc))
|
||||
} else {
|
||||
cw, err = pusher.Push(ctx, desc)
|
||||
}
|
||||
if err != nil {
|
||||
if !errdefs.IsAlreadyExists(err) {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
defer cw.Close()
|
||||
|
||||
ra, err := provider.ReaderAt(ctx, desc)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer ra.Close()
|
||||
|
||||
rd := io.NewSectionReader(ra, 0, desc.Size)
|
||||
return content.Copy(ctx, cw, rd, desc.Size, desc.Digest)
|
||||
}
|
||||
|
||||
// PushContent pushes content specified by the descriptor from the provider.
//
// Base handlers can be provided which will be called before any push specific
// handlers.
//
// If the passed in content.Provider is also a content.InfoProvider (such as
// content.Manager) then this will also annotate the distribution sources using
// labels prefixed with "containerd.io/distribution.source".
//
// Push ordering: blobs and configs are pushed during the first Dispatch,
// manifests in a second pass, and indexes last (deepest first), so that a
// parent is never uploaded before the children it references.
func PushContent(ctx context.Context, pusher Pusher, desc ocispec.Descriptor, store content.Provider, limiter *semaphore.Weighted, platform platforms.MatchComparer, wrapper func(h images.Handler) images.Handler) error {

	// m guards manifests and indexStack; Dispatch may invoke the filter
	// handler from multiple goroutines.
	var m sync.Mutex
	manifests := []ocispec.Descriptor{}
	indexStack := []ocispec.Descriptor{}

	// filterHandler defers manifests and indexes to later passes, stopping
	// descent so only leaf content is pushed in the first Dispatch.
	filterHandler := images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
		if images.IsManifestType(desc.MediaType) {
			m.Lock()
			manifests = append(manifests, desc)
			m.Unlock()
			return nil, images.ErrStopHandler
		} else if images.IsIndexType(desc.MediaType) {
			m.Lock()
			indexStack = append(indexStack, desc)
			m.Unlock()
			return nil, images.ErrStopHandler
		}
		return nil, nil
	})

	pushHandler := PushHandler(pusher, store)

	platformFilterhandler := images.FilterPlatforms(images.ChildrenHandler(store), platform)

	var handler images.Handler
	// NOTE: this m intentionally shadows the mutex above within the if scope.
	if m, ok := store.(content.InfoProvider); ok {
		annotateHandler := annotateDistributionSourceHandler(platformFilterhandler, m)
		handler = images.Handlers(annotateHandler, filterHandler, pushHandler)
	} else {
		handler = images.Handlers(platformFilterhandler, filterHandler, pushHandler)
	}

	if wrapper != nil {
		handler = wrapper(handler)
	}

	if err := images.Dispatch(ctx, handler, limiter, desc); err != nil {
		return err
	}

	if err := images.Dispatch(ctx, pushHandler, limiter, manifests...); err != nil {
		return err
	}

	// Iterate in reverse order as seen, parent always uploaded after child
	for i := len(indexStack) - 1; i >= 0; i-- {
		err := images.Dispatch(ctx, pushHandler, limiter, indexStack[i])
		if err != nil {
			// TODO(estesp): until we have a more complete method for index push, we need to report
			// missing dependencies in an index/manifest list by sensing the "400 Bad Request"
			// as a marker for this problem
			if errors.Unwrap(err) != nil && strings.Contains(errors.Unwrap(err).Error(), "400 Bad Request") {
				return fmt.Errorf("manifest list/index references to blobs and/or manifests are missing in your target registry: %w", err)
			}
			return err
		}
	}

	return nil
}
|
||||
|
||||
// SkipNonDistributableBlobs returns a handler that skips blobs that have a media type that is "non-distributeable".
|
||||
// An example of this kind of content would be a Windows base layer, which is not supposed to be redistributed.
|
||||
//
|
||||
// This is based on the media type of the content:
|
||||
// - application/vnd.oci.image.layer.nondistributable
|
||||
// - application/vnd.docker.image.rootfs.foreign
|
||||
func SkipNonDistributableBlobs(f images.HandlerFunc) images.HandlerFunc {
|
||||
return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
|
||||
if images.IsNonDistributable(desc.MediaType) {
|
||||
log.G(ctx).WithField("digest", desc.Digest).WithField("mediatype", desc.MediaType).Debug("Skipping non-distributable blob")
|
||||
return nil, images.ErrSkipDesc
|
||||
}
|
||||
|
||||
children, err := f(ctx, desc)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(children) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
out := make([]ocispec.Descriptor, 0, len(children))
|
||||
for _, child := range children {
|
||||
if !images.IsNonDistributable(child.MediaType) {
|
||||
out = append(out, child)
|
||||
} else {
|
||||
log.G(ctx).WithField("digest", child.Digest).WithField("mediatype", child.MediaType).Debug("Skipping non-distributable blob")
|
||||
}
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
}
|
||||
|
||||
// FilterManifestByPlatformHandler allows Handler to handle non-target
|
||||
// platform's manifest and configuration data.
|
||||
func FilterManifestByPlatformHandler(f images.HandlerFunc, m platforms.Matcher) images.HandlerFunc {
|
||||
return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
|
||||
children, err := f(ctx, desc)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// no platform information
|
||||
if desc.Platform == nil || m == nil {
|
||||
return children, nil
|
||||
}
|
||||
|
||||
if images.IsManifestType(desc.MediaType) && !m.Match(*desc.Platform) {
|
||||
var descs []ocispec.Descriptor
|
||||
for _, child := range children {
|
||||
if images.IsConfigType(child.MediaType) {
|
||||
descs = append(descs, child)
|
||||
}
|
||||
}
|
||||
return descs, nil
|
||||
}
|
||||
return children, nil
|
||||
}
|
||||
}
|
||||
|
||||
// annotateDistributionSourceHandler add distribution source label into
// annotation of config or blob descriptor.
//
// Labels are gathered from three places, in priority order (a label already
// present on the child is never overwritten — see copyDistributionSourceLabels):
// the child's own content-store info, the parent descriptor's annotations,
// and the parent's content-store labels.
func annotateDistributionSourceHandler(f images.HandlerFunc, provider content.InfoProvider) images.HandlerFunc {
	return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
		children, err := f(ctx, desc)
		if err != nil {
			return nil, err
		}

		// Distribution source is only used for config or blob but may be inherited from
		// a manifest or manifest list
		if !images.IsManifestType(desc.MediaType) && !images.IsIndexType(desc.MediaType) {
			return children, nil
		}

		parentSourceAnnotations := desc.Annotations
		var parentLabels map[string]string
		// A NotFound here is fine: the parent simply has no local labels.
		if pi, err := provider.Info(ctx, desc.Digest); err != nil {
			if !errdefs.IsNotFound(err) {
				return nil, err
			}
		} else {
			parentLabels = pi.Labels
		}

		for i := range children {
			child := children[i]

			// On NotFound, info is its zero value, so info.Labels is nil and
			// the copy below is a no-op.
			info, err := provider.Info(ctx, child.Digest)
			if err != nil {
				if !errdefs.IsNotFound(err) {
					return nil, err
				}
			}
			copyDistributionSourceLabels(info.Labels, &child)

			// Annotate with parent labels for cross repo mount or fetch.
			// Parent sources may apply to all children since most registries
			// enforce that children exist before the manifests.
			copyDistributionSourceLabels(parentSourceAnnotations, &child)
			copyDistributionSourceLabels(parentLabels, &child)

			children[i] = child
		}
		return children, nil
	}
}
|
||||
|
||||
func copyDistributionSourceLabels(from map[string]string, to *ocispec.Descriptor) {
|
||||
for k, v := range from {
|
||||
if !strings.HasPrefix(k, labels.LabelDistributionSource+".") {
|
||||
continue
|
||||
}
|
||||
|
||||
if to.Annotations == nil {
|
||||
to.Annotations = make(map[string]string)
|
||||
} else {
|
||||
// Only propagate the parent label if the child doesn't already have it.
|
||||
if _, has := to.Annotations[k]; has {
|
||||
continue
|
||||
}
|
||||
}
|
||||
to.Annotations[k] = v
|
||||
}
|
||||
}
|
||||
218
core/remotes/handlers_test.go
Normal file
218
core/remotes/handlers_test.go
Normal file
@@ -0,0 +1,218 @@
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package remotes
|
||||
|
||||
import (
|
||||
"context"
|
||||
_ "crypto/sha256"
|
||||
"encoding/json"
|
||||
"sync"
|
||||
"testing"
|
||||
|
||||
"github.com/containerd/containerd/v2/core/content"
|
||||
"github.com/containerd/containerd/v2/core/images"
|
||||
"github.com/containerd/containerd/v2/plugins/content/local"
|
||||
"github.com/opencontainers/go-digest"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
)
|
||||
|
||||
func TestContextCustomKeyPrefix(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
cmt := "testing/custom.media.type"
|
||||
ctx = WithMediaTypeKeyPrefix(ctx, images.MediaTypeDockerSchema2Layer, "bananas")
|
||||
ctx = WithMediaTypeKeyPrefix(ctx, cmt, "apples")
|
||||
|
||||
// makes sure that even though we've supplied some custom handling, the built-in still works
|
||||
t.Run("normal supported case", func(t *testing.T) {
|
||||
desc := ocispec.Descriptor{MediaType: ocispec.MediaTypeImageLayer}
|
||||
expected := "layer-"
|
||||
|
||||
actual := MakeRefKey(ctx, desc)
|
||||
if actual != expected {
|
||||
t.Fatalf("unexpected ref key, expected %s, got: %s", expected, actual)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("unknown media type", func(t *testing.T) {
|
||||
desc := ocispec.Descriptor{MediaType: "we.dont.know.what.this.is"}
|
||||
expected := "unknown-"
|
||||
|
||||
actual := MakeRefKey(ctx, desc)
|
||||
if actual != expected {
|
||||
t.Fatalf("unexpected ref key, expected %s, got: %s", expected, actual)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("overwrite supported media type", func(t *testing.T) {
|
||||
desc := ocispec.Descriptor{MediaType: images.MediaTypeDockerSchema2Layer}
|
||||
expected := "bananas-"
|
||||
|
||||
actual := MakeRefKey(ctx, desc)
|
||||
if actual != expected {
|
||||
t.Fatalf("unexpected ref key, expected %s, got: %s", expected, actual)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("custom media type", func(t *testing.T) {
|
||||
desc := ocispec.Descriptor{MediaType: cmt}
|
||||
expected := "apples-"
|
||||
|
||||
actual := MakeRefKey(ctx, desc)
|
||||
if actual != expected {
|
||||
t.Fatalf("unexpected ref key, expected %s, got: %s", expected, actual)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
//nolint:staticcheck // Non-distributable layers are deprecated
|
||||
func TestSkipNonDistributableBlobs(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
out, err := SkipNonDistributableBlobs(images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
|
||||
return []ocispec.Descriptor{
|
||||
{MediaType: images.MediaTypeDockerSchema2Layer, Digest: "test:1"},
|
||||
{MediaType: images.MediaTypeDockerSchema2LayerForeign, Digest: "test:2"},
|
||||
{MediaType: images.MediaTypeDockerSchema2LayerForeignGzip, Digest: "test:3"},
|
||||
{MediaType: ocispec.MediaTypeImageLayerNonDistributable, Digest: "test:4"},
|
||||
{MediaType: ocispec.MediaTypeImageLayerNonDistributableGzip, Digest: "test:5"},
|
||||
{MediaType: ocispec.MediaTypeImageLayerNonDistributableZstd, Digest: "test:6"},
|
||||
}, nil
|
||||
}))(ctx, ocispec.Descriptor{MediaType: images.MediaTypeDockerSchema2Manifest})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if len(out) != 1 {
|
||||
t.Fatalf("unexpected number of descriptors returned: %d", len(out))
|
||||
}
|
||||
if out[0].Digest != "test:1" {
|
||||
t.Fatalf("unexpected digest returned: %s", out[0].Digest)
|
||||
}
|
||||
|
||||
dir := t.TempDir()
|
||||
cs, err := local.NewLabeledStore(dir, newMemoryLabelStore())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
write := func(i interface{}, ref string) digest.Digest {
|
||||
t.Helper()
|
||||
|
||||
data, err := json.Marshal(i)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
w, err := cs.Writer(ctx, content.WithRef(ref))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer w.Close()
|
||||
|
||||
dgst := digest.SHA256.FromBytes(data)
|
||||
|
||||
n, err := w.Write(data)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := w.Commit(ctx, int64(n), dgst); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
return dgst
|
||||
}
|
||||
|
||||
configDigest := write(ocispec.ImageConfig{}, "config")
|
||||
|
||||
manifest := ocispec.Manifest{
|
||||
Config: ocispec.Descriptor{Digest: configDigest, MediaType: ocispec.MediaTypeImageConfig},
|
||||
MediaType: ocispec.MediaTypeImageManifest,
|
||||
Layers: []ocispec.Descriptor{
|
||||
{MediaType: images.MediaTypeDockerSchema2Layer, Digest: "test:1"},
|
||||
{MediaType: images.MediaTypeDockerSchema2LayerForeign, Digest: "test:2"},
|
||||
{MediaType: images.MediaTypeDockerSchema2LayerForeignGzip, Digest: "test:3"},
|
||||
{MediaType: ocispec.MediaTypeImageLayerNonDistributable, Digest: "test:4"},
|
||||
{MediaType: ocispec.MediaTypeImageLayerNonDistributableGzip, Digest: "test:5"},
|
||||
{MediaType: ocispec.MediaTypeImageLayerNonDistributableZstd, Digest: "test:6"},
|
||||
},
|
||||
}
|
||||
|
||||
manifestDigest := write(manifest, "manifest")
|
||||
|
||||
out, err = SkipNonDistributableBlobs(images.ChildrenHandler(cs))(ctx, ocispec.Descriptor{MediaType: manifest.MediaType, Digest: manifestDigest})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if len(out) != 2 {
|
||||
t.Fatalf("unexpected number of descriptors returned: %v", out)
|
||||
}
|
||||
|
||||
if out[0].Digest != configDigest {
|
||||
t.Fatalf("unexpected digest returned: %v", out[0])
|
||||
}
|
||||
if out[1].Digest != manifest.Layers[0].Digest {
|
||||
t.Fatalf("unexpected digest returned: %v", out[1])
|
||||
}
|
||||
}
|
||||
|
||||
type memoryLabelStore struct {
|
||||
l sync.Mutex
|
||||
labels map[digest.Digest]map[string]string
|
||||
}
|
||||
|
||||
func newMemoryLabelStore() local.LabelStore {
|
||||
return &memoryLabelStore{
|
||||
labels: map[digest.Digest]map[string]string{},
|
||||
}
|
||||
}
|
||||
|
||||
func (mls *memoryLabelStore) Get(d digest.Digest) (map[string]string, error) {
|
||||
mls.l.Lock()
|
||||
labels := mls.labels[d]
|
||||
mls.l.Unlock()
|
||||
|
||||
return labels, nil
|
||||
}
|
||||
|
||||
func (mls *memoryLabelStore) Set(d digest.Digest, labels map[string]string) error {
|
||||
mls.l.Lock()
|
||||
mls.labels[d] = labels
|
||||
mls.l.Unlock()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (mls *memoryLabelStore) Update(d digest.Digest, update map[string]string) (map[string]string, error) {
|
||||
mls.l.Lock()
|
||||
labels, ok := mls.labels[d]
|
||||
if !ok {
|
||||
labels = map[string]string{}
|
||||
}
|
||||
for k, v := range update {
|
||||
if v == "" {
|
||||
delete(labels, k)
|
||||
} else {
|
||||
labels[k] = v
|
||||
}
|
||||
}
|
||||
mls.labels[d] = labels
|
||||
mls.l.Unlock()
|
||||
|
||||
return labels, nil
|
||||
}
|
||||
111
core/remotes/resolver.go
Normal file
111
core/remotes/resolver.go
Normal file
@@ -0,0 +1,111 @@
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package remotes
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
|
||||
"github.com/containerd/containerd/v2/core/content"
|
||||
"github.com/opencontainers/go-digest"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
)
|
||||
|
||||
// Resolver provides remotes based on a locator.
type Resolver interface {
	// Resolve attempts to resolve the reference into a name and descriptor.
	//
	// The argument `ref` should be a scheme-less URI representing the remote.
	// Structurally, it has a host and path. The "host" can be used to directly
	// reference a specific host or be matched against a specific handler.
	//
	// The returned name should be used to identify the referenced entity.
	// Depending on the remote namespace, this may be immutable or mutable.
	// While the name may differ from ref, it should itself be a valid ref.
	//
	// If the resolution fails, an error will be returned.
	Resolve(ctx context.Context, ref string) (name string, desc ocispec.Descriptor, err error)

	// Fetcher returns a new fetcher for the provided reference.
	// All content fetched from the returned fetcher will be
	// from the namespace referred to by ref.
	Fetcher(ctx context.Context, ref string) (Fetcher, error)

	// Pusher returns a new pusher for the provided reference.
	// The returned Pusher should satisfy content.Ingester and concurrent attempts
	// to push the same blob using the Ingester API should result in ErrUnavailable.
	Pusher(ctx context.Context, ref string) (Pusher, error)
}
|
||||
|
||||
// Fetcher fetches content.
// A fetcher implementation may implement the FetcherByDigest interface too.
type Fetcher interface {
	// Fetch the resource identified by the descriptor.
	Fetch(ctx context.Context, desc ocispec.Descriptor) (io.ReadCloser, error)
}
|
||||
|
||||
// FetcherByDigest fetches content by the digest.
type FetcherByDigest interface {
	// FetchByDigest fetches the resource identified by the digest.
	//
	// FetcherByDigest usually returns an incomplete descriptor.
	// Typically, the media type is always set to "application/octet-stream",
	// and the annotations are unset.
	FetchByDigest(ctx context.Context, dgst digest.Digest, opts ...FetchByDigestOpts) (io.ReadCloser, ocispec.Descriptor, error)
}
|
||||
|
||||
// Pusher pushes content.
type Pusher interface {
	// Push returns a content writer for the given resource identified
	// by the descriptor.
	Push(ctx context.Context, d ocispec.Descriptor) (content.Writer, error)
}
|
||||
|
||||
// FetcherFunc allows package users to implement a Fetcher with just a
// function.
type FetcherFunc func(ctx context.Context, desc ocispec.Descriptor) (io.ReadCloser, error)

// Fetch implements Fetcher by delegating to the wrapped function.
func (fn FetcherFunc) Fetch(ctx context.Context, desc ocispec.Descriptor) (io.ReadCloser, error) {
	return fn(ctx, desc)
}
|
||||
|
||||
// PusherFunc allows package users to implement a Pusher with just a
// function.
type PusherFunc func(ctx context.Context, desc ocispec.Descriptor) (content.Writer, error)

// Push implements Pusher by delegating to the wrapped function.
func (fn PusherFunc) Push(ctx context.Context, desc ocispec.Descriptor) (content.Writer, error) {
	return fn(ctx, desc)
}
|
||||
|
||||
// FetchByDigestConfig provides configuration for fetching content by digest
|
||||
type FetchByDigestConfig struct {
|
||||
//Mediatype specifies mediatype header to append for fetch request
|
||||
Mediatype string
|
||||
}
|
||||
|
||||
// FetchByDigestOpts allows callers to set options for fetch object
|
||||
type FetchByDigestOpts func(context.Context, *FetchByDigestConfig) error
|
||||
|
||||
// WithMediaType sets the media type header for fetch request
|
||||
func WithMediaType(mediatype string) FetchByDigestOpts {
|
||||
return func(ctx context.Context, cfg *FetchByDigestConfig) error {
|
||||
cfg.Mediatype = mediatype
|
||||
return nil
|
||||
}
|
||||
}
|
||||
Reference in New Issue
Block a user