Merge pull request #46 from Random-Liu/wait-image-pulling

Wait and check image pulling progress.

This commit is contained in:
commit a49f66e0bb

146  Godeps/Godeps.json  (generated)
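For orientation, the substantive change in this PR lives in the image pull path: every descriptor dispatched through containerd's image handler pipeline is recorded by its ref key, and once dispatch returns (or bails out because another pull holds the same resources), the service polls the content store's ingestion status until none of the recorded refs are still active. The sketch below is a minimal, self-contained illustration of that wait-and-poll pattern only; `refTracker`, `ingestStatus`, and `contentStatuser` are hypothetical stand-ins for the resourceSet, status entries, and content store client that appear in the real diff further down.

package sketch

import (
	"context"
	"sync"
	"time"
)

// ingestStatus is a hypothetical stand-in for a content-store status entry:
// the ref key of an in-flight download plus its progress.
type ingestStatus struct {
	Ref    string
	Offset int64
	Total  int64
}

// contentStatuser is a hypothetical stand-in for the content store client;
// an empty filter is assumed to return all active ingestions.
type contentStatuser interface {
	Status(ctx context.Context, filter string) ([]ingestStatus, error)
}

// refTracker mirrors the role of the resourceSet added in this PR: a
// concurrency-safe record of every ref key seen while handlers run.
type refTracker struct {
	mu   sync.Mutex
	refs map[string]struct{}
}

func (t *refTracker) add(ref string) {
	t.mu.Lock()
	defer t.mu.Unlock()
	if t.refs == nil {
		t.refs = make(map[string]struct{})
	}
	t.refs[ref] = struct{}{}
}

// waitForRefs polls Status until none of the tracked refs are still being
// ingested, which is the same loop shape as waitForResourcesDownloading below.
func (t *refTracker) waitForRefs(ctx context.Context, store contentStatuser) error {
	ticker := time.NewTicker(200 * time.Millisecond)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			statuses, err := store.Status(ctx, "")
			if err != nil {
				return err
			}
			t.mu.Lock()
			pulling := false
			for _, s := range statuses {
				if _, ok := t.refs[s.Ref]; ok {
					pulling = true // one of our resources is still downloading
				}
			}
			t.mu.Unlock()
			if !pulling {
				return nil
			}
		case <-ctx.Done():
			return ctx.Err()
		}
	}
}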
@@ -1,6 +1,6 @@
 {
 	"ImportPath": "github.com/kubernetes-incubator/cri-containerd",
-	"GoVersion": "go1.7",
+	"GoVersion": "go1.8",
 	"GodepVersion": "v79",
 	"Packages": [
 		"./..."
@@ -28,123 +28,127 @@
 	},
 	{
 		"ImportPath": "github.com/containerd/containerd",
-		"Comment": "v0.2.3-745-gbbeaab5",
-		"Rev": "bbeaab5ee38568daa84b8c233a8067b524588355"
+		"Comment": "v0.2.3-856-g193abed",
+		"Rev": "193abed96e06a45fead2ed86cfbb11c2a0a001a4"
 	},
 	{
 		"ImportPath": "github.com/containerd/containerd/api/services/content",
-		"Comment": "v0.2.3-745-gbbeaab5",
-		"Rev": "bbeaab5ee38568daa84b8c233a8067b524588355"
+		"Comment": "v0.2.3-856-g193abed",
+		"Rev": "193abed96e06a45fead2ed86cfbb11c2a0a001a4"
 	},
 	{
 		"ImportPath": "github.com/containerd/containerd/api/services/execution",
-		"Comment": "v0.2.3-745-gbbeaab5",
-		"Rev": "bbeaab5ee38568daa84b8c233a8067b524588355"
+		"Comment": "v0.2.3-856-g193abed",
+		"Rev": "193abed96e06a45fead2ed86cfbb11c2a0a001a4"
 	},
 	{
 		"ImportPath": "github.com/containerd/containerd/api/services/images",
-		"Comment": "v0.2.3-745-gbbeaab5",
-		"Rev": "bbeaab5ee38568daa84b8c233a8067b524588355"
+		"Comment": "v0.2.3-856-g193abed",
+		"Rev": "193abed96e06a45fead2ed86cfbb11c2a0a001a4"
 	},
 	{
 		"ImportPath": "github.com/containerd/containerd/api/services/rootfs",
-		"Comment": "v0.2.3-745-gbbeaab5",
-		"Rev": "bbeaab5ee38568daa84b8c233a8067b524588355"
+		"Comment": "v0.2.3-856-g193abed",
+		"Rev": "193abed96e06a45fead2ed86cfbb11c2a0a001a4"
 	},
 	{
 		"ImportPath": "github.com/containerd/containerd/api/types/container",
-		"Comment": "v0.2.3-745-gbbeaab5",
-		"Rev": "bbeaab5ee38568daa84b8c233a8067b524588355"
+		"Comment": "v0.2.3-856-g193abed",
+		"Rev": "193abed96e06a45fead2ed86cfbb11c2a0a001a4"
 	},
 	{
 		"ImportPath": "github.com/containerd/containerd/api/types/descriptor",
-		"Comment": "v0.2.3-745-gbbeaab5",
-		"Rev": "bbeaab5ee38568daa84b8c233a8067b524588355"
+		"Comment": "v0.2.3-856-g193abed",
+		"Rev": "193abed96e06a45fead2ed86cfbb11c2a0a001a4"
 	},
 	{
 		"ImportPath": "github.com/containerd/containerd/api/types/mount",
-		"Comment": "v0.2.3-745-gbbeaab5",
-		"Rev": "bbeaab5ee38568daa84b8c233a8067b524588355"
+		"Comment": "v0.2.3-856-g193abed",
+		"Rev": "193abed96e06a45fead2ed86cfbb11c2a0a001a4"
 	},
 	{
 		"ImportPath": "github.com/containerd/containerd/archive",
-		"Comment": "v0.2.3-745-gbbeaab5",
-		"Rev": "bbeaab5ee38568daa84b8c233a8067b524588355"
+		"Comment": "v0.2.3-856-g193abed",
+		"Rev": "193abed96e06a45fead2ed86cfbb11c2a0a001a4"
 	},
 	{
 		"ImportPath": "github.com/containerd/containerd/archive/compression",
-		"Comment": "v0.2.3-745-gbbeaab5",
-		"Rev": "bbeaab5ee38568daa84b8c233a8067b524588355"
+		"Comment": "v0.2.3-856-g193abed",
+		"Rev": "193abed96e06a45fead2ed86cfbb11c2a0a001a4"
 	},
 	{
 		"ImportPath": "github.com/containerd/containerd/content",
-		"Comment": "v0.2.3-745-gbbeaab5",
-		"Rev": "bbeaab5ee38568daa84b8c233a8067b524588355"
+		"Comment": "v0.2.3-856-g193abed",
+		"Rev": "193abed96e06a45fead2ed86cfbb11c2a0a001a4"
 	},
 	{
 		"ImportPath": "github.com/containerd/containerd/fs",
-		"Comment": "v0.2.3-745-gbbeaab5",
-		"Rev": "bbeaab5ee38568daa84b8c233a8067b524588355"
+		"Comment": "v0.2.3-856-g193abed",
+		"Rev": "193abed96e06a45fead2ed86cfbb11c2a0a001a4"
 	},
 	{
 		"ImportPath": "github.com/containerd/containerd/images",
-		"Comment": "v0.2.3-745-gbbeaab5",
-		"Rev": "bbeaab5ee38568daa84b8c233a8067b524588355"
+		"Comment": "v0.2.3-856-g193abed",
+		"Rev": "193abed96e06a45fead2ed86cfbb11c2a0a001a4"
 	},
 	{
 		"ImportPath": "github.com/containerd/containerd/log",
-		"Comment": "v0.2.3-745-gbbeaab5",
-		"Rev": "bbeaab5ee38568daa84b8c233a8067b524588355"
+		"Comment": "v0.2.3-856-g193abed",
+		"Rev": "193abed96e06a45fead2ed86cfbb11c2a0a001a4"
 	},
 	{
 		"ImportPath": "github.com/containerd/containerd/plugin",
-		"Comment": "v0.2.3-745-gbbeaab5",
-		"Rev": "bbeaab5ee38568daa84b8c233a8067b524588355"
+		"Comment": "v0.2.3-856-g193abed",
+		"Rev": "193abed96e06a45fead2ed86cfbb11c2a0a001a4"
 	},
 	{
 		"ImportPath": "github.com/containerd/containerd/reference",
-		"Comment": "v0.2.3-745-gbbeaab5",
-		"Rev": "bbeaab5ee38568daa84b8c233a8067b524588355"
+		"Comment": "v0.2.3-856-g193abed",
+		"Rev": "193abed96e06a45fead2ed86cfbb11c2a0a001a4"
 	},
 	{
 		"ImportPath": "github.com/containerd/containerd/remotes",
-		"Comment": "v0.2.3-745-gbbeaab5",
-		"Rev": "bbeaab5ee38568daa84b8c233a8067b524588355"
+		"Comment": "v0.2.3-856-g193abed",
+		"Rev": "193abed96e06a45fead2ed86cfbb11c2a0a001a4"
 	},
 	{
 		"ImportPath": "github.com/containerd/containerd/remotes/docker",
-		"Comment": "v0.2.3-745-gbbeaab5",
-		"Rev": "bbeaab5ee38568daa84b8c233a8067b524588355"
+		"Comment": "v0.2.3-856-g193abed",
+		"Rev": "193abed96e06a45fead2ed86cfbb11c2a0a001a4"
 	},
 	{
 		"ImportPath": "github.com/containerd/containerd/rootfs",
-		"Comment": "v0.2.3-745-gbbeaab5",
-		"Rev": "bbeaab5ee38568daa84b8c233a8067b524588355"
+		"Comment": "v0.2.3-856-g193abed",
+		"Rev": "193abed96e06a45fead2ed86cfbb11c2a0a001a4"
 	},
 	{
 		"ImportPath": "github.com/containerd/containerd/services/content",
-		"Comment": "v0.2.3-745-gbbeaab5",
-		"Rev": "bbeaab5ee38568daa84b8c233a8067b524588355"
+		"Comment": "v0.2.3-856-g193abed",
+		"Rev": "193abed96e06a45fead2ed86cfbb11c2a0a001a4"
 	},
 	{
 		"ImportPath": "github.com/containerd/containerd/services/images",
-		"Comment": "v0.2.3-745-gbbeaab5",
-		"Rev": "bbeaab5ee38568daa84b8c233a8067b524588355"
+		"Comment": "v0.2.3-856-g193abed",
+		"Rev": "193abed96e06a45fead2ed86cfbb11c2a0a001a4"
 	},
 	{
 		"ImportPath": "github.com/containerd/containerd/services/rootfs",
-		"Comment": "v0.2.3-745-gbbeaab5",
-		"Rev": "bbeaab5ee38568daa84b8c233a8067b524588355"
+		"Comment": "v0.2.3-856-g193abed",
+		"Rev": "193abed96e06a45fead2ed86cfbb11c2a0a001a4"
 	},
 	{
 		"ImportPath": "github.com/containerd/containerd/snapshot",
-		"Comment": "v0.2.3-745-gbbeaab5",
-		"Rev": "bbeaab5ee38568daa84b8c233a8067b524588355"
+		"Comment": "v0.2.3-856-g193abed",
+		"Rev": "193abed96e06a45fead2ed86cfbb11c2a0a001a4"
 	},
 	{
 		"ImportPath": "github.com/containerd/containerd/sys",
-		"Comment": "v0.2.3-745-gbbeaab5",
-		"Rev": "bbeaab5ee38568daa84b8c233a8067b524588355"
+		"Comment": "v0.2.3-856-g193abed",
+		"Rev": "193abed96e06a45fead2ed86cfbb11c2a0a001a4"
 	},
+	{
+		"ImportPath": "github.com/containerd/continuity/sysx",
+		"Rev": "6414d06cab9e2fe082ea29ff42aab627e740d00c"
+	},
 	{
 		"ImportPath": "github.com/containernetworking/cni/libcni",
@@ -173,12 +177,12 @@
 	},
 	{
 		"ImportPath": "github.com/docker/distribution/digestset",
-		"Comment": "v2.6.0-rc.1-130-gb38e583",
+		"Comment": "v2.6.0-rc.1-130-gb38e5838",
 		"Rev": "b38e5838b7b2f2ad48e06ec4b500011976080621"
 	},
 	{
 		"ImportPath": "github.com/docker/distribution/reference",
-		"Comment": "v2.6.0-rc.1-130-gb38e583",
+		"Comment": "v2.6.0-rc.1-130-gb38e5838",
 		"Rev": "b38e5838b7b2f2ad48e06ec4b500011976080621"
 	},
 	{
@@ -203,27 +207,27 @@
 	},
 	{
 		"ImportPath": "github.com/gogo/protobuf/gogoproto",
-		"Comment": "v0.3-150-gd2e1ade",
+		"Comment": "v0.3-150-gd2e1ade2",
 		"Rev": "d2e1ade2d719b78fe5b061b4c18a9f7111b5bdc8"
 	},
 	{
 		"ImportPath": "github.com/gogo/protobuf/proto",
-		"Comment": "v0.3-150-gd2e1ade",
+		"Comment": "v0.3-150-gd2e1ade2",
 		"Rev": "d2e1ade2d719b78fe5b061b4c18a9f7111b5bdc8"
 	},
 	{
 		"ImportPath": "github.com/gogo/protobuf/protoc-gen-gogo/descriptor",
-		"Comment": "v0.3-150-gd2e1ade",
+		"Comment": "v0.3-150-gd2e1ade2",
 		"Rev": "d2e1ade2d719b78fe5b061b4c18a9f7111b5bdc8"
 	},
 	{
 		"ImportPath": "github.com/gogo/protobuf/sortkeys",
-		"Comment": "v0.3-150-gd2e1ade",
+		"Comment": "v0.3-150-gd2e1ade2",
 		"Rev": "d2e1ade2d719b78fe5b061b4c18a9f7111b5bdc8"
 	},
 	{
 		"ImportPath": "github.com/gogo/protobuf/types",
-		"Comment": "v0.3-150-gd2e1ade",
+		"Comment": "v0.3-150-gd2e1ade2",
 		"Rev": "d2e1ade2d719b78fe5b061b4c18a9f7111b5bdc8"
 	},
 	{
@@ -267,7 +271,7 @@
 	},
 	{
 		"ImportPath": "github.com/opencontainers/runc/libcontainer/system",
-		"Comment": "v1.0.0-rc3-21-g50401b5",
+		"Comment": "v1.0.0-rc3-21-g50401b5b",
 		"Rev": "50401b5b4c2e01e4f1372b73a021742deeaf4e2d"
 	},
 	{
@@ -301,19 +305,15 @@
 		"ImportPath": "github.com/spf13/pflag",
 		"Rev": "9ff6c6923cfffbcd502984b8e0c80539a94968b7"
 	},
-	{
-		"ImportPath": "github.com/stevvooe/continuity/sysx",
-		"Rev": "2abd81dfcc64c06e1eb16a081fd187798f4c2919"
-	},
 	{
 		"ImportPath": "github.com/stretchr/testify/assert",
-		"Comment": "v1.1.4-6-g18a02ba",
-		"Rev": "18a02ba4a312f95da08ff4cfc0055750ce50ae9e"
+		"Comment": "v1.1.4",
+		"Rev": "69483b4bd14f5845b5a1e55bca19e954e827f1d0"
 	},
 	{
 		"ImportPath": "github.com/stretchr/testify/require",
-		"Comment": "v1.1.4-6-g18a02ba",
-		"Rev": "18a02ba4a312f95da08ff4cfc0055750ce50ae9e"
+		"Comment": "v1.1.4",
+		"Rev": "69483b4bd14f5845b5a1e55bca19e954e827f1d0"
 	},
 	{
 		"ImportPath": "github.com/syndtr/gocapability/capability",
@@ -330,35 +330,35 @@
 	},
 	{
 		"ImportPath": "golang.org/x/net/context",
-		"Rev": "e90d6d0afc4c315a0d87a568ae68577cc15149a0"
+		"Rev": "8b4af36cd21a1f85a7484b49feb7c79363106d8e"
 	},
 	{
 		"ImportPath": "golang.org/x/net/context/ctxhttp",
-		"Rev": "e90d6d0afc4c315a0d87a568ae68577cc15149a0"
+		"Rev": "8b4af36cd21a1f85a7484b49feb7c79363106d8e"
 	},
 	{
 		"ImportPath": "golang.org/x/net/http2",
-		"Rev": "e90d6d0afc4c315a0d87a568ae68577cc15149a0"
+		"Rev": "8b4af36cd21a1f85a7484b49feb7c79363106d8e"
 	},
 	{
 		"ImportPath": "golang.org/x/net/http2/hpack",
-		"Rev": "e90d6d0afc4c315a0d87a568ae68577cc15149a0"
+		"Rev": "8b4af36cd21a1f85a7484b49feb7c79363106d8e"
 	},
 	{
 		"ImportPath": "golang.org/x/net/idna",
-		"Rev": "e90d6d0afc4c315a0d87a568ae68577cc15149a0"
+		"Rev": "8b4af36cd21a1f85a7484b49feb7c79363106d8e"
 	},
 	{
 		"ImportPath": "golang.org/x/net/internal/timeseries",
-		"Rev": "e90d6d0afc4c315a0d87a568ae68577cc15149a0"
+		"Rev": "8b4af36cd21a1f85a7484b49feb7c79363106d8e"
 	},
 	{
 		"ImportPath": "golang.org/x/net/lex/httplex",
-		"Rev": "e90d6d0afc4c315a0d87a568ae68577cc15149a0"
+		"Rev": "8b4af36cd21a1f85a7484b49feb7c79363106d8e"
 	},
 	{
 		"ImportPath": "golang.org/x/net/trace",
-		"Rev": "e90d6d0afc4c315a0d87a568ae68577cc15149a0"
+		"Rev": "8b4af36cd21a1f85a7484b49feb7c79363106d8e"
 	},
 	{
 		"ImportPath": "golang.org/x/sync/errgroup",
@@ -268,11 +268,11 @@ func (c *criContainerdService) getImageInfo(ctx context.Context, ref string) (
 			normalizedRef, err)
 	}
 	// Get image config
-	desc, err := image.Config(ctx, c.contentProvider)
+	desc, err := image.Config(ctx, c.contentStoreService)
 	if err != nil {
 		return "", 0, nil, fmt.Errorf("failed to get image config descriptor: %v", err)
 	}
-	rc, err := c.contentProvider.Reader(ctx, desc.Digest)
+	rc, err := c.contentStoreService.Reader(ctx, desc.Digest)
 	if err != nil {
 		return "", 0, nil, fmt.Errorf("failed to get image config reader: %v", err)
 	}
@@ -282,13 +282,13 @@ func (c *criContainerdService) getImageInfo(ctx context.Context, ref string) (
 		return "", 0, nil, fmt.Errorf("failed to decode image config: %v", err)
 	}
 	// Get image chainID
-	diffIDs, err := image.RootFS(ctx, c.contentProvider)
+	diffIDs, err := image.RootFS(ctx, c.contentStoreService)
 	if err != nil {
 		return "", 0, nil, fmt.Errorf("failed to get image diff ids: %v", err)
 	}
 	chainID := identity.ChainID(diffIDs)
 	// Get image size
-	size, err := image.Size(ctx, c.contentProvider)
+	size, err := image.Size(ctx, c.contentStoreService)
 	if err != nil {
 		return "", 0, nil, fmt.Errorf("failed to get image size: %v", err)
 	}
@@ -325,7 +325,7 @@ func (c *criContainerdService) localResolve(ctx context.Context, ref string) (st
 		return "", fmt.Errorf("an error occurred when getting image %q from containerd image store: %v",
 			normalized.String(), err)
 	}
-	desc, err := image.Config(ctx, c.contentProvider)
+	desc, err := image.Config(ctx, c.contentStoreService)
 	if err != nil {
 		return "", fmt.Errorf("failed to get image config descriptor: %v", err)
 	}
@@ -17,13 +17,18 @@ limitations under the License.
 package server
 
 import (
+	gocontext "context"
 	"encoding/json"
 	"fmt"
+	"net/http"
+	"sync"
+	"time"
 
 	"github.com/containerd/containerd/content"
 	containerdimages "github.com/containerd/containerd/images"
 	"github.com/containerd/containerd/remotes"
 	"github.com/containerd/containerd/remotes/docker"
+	rootfsservice "github.com/containerd/containerd/services/rootfs"
 	"github.com/golang/glog"
 	imagedigest "github.com/opencontainers/go-digest"
 	imagespec "github.com/opencontainers/image-spec/specs-go/v1"
@@ -72,7 +77,6 @@ import (
 // contents are missing but snapshots are ready, is the image still "READY"?
 
 // PullImage pulls an image with authentication config.
 // TODO(mikebrow): add authentication
 // TODO(mikebrow): harden api (including figuring out at what layer we should be blocking on duplicate requests.)
 func (c *criContainerdService) PullImage(ctx context.Context, r *runtime.PullImageRequest) (retRes *runtime.PullImageResponse, retErr error) {
 	glog.V(2).Infof("PullImage %q with auth config %+v", r.GetImage().GetImage(), r.GetAuth())
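For context, PullImage is the CRI-facing entry point that a kubelet or CRI test client would call over gRPC. The snippet below is a hedged, illustrative caller-side sketch and not part of this PR: the request field names follow the getters used above (GetImage().GetImage(), GetAuth()), and the runtime import path is assumed from the era of this repository.

package criclient

import (
	"fmt"

	"golang.org/x/net/context"
	runtime "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime" // assumed import path
)

// pullBusybox is a hypothetical helper: it pulls an image through a CRI image
// service client and returns the image ref reported by the server.
func pullBusybox(ctx context.Context, client runtime.ImageServiceClient) (string, error) {
	req := &runtime.PullImageRequest{
		Image: &runtime.ImageSpec{Image: "docker.io/library/busybox:latest"},
		// Auth is left nil; authentication is still a TODO in this service.
	}
	resp, err := client.PullImage(ctx, req)
	if err != nil {
		return "", fmt.Errorf("pull failed: %v", err)
	}
	return resp.GetImageRef(), nil
}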
@@ -152,13 +156,46 @@ func (c *criContainerdService) PullImage(ctx context.Context, r *runtime.PullIma
 	return &runtime.PullImageResponse{ImageRef: imageID}, err
 }
 
+// resourceSet is the helper struct to help tracking all resources associated
+// with an image.
+type resourceSet struct {
+	sync.Mutex
+	resources map[string]struct{}
+}
+
+func newResourceSet() *resourceSet {
+	return &resourceSet{resources: make(map[string]struct{})}
+}
+
+func (r *resourceSet) add(resource string) {
+	r.Lock()
+	defer r.Unlock()
+	r.resources[resource] = struct{}{}
+}
+
+// all returns an array of all resources added.
+func (r *resourceSet) all() map[string]struct{} {
+	r.Lock()
+	defer r.Unlock()
+	resources := make(map[string]struct{})
+	for resource := range r.resources {
+		resources[resource] = struct{}{}
+	}
+	return resources
+}
+
 // pullImage pulls image and returns image id (config digest) and manifest digest.
 // The ref should be normalized image reference.
 // TODO(random-liu): [P0] Wait for all downloadings to be done before return.
 func (c *criContainerdService) pullImage(ctx context.Context, ref string) (
 	imagedigest.Digest, imagedigest.Digest, error) {
 	// Resolve the image reference to get descriptor and fetcher.
-	resolver := docker.NewResolver()
+	resolver := docker.NewResolver(docker.ResolverOptions{
+		// TODO(random-liu): Add authentication by setting credentials.
+		// TODO(random-liu): Handle https.
+		PlainHTTP: true,
+		Client:    http.DefaultClient,
+	})
 	_, desc, fetcher, err := resolver.Resolve(ctx, ref)
 	if err != nil {
 		return "", "", fmt.Errorf("failed to resolve ref %q: %v", ref, err)
@@ -178,19 +215,37 @@ func (c *criContainerdService) pullImage(ctx context.Context, ref string) (
 	}
 	// TODO(random-liu): What if following operations fail? Do we need to do cleanup?
 
+	resources := newResourceSet()
+
+	glog.V(4).Infof("Start downloading resources for image %q", ref)
 	// Fetch all image resources into content store.
 	// Dispatch a handler which will run a sequence of handlers to:
-	// 1) fetch the object using a FetchHandler;
-	// 2) recurse through any sub-layers via a ChildrenHandler.
+	// 1) track all resources associated using a customized handler;
+	// 2) fetch the object using a FetchHandler;
+	// 3) recurse through any sub-layers via a ChildrenHandler.
 	err = containerdimages.Dispatch(
 		ctx,
 		containerdimages.Handlers(
-			remotes.FetchHandler(c.contentIngester, fetcher),
-			containerdimages.ChildrenHandler(c.contentProvider)),
+			containerdimages.HandlerFunc(func(ctx gocontext.Context, desc imagespec.Descriptor) (
+				[]imagespec.Descriptor, error) {
+				resources.add(remotes.MakeRefKey(ctx, desc))
+				return nil, nil
+			}),
+			remotes.FetchHandler(c.contentStoreService, fetcher),
+			containerdimages.ChildrenHandler(c.contentStoreService)),
 		desc)
 	if err != nil {
-		return "", "", fmt.Errorf("failed to fetch image %q desc %+v: %v", ref, desc, err)
+		// Dispatch returns error when requested resources are locked.
+		// In that case, we should start waiting and checking the pulling
+		// progress.
+		// TODO(random-liu): Check specific resource locked error type.
+		glog.V(5).Infof("Dispatch for %q returns error: %v", ref, err)
 	}
+	// Wait for the image pulling to finish
+	if err := c.waitForResourcesDownloading(ctx, resources.all()); err != nil {
+		return "", "", fmt.Errorf("failed to wait for image %q downloading: %v", ref, err)
+	}
+	glog.V(4).Infof("Finished downloading resources for image %q", ref)
 
 	image, err := c.imageStoreService.Get(ctx, ref)
 	if err != nil {
@@ -198,7 +253,7 @@ func (c *criContainerdService) pullImage(ctx context.Context, ref string) (
 	}
 	// Read the image manifest from content store.
 	manifestDigest := image.Target.Digest
-	p, err := content.ReadBlob(ctx, c.contentProvider, manifestDigest)
+	p, err := content.ReadBlob(ctx, c.contentStoreService, manifestDigest)
 	if err != nil {
 		return "", "", fmt.Errorf("readblob failed for manifest digest %q: %v", manifestDigest, err)
 	}
@@ -209,18 +264,56 @@ func (c *criContainerdService) pullImage(ctx context.Context, ref string) (
 	}
 
 	// Unpack the image layers into snapshots.
-	if _, err = c.rootfsUnpacker.Unpack(ctx, manifest.Layers); err != nil {
+	rootfsUnpacker := rootfsservice.NewUnpackerFromClient(c.rootfsService)
+	if _, err = rootfsUnpacker.Unpack(ctx, manifest.Layers); err != nil {
 		return "", "", fmt.Errorf("unpack failed for manifest layers %+v: %v", manifest.Layers, err)
 	}
 	// TODO(random-liu): Considering how to deal with the disk usage of content.
 
-	configDesc, err := image.Config(ctx, c.contentProvider)
+	configDesc, err := image.Config(ctx, c.contentStoreService)
 	if err != nil {
 		return "", "", fmt.Errorf("failed to get config descriptor for image %q: %v", ref, err)
 	}
 	return configDesc.Digest, manifestDigest, nil
 }
+
+// waitDownloadingPollInterval is the interval to check resource downloading progress.
+const waitDownloadingPollInterval = 200 * time.Millisecond
+
+// waitForResourcesDownloading waits for all resource downloading to finish.
+func (c *criContainerdService) waitForResourcesDownloading(ctx context.Context, resources map[string]struct{}) error {
+	ticker := time.NewTicker(waitDownloadingPollInterval)
+	defer ticker.Stop()
+	for {
+		select {
+		case <-ticker.C:
+			// TODO(random-liu): Use better regexp when containerd `MakeRefKey` contains more
+			// information.
+			statuses, err := c.contentStoreService.Status(ctx, "")
+			if err != nil {
+				return fmt.Errorf("failed to get content status: %v", err)
+			}
+			pulling := false
+			// TODO(random-liu): Move Dispatch into a separate goroutine, so that we could report
+			// image pulling progress concurrently.
+			for _, status := range statuses {
+				_, ok := resources[status.Ref]
+				if ok {
+					glog.V(5).Infof("Pulling resource %q with progress %d/%d",
						status.Ref, status.Offset, status.Total)
+					pulling = true
+				}
+			}
+			if !pulling {
+				return nil
+			}
+		case <-ctx.Done():
+			// TODO(random-liu): Abort ongoing pulling if cancelled.
+			return fmt.Errorf("image resources pulling is cancelled")
+		}
+	}
+}
 
 // insertToStringSlice is a helper function to insert a string into the string slice
 // if the string is not in the slice yet.
 func insertToStringSlice(ss []string, s string) []string {
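A note on the loop above: waitForResourcesDownloading only returns once no tracked ref is still active, or when the context is cancelled, and the TODO acknowledges that cancellation does not yet abort the underlying pulls. A caller wanting to bound the wait can do so purely through the context; the fragment below is an illustrative sketch built from the names in this diff, with a timeout value chosen arbitrarily, and is not part of this commit.

	// Illustrative only: bound the download wait with a deadline; cancellation
	// surfaces through the ctx.Done() branch of waitForResourcesDownloading.
	ctx, cancel := context.WithTimeout(ctx, 10*time.Minute)
	defer cancel()
	if err := c.waitForResourcesDownloading(ctx, resources.all()); err != nil {
		return "", "", fmt.Errorf("failed to wait for image %q downloading: %v", ref, err)
	}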
@@ -17,6 +17,8 @@ limitations under the License.
 package server
 
 import (
+	"fmt"
+	"sync"
 	"testing"
 
 	"github.com/stretchr/testify/assert"
@@ -72,3 +74,22 @@ func TestUpdateImageMetadata(t *testing.T) {
 		assert.Equal(t, test.expectedRepoDigests, m.RepoDigests)
 	}
 }
+
+func TestResources(t *testing.T) {
+	const threads = 10
+	var wg sync.WaitGroup
+	r := newResourceSet()
+	for i := 0; i < threads; i++ {
+		wg.Add(1)
+		go func(ref string) {
+			r.add(ref)
+			wg.Done()
+		}(fmt.Sprintf("sha256:%d", i))
+	}
+	wg.Wait()
+	refs := r.all()
+	for i := 0; i < threads; i++ {
+		_, ok := refs[fmt.Sprintf("sha256:%d", i)]
+		assert.True(t, ok)
+	}
+}
@@ -29,10 +29,8 @@ import (
 	rootfsapi "github.com/containerd/containerd/api/services/rootfs"
 	"github.com/containerd/containerd/content"
 	"github.com/containerd/containerd/images"
-	"github.com/containerd/containerd/rootfs"
 	contentservice "github.com/containerd/containerd/services/content"
 	imagesservice "github.com/containerd/containerd/services/images"
-	rootfsservice "github.com/containerd/containerd/services/rootfs"
 
 	"github.com/kubernetes-incubator/cri-containerd/pkg/metadata"
 	"github.com/kubernetes-incubator/cri-containerd/pkg/metadata/store"
@@ -85,15 +83,10 @@ type criContainerdService struct {
 	containerNameIndex *registrar.Registrar
 	// containerService is containerd container service client.
 	containerService execution.ContainerServiceClient
-	// contentIngester is the containerd service to ingest content into
-	// content store.
-	contentIngester content.Ingester
-	// contentProvider is the containerd service to get content from
-	// content store.
-	contentProvider content.Provider
-	// rootfsUnpacker is the containerd service to unpack image content
-	// into snapshots.
-	rootfsUnpacker rootfs.Unpacker
+	// contentStoreService is the containerd content service client..
+	contentStoreService content.Store
+	// rootfsService is the containerd rootfs service client.
+	rootfsService rootfsapi.RootFSClient
 	// imageStoreService is the containerd service to store and track
 	// image metadata.
 	imageStoreService images.Store
@@ -103,7 +96,6 @@ type criContainerdService struct {

 // NewCRIContainerdService returns a new instance of CRIContainerdService
 func NewCRIContainerdService(conn *grpc.ClientConn, rootDir, networkPluginBinDir, networkPluginConfDir string) (CRIContainerdService, error) {
 	// TODO: Initialize different containerd clients.
 	// TODO(random-liu): [P2] Recover from runtime state and metadata store.
 	c := &criContainerdService{
 		os: osinterface.RealOS{},
@@ -119,9 +111,8 @@ func NewCRIContainerdService(conn *grpc.ClientConn, rootDir, networkPluginBinDir
 		containerNameIndex: registrar.NewRegistrar(),
 		containerService:   execution.NewContainerServiceClient(conn),
 		imageStoreService:  imagesservice.NewStoreFromClient(imagesapi.NewImagesClient(conn)),
-		contentIngester:    contentservice.NewIngesterFromClient(contentapi.NewContentClient(conn)),
-		contentProvider:    contentservice.NewProviderFromClient(contentapi.NewContentClient(conn)),
-		rootfsUnpacker:     rootfsservice.NewUnpackerFromClient(rootfsapi.NewRootFSClient(conn)),
+		contentStoreService: contentservice.NewStoreFromClient(contentapi.NewContentClient(conn)),
+		rootfsService:       rootfsapi.NewRootFSClient(conn),
 	}

 	netPlugin, err := ocicni.InitCNI(networkPluginBinDir, networkPluginConfDir)
@@ -359,3 +359,15 @@ func (f *FakeExecutionClient) CloseStdin(ctx context.Context, closeStdinOpts *ex
 	// TODO: implement CloseStdin()
 	return nil, nil
 }
+
+// Pause is a test implementation of execution.Pause
+func (f *FakeExecutionClient) Pause(ctx context.Context, in *execution.PauseRequest, opts ...grpc.CallOption) (*googleprotobuf.Empty, error) {
+	// TODO: implement Pause()
+	return nil, nil
+}
+
+// Resume is a test implementation of execution.Resume
+func (f *FakeExecutionClient) Resume(ctx context.Context, in *execution.ResumeRequest, opts ...grpc.CallOption) (*googleprotobuf.Empty, error) {
+	// TODO: implement Resume()
+	return nil, nil
+}
3  vendor/github.com/containerd/containerd/.travis.yml  (generated, vendored)

@@ -29,9 +29,10 @@ install:
 script:
 - export GOOS=$TRAVIS_GOOS
 - export CGO_ENABLED=$TRAVIS_CGO_ENABLED
+- GIT_CHECK_EXCLUDE="./vendor" TRAVIS_COMMIT_RANGE="${TRAVIS_COMMIT_RANGE/.../..}" make dco
 - make fmt
 - make vet
 - make binaries
-- TRAVIS_COMMIT_RANGE="${TRAVIS_COMMIT_RANGE/.../..}" make dco
 - if [ "$GOOS" != "windows" ]; then make coverage ; fi
 - if [ "$GOOS" != "windows" ]; then sudo PATH=$PATH GOPATH=$GOPATH make root-coverage ; fi
17  vendor/github.com/containerd/containerd/MAINTAINERS  (generated, vendored)

@@ -59,6 +59,23 @@ If a candidate is approved, a maintainer will contact the candidate to invite
 the candidate to open a pull request that adds the contributor to the
 MAINTAINERS file. The candidate becomes a maintainer once the pull request is
 merged.
 """
 
+		[Rules.adding-sub-projects]
+
+			title = "How are sub projects added?"
+
+			text = """
+Similar to adding maintainers, new sub projects can be added to containerd
+GitHub organization as long as they adhere to the CNCF
+[charter](https://www.cncf.io/about/charter/) and mission. After a project
+proposal has been announced on a public forum (GitHub issue or mailing list),
+the existing maintainers are given five business days to discuss the new
+project, raise objections and cast their vote. Projects must be approved by at
+least 66% of the current maintainers by adding their vote.
+
+If a project is approved, a maintainer will add the project to the containerd
+GitHub organization, and make an announcement on a public forum.
+"""
+
 	[Rules.stepping-down-policy]
4  vendor/github.com/containerd/containerd/Makefile  (generated, vendored)

@@ -28,7 +28,7 @@ SNAPSHOT_PACKAGES=$(shell go list ./snapshot/...)
 
 # Project binaries.
 COMMANDS=ctr containerd protoc-gen-gogoctrd dist ctrd-protobuild
-ifneq ("$(GOOS)", "windows")
+ifeq ("$(GOOS)", "linux")
 	COMMANDS += containerd-shim
 endif
 BINARIES=$(addprefix bin/,$(COMMANDS))
@@ -79,7 +79,7 @@ checkprotos: protos ## check if protobufs needs to be generated again
 # imports
 vet: binaries ## run go vet
 	@echo "$(WHALE) $@"
-	@test -z "$$(go vet ${PACKAGES} 2>&1 | grep -v 'constant [0-9]* not a string in call to Errorf' | egrep -v '(timestamp_test.go|duration_test.go|exit status 1)' | tee /dev/stderr)"
+	@test -z "$$(go vet ${PACKAGES} 2>&1 | grep -v 'constant [0-9]* not a string in call to Errorf' | grep -v 'unrecognized printf verb 'r'' | egrep -v '(timestamp_test.go|duration_test.go|fetch.go|exit status 1)' | tee /dev/stderr)"
 
 fmt: ## run go fmt
 	@echo "$(WHALE) $@"
1  vendor/github.com/containerd/containerd/README.md  (generated, vendored)

@@ -1,6 +1,7 @@
 
 
 [](https://travis-ci.org/containerd/containerd)
+[](https://app.fossa.io/projects/git%2Bhttps%3A%2F%2Fgithub.com%2Fcontainerd%2Fcontainerd?ref=badge_shield)
 
 containerd is an industry-standard container runtime with an emphasis on simplicity, robustness and portability. It is available as a daemon for Linux and Windows, which can manage the complete container lifecycle of its host system: image transfer and storage, container execution and supervision, low-level storage and network attachments, etc..
 
1819  vendor/github.com/containerd/containerd/api/services/content/content.pb.go  (generated, vendored)

(File diff suppressed because it is too large.)
77  vendor/github.com/containerd/containerd/api/services/content/content.proto  (generated, vendored)

@@ -14,6 +14,14 @@ service Content {
 	// existence.
 	rpc Info(InfoRequest) returns (InfoResponse);
 
+	// List streams the entire set of content as Info objects and closes the
+	// stream.
+	//
+	// Typically, this will yield a large response, chunked into messages.
+	// Clients should make provisions to ensure they can handle the entire data
+	// set.
+	rpc List(ListContentRequest) returns (stream ListContentResponse);
+
 	// Delete will delete the referenced object.
 	rpc Delete(DeleteContentRequest) returns (google.protobuf.Empty);
 
@@ -25,9 +33,10 @@ service Content {
 	// Status returns the status of ongoing object ingestions, started via
 	// Write.
 	//
-	// For active ingestions, the status will be streamed until the client
-	// closes the connection or all matched ingestions are committed.
-	rpc Status(StatusRequest) returns (stream StatusResponse);
+	// Only those matching the regular expression will be provided in the
+	// response. If the provided regular expression is empty, all ingestions
+	// will be provided.
+	rpc Status(StatusRequest) returns (StatusResponse);
 
 	// Write begins or resumes writes to a resource identified by a unique ref.
 	// Only one active stream may exist at a time for each ref.
@@ -46,13 +55,13 @@ service Content {
 	// When completed, the commit flag should be set to true. If expected size
 	// or digest is set, the content will be validated against those values.
 	rpc Write(stream WriteRequest) returns (stream WriteResponse);
 
+	// Abort cancels the ongoing write named in the request. Any resources
+	// associated with the write will be collected.
+	rpc Abort(AbortRequest) returns (google.protobuf.Empty);
 }
 
-message InfoRequest {
-	string digest = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
-}
-
-message InfoResponse {
+message Info {
 	// Digest is the hash identity of the blob.
 	string digest = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
 
@@ -63,6 +72,20 @@ message InfoResponse {
 	google.protobuf.Timestamp committed_at = 3 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
 }
 
+message InfoRequest {
+	string digest = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
+}
+
+message InfoResponse {
+	Info info = 1 [(gogoproto.nullable) = false];
+}
+
+message ListContentRequest {}
+
+message ListContentResponse {
+	repeated Info info = 1 [(gogoproto.nullable) = false];
+}
+
 message DeleteContentRequest {
 	// Digest specifies which content to delete.
 	string digest = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
@@ -90,6 +113,22 @@ message ReadResponse {
 	bytes data = 2; // actual data
 }
 
+message StatusRequest {
+	string regexp = 1;
+}
+
+message Status {
+	google.protobuf.Timestamp started_at = 1 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
+	google.protobuf.Timestamp updated_at = 2 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
+	string ref = 3;
+	int64 offset = 4;
+	int64 total = 5;
+	string expected = 6 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
+}
+
+message StatusResponse {
+	repeated Status statuses = 1 [(gogoproto.nullable) = false];
+}
+
 // WriteAction defines the behavior of a WriteRequest.
 enum WriteAction {
@@ -116,12 +155,6 @@ enum WriteAction {
 	//
 	// This action will always terminate the write.
 	COMMIT = 2 [(gogoproto.enumvalue_customname) = "WriteActionCommit"];
 
-	// WriteActionAbort will release any resources associated with the write
-	// and free up the ref for a completely new set of writes.
-	//
-	// This action will always terminate the write.
-	ABORT = -1 [(gogoproto.enumvalue_customname) = "WriteActionAbort"];
 }
 
 // WriteRequest writes data to the request ref at offset.
@@ -213,20 +246,10 @@ message WriteResponse {
 
 	// Digest, if present, includes the digest up to the currently committed
 	// bytes. If action is commit, this field will be set. It is implementation
-	// defined if this is set for other actions, except abort. On abort, this
-	// will be empty.
+	// defined if this is set for other actions.
 	string digest = 6 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
 }
 
-message StatusRequest {
-	repeated string refs = 1;
-	repeated string prefix = 2;
-}
-
-message StatusResponse {
-	google.protobuf.Timestamp started_at = 1 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
-	google.protobuf.Timestamp updated_at = 2 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
-	string ref = 3;
-	int64 offset = 4;
-	int64 total = 5;
+message AbortRequest {
+	string ref = 1;
 }
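The unary Status RPC above is what cri-containerd's waitForResourcesDownloading polls; the current code passes an empty filter and matches refs client-side, per the TODO about MakeRefKey. The sketch below is an illustrative helper, not part of this commit, showing how a caller could instead build a server-side filter string for the StatusRequest.regexp field using the regexp semantics documented above; the example ref keys are hypothetical.

package main

import (
	"fmt"
	"regexp"
	"strings"
)

// statusFilterFor builds a regular expression that matches exactly the given
// ingestion refs, suitable for the StatusRequest.regexp field described above.
func statusFilterFor(refs []string) string {
	quoted := make([]string, 0, len(refs))
	for _, ref := range refs {
		quoted = append(quoted, regexp.QuoteMeta(ref))
	}
	return fmt.Sprintf("^(%s)$", strings.Join(quoted, "|"))
}

func main() {
	refs := []string{"layer-sha256:aaa", "config-sha256:bbb"} // hypothetical ref keys
	fmt.Println(statusFilterFor(refs))
}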
439  vendor/github.com/containerd/containerd/api/services/execution/execution.pb.go  (generated, vendored)

@@ -23,6 +23,8 @@
 		ExecResponse
 		PtyRequest
 		CloseStdinRequest
+		PauseRequest
+		ResumeRequest
 */
 package execution
 
@@ -193,6 +195,22 @@ func (m *CloseStdinRequest) Reset()         { *m = CloseStdinRequest{
 func (*CloseStdinRequest) ProtoMessage()    {}
 func (*CloseStdinRequest) Descriptor() ([]byte, []int) { return fileDescriptorExecution, []int{13} }
 
+type PauseRequest struct {
+	ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+}
+
+func (m *PauseRequest) Reset()                    { *m = PauseRequest{} }
+func (*PauseRequest) ProtoMessage()               {}
+func (*PauseRequest) Descriptor() ([]byte, []int) { return fileDescriptorExecution, []int{14} }
+
+type ResumeRequest struct {
+	ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+}
+
+func (m *ResumeRequest) Reset()                    { *m = ResumeRequest{} }
+func (*ResumeRequest) ProtoMessage()               {}
+func (*ResumeRequest) Descriptor() ([]byte, []int) { return fileDescriptorExecution, []int{15} }
+
 func init() {
 	proto.RegisterType((*CreateRequest)(nil), "containerd.v1.services.CreateRequest")
 	proto.RegisterType((*CreateResponse)(nil), "containerd.v1.services.CreateResponse")
@@ -208,6 +226,8 @@ func init() {
 	proto.RegisterType((*ExecResponse)(nil), "containerd.v1.services.ExecResponse")
 	proto.RegisterType((*PtyRequest)(nil), "containerd.v1.services.PtyRequest")
 	proto.RegisterType((*CloseStdinRequest)(nil), "containerd.v1.services.CloseStdinRequest")
+	proto.RegisterType((*PauseRequest)(nil), "containerd.v1.services.PauseRequest")
+	proto.RegisterType((*ResumeRequest)(nil), "containerd.v1.services.ResumeRequest")
 }
 
 // Reference imports to suppress errors if they are not otherwise used.
@@ -231,6 +251,8 @@ type ContainerServiceClient interface {
 	Exec(ctx context.Context, in *ExecRequest, opts ...grpc.CallOption) (*ExecResponse, error)
 	Pty(ctx context.Context, in *PtyRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error)
 	CloseStdin(ctx context.Context, in *CloseStdinRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error)
+	Pause(ctx context.Context, in *PauseRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error)
+	Resume(ctx context.Context, in *ResumeRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error)
 }
 
 type containerServiceClient struct {
@@ -354,6 +376,24 @@ func (c *containerServiceClient) CloseStdin(ctx context.Context, in *CloseStdinR
 	return out, nil
 }
 
+func (c *containerServiceClient) Pause(ctx context.Context, in *PauseRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error) {
+	out := new(google_protobuf.Empty)
+	err := grpc.Invoke(ctx, "/containerd.v1.services.ContainerService/Pause", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *containerServiceClient) Resume(ctx context.Context, in *ResumeRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error) {
+	out := new(google_protobuf.Empty)
+	err := grpc.Invoke(ctx, "/containerd.v1.services.ContainerService/Resume", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
 // Server API for ContainerService service
 
 type ContainerServiceServer interface {
@@ -367,6 +407,8 @@ type ContainerServiceServer interface {
 	Exec(context.Context, *ExecRequest) (*ExecResponse, error)
 	Pty(context.Context, *PtyRequest) (*google_protobuf.Empty, error)
 	CloseStdin(context.Context, *CloseStdinRequest) (*google_protobuf.Empty, error)
+	Pause(context.Context, *PauseRequest) (*google_protobuf.Empty, error)
+	Resume(context.Context, *ResumeRequest) (*google_protobuf.Empty, error)
 }
 
 func RegisterContainerServiceServer(s *grpc.Server, srv ContainerServiceServer) {
@@ -556,6 +598,42 @@ func _ContainerService_CloseStdin_Handler(srv interface{}, ctx context.Context,
 	return interceptor(ctx, in, info, handler)
 }
 
+func _ContainerService_Pause_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(PauseRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(ContainerServiceServer).Pause(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/containerd.v1.services.ContainerService/Pause",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(ContainerServiceServer).Pause(ctx, req.(*PauseRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _ContainerService_Resume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(ResumeRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(ContainerServiceServer).Resume(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/containerd.v1.services.ContainerService/Resume",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(ContainerServiceServer).Resume(ctx, req.(*ResumeRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
 var _ContainerService_serviceDesc = grpc.ServiceDesc{
 	ServiceName: "containerd.v1.services.ContainerService",
 	HandlerType: (*ContainerServiceServer)(nil),
@@ -596,6 +674,14 @@ var _ContainerService_serviceDesc = grpc.ServiceDesc{
 			MethodName: "CloseStdin",
 			Handler:    _ContainerService_CloseStdin_Handler,
 		},
+		{
+			MethodName: "Pause",
+			Handler:    _ContainerService_Pause_Handler,
+		},
+		{
+			MethodName: "Resume",
+			Handler:    _ContainerService_Resume_Handler,
+		},
 	},
 	Streams: []grpc.StreamDesc{
 		{
@@ -1083,6 +1169,54 @@ func (m *CloseStdinRequest) MarshalTo(dAtA []byte) (int, error) {
 	return i, nil
 }
 
+func (m *PauseRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *PauseRequest) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.ID) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintExecution(dAtA, i, uint64(len(m.ID)))
+		i += copy(dAtA[i:], m.ID)
+	}
+	return i, nil
+}
+
+func (m *ResumeRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ResumeRequest) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.ID) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintExecution(dAtA, i, uint64(len(m.ID)))
+		i += copy(dAtA[i:], m.ID)
+	}
+	return i, nil
+}
+
 func encodeFixed64Execution(dAtA []byte, offset int, v uint64) int {
 	dAtA[offset] = uint8(v)
 	dAtA[offset+1] = uint8(v >> 8)
@@ -1317,6 +1451,26 @@ func (m *CloseStdinRequest) Size() (n int) {
 	return n
 }
 
+func (m *PauseRequest) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.ID)
+	if l > 0 {
+		n += 1 + l + sovExecution(uint64(l))
+	}
+	return n
+}
+
+func (m *ResumeRequest) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.ID)
+	if l > 0 {
+		n += 1 + l + sovExecution(uint64(l))
+	}
+	return n
+}
+
 func sovExecution(x uint64) (n int) {
 	for {
 		n++
@@ -1489,6 +1643,26 @@ func (this *CloseStdinRequest) String() string {
 	}, "")
 	return s
 }
+func (this *PauseRequest) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&PauseRequest{`,
+		`ID:` + fmt.Sprintf("%v", this.ID) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ResumeRequest) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&ResumeRequest{`,
+		`ID:` + fmt.Sprintf("%v", this.ID) + `,`,
+		`}`,
+	}, "")
+	return s
+}
 func valueToStringExecution(v interface{}) string {
 	rv := reflect.ValueOf(v)
 	if rv.IsNil() {
@@ -3060,6 +3234,164 @@ func (m *CloseStdinRequest) Unmarshal(dAtA []byte) error {
 	}
 	return nil
 }
+func (m *PauseRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowExecution
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: PauseRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: PauseRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowExecution
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthExecution
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ID = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipExecution(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthExecution
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ResumeRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowExecution
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ResumeRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ResumeRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowExecution
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthExecution
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ID = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipExecution(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthExecution
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
 func skipExecution(dAtA []byte) (n int, err error) {
 	l := len(dAtA)
 	iNdEx := 0
@@ -3170,56 +3502,59 @@
 }
 
 var fileDescriptorExecution = []byte{
-	// 814 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x55, 0xcf, 0x6f, 0xe2, 0x46,
-	[... remaining bytes of the previous gzipped descriptor ...]
+	// 854 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x55, 0x4f, 0x8f, 0xdb, 0x44,
+	[... remaining bytes of the regenerated gzipped descriptor ...]
|
||||
0x29, 0x94, 0xa5, 0xc7, 0x54, 0x58, 0x15, 0xfa, 0x36, 0x4b, 0xe2, 0x1c, 0x41, 0xfd, 0x82, 0xaf,
|
||||
0x90, 0xa3, 0x0b, 0xde, 0xea, 0x54, 0xdb, 0xe4, 0x2b, 0x80, 0xad, 0xce, 0xd0, 0x67, 0x5a, 0x7e,
|
||||
0xee, 0x6a, 0x51, 0x5b, 0x70, 0x02, 0x4d, 0xa9, 0x3c, 0x3d, 0x75, 0x8b, 0xc2, 0xd4, 0x96, 0x79,
|
||||
0x06, 0xa6, 0x12, 0xa6, 0x7e, 0xf8, 0x25, 0xe1, 0xea, 0x0a, 0x9d, 0x59, 0xb7, 0x6f, 0xbb, 0x07,
|
||||
0xff, 0xbc, 0xed, 0x1e, 0xfc, 0xba, 0xee, 0x1a, 0xb7, 0xeb, 0xae, 0xf1, 0xf7, 0xba, 0x6b, 0xfc,
|
||||
0xb7, 0xee, 0x1a, 0x97, 0xa6, 0x8c, 0xfc, 0xf2, 0xff, 0x00, 0x00, 0x00, 0xff, 0xff, 0x67, 0xba,
|
||||
0xdb, 0xdc, 0x03, 0x0a, 0x00, 0x00,
|
||||
}
|
||||
|
10
vendor/github.com/containerd/containerd/api/services/execution/execution.proto
generated
vendored
@ -20,6 +20,8 @@ service ContainerService {
rpc Exec(ExecRequest) returns (ExecResponse);
rpc Pty(PtyRequest) returns (google.protobuf.Empty);
rpc CloseStdin(CloseStdinRequest) returns (google.protobuf.Empty);
rpc Pause(PauseRequest) returns (google.protobuf.Empty);
rpc Resume(ResumeRequest) returns (google.protobuf.Empty);
}

message CreateRequest {
@ -96,3 +98,11 @@ message CloseStdinRequest {
string id = 1 [(gogoproto.customname) = "ID"];
uint32 pid = 2;
}

message PauseRequest {
string id = 1 [(gogoproto.customname) = "ID"];
}

message ResumeRequest {
string id = 1 [(gogoproto.customname) = "ID"];
}
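The hunk above adds Pause and Resume RPCs to ContainerService. As a hedged illustration, not part of this commit, a caller using the generated Go bindings in the vendored execution package might look like the sketch below; the client constructor and field names follow standard gogo/protobuf-grpc generation and the connection is assumed to be an already-dialed containerd GRPC connection.

package main

import (
	"context"

	"github.com/containerd/containerd/api/services/execution"
	"google.golang.org/grpc"
)

// pauseAndResume pauses the container identified by id and immediately
// resumes it, using the Pause/Resume RPCs added in the hunk above.
func pauseAndResume(ctx context.Context, conn *grpc.ClientConn, id string) error {
	client := execution.NewContainerServiceClient(conn)
	if _, err := client.Pause(ctx, &execution.PauseRequest{ID: id}); err != nil {
		return err
	}
	_, err := client.Resume(ctx, &execution.ResumeRequest{ID: id})
	return err
}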
46
vendor/github.com/containerd/containerd/archive/tar.go
generated
vendored
@ -263,7 +263,8 @@ type changeWriter struct {
|
||||
tw *tar.Writer
|
||||
source string
|
||||
whiteoutT time.Time
|
||||
inodeCache map[uint64]string
|
||||
inodeSrc map[uint64]string
|
||||
inodeRefs map[uint64][]string
|
||||
}
|
||||
|
||||
func newChangeWriter(w io.Writer, source string) *changeWriter {
|
||||
@ -271,7 +272,8 @@ func newChangeWriter(w io.Writer, source string) *changeWriter {
|
||||
tw: tar.NewWriter(w),
|
||||
source: source,
|
||||
whiteoutT: time.Now(),
|
||||
inodeCache: map[uint64]string{},
|
||||
inodeSrc: map[uint64]string{},
|
||||
inodeRefs: map[uint64][]string{},
|
||||
}
|
||||
}
|
||||
|
||||
@ -334,15 +336,28 @@ func (cw *changeWriter) HandleChange(k fs.ChangeKind, p string, f os.FileInfo, e
|
||||
return errors.Wrap(err, "failed to set device headers")
|
||||
}
|
||||
|
||||
linkname, err := fs.GetLinkSource(name, f, cw.inodeCache)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to get hardlink")
|
||||
}
|
||||
|
||||
if linkname != "" {
|
||||
// additionalLinks stores file names which must be linked to
|
||||
// this file when this file is added
|
||||
var additionalLinks []string
|
||||
inode, isHardlink := fs.GetLinkInfo(f)
|
||||
if isHardlink {
|
||||
// If the inode has a source, always link to it
|
||||
if source, ok := cw.inodeSrc[inode]; ok {
|
||||
hdr.Typeflag = tar.TypeLink
|
||||
hdr.Linkname = linkname
|
||||
hdr.Linkname = source
|
||||
hdr.Size = 0
|
||||
} else {
|
||||
if k == fs.ChangeKindUnmodified {
|
||||
cw.inodeRefs[inode] = append(cw.inodeRefs[inode], name)
|
||||
return nil
|
||||
}
|
||||
cw.inodeSrc[inode] = name
|
||||
additionalLinks = cw.inodeRefs[inode]
|
||||
delete(cw.inodeRefs, inode)
|
||||
}
|
||||
} else if k == fs.ChangeKindUnmodified {
|
||||
// Nothing to write to diff
|
||||
return nil
|
||||
}
|
||||
|
||||
if capability, err := getxattr(source, "security.capability"); err != nil {
|
||||
@ -374,6 +389,19 @@ func (cw *changeWriter) HandleChange(k fs.ChangeKind, p string, f os.FileInfo, e
|
||||
return errors.New("short write copying file")
|
||||
}
|
||||
}
|
||||
|
||||
if additionalLinks != nil {
|
||||
source = hdr.Name
|
||||
for _, extra := range additionalLinks {
|
||||
hdr.Name = extra
|
||||
hdr.Typeflag = tar.TypeLink
|
||||
hdr.Linkname = source
|
||||
hdr.Size = 0
|
||||
if err := cw.tw.WriteHeader(hdr); err != nil {
|
||||
return errors.Wrap(err, "failed to write file header")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
@ -1,3 +1,5 @@
// +build !windows

package archive

import (
@ -6,9 +8,9 @@ import (
"sync"
"syscall"

"github.com/containerd/continuity/sysx"
"github.com/opencontainers/runc/libcontainer/system"
"github.com/pkg/errors"
"github.com/stevvooe/continuity/sysx"
)

func tarName(p string) (string, error) {
14
vendor/github.com/containerd/containerd/archive/time_darwin.go
generated
vendored
Normal file
@ -0,0 +1,14 @@
package archive

import (
"time"

"github.com/pkg/errors"
)

// as at MacOS 10.12 there is apparently no way to set timestamps
// with nanosecond precision. We could fall back to utimes/lutimes
// and lose the precision as a temporary workaround.
func chtimes(path string, atime, mtime time.Time) error {
return errors.New("OSX missing UtimesNanoAt")
}
@ -1,3 +1,5 @@
// +build linux freebsd

package archive

import (
7
vendor/github.com/containerd/containerd/container.go
generated
vendored
@ -14,6 +14,10 @@ type Container interface {
Start(context.Context) error
// State returns the container's state
State(context.Context) (State, error)
// Pause pauses the container process
Pause(context.Context) error
// Resume unpauses the container process
Resume(context.Context) error
// Kill signals a container
Kill(context.Context, uint32, bool) error
// Exec adds a process into the container
@ -26,9 +30,6 @@ type Container interface {

type LinuxContainer interface {
Container

Pause(context.Context) error
Resume(context.Context) error
}

type ExecOpts struct {
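Pause, Resume and State move onto the main Container interface above (LinuxContainer now only embeds Container). A minimal sketch, assuming a containerd.Container value obtained elsewhere, of wrapping work in a pause/resume pair:

package main

import (
	"context"

	"github.com/containerd/containerd"
)

// withPaused runs fn while the container is paused and resumes it afterwards.
// Error handling for the deferred Resume is elided for brevity.
func withPaused(ctx context.Context, c containerd.Container, fn func() error) error {
	if err := c.Pause(ctx); err != nil {
		return err
	}
	defer c.Resume(ctx)
	return fn()
}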
54
vendor/github.com/containerd/containerd/content/content.go
generated
vendored
@ -28,24 +28,62 @@ var (
|
||||
}
|
||||
)
|
||||
|
||||
type Provider interface {
|
||||
Reader(ctx context.Context, dgst digest.Digest) (io.ReadCloser, error)
|
||||
}
|
||||
|
||||
type Ingester interface {
|
||||
Writer(ctx context.Context, ref string, size int64, expected digest.Digest) (Writer, error)
|
||||
}
|
||||
|
||||
// TODO(stevvooe): Consider a very different name for this struct. Info is way
|
||||
// to general. It also reads very weird in certain context, like pluralization.
|
||||
type Info struct {
|
||||
Digest digest.Digest
|
||||
Size int64
|
||||
CommittedAt time.Time
|
||||
}
|
||||
|
||||
type Provider interface {
|
||||
Reader(ctx context.Context, dgst digest.Digest) (io.ReadCloser, error)
|
||||
}
|
||||
|
||||
type Status struct {
|
||||
Ref string
|
||||
Offset int64
|
||||
Total int64
|
||||
Expected digest.Digest
|
||||
StartedAt time.Time
|
||||
UpdatedAt time.Time
|
||||
}
|
||||
|
||||
// WalkFunc defines the callback for a blob walk.
|
||||
type WalkFunc func(Info) error
|
||||
|
||||
// Manager provides methods for inspecting, listing and removing content.
|
||||
type Manager interface {
|
||||
// Info will return metadata about content available in the content store.
|
||||
//
|
||||
// If the content is not present, ErrNotFound will be returned.
|
||||
Info(ctx context.Context, dgst digest.Digest) (Info, error)
|
||||
|
||||
// Walk will call fn for each item in the content store.
|
||||
Walk(ctx context.Context, fn WalkFunc) error
|
||||
|
||||
// Delete removes the content from the store.
|
||||
Delete(ctx context.Context, dgst digest.Digest) error
|
||||
|
||||
// Status returns the status of any active ingestions whose ref match the
|
||||
// provided regular expression. If empty, all active ingestions will be
|
||||
// returned.
|
||||
//
|
||||
// TODO(stevvooe): Status may be slighly out of place here. If this remains
|
||||
// here, we should remove Manager and just define these on store.
|
||||
Status(ctx context.Context, re string) ([]Status, error)
|
||||
|
||||
// Abort completely cancels the ingest operation targeted by ref.
|
||||
//
|
||||
// TODO(stevvooe): Same consideration as above. This should really be
|
||||
// restricted to an ingest management interface.
|
||||
Abort(ctx context.Context, ref string) error
|
||||
}
|
||||
|
||||
type Writer interface {
|
||||
io.WriteCloser
|
||||
Status() (Status, error)
|
||||
@ -54,8 +92,12 @@ type Writer interface {
|
||||
Truncate(size int64) error
|
||||
}
|
||||
|
||||
type Ingester interface {
|
||||
Writer(ctx context.Context, ref string, size int64, expected digest.Digest) (Writer, error)
|
||||
// Store combines the methods of content-oriented interfaces into a set that
|
||||
// are commonly provided by complete implementations.
|
||||
type Store interface {
|
||||
Manager
|
||||
Ingester
|
||||
Provider
|
||||
}
|
||||
|
||||
func IsNotFound(err error) bool {
|
||||
|
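The reshuffled content interfaces above split reading (Provider), writing (Ingester) and management (Manager), with Store combining all three. A small usage sketch, not part of the vendored code, that stats and reads one blob from any Store implementation (the go-digest import path is assumed):

package main

import (
	"context"
	"fmt"
	"io"
	"io/ioutil"

	"github.com/containerd/containerd/content"
	"github.com/opencontainers/go-digest"
)

// readBlob returns the bytes of a blob after checking its metadata.
func readBlob(ctx context.Context, cs content.Store, dgst digest.Digest) ([]byte, error) {
	info, err := cs.Info(ctx, dgst)
	if err != nil {
		return nil, err // content.IsNotFound(err) reports a missing blob
	}
	fmt.Printf("blob %s: %d bytes, committed %s\n", info.Digest, info.Size, info.CommittedAt)

	rc, err := cs.Reader(ctx, dgst)
	if err != nil {
		return nil, err
	}
	defer rc.Close()
	return ioutil.ReadAll(io.LimitReader(rc, info.Size))
}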
137
vendor/github.com/containerd/containerd/content/store.go
generated
vendored
@ -7,6 +7,7 @@ import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
@ -21,21 +22,21 @@ import (
|
||||
//
|
||||
// Store can generally support multi-reader, single-writer ingest of data,
|
||||
// including resumable ingest.
|
||||
type Store struct {
|
||||
type store struct {
|
||||
root string
|
||||
}
|
||||
|
||||
func NewStore(root string) (*Store, error) {
|
||||
func NewStore(root string) (Store, error) {
|
||||
if err := os.MkdirAll(filepath.Join(root, "ingest"), 0777); err != nil && !os.IsExist(err) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &Store{
|
||||
return &store{
|
||||
root: root,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (s *Store) Info(dgst digest.Digest) (Info, error) {
|
||||
func (s *store) Info(ctx context.Context, dgst digest.Digest) (Info, error) {
|
||||
p := s.blobPath(dgst)
|
||||
fi, err := os.Stat(p)
|
||||
if err != nil {
|
||||
@ -46,11 +47,15 @@ func (s *Store) Info(dgst digest.Digest) (Info, error) {
|
||||
return Info{}, err
|
||||
}
|
||||
|
||||
return s.info(dgst, fi), nil
|
||||
}
|
||||
|
||||
func (s *store) info(dgst digest.Digest, fi os.FileInfo) Info {
|
||||
return Info{
|
||||
Digest: dgst,
|
||||
Size: fi.Size(),
|
||||
CommittedAt: fi.ModTime(),
|
||||
}, nil
|
||||
}
|
||||
}
|
||||
|
||||
// Open returns an io.ReadCloser for the blob.
|
||||
@ -58,7 +63,7 @@ func (s *Store) Info(dgst digest.Digest) (Info, error) {
|
||||
// TODO(stevvooe): This would work much better as an io.ReaderAt in practice.
|
||||
// Right now, we are doing type assertion to tease that out, but it won't scale
|
||||
// well.
|
||||
func (s *Store) Reader(ctx context.Context, dgst digest.Digest) (io.ReadCloser, error) {
|
||||
func (s *store) Reader(ctx context.Context, dgst digest.Digest) (io.ReadCloser, error) {
|
||||
fp, err := os.Open(s.blobPath(dgst))
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
@ -74,7 +79,7 @@ func (s *Store) Reader(ctx context.Context, dgst digest.Digest) (io.ReadCloser,
|
||||
//
|
||||
// While this is safe to do concurrently, safe exist-removal logic must hold
|
||||
// some global lock on the store.
|
||||
func (cs *Store) Delete(dgst digest.Digest) error {
|
||||
func (cs *store) Delete(ctx context.Context, dgst digest.Digest) error {
|
||||
if err := os.RemoveAll(cs.blobPath(dgst)); err != nil {
|
||||
if !os.IsNotExist(err) {
|
||||
return err
|
||||
@ -88,14 +93,7 @@ func (cs *Store) Delete(dgst digest.Digest) error {
|
||||
|
||||
// TODO(stevvooe): Allow querying the set of blobs in the blob store.
|
||||
|
||||
// WalkFunc defines the callback for a blob walk.
|
||||
//
|
||||
// TODO(stevvooe): Remove the file info. Just need size and modtime. Perhaps,
|
||||
// not a huge deal, considering we have a path, but let's not just let this one
|
||||
// go without scrutiny.
|
||||
type WalkFunc func(path string, fi os.FileInfo, dgst digest.Digest) error
|
||||
|
||||
func (cs *Store) Walk(fn WalkFunc) error {
|
||||
func (cs *store) Walk(ctx context.Context, fn WalkFunc) error {
|
||||
root := filepath.Join(cs.root, "blobs")
|
||||
var alg digest.Algorithm
|
||||
return filepath.Walk(root, func(path string, fi os.FileInfo, err error) error {
|
||||
@ -133,17 +131,60 @@ func (cs *Store) Walk(fn WalkFunc) error {
|
||||
// store or extra paths not expected previously.
|
||||
}
|
||||
|
||||
return fn(path, fi, dgst)
|
||||
return fn(cs.info(dgst, fi))
|
||||
})
|
||||
}
|
||||
|
||||
// Status returns the current status of a blob by the ingest ref.
|
||||
func (s *Store) Status(ref string) (Status, error) {
|
||||
return s.status(s.ingestRoot(ref))
|
||||
func (s *store) Status(ctx context.Context, re string) ([]Status, error) {
|
||||
fp, err := os.Open(filepath.Join(s.root, "ingest"))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
defer fp.Close()
|
||||
|
||||
fis, err := fp.Readdir(-1)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
rec, err := regexp.Compile(re)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var active []Status
|
||||
for _, fi := range fis {
|
||||
p := filepath.Join(s.root, "ingest", fi.Name())
|
||||
stat, err := s.status(p)
|
||||
if err != nil {
|
||||
if !os.IsNotExist(err) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// TODO(stevvooe): This is a common error if uploads are being
|
||||
// completed while making this listing. Need to consider taking a
|
||||
// lock on the whole store to coordinate this aspect.
|
||||
//
|
||||
// Another option is to cleanup downloads asynchronously and
|
||||
// coordinate this method with the cleanup process.
|
||||
//
|
||||
// For now, we just skip them, as they really don't exist.
|
||||
continue
|
||||
}
|
||||
|
||||
if !rec.MatchString(stat.Ref) {
|
||||
continue
|
||||
}
|
||||
|
||||
active = append(active, stat)
|
||||
}
|
||||
|
||||
return active, nil
|
||||
}
|
||||
|
||||
// status works like stat above except uses the path to the ingest.
|
||||
func (s *Store) status(ingestPath string) (Status, error) {
|
||||
func (s *store) status(ingestPath string) (Status, error) {
|
||||
dp := filepath.Join(ingestPath, "data")
|
||||
fi, err := os.Stat(dp)
|
||||
if err != nil {
|
||||
@ -165,7 +206,7 @@ func (s *Store) status(ingestPath string) (Status, error) {
|
||||
}
|
||||
|
||||
// total attempts to resolve the total expected size for the write.
|
||||
func (s *Store) total(ingestPath string) int64 {
|
||||
func (s *store) total(ingestPath string) int64 {
|
||||
totalS, err := readFileString(filepath.Join(ingestPath, "total"))
|
||||
if err != nil {
|
||||
return 0
|
||||
@ -185,7 +226,10 @@ func (s *Store) total(ingestPath string) int64 {
|
||||
// ref at a time.
|
||||
//
|
||||
// The argument `ref` is used to uniquely identify a long-lived writer transaction.
|
||||
func (s *Store) Writer(ctx context.Context, ref string, total int64, expected digest.Digest) (Writer, error) {
|
||||
func (s *store) Writer(ctx context.Context, ref string, total int64, expected digest.Digest) (Writer, error) {
|
||||
// TODO(stevvooe): Need to actually store and handle expected here. We have
|
||||
// code in the service that shouldn't be dealing with this.
|
||||
|
||||
path, refp, data, lock, err := s.ingestPaths(ref)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@ -283,11 +327,11 @@ func (s *Store) Writer(ctx context.Context, ref string, total int64, expected di
|
||||
|
||||
// Abort an active transaction keyed by ref. If the ingest is active, it will
|
||||
// be cancelled. Any resources associated with the ingest will be cleaned.
|
||||
func (s *Store) Abort(ref string) error {
|
||||
func (s *store) Abort(ctx context.Context, ref string) error {
|
||||
root := s.ingestRoot(ref)
|
||||
if err := os.RemoveAll(root); err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return nil
|
||||
return ErrNotFound
|
||||
}
|
||||
|
||||
return err
|
||||
@ -296,50 +340,11 @@ func (s *Store) Abort(ref string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Store) Active() ([]Status, error) {
|
||||
fp, err := os.Open(filepath.Join(s.root, "ingest"))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
defer fp.Close()
|
||||
|
||||
fis, err := fp.Readdir(-1)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var active []Status
|
||||
for _, fi := range fis {
|
||||
p := filepath.Join(s.root, "ingest", fi.Name())
|
||||
stat, err := s.status(p)
|
||||
if err != nil {
|
||||
if !os.IsNotExist(err) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// TODO(stevvooe): This is a common error if uploads are being
|
||||
// completed while making this listing. Need to consider taking a
|
||||
// lock on the whole store to coordinate this aspect.
|
||||
//
|
||||
// Another option is to cleanup downloads asynchronously and
|
||||
// coordinate this method with the cleanup process.
|
||||
//
|
||||
// For now, we just skip them, as they really don't exist.
|
||||
continue
|
||||
}
|
||||
|
||||
active = append(active, stat)
|
||||
}
|
||||
|
||||
return active, nil
|
||||
}
|
||||
|
||||
func (cs *Store) blobPath(dgst digest.Digest) string {
|
||||
func (cs *store) blobPath(dgst digest.Digest) string {
|
||||
return filepath.Join(cs.root, "blobs", dgst.Algorithm().String(), dgst.Hex())
|
||||
}
|
||||
|
||||
func (s *Store) ingestRoot(ref string) string {
|
||||
func (s *store) ingestRoot(ref string) string {
|
||||
dgst := digest.FromString(ref)
|
||||
return filepath.Join(s.root, "ingest", dgst.Hex())
|
||||
}
|
||||
@ -351,7 +356,7 @@ func (s *Store) ingestRoot(ref string) string {
|
||||
// - data: file where data is written
|
||||
// - lock: lock file location
|
||||
//
|
||||
func (s *Store) ingestPaths(ref string) (string, string, string, lockfile.Lockfile, error) {
|
||||
func (s *store) ingestPaths(ref string) (string, string, string, lockfile.Lockfile, error) {
|
||||
var (
|
||||
fp = s.ingestRoot(ref)
|
||||
rp = filepath.Join(fp, "ref")
|
||||
|
15
vendor/github.com/containerd/containerd/content/store_linux.go
generated
vendored
Normal file
@ -0,0 +1,15 @@
package content

import (
"os"
"syscall"
"time"
)

func getStartTime(fi os.FileInfo) time.Time {
if st, ok := fi.Sys().(*syscall.Stat_t); ok {
return time.Unix(int64(st.Ctim.Sec), int64(st.Ctim.Nsec))
}

return fi.ModTime()
}
4
vendor/github.com/containerd/containerd/content/store_unix.go
generated
vendored
@ -1,4 +1,4 @@
// +build linux
// +build darwin freebsd

package content

@ -10,7 +10,7 @@ import (

func getStartTime(fi os.FileInfo) time.Time {
if st, ok := fi.Sys().(*syscall.Stat_t); ok {
return time.Unix(st.Ctim.Sec, st.Ctim.Nsec)
return time.Unix(int64(st.Ctimespec.Sec), int64(st.Ctimespec.Nsec))
}

return fi.ModTime()
2
vendor/github.com/containerd/containerd/content/writer.go
generated
vendored
@ -13,7 +13,7 @@ import (

// writer represents a write transaction against the blob store.
type writer struct {
s *Store
s *store
fp *os.File // opened data file
lock lockfile.Lockfile
path string // path to writer dir
2
vendor/github.com/containerd/containerd/fs/copy.go
generated
vendored
@ -65,7 +65,7 @@ func copyDirectory(dst, src string, inodes map[uint64]string) error {
}
continue
case (fi.Mode() & os.ModeType) == 0:
link, err := GetLinkSource(target, fi, inodes)
link, err := getLinkSource(target, fi, inodes)
if err != nil {
return errors.Wrap(err, "failed to get hardlink")
}
6
vendor/github.com/containerd/containerd/fs/copy_linux.go
generated
vendored
@ -5,8 +5,9 @@ import (
"os"
"syscall"

"github.com/containerd/continuity/sysx"
"github.com/pkg/errors"
"github.com/stevvooe/continuity/sysx"
"golang.org/x/sys/unix"
)

func copyFileInfo(fi os.FileInfo, name string) error {
@ -21,7 +22,8 @@ func copyFileInfo(fi os.FileInfo, name string) error {
}
}

if err := syscall.UtimesNano(name, []syscall.Timespec{st.Atim, st.Mtim}); err != nil {
timespec := []unix.Timespec{unix.Timespec(st.Atim), unix.Timespec(st.Mtim)}
if err := unix.UtimesNanoAt(unix.AT_FDCWD, name, timespec, unix.AT_SYMLINK_NOFOLLOW); err != nil {
return errors.Wrapf(err, "failed to utime %s", name)
}
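The copy_linux.go hunk above replaces syscall.UtimesNano with unix.UtimesNanoAt plus AT_SYMLINK_NOFOLLOW, so timestamps are restored on a symlink itself rather than on its target. A standalone sketch of that call (the path is a placeholder):

package main

import "golang.org/x/sys/unix"

// setTimesNoFollow applies atime and mtime to path without dereferencing it
// when it is a symlink, mirroring the call introduced in the hunk above.
func setTimesNoFollow(path string, atime, mtime unix.Timespec) error {
	ts := []unix.Timespec{atime, mtime}
	return unix.UtimesNanoAt(unix.AT_FDCWD, path, ts, unix.AT_SYMLINK_NOFOLLOW)
}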
65
vendor/github.com/containerd/containerd/fs/copy_unix.go
generated
vendored
Normal file
@ -0,0 +1,65 @@
|
||||
// +build darwin freebsd
|
||||
|
||||
package fs
|
||||
|
||||
import (
|
||||
"io"
|
||||
"os"
|
||||
"syscall"
|
||||
|
||||
"github.com/containerd/continuity/sysx"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
func copyFileInfo(fi os.FileInfo, name string) error {
|
||||
st := fi.Sys().(*syscall.Stat_t)
|
||||
if err := os.Lchown(name, int(st.Uid), int(st.Gid)); err != nil {
|
||||
return errors.Wrapf(err, "failed to chown %s", name)
|
||||
}
|
||||
|
||||
if (fi.Mode() & os.ModeSymlink) != os.ModeSymlink {
|
||||
if err := os.Chmod(name, fi.Mode()); err != nil {
|
||||
return errors.Wrapf(err, "failed to chmod %s", name)
|
||||
}
|
||||
}
|
||||
|
||||
if err := syscall.UtimesNano(name, []syscall.Timespec{st.Atimespec, st.Mtimespec}); err != nil {
|
||||
return errors.Wrapf(err, "failed to utime %s", name)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func copyFileContent(dst, src *os.File) error {
|
||||
buf := bufferPool.Get().([]byte)
|
||||
_, err := io.CopyBuffer(dst, src, buf)
|
||||
bufferPool.Put(buf)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func copyXAttrs(dst, src string) error {
|
||||
xattrKeys, err := sysx.LListxattr(src)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to list xattrs on %s", src)
|
||||
}
|
||||
for _, xattr := range xattrKeys {
|
||||
data, err := sysx.LGetxattr(src, xattr)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to get xattr %q on %s", xattr, src)
|
||||
}
|
||||
if err := sysx.LSetxattr(dst, xattr, data, 0); err != nil {
|
||||
return errors.Wrapf(err, "failed to set xattr %q on %s", xattr, dst)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func copyDevice(dst string, fi os.FileInfo) error {
|
||||
st, ok := fi.Sys().(*syscall.Stat_t)
|
||||
if !ok {
|
||||
return errors.New("unsupported stat type")
|
||||
}
|
||||
return syscall.Mknod(dst, uint32(fi.Mode()), int(st.Rdev))
|
||||
}
|
11
vendor/github.com/containerd/containerd/fs/diff.go
generated
vendored
@ -16,9 +16,13 @@ import (
|
||||
type ChangeKind int
|
||||
|
||||
const (
|
||||
// ChangeKindUnmodified represents an unmodified
|
||||
// file
|
||||
ChangeKindUnmodified = iota
|
||||
|
||||
// ChangeKindAdd represents an addition of
|
||||
// a file
|
||||
ChangeKindAdd = iota
|
||||
ChangeKindAdd
|
||||
|
||||
// ChangeKindModify represents a change to
|
||||
// an existing file
|
||||
@ -31,6 +35,8 @@ const (
|
||||
|
||||
func (k ChangeKind) String() string {
|
||||
switch k {
|
||||
case ChangeKindUnmodified:
|
||||
return "unmodified"
|
||||
case ChangeKindAdd:
|
||||
return "add"
|
||||
case ChangeKindModify:
|
||||
@ -287,8 +293,11 @@ func doubleWalkDiff(ctx context.Context, changeFn ChangeFunc, a, b string) (err
|
||||
f1 = nil
|
||||
f2 = nil
|
||||
if same {
|
||||
if !isLinked(f) {
|
||||
continue
|
||||
}
|
||||
k = ChangeKindUnmodified
|
||||
}
|
||||
}
|
||||
if err := changeFn(k, p, f, nil); err != nil {
|
||||
return err
|
||||
|
@ -1,3 +1,5 @@
|
||||
// +build !windows
|
||||
|
||||
package fs
|
||||
|
||||
import (
|
||||
@ -7,8 +9,8 @@ import (
|
||||
"strings"
|
||||
"syscall"
|
||||
|
||||
"github.com/containerd/continuity/sysx"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/stevvooe/continuity/sysx"
|
||||
)
|
||||
|
||||
// whiteouts are files with a special meaning for the layered filesystem.
|
||||
@ -90,3 +92,11 @@ func compareCapabilities(p1, p2 string) (bool, error) {
|
||||
}
|
||||
return bytes.Equal(c1, c2), nil
|
||||
}
|
||||
|
||||
func isLinked(f os.FileInfo) bool {
|
||||
s, ok := f.Sys().(*syscall.Stat_t)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
return !f.IsDir() && s.Nlink > 1
|
||||
}
|
6
vendor/github.com/containerd/containerd/fs/diff_windows.go
generated
vendored
@ -1,5 +1,7 @@
package fs

import "os"

func detectDirDiff(upper, lower string) *diffDirOptions {
return nil
}
@ -13,3 +15,7 @@ func compareCapabilities(p1, p2 string) (bool, error) {
// TODO: Use windows equivalent
return true, nil
}

func isLinked(os.FileInfo) bool {
return false
}
12
vendor/github.com/containerd/containerd/fs/du.go
generated
vendored
Normal file
@ -0,0 +1,12 @@
package fs

type Usage struct {
Inodes int64
Size int64
}

// DiskUsage counts the number of inodes and disk usage for the resources under
// path.
func DiskUsage(roots ...string) (Usage, error) {
return diskUsage(roots...)
}
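DiskUsage above delegates to per-platform diskUsage implementations that sum file sizes (and, on unix, count distinct inodes). A short usage sketch with a placeholder root directory:

package main

import (
	"fmt"
	"log"

	"github.com/containerd/containerd/fs"
)

func main() {
	usage, err := fs.DiskUsage("/var/lib/containerd") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("inodes=%d size=%d bytes\n", usage.Inodes, usage.Size)
}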
42
vendor/github.com/containerd/containerd/fs/du_unix.go
generated
vendored
Normal file
@ -0,0 +1,42 @@
|
||||
// +build !windows
|
||||
|
||||
package fs
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
func diskUsage(roots ...string) (Usage, error) {
|
||||
type inode struct {
|
||||
// TODO(stevvooe): Can probably reduce memory usage by not tracking
|
||||
// device, but we can leave this right for now.
|
||||
dev, ino uint64
|
||||
}
|
||||
|
||||
var (
|
||||
size int64
|
||||
inodes = map[inode]struct{}{} // expensive!
|
||||
)
|
||||
|
||||
for _, root := range roots {
|
||||
if err := filepath.Walk(root, func(path string, fi os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
stat := fi.Sys().(*syscall.Stat_t)
|
||||
inodes[inode{dev: uint64(stat.Dev), ino: stat.Ino}] = struct{}{}
|
||||
size += fi.Size()
|
||||
return nil
|
||||
}); err != nil {
|
||||
return Usage{}, err
|
||||
}
|
||||
}
|
||||
|
||||
return Usage{
|
||||
Inodes: int64(len(inodes)),
|
||||
Size: size,
|
||||
}, nil
|
||||
}
|
33
vendor/github.com/containerd/containerd/fs/du_windows.go
generated
vendored
Normal file
@ -0,0 +1,33 @@
|
||||
// +build windows
|
||||
|
||||
package fs
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
)
|
||||
|
||||
func diskUsage(roots ...string) (Usage, error) {
|
||||
var (
|
||||
size int64
|
||||
)
|
||||
|
||||
// TODO(stevvooe): Support inodes (or equivalent) for windows.
|
||||
|
||||
for _, root := range roots {
|
||||
if err := filepath.Walk(root, func(path string, fi os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
size += fi.Size()
|
||||
return nil
|
||||
}); err != nil {
|
||||
return Usage{}, err
|
||||
}
|
||||
}
|
||||
|
||||
return Usage{
|
||||
Size: size,
|
||||
}, nil
|
||||
}
|
21
vendor/github.com/containerd/containerd/fs/hardlink.go
generated
vendored
@ -2,11 +2,26 @@ package fs
|
||||
|
||||
import "os"
|
||||
|
||||
// GetLinkSource returns a path for the given name and
|
||||
// GetLinkID returns an identifier representing the node a hardlink is pointing
|
||||
// to. If the file is not hard linked then 0 will be returned.
|
||||
func GetLinkInfo(fi os.FileInfo) (uint64, bool) {
|
||||
return getLinkInfo(fi)
|
||||
}
|
||||
|
||||
// getLinkSource returns a path for the given name and
|
||||
// file info to its link source in the provided inode
|
||||
// map. If the given file name is not in the map and
|
||||
// has other links, it is added to the inode map
|
||||
// to be a source for other link locations.
|
||||
func GetLinkSource(name string, fi os.FileInfo, inodes map[uint64]string) (string, error) {
|
||||
return getHardLink(name, fi, inodes)
|
||||
func getLinkSource(name string, fi os.FileInfo, inodes map[uint64]string) (string, error) {
|
||||
inode, isHardlink := getLinkInfo(fi)
|
||||
if !isHardlink {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
path, ok := inodes[inode]
|
||||
if !ok {
|
||||
inodes[inode] = name
|
||||
}
|
||||
return path, nil
|
||||
}
|
||||
|
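GetLinkInfo above reports a file's inode together with whether it is hardlinked. A sketch, not part of the diff, that uses it to group hardlinked paths while walking a tree:

package main

import (
	"os"
	"path/filepath"

	"github.com/containerd/containerd/fs"
)

// groupHardlinks maps each hardlinked inode under root to the paths sharing it.
func groupHardlinks(root string) (map[uint64][]string, error) {
	groups := map[uint64][]string{}
	err := filepath.Walk(root, func(path string, fi os.FileInfo, walkErr error) error {
		if walkErr != nil {
			return walkErr
		}
		if inode, isHardlink := fs.GetLinkInfo(fi); isHardlink {
			groups[inode] = append(groups[inode], path)
		}
		return nil
	})
	return groups, err
}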
22
vendor/github.com/containerd/containerd/fs/hardlink_unix.go
generated
vendored
@ -3,31 +3,15 @@
|
||||
package fs
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"os"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
func getHardLink(name string, fi os.FileInfo, inodes map[uint64]string) (string, error) {
|
||||
if fi.IsDir() {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
func getLinkInfo(fi os.FileInfo) (uint64, bool) {
|
||||
s, ok := fi.Sys().(*syscall.Stat_t)
|
||||
if !ok {
|
||||
return "", errors.New("unsupported stat type")
|
||||
return 0, false
|
||||
}
|
||||
|
||||
// If inode is not hardlinked, no reason to lookup or save inode
|
||||
if s.Nlink == 1 {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
inode := uint64(s.Ino)
|
||||
|
||||
path, ok := inodes[inode]
|
||||
if !ok {
|
||||
inodes[inode] = name
|
||||
}
|
||||
return path, nil
|
||||
return uint64(s.Ino), !fi.IsDir() && s.Nlink > 1
|
||||
}
|
||||
|
4
vendor/github.com/containerd/containerd/fs/hardlink_windows.go
generated
vendored
@ -2,6 +2,6 @@ package fs

import "os"

func getHardLink(string, os.FileInfo, map[uint64]string) (string, error) {
return "", nil
func getLinkInfo(fi os.FileInfo) (uint64, bool) {
return 0, false
}
70
vendor/github.com/containerd/containerd/mount_linux.go
generated
vendored
Normal file
@ -0,0 +1,70 @@
|
||||
package containerd
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
func (m *Mount) Mount(target string) error {
|
||||
flags, data := parseMountOptions(m.Options)
|
||||
return unix.Mount(m.Source, target, m.Type, uintptr(flags), data)
|
||||
}
|
||||
|
||||
func Unmount(mount string, flags int) error {
|
||||
return unix.Unmount(mount, flags)
|
||||
}
|
||||
|
||||
// parseMountOptions takes fstab style mount options and parses them for
|
||||
// use with a standard mount() syscall
|
||||
func parseMountOptions(options []string) (int, string) {
|
||||
var (
|
||||
flag int
|
||||
data []string
|
||||
)
|
||||
flags := map[string]struct {
|
||||
clear bool
|
||||
flag int
|
||||
}{
|
||||
"async": {true, unix.MS_SYNCHRONOUS},
|
||||
"atime": {true, unix.MS_NOATIME},
|
||||
"bind": {false, unix.MS_BIND},
|
||||
"defaults": {false, 0},
|
||||
"dev": {true, unix.MS_NODEV},
|
||||
"diratime": {true, unix.MS_NODIRATIME},
|
||||
"dirsync": {false, unix.MS_DIRSYNC},
|
||||
"exec": {true, unix.MS_NOEXEC},
|
||||
"mand": {false, unix.MS_MANDLOCK},
|
||||
"noatime": {false, unix.MS_NOATIME},
|
||||
"nodev": {false, unix.MS_NODEV},
|
||||
"nodiratime": {false, unix.MS_NODIRATIME},
|
||||
"noexec": {false, unix.MS_NOEXEC},
|
||||
"nomand": {true, unix.MS_MANDLOCK},
|
||||
"norelatime": {true, unix.MS_RELATIME},
|
||||
"nostrictatime": {true, unix.MS_STRICTATIME},
|
||||
"nosuid": {false, unix.MS_NOSUID},
|
||||
"rbind": {false, unix.MS_BIND | unix.MS_REC},
|
||||
"relatime": {false, unix.MS_RELATIME},
|
||||
"remount": {false, unix.MS_REMOUNT},
|
||||
"ro": {false, unix.MS_RDONLY},
|
||||
"rw": {true, unix.MS_RDONLY},
|
||||
"strictatime": {false, unix.MS_STRICTATIME},
|
||||
"suid": {true, unix.MS_NOSUID},
|
||||
"sync": {false, unix.MS_SYNCHRONOUS},
|
||||
}
|
||||
for _, o := range options {
|
||||
// If the option does not exist in the flags table or the flag
|
||||
// is not supported on the platform,
|
||||
// then it is a data value for a specific fs type
|
||||
if f, exists := flags[o]; exists && f.flag != 0 {
|
||||
if f.clear {
|
||||
flag &^= f.flag
|
||||
} else {
|
||||
flag |= f.flag
|
||||
}
|
||||
} else {
|
||||
data = append(data, o)
|
||||
}
|
||||
}
|
||||
return flag, strings.Join(data, ",")
|
||||
}
|
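parseMountOptions above converts fstab-style option strings into mount(2) flags plus a comma-joined data string, which Mount then passes to unix.Mount. A hedged usage sketch (source and target paths are placeholders):

package main

import (
	"log"

	"github.com/containerd/containerd"
)

func main() {
	m := &containerd.Mount{
		Type:    "bind",
		Source:  "/srv/data",             // placeholder source
		Options: []string{"rbind", "ro"}, // parsed to MS_BIND|MS_REC and MS_RDONLY
	}
	if err := m.Mount("/mnt/target"); err != nil { // placeholder target
		log.Fatal(err)
	}
}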
67
vendor/github.com/containerd/containerd/mount_unix.go
generated
vendored
@ -1,72 +1,17 @@
|
||||
// +build linux
|
||||
// +build darwin freebsd
|
||||
|
||||
package containerd
|
||||
|
||||
import (
|
||||
"strings"
|
||||
import "github.com/pkg/errors"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
var (
|
||||
ErrNotImplementOnUnix = errors.New("not implemented under unix")
|
||||
)
|
||||
|
||||
func (m *Mount) Mount(target string) error {
|
||||
flags, data := parseMountOptions(m.Options)
|
||||
return unix.Mount(m.Source, target, m.Type, uintptr(flags), data)
|
||||
return ErrNotImplementOnUnix
|
||||
}
|
||||
|
||||
func Unmount(mount string, flags int) error {
|
||||
return unix.Unmount(mount, flags)
|
||||
}
|
||||
|
||||
// parseMountOptions takes fstab style mount options and parses them for
|
||||
// use with a standard mount() syscall
|
||||
func parseMountOptions(options []string) (int, string) {
|
||||
var (
|
||||
flag int
|
||||
data []string
|
||||
)
|
||||
flags := map[string]struct {
|
||||
clear bool
|
||||
flag int
|
||||
}{
|
||||
"async": {true, unix.MS_SYNCHRONOUS},
|
||||
"atime": {true, unix.MS_NOATIME},
|
||||
"bind": {false, unix.MS_BIND},
|
||||
"defaults": {false, 0},
|
||||
"dev": {true, unix.MS_NODEV},
|
||||
"diratime": {true, unix.MS_NODIRATIME},
|
||||
"dirsync": {false, unix.MS_DIRSYNC},
|
||||
"exec": {true, unix.MS_NOEXEC},
|
||||
"mand": {false, unix.MS_MANDLOCK},
|
||||
"noatime": {false, unix.MS_NOATIME},
|
||||
"nodev": {false, unix.MS_NODEV},
|
||||
"nodiratime": {false, unix.MS_NODIRATIME},
|
||||
"noexec": {false, unix.MS_NOEXEC},
|
||||
"nomand": {true, unix.MS_MANDLOCK},
|
||||
"norelatime": {true, unix.MS_RELATIME},
|
||||
"nostrictatime": {true, unix.MS_STRICTATIME},
|
||||
"nosuid": {false, unix.MS_NOSUID},
|
||||
"rbind": {false, unix.MS_BIND | unix.MS_REC},
|
||||
"relatime": {false, unix.MS_RELATIME},
|
||||
"remount": {false, unix.MS_REMOUNT},
|
||||
"ro": {false, unix.MS_RDONLY},
|
||||
"rw": {true, unix.MS_RDONLY},
|
||||
"strictatime": {false, unix.MS_STRICTATIME},
|
||||
"suid": {true, unix.MS_NOSUID},
|
||||
"sync": {false, unix.MS_SYNCHRONOUS},
|
||||
}
|
||||
for _, o := range options {
|
||||
// If the option does not exist in the flags table or the flag
|
||||
// is not supported on the platform,
|
||||
// then it is a data value for a specific fs type
|
||||
if f, exists := flags[o]; exists && f.flag != 0 {
|
||||
if f.clear {
|
||||
flag &^= f.flag
|
||||
} else {
|
||||
flag |= f.flag
|
||||
}
|
||||
} else {
|
||||
data = append(data, o)
|
||||
}
|
||||
}
|
||||
return flag, strings.Join(data, ",")
|
||||
return ErrNotImplementOnUnix
|
||||
}
|
||||
|
11
vendor/github.com/containerd/containerd/plugin/monitor.go
generated
vendored
@ -8,6 +8,8 @@ type ContainerMonitor interface {
|
||||
Monitor(containerd.Container) error
|
||||
// Stop stops and removes the provided container from the monitor
|
||||
Stop(containerd.Container) error
|
||||
// Events emits events from the monitor
|
||||
Events(chan<- *containerd.Event)
|
||||
}
|
||||
|
||||
func NewMultiContainerMonitor(monitors ...ContainerMonitor) ContainerMonitor {
|
||||
@ -31,6 +33,9 @@ func (mm *noopContainerMonitor) Stop(c containerd.Container) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (mm *noopContainerMonitor) Events(events chan<- *containerd.Event) {
|
||||
}
|
||||
|
||||
type multiContainerMonitor struct {
|
||||
monitors []ContainerMonitor
|
||||
}
|
||||
@ -52,3 +57,9 @@ func (mm *multiContainerMonitor) Stop(c containerd.Container) error {
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (mm *multiContainerMonitor) Events(events chan<- *containerd.Event) {
|
||||
for _, m := range mm.monitors {
|
||||
m.Events(events)
|
||||
}
|
||||
}
|
||||
|
2
vendor/github.com/containerd/containerd/plugin/plugin.go
generated
vendored
@ -33,7 +33,7 @@ type InitContext struct {
Root string
State string
Runtimes map[string]containerd.Runtime
Content *content.Store
Content content.Store
Meta *bolt.DB
Snapshotter snapshot.Snapshotter
Config interface{}
182
vendor/github.com/containerd/containerd/remotes/docker/auth.go
generated
vendored
Normal file
@ -0,0 +1,182 @@
|
||||
package docker
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"sort"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type authenticationScheme byte
|
||||
|
||||
const (
|
||||
basicAuth authenticationScheme = 1 << iota // Defined in RFC 7617
|
||||
digestAuth // Defined in RFC 7616
|
||||
bearerAuth // Defined in RFC 6750
|
||||
)
|
||||
|
||||
// challenge carries information from a WWW-Authenticate response header.
|
||||
// See RFC 2617.
|
||||
type challenge struct {
|
||||
// scheme is the auth-scheme according to RFC 2617
|
||||
scheme authenticationScheme
|
||||
|
||||
// parameters are the auth-params according to RFC 2617
|
||||
parameters map[string]string
|
||||
}
|
||||
|
||||
type byScheme []challenge
|
||||
|
||||
func (bs byScheme) Len() int { return len(bs) }
|
||||
func (bs byScheme) Swap(i, j int) { bs[i], bs[j] = bs[j], bs[i] }
|
||||
|
||||
// Sort in priority order: token > digest > basic
|
||||
func (bs byScheme) Less(i, j int) bool { return bs[i].scheme > bs[j].scheme }
|
||||
|
||||
// Octet types from RFC 2616.
|
||||
type octetType byte
|
||||
|
||||
var octetTypes [256]octetType
|
||||
|
||||
const (
|
||||
isToken octetType = 1 << iota
|
||||
isSpace
|
||||
)
|
||||
|
||||
func init() {
|
||||
// OCTET = <any 8-bit sequence of data>
|
||||
// CHAR = <any US-ASCII character (octets 0 - 127)>
|
||||
// CTL = <any US-ASCII control character (octets 0 - 31) and DEL (127)>
|
||||
// CR = <US-ASCII CR, carriage return (13)>
|
||||
// LF = <US-ASCII LF, linefeed (10)>
|
||||
// SP = <US-ASCII SP, space (32)>
|
||||
// HT = <US-ASCII HT, horizontal-tab (9)>
|
||||
// <"> = <US-ASCII double-quote mark (34)>
|
||||
// CRLF = CR LF
|
||||
// LWS = [CRLF] 1*( SP | HT )
|
||||
// TEXT = <any OCTET except CTLs, but including LWS>
|
||||
// separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <">
|
||||
// | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT
|
||||
// token = 1*<any CHAR except CTLs or separators>
|
||||
// qdtext = <any TEXT except <">>
|
||||
|
||||
for c := 0; c < 256; c++ {
|
||||
var t octetType
|
||||
isCtl := c <= 31 || c == 127
|
||||
isChar := 0 <= c && c <= 127
|
||||
isSeparator := strings.IndexRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) >= 0
|
||||
if strings.IndexRune(" \t\r\n", rune(c)) >= 0 {
|
||||
t |= isSpace
|
||||
}
|
||||
if isChar && !isCtl && !isSeparator {
|
||||
t |= isToken
|
||||
}
|
||||
octetTypes[c] = t
|
||||
}
|
||||
}
|
||||
|
||||
func parseAuthHeader(header http.Header) []challenge {
|
||||
challenges := []challenge{}
|
||||
for _, h := range header[http.CanonicalHeaderKey("WWW-Authenticate")] {
|
||||
v, p := parseValueAndParams(h)
|
||||
var s authenticationScheme
|
||||
switch v {
|
||||
case "basic":
|
||||
s = basicAuth
|
||||
case "digest":
|
||||
s = digestAuth
|
||||
case "bearer":
|
||||
s = bearerAuth
|
||||
default:
|
||||
continue
|
||||
}
|
||||
challenges = append(challenges, challenge{scheme: s, parameters: p})
|
||||
}
|
||||
sort.Stable(byScheme(challenges))
|
||||
return challenges
|
||||
}
|
||||
|
||||
func parseValueAndParams(header string) (value string, params map[string]string) {
|
||||
params = make(map[string]string)
|
||||
value, s := expectToken(header)
|
||||
if value == "" {
|
||||
return
|
||||
}
|
||||
value = strings.ToLower(value)
|
||||
for {
|
||||
var pkey string
|
||||
pkey, s = expectToken(skipSpace(s))
|
||||
if pkey == "" {
|
||||
return
|
||||
}
|
||||
if !strings.HasPrefix(s, "=") {
|
||||
return
|
||||
}
|
||||
var pvalue string
|
||||
pvalue, s = expectTokenOrQuoted(s[1:])
|
||||
if pvalue == "" {
|
||||
return
|
||||
}
|
||||
pkey = strings.ToLower(pkey)
|
||||
params[pkey] = pvalue
|
||||
s = skipSpace(s)
|
||||
if !strings.HasPrefix(s, ",") {
|
||||
return
|
||||
}
|
||||
s = s[1:]
|
||||
}
|
||||
}
|
||||
|
||||
func skipSpace(s string) (rest string) {
|
||||
i := 0
|
||||
for ; i < len(s); i++ {
|
||||
if octetTypes[s[i]]&isSpace == 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
return s[i:]
|
||||
}
|
||||
|
||||
func expectToken(s string) (token, rest string) {
|
||||
i := 0
|
||||
for ; i < len(s); i++ {
|
||||
if octetTypes[s[i]]&isToken == 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
return s[:i], s[i:]
|
||||
}
|
||||
|
||||
func expectTokenOrQuoted(s string) (value string, rest string) {
|
||||
if !strings.HasPrefix(s, "\"") {
|
||||
return expectToken(s)
|
||||
}
|
||||
s = s[1:]
|
||||
for i := 0; i < len(s); i++ {
|
||||
switch s[i] {
|
||||
case '"':
|
||||
return s[:i], s[i+1:]
|
||||
case '\\':
|
||||
p := make([]byte, len(s)-1)
|
||||
j := copy(p, s[:i])
|
||||
escape := true
|
||||
for i = i + 1; i < len(s); i++ {
|
||||
b := s[i]
|
||||
switch {
|
||||
case escape:
|
||||
escape = false
|
||||
p[j] = b
|
||||
j++
|
||||
case b == '\\':
|
||||
escape = true
|
||||
case b == '"':
|
||||
return string(p[:j]), s[i+1:]
|
||||
default:
|
||||
p[j] = b
|
||||
j++
|
||||
}
|
||||
}
|
||||
return "", ""
|
||||
}
|
||||
}
|
||||
return "", ""
|
||||
}
|
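The auth.go challenge parser above lowercases the scheme and collects auth-params into a map. An in-package, test-style sketch of the expected behaviour for a typical Docker Hub WWW-Authenticate value (the test itself is illustrative, not part of the vendored file):

package docker

import "testing"

func TestParseValueAndParams(t *testing.T) {
	header := `Bearer realm="https://auth.docker.io/token",service="registry.docker.io",scope="repository:library/ubuntu:pull"`
	value, params := parseValueAndParams(header)
	if value != "bearer" {
		t.Fatalf("expected scheme %q, got %q", "bearer", value)
	}
	if params["realm"] != "https://auth.docker.io/token" {
		t.Fatalf("unexpected realm: %q", params["realm"])
	}
	if params["service"] != "registry.docker.io" || params["scope"] != "repository:library/ubuntu:pull" {
		t.Fatalf("unexpected params: %v", params)
	}
}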
357
vendor/github.com/containerd/containerd/remotes/docker/resolver.go
generated
vendored
@ -7,10 +7,12 @@ import (
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/textproto"
|
||||
"net/url"
|
||||
"path"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/containerd/containerd/images"
|
||||
@ -23,15 +25,43 @@ import (
|
||||
"golang.org/x/net/context/ctxhttp"
|
||||
)
|
||||
|
||||
// NOTE(stevvooe): Most of the code below this point is prototype code to
|
||||
// demonstrate a very simplified docker.io fetcher. We have a lot of hard coded
|
||||
// values but we leave many of the details down to the fetcher, creating a lot
|
||||
// of room for ways to fetch content.
|
||||
var (
|
||||
// ErrNoToken is returned if a request is successful but the body does not
|
||||
// contain an authorization token.
|
||||
ErrNoToken = errors.New("authorization server did not include a token in the response")
|
||||
|
||||
type dockerResolver struct{}
|
||||
// ErrInvalidAuthorization is used when credentials are passed to a server but
|
||||
// those credentials are rejected.
|
||||
ErrInvalidAuthorization = errors.New("authorization failed")
|
||||
)
|
||||
|
||||
func NewResolver() remotes.Resolver {
|
||||
return &dockerResolver{}
|
||||
type dockerResolver struct {
|
||||
credentials func(string) (string, string, error)
|
||||
plainHTTP bool
|
||||
client *http.Client
|
||||
}
|
||||
|
||||
// ResolverOptions are used to configured a new Docker register resolver
|
||||
type ResolverOptions struct {
|
||||
// Credentials provides username and secret given a host.
|
||||
// If username is empty but a secret is given, that secret
|
||||
// is interpretted as a long lived token.
|
||||
Credentials func(string) (string, string, error)
|
||||
|
||||
// PlainHTTP specifies to use plain http and not https
|
||||
PlainHTTP bool
|
||||
|
||||
// Client is the http client to used when making registry requests
|
||||
Client *http.Client
|
||||
}
|
||||
|
||||
// NewResolver returns a new resolver to a Docker registry
|
||||
func NewResolver(options ResolverOptions) remotes.Resolver {
|
||||
return &dockerResolver{
|
||||
credentials: options.Credentials,
|
||||
plainHTTP: options.PlainHTTP,
|
||||
client: options.Client,
|
||||
}
|
||||
}
|
||||
|
||||
var _ remotes.Resolver = &dockerResolver{}
|
||||
@ -44,30 +74,37 @@ func (r *dockerResolver) Resolve(ctx context.Context, ref string) (string, ocisp
|
||||
|
||||
var (
|
||||
base url.URL
|
||||
token string
|
||||
username, secret string
|
||||
)
|
||||
|
||||
switch refspec.Hostname() {
|
||||
case "docker.io":
|
||||
host := refspec.Hostname()
|
||||
base.Scheme = "https"
|
||||
|
||||
if host == "docker.io" {
|
||||
base.Host = "registry-1.docker.io"
|
||||
prefix := strings.TrimPrefix(refspec.Locator, "docker.io/")
|
||||
base.Path = path.Join("/v2", prefix)
|
||||
token, err = getToken(ctx, "repository:"+prefix+":pull")
|
||||
} else {
|
||||
base.Host = host
|
||||
|
||||
if r.plainHTTP || strings.HasPrefix(host, "localhost:") {
|
||||
base.Scheme = "http"
|
||||
}
|
||||
}
|
||||
|
||||
if r.credentials != nil {
|
||||
username, secret, err = r.credentials(base.Host)
|
||||
if err != nil {
|
||||
return "", ocispec.Descriptor{}, nil, err
|
||||
}
|
||||
case "localhost:5000":
|
||||
base.Scheme = "http"
|
||||
base.Host = "localhost:5000"
|
||||
base.Path = path.Join("/v2", strings.TrimPrefix(refspec.Locator, "localhost:5000/"))
|
||||
default:
|
||||
return "", ocispec.Descriptor{}, nil, errors.Errorf("unsupported locator: %q", refspec.Locator)
|
||||
}
|
||||
|
||||
prefix := strings.TrimPrefix(refspec.Locator, host+"/")
|
||||
base.Path = path.Join("/v2", prefix)
|
||||
|
||||
fetcher := &dockerFetcher{
|
||||
base: base,
|
||||
token: token,
|
||||
client: r.client,
|
||||
username: username,
|
||||
secret: secret,
|
||||
}
|
||||
|
||||
var (
|
||||
@ -125,16 +162,13 @@ func (r *dockerResolver) Resolve(ctx context.Context, ref string) (string, ocisp
|
||||
|
||||
if dgstHeader != "" {
|
||||
if err := dgstHeader.Validate(); err != nil {
|
||||
if err == nil {
|
||||
return "", ocispec.Descriptor{}, nil, errors.Errorf("%q in header not a valid digest", dgstHeader)
|
||||
}
|
||||
return "", ocispec.Descriptor{}, nil, errors.Wrapf(err, "%q in header not a valid digest", dgstHeader)
|
||||
}
|
||||
dgst = dgstHeader
|
||||
}
|
||||
|
||||
if dgst == "" {
|
||||
return "", ocispec.Descriptor{}, nil, errors.Wrapf(err, "could not resolve digest for %v", ref)
|
||||
return "", ocispec.Descriptor{}, nil, errors.Errorf("could not resolve digest for %v", ref)
|
||||
}
|
||||
|
||||
var (
|
||||
@ -143,8 +177,12 @@ func (r *dockerResolver) Resolve(ctx context.Context, ref string) (string, ocisp
|
||||
)
|
||||
|
||||
size, err = strconv.ParseInt(sizeHeader, 10, 64)
|
||||
if err != nil || size < 0 {
|
||||
return "", ocispec.Descriptor{}, nil, errors.Wrapf(err, "%q in header not a valid size", sizeHeader)
|
||||
if err != nil {
|
||||
|
||||
return "", ocispec.Descriptor{}, nil, errors.Wrapf(err, "invalid size header: %q", sizeHeader)
|
||||
}
|
||||
if size < 0 {
|
||||
return "", ocispec.Descriptor{}, nil, errors.Errorf("%q in header not a valid size", sizeHeader)
|
||||
}
|
||||
|
||||
desc := ocispec.Descriptor{
|
||||
@ -163,6 +201,11 @@ func (r *dockerResolver) Resolve(ctx context.Context, ref string) (string, ocisp
|
||||
type dockerFetcher struct {
|
||||
base url.URL
|
||||
token string
|
||||
|
||||
client *http.Client
|
||||
useBasic bool
|
||||
username string
|
||||
secret string
|
||||
}
|
||||
|
||||
func (r *dockerFetcher) Fetch(ctx context.Context, desc ocispec.Descriptor) (io.ReadCloser, error) {
|
||||
@ -213,13 +256,14 @@ func (r *dockerFetcher) url(ps ...string) string {
|
||||
}
|
||||
|
||||
func (r *dockerFetcher) doRequest(ctx context.Context, req *http.Request) (*http.Response, error) {
|
||||
ctx = log.WithLogger(ctx, log.G(ctx).WithField("url", req.URL.String()))
|
||||
log.G(ctx).WithField("request.headers", req.Header).Debug("fetch content")
|
||||
if r.token != "" {
|
||||
req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", r.token))
|
||||
return r.doRequestWithRetries(ctx, req, nil)
|
||||
}
|
||||
|
||||
resp, err := ctxhttp.Do(ctx, http.DefaultClient, req)
|
||||
func (r *dockerFetcher) doRequestWithRetries(ctx context.Context, req *http.Request, responses []*http.Response) (*http.Response, error) {
|
||||
ctx = log.WithLogger(ctx, log.G(ctx).WithField("url", req.URL.String()))
|
||||
log.G(ctx).WithField("request.headers", req.Header).Debug("fetch content")
|
||||
r.authorize(req)
|
||||
resp, err := ctxhttp.Do(ctx, r.client, req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -228,50 +272,119 @@ func (r *dockerFetcher) doRequest(ctx context.Context, req *http.Request) (*http
|
||||
"response.headers": resp.Header,
|
||||
}).Debug("fetch response received")
|
||||
|
||||
responses = append(responses, resp)
|
||||
req, err = r.retryRequest(ctx, req, responses)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if req != nil {
|
||||
return r.doRequestWithRetries(ctx, req, responses)
|
||||
}
|
||||
return resp, err
|
||||
}
|
||||
|
||||
func getToken(ctx context.Context, scopes ...string) (string, error) {
|
||||
var (
|
||||
u = url.URL{
|
||||
Scheme: "https",
|
||||
Host: "auth.docker.io",
|
||||
Path: "/token",
|
||||
func (r *dockerFetcher) authorize(req *http.Request) {
|
||||
if r.useBasic {
|
||||
req.SetBasicAuth(r.username, r.secret)
|
||||
} else if r.token != "" {
|
||||
req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", r.token))
|
||||
}
|
||||
}
|
||||
|
||||
q = url.Values{
|
||||
"scope": scopes,
|
||||
"service": []string{"registry.docker.io"}, // usually comes from auth challenge
|
||||
func (r *dockerFetcher) retryRequest(ctx context.Context, req *http.Request, responses []*http.Response) (*http.Request, error) {
|
||||
if len(responses) > 5 {
|
||||
return nil, nil
|
||||
}
|
||||
last := responses[len(responses)-1]
|
||||
if last.StatusCode == http.StatusUnauthorized {
|
||||
log.G(ctx).WithField("header", last.Header.Get("WWW-Authenticate")).Debug("Unauthorized")
|
||||
for _, c := range parseAuthHeader(last.Header) {
|
||||
if c.scheme == bearerAuth {
|
||||
if errStr := c.parameters["error"]; errStr != "" {
|
||||
// TODO: handle expired case
|
||||
return nil, errors.Wrapf(ErrInvalidAuthorization, "server message: %s", errStr)
|
||||
}
|
||||
if err := r.setTokenAuth(ctx, c.parameters); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return req, nil
|
||||
} else if c.scheme == basicAuth {
|
||||
if r.username != "" && r.secret != "" {
|
||||
r.useBasic = true
|
||||
}
|
||||
return req, nil
|
||||
}
|
||||
}
|
||||
return nil, nil
|
||||
} else if last.StatusCode == http.StatusMethodNotAllowed && req.Method == http.MethodHead {
|
||||
// Support registries which have not properly implemented the HEAD method for
|
||||
// manifests endpoint
|
||||
if strings.Contains(req.URL.Path, "/manifests/") {
|
||||
// TODO: copy request?
|
||||
req.Method = http.MethodGet
|
||||
return req, nil
|
||||
}
|
||||
}
|
||||
)
|
||||
|
||||
u.RawQuery = q.Encode()
|
||||
// TODO: Handle 50x errors accounting for attempt history
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
log.G(ctx).WithField("token.url", u.String()).Debug("requesting token")
|
||||
resp, err := ctxhttp.Get(ctx, http.DefaultClient, u.String())
|
||||
func isManifestAccept(h http.Header) bool {
|
||||
for _, ah := range h[textproto.CanonicalMIMEHeaderKey("Accept")] {
|
||||
switch ah {
|
||||
case images.MediaTypeDockerSchema2Manifest:
|
||||
fallthrough
|
||||
case images.MediaTypeDockerSchema2ManifestList:
|
||||
fallthrough
|
||||
case ocispec.MediaTypeImageManifest:
|
||||
fallthrough
|
||||
case ocispec.MediaTypeImageIndex:
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
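
The media-type check above pairs with the HEAD request path handled in retryRequest. As a point of reference, here is a minimal, self-contained sketch of building a manifest request with the usual Accept headers; the registry URL is a placeholder and the media-type strings are the well-known Docker/OCI values hard-coded rather than imported, so this is an illustration, not code from this diff.

package main

import (
	"fmt"
	"net/http"
)

// manifestAccepts lists the manifest media types a fetcher typically asks for.
var manifestAccepts = []string{
	"application/vnd.docker.distribution.manifest.v2+json",
	"application/vnd.docker.distribution.manifest.list.v2+json",
	"application/vnd.oci.image.manifest.v1+json",
	"application/vnd.oci.image.index.v1+json",
}

func main() {
	// Placeholder URL; a real fetcher derives this from the image reference.
	req, err := http.NewRequest(http.MethodHead,
		"https://registry.example.com/v2/library/busybox/manifests/latest", nil)
	if err != nil {
		panic(err)
	}
	for _, mt := range manifestAccepts {
		req.Header.Add("Accept", mt)
	}
	// An isManifestAccept-style check would now succeed on req.Header.
	fmt.Println(req.Header["Accept"])
}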
|
||||
|
||||
func (r *dockerFetcher) setTokenAuth(ctx context.Context, params map[string]string) error {
|
||||
realm, ok := params["realm"]
|
||||
if !ok {
|
||||
return errors.New("no realm specified for token auth challenge")
|
||||
}
|
||||
|
||||
realmURL, err := url.Parse(realm)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode > 299 {
|
||||
return "", errors.Errorf("unexpected status code: %v %v", resp.StatusCode, resp.Status)
|
||||
return fmt.Errorf("invalid token auth challenge realm: %s", err)
|
||||
}
|
||||
|
||||
p, err := ioutil.ReadAll(resp.Body)
|
||||
to := tokenOptions{
|
||||
realm: realmURL.String(),
|
||||
service: params["service"],
|
||||
}
|
||||
|
||||
scope, ok := params["scope"]
|
||||
if !ok {
|
||||
return errors.Errorf("no scope specified for token auth challenge")
|
||||
}
|
||||
|
||||
// TODO: Get added scopes from context
|
||||
to.scopes = []string{scope}
|
||||
|
||||
if r.secret != "" {
|
||||
// Credential information is provided, use oauth POST endpoint
|
||||
r.token, err = r.fetchTokenWithOAuth(ctx, to)
|
||||
if err != nil {
|
||||
return "", err
|
||||
return errors.Wrap(err, "failed to fetch oauth token")
|
||||
}
|
||||
} else {
|
||||
// Do request anonymously
|
||||
r.token, err = r.getToken(ctx, to)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to fetch anonymous token")
|
||||
}
|
||||
}
|
||||
|
||||
var tokenResponse struct {
|
||||
Token string `json:"token"`
|
||||
}
|
||||
|
||||
if err := json.Unmarshal(p, &tokenResponse); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return tokenResponse.Token, nil
|
||||
return nil
|
||||
}
|
||||
|
||||
// getV2URLPaths generates the candidate urls paths for the object based on the
|
||||
@ -291,3 +404,125 @@ func getV2URLPaths(desc ocispec.Descriptor) ([]string, error) {
|
||||
|
||||
return urls, nil
|
||||
}
|
||||
|
||||
type tokenOptions struct {
|
||||
realm string
|
||||
service string
|
||||
scopes []string
|
||||
}
|
||||
|
||||
type postTokenResponse struct {
|
||||
AccessToken string `json:"access_token"`
|
||||
RefreshToken string `json:"refresh_token"`
|
||||
ExpiresIn int `json:"expires_in"`
|
||||
IssuedAt time.Time `json:"issued_at"`
|
||||
Scope string `json:"scope"`
|
||||
}
|
||||
|
||||
func (r *dockerFetcher) fetchTokenWithOAuth(ctx context.Context, to tokenOptions) (string, error) {
|
||||
form := url.Values{}
|
||||
form.Set("scope", strings.Join(to.scopes, " "))
|
||||
form.Set("service", to.service)
|
||||
// TODO: Allow setting client_id
|
||||
form.Set("client_id", "containerd-dist-tool")
|
||||
|
||||
if r.username == "" {
|
||||
form.Set("grant_type", "refresh_token")
|
||||
form.Set("refresh_token", r.secret)
|
||||
} else {
|
||||
form.Set("grant_type", "password")
|
||||
form.Set("username", r.username)
|
||||
form.Set("password", r.secret)
|
||||
}
|
||||
|
||||
resp, err := ctxhttp.PostForm(ctx, r.client, to.realm, form)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode == 405 && r.username != "" {
|
||||
// It would be nice if registries would implement the specifications
|
||||
return r.getToken(ctx, to)
|
||||
} else if resp.StatusCode < 200 || resp.StatusCode >= 400 {
|
||||
b, _ := ioutil.ReadAll(resp.Body)
|
||||
log.G(ctx).WithFields(logrus.Fields{
|
||||
"status": resp.Status,
|
||||
"body": string(b),
|
||||
}).Debugf("token request failed")
|
||||
// TODO: handle error body and write debug output
|
||||
return "", errors.Errorf("unexpected status: %s", resp.Status)
|
||||
}
|
||||
|
||||
decoder := json.NewDecoder(resp.Body)
|
||||
|
||||
var tr postTokenResponse
|
||||
if err = decoder.Decode(&tr); err != nil {
|
||||
return "", fmt.Errorf("unable to decode token response: %s", err)
|
||||
}
|
||||
|
||||
return tr.AccessToken, nil
|
||||
}
|
||||
|
||||
type getTokenResponse struct {
|
||||
Token string `json:"token"`
|
||||
AccessToken string `json:"access_token"`
|
||||
ExpiresIn int `json:"expires_in"`
|
||||
IssuedAt time.Time `json:"issued_at"`
|
||||
RefreshToken string `json:"refresh_token"`
|
||||
}
|
||||
|
||||
// getToken fetches a token using a GET request
|
||||
func (r *dockerFetcher) getToken(ctx context.Context, to tokenOptions) (string, error) {
|
||||
req, err := http.NewRequest("GET", to.realm, nil)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
reqParams := req.URL.Query()
|
||||
|
||||
if to.service != "" {
|
||||
reqParams.Add("service", to.service)
|
||||
}
|
||||
|
||||
for _, scope := range to.scopes {
|
||||
reqParams.Add("scope", scope)
|
||||
}
|
||||
|
||||
if r.secret != "" {
|
||||
req.SetBasicAuth(r.username, r.secret)
|
||||
}
|
||||
|
||||
req.URL.RawQuery = reqParams.Encode()
|
||||
|
||||
resp, err := ctxhttp.Do(ctx, r.client, req)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode < 200 || resp.StatusCode >= 400 {
|
||||
// TODO: handle error body and write debug output
|
||||
return "", errors.Errorf("unexpected status: %s", resp.Status)
|
||||
}
|
||||
|
||||
decoder := json.NewDecoder(resp.Body)
|
||||
|
||||
var tr getTokenResponse
|
||||
if err = decoder.Decode(&tr); err != nil {
|
||||
return "", fmt.Errorf("unable to decode token response: %s", err)
|
||||
}
|
||||
|
||||
// `access_token` is equivalent to `token` and if both are specified
|
||||
// the choice is undefined. Canonicalize `access_token` by sticking
|
||||
// things in `token`.
|
||||
if tr.AccessToken != "" {
|
||||
tr.Token = tr.AccessToken
|
||||
}
|
||||
|
||||
if tr.Token == "" {
|
||||
return "", ErrNoToken
|
||||
}
|
||||
|
||||
return tr.Token, nil
|
||||
}
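
Taken together, retryRequest, setTokenAuth and the two token fetchers implement the standard registry challenge flow: an unauthenticated request gets a 401 whose WWW-Authenticate header names a realm, service and scope; the client fetches a token from that realm and retries with a Bearer header. The sketch below walks that flow with plain net/http; the realm, service and scope values are placeholders that a real client would parse out of the challenge, so treat it as an outline rather than the fetcher's actual code.

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
	"net/url"
)

// fetchBearerToken asks the token endpoint (the "realm" from the auth
// challenge) for a token covering the given service and scope.
func fetchBearerToken(realm, service, scope string) (string, error) {
	u, err := url.Parse(realm)
	if err != nil {
		return "", err
	}
	q := u.Query()
	q.Set("service", service)
	q.Set("scope", scope)
	u.RawQuery = q.Encode()

	resp, err := http.Get(u.String())
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	if resp.StatusCode < 200 || resp.StatusCode >= 400 {
		return "", fmt.Errorf("unexpected status: %s", resp.Status)
	}

	// Registries may return the token as "token" or "access_token";
	// canonicalize on "token" as getToken above does.
	var tr struct {
		Token       string `json:"token"`
		AccessToken string `json:"access_token"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&tr); err != nil {
		return "", err
	}
	if tr.AccessToken != "" {
		tr.Token = tr.AccessToken
	}
	return tr.Token, nil
}

func main() {
	// Placeholder values; a real client parses realm/service/scope out of the
	// WWW-Authenticate header on the 401 response.
	token, err := fetchBearerToken(
		"https://auth.docker.io/token",
		"registry.docker.io",
		"repository:library/busybox:pull",
	)
	if err != nil {
		panic(err)
	}

	req, _ := http.NewRequest(http.MethodGet,
		"https://registry-1.docker.io/v2/library/busybox/manifests/latest", nil)
	req.Header.Set("Authorization", "Bearer "+token)
	// The retried request now carries the token, mirroring authorize() above.
	_ = req
}

The oauth POST variant used by fetchTokenWithOAuth later in this file differs only in sending the same parameters as a form body, with grant_type set to password or refresh_token depending on the credentials available.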
|
||||
|
vendor/github.com/containerd/containerd/rootfs/apply.go (generated, vendored, 15 changes)
@ -4,6 +4,7 @@ import (
|
||||
"context"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
|
||||
"github.com/containerd/containerd"
|
||||
"github.com/containerd/containerd/archive"
|
||||
@ -41,6 +42,7 @@ func ApplyLayer(snapshots snapshot.Snapshotter, mounter Mounter, rd io.Reader, p
|
||||
if err != nil {
|
||||
return "", errors.Wrapf(err, "creating temporary directory failed")
|
||||
}
|
||||
defer os.RemoveAll(dir)
|
||||
|
||||
// TODO(stevvooe): Choose this key WAY more carefully. We should be able to
|
||||
// create collisions for concurrent, conflicting unpack processes but we
|
||||
@ -69,7 +71,7 @@ func ApplyLayer(snapshots snapshot.Snapshotter, mounter Mounter, rd io.Reader, p
|
||||
digester := digest.Canonical.Digester() // used to calculate diffID.
|
||||
rd = io.TeeReader(rd, digester.Hash())
|
||||
|
||||
if _, err := archive.Apply(context.Background(), key, rd); err != nil {
|
||||
if _, err := archive.Apply(context.Background(), dir, rd); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
@ -80,7 +82,7 @@ func ApplyLayer(snapshots snapshot.Snapshotter, mounter Mounter, rd io.Reader, p
|
||||
chainID = identity.ChainID([]digest.Digest{parent, chainID})
|
||||
}
|
||||
if _, err := snapshots.Stat(ctx, chainID.String()); err == nil {
|
||||
return diffID, nil //TODO: call snapshots.Remove(ctx, key) once implemented
|
||||
return diffID, snapshots.Remove(ctx, key)
|
||||
}
|
||||
|
||||
return diffID, snapshots.Commit(ctx, chainID.String(), key)
|
||||
@ -107,14 +109,11 @@ func Prepare(ctx context.Context, snapshots snapshot.Snapshotter, mounter Mounte
|
||||
)
|
||||
|
||||
for _, layer := range layers {
|
||||
// TODO: layer.Digest should not be string
|
||||
// (https://github.com/opencontainers/image-spec/pull/514)
|
||||
layerDigest := digest.Digest(layer.Digest)
|
||||
// This will convert a possibly compressed layer hash to the
|
||||
// uncompressed hash, if we know about it. If we don't, we unpack and
|
||||
// calculate it. If we do have it, we then calculate the chain id for
|
||||
// the application and see if the snapshot is there.
|
||||
diffID := resolveDiffID(layerDigest)
|
||||
diffID := resolveDiffID(layer.Digest)
|
||||
if diffID != "" {
|
||||
chainLocal := append(chain, diffID)
|
||||
chainID := identity.ChainID(chainLocal)
|
||||
@ -124,7 +123,7 @@ func Prepare(ctx context.Context, snapshots snapshot.Snapshotter, mounter Mounte
|
||||
}
|
||||
}
|
||||
|
||||
rc, err := openBlob(ctx, layerDigest)
|
||||
rc, err := openBlob(ctx, layer.Digest)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
@ -139,7 +138,7 @@ func Prepare(ctx context.Context, snapshots snapshot.Snapshotter, mounter Mounte
|
||||
// For uncompressed layers, this will be the same. For compressed
|
||||
// layers, we can look up the diffID from the digest if we've already
|
||||
// unpacked it.
|
||||
if err := registerDiffID(diffID, layerDigest); err != nil {
|
||||
if err := registerDiffID(diffID, layer.Digest); err != nil {
|
||||
return "", err
|
||||
}
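
The Prepare loop above keys snapshots by chain ID, which the OCI image spec derives recursively from the layer diff IDs (identity.ChainID does the same thing). A small standard-library sketch of that derivation, with placeholder digests:

package main

import (
	"crypto/sha256"
	"fmt"
)

// chainID follows the OCI image-spec rule:
//   ChainID(L1)      = DiffID(L1)
//   ChainID(L1...Ln) = SHA256(ChainID(L1...Ln-1) + " " + DiffID(Ln))
func chainID(diffIDs []string) string {
	if len(diffIDs) == 0 {
		return ""
	}
	id := diffIDs[0]
	for _, diff := range diffIDs[1:] {
		sum := sha256.Sum256([]byte(id + " " + diff))
		id = fmt.Sprintf("sha256:%x", sum)
	}
	return id
}

func main() {
	// Example diff IDs (placeholders, not real layers).
	layers := []string{
		"sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
		"sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
	}
	fmt.Println(chainID(layers))
}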
|
||||
|
||||
|
vendor/github.com/containerd/containerd/services/content/helpers.go (generated, vendored, 11 changes)
@ -17,3 +17,14 @@ func rewriteGRPCError(err error) error {
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func serverErrorToGRPC(err error, id string) error {
|
||||
switch {
|
||||
case content.IsNotFound(err):
|
||||
return grpc.Errorf(codes.NotFound, "%v: not found", id)
|
||||
case content.IsExists(err):
|
||||
return grpc.Errorf(codes.AlreadyExists, "%v: exists", id)
|
||||
}
|
||||
|
||||
return err
|
||||
}
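
serverErrorToGRPC translates store errors into gRPC status codes on the way out; rewriteGRPCError (whose body is truncated in the hunk above) performs the reverse mapping on the client side. Below is a hedged sketch of that inverse direction, using local sentinel errors and the status package rather than the grpc.Errorf helpers shown in this vendored snapshot:

package main

import (
	"errors"
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// Local sentinels standing in for the content package's not-found/exists
// errors, which are not part of this diff.
var (
	errNotFound = errors.New("content: not found")
	errExists   = errors.New("content: exists")
)

// fromGRPC converts a gRPC error back into a typed store error.
func fromGRPC(err error) error {
	switch status.Code(err) {
	case codes.NotFound:
		return errNotFound
	case codes.AlreadyExists:
		return errExists
	default:
		return err
	}
}

func main() {
	err := status.Errorf(codes.NotFound, "sha256:abc: not found")
	fmt.Println(fromGRPC(err) == errNotFound) // true
}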
|
||||
|
@ -1,35 +1,9 @@
|
||||
package content
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
|
||||
contentapi "github.com/containerd/containerd/api/services/content"
|
||||
"github.com/containerd/containerd/content"
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
)
|
||||
|
||||
func NewProviderFromClient(client contentapi.ContentClient) content.Provider {
|
||||
return &remoteProvider{
|
||||
client: client,
|
||||
}
|
||||
}
|
||||
|
||||
type remoteProvider struct {
|
||||
client contentapi.ContentClient
|
||||
}
|
||||
|
||||
func (rp *remoteProvider) Reader(ctx context.Context, dgst digest.Digest) (io.ReadCloser, error) {
|
||||
client, err := rp.client.Read(ctx, &contentapi.ReadRequest{Digest: dgst})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &remoteReader{
|
||||
client: client,
|
||||
}, nil
|
||||
}
|
||||
|
||||
type remoteReader struct {
|
||||
client contentapi.Content_ReadClient
|
||||
extra []byte
|
vendor/github.com/containerd/containerd/services/content/service.go (generated, vendored, 108 changes)
@ -18,7 +18,7 @@ import (
|
||||
)
|
||||
|
||||
type Service struct {
|
||||
store *content.Store
|
||||
store content.Store
|
||||
}
|
||||
|
||||
var bufPool = sync.Pool{
|
||||
@ -52,25 +52,68 @@ func (s *Service) Info(ctx context.Context, req *api.InfoRequest) (*api.InfoResp
|
||||
return nil, grpc.Errorf(codes.InvalidArgument, "%q failed validation", req.Digest)
|
||||
}
|
||||
|
||||
bi, err := s.store.Info(req.Digest)
|
||||
bi, err := s.store.Info(ctx, req.Digest)
|
||||
if err != nil {
|
||||
return nil, maybeNotFoundGRPC(err, req.Digest.String())
|
||||
return nil, serverErrorToGRPC(err, req.Digest.String())
|
||||
}
|
||||
|
||||
return &api.InfoResponse{
|
||||
Digest: req.Digest,
|
||||
Info: api.Info{
|
||||
Digest: bi.Digest,
|
||||
Size_: bi.Size,
|
||||
CommittedAt: bi.CommittedAt,
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (s *Service) List(req *api.ListContentRequest, session api.Content_ListServer) error {
|
||||
var (
|
||||
buffer []api.Info
|
||||
sendBlock = func(block []api.Info) error {
|
||||
// send last block
|
||||
return session.Send(&api.ListContentResponse{
|
||||
Info: block,
|
||||
})
|
||||
}
|
||||
)
|
||||
|
||||
if err := s.store.Walk(session.Context(), func(info content.Info) error {
|
||||
buffer = append(buffer, api.Info{
|
||||
Digest: info.Digest,
|
||||
Size_: info.Size,
|
||||
CommittedAt: info.CommittedAt,
|
||||
})
|
||||
|
||||
if len(buffer) >= 100 {
|
||||
if err := sendBlock(buffer); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
buffer = buffer[:0]
|
||||
}
|
||||
|
||||
return nil
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(buffer) > 0 {
|
||||
// send last block
|
||||
if err := sendBlock(buffer); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Service) Delete(ctx context.Context, req *api.DeleteContentRequest) (*empty.Empty, error) {
|
||||
if err := req.Digest.Validate(); err != nil {
|
||||
return nil, grpc.Errorf(codes.InvalidArgument, err.Error())
|
||||
}
|
||||
|
||||
if err := s.store.Delete(req.Digest); err != nil {
|
||||
return nil, maybeNotFoundGRPC(err, req.Digest.String())
|
||||
if err := s.store.Delete(ctx, req.Digest); err != nil {
|
||||
return nil, serverErrorToGRPC(err, req.Digest.String())
|
||||
}
|
||||
|
||||
return &empty.Empty{}, nil
|
||||
@ -81,14 +124,14 @@ func (s *Service) Read(req *api.ReadRequest, session api.Content_ReadServer) err
|
||||
return grpc.Errorf(codes.InvalidArgument, "%v: %v", req.Digest, err)
|
||||
}
|
||||
|
||||
oi, err := s.store.Info(req.Digest)
|
||||
oi, err := s.store.Info(session.Context(), req.Digest)
|
||||
if err != nil {
|
||||
return maybeNotFoundGRPC(err, req.Digest.String())
|
||||
return serverErrorToGRPC(err, req.Digest.String())
|
||||
}
|
||||
|
||||
rc, err := s.store.Reader(session.Context(), req.Digest)
|
||||
if err != nil {
|
||||
return maybeNotFoundGRPC(err, req.Digest.String())
|
||||
return serverErrorToGRPC(err, req.Digest.String())
|
||||
}
|
||||
defer rc.Close() // TODO(stevvooe): Cache these file descriptors for performance.
|
||||
|
||||
@ -132,6 +175,10 @@ func (s *Service) Read(req *api.ReadRequest, session api.Content_ReadServer) err
|
||||
return nil
|
||||
}
|
||||
|
||||
// readResponseWriter is a writer that places the output into ReadResponse messages.
|
||||
//
|
||||
// This allows io.CopyBuffer to do the heavy lifting of chunking the responses
|
||||
// into the buffer size.
|
||||
type readResponseWriter struct {
|
||||
offset int64
|
||||
session api.Content_ReadServer
|
||||
@ -149,6 +196,27 @@ func (rw *readResponseWriter) Write(p []byte) (n int, err error) {
|
||||
return len(p), nil
|
||||
}
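
The readResponseWriter comment describes the trick: implement io.Writer so that io.CopyBuffer's buffer size becomes the gRPC message size. The standalone sketch below reproduces the pattern with an in-memory message list instead of a stream; the LimitReader wrapper only exists to keep CopyBuffer from bypassing the buffer via WriteTo.

package main

import (
	"fmt"
	"io"
	"strings"
)

// messageWriter turns every Write call into one "message", the way
// readResponseWriter turns each Write into a ReadResponse.
type messageWriter struct {
	offset   int64
	messages []string
}

func (w *messageWriter) Write(p []byte) (int, error) {
	w.messages = append(w.messages, string(p))
	w.offset += int64(len(p))
	return len(p), nil
}

func main() {
	// LimitReader hides strings.Reader's WriteTo, so CopyBuffer really does
	// chunk through our 4-byte buffer.
	src := io.LimitReader(strings.NewReader(strings.Repeat("x", 10)), 10)
	dst := &messageWriter{}

	buf := make([]byte, 4)
	if _, err := io.CopyBuffer(dst, src, buf); err != nil {
		panic(err)
	}
	fmt.Println(len(dst.messages), dst.offset) // 3 10
}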
|
||||
|
||||
func (s *Service) Status(ctx context.Context, req *api.StatusRequest) (*api.StatusResponse, error) {
|
||||
statuses, err := s.store.Status(ctx, req.Regexp)
|
||||
if err != nil {
|
||||
return nil, serverErrorToGRPC(err, req.Regexp)
|
||||
}
|
||||
|
||||
var resp api.StatusResponse
|
||||
for _, status := range statuses {
|
||||
resp.Statuses = append(resp.Statuses, api.Status{
|
||||
StartedAt: status.StartedAt,
|
||||
UpdatedAt: status.UpdatedAt,
|
||||
Ref: status.Ref,
|
||||
Offset: status.Offset,
|
||||
Total: status.Total,
|
||||
Expected: status.Expected,
|
||||
})
|
||||
}
|
||||
|
||||
return &resp, nil
|
||||
}
|
||||
|
||||
func (s *Service) Write(session api.Content_WriteServer) (err error) {
|
||||
var (
|
||||
ctx = session.Context()
|
||||
@ -243,8 +311,8 @@ func (s *Service) Write(session api.Content_WriteServer) (err error) {
|
||||
}
|
||||
expected = req.Expected
|
||||
|
||||
if _, err := s.store.Info(req.Expected); err == nil {
|
||||
if err := s.store.Abort(ref); err != nil {
|
||||
if _, err := s.store.Info(session.Context(), req.Expected); err == nil {
|
||||
if err := s.store.Abort(session.Context(), ref); err != nil {
|
||||
log.G(ctx).WithError(err).Error("failed to abort write")
|
||||
}
|
||||
|
||||
@ -304,12 +372,10 @@ func (s *Service) Write(session api.Content_WriteServer) (err error) {
|
||||
if err := wr.Commit(total, expected); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
msg.Digest = wr.Digest()
|
||||
}
|
||||
case api.WriteActionAbort:
|
||||
return s.store.Abort(ref)
|
||||
}
|
||||
|
||||
if err := session.Send(&msg); err != nil {
|
||||
return err
|
||||
@ -324,18 +390,12 @@ func (s *Service) Write(session api.Content_WriteServer) (err error) {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Service) Status(*api.StatusRequest, api.Content_StatusServer) error {
|
||||
return grpc.Errorf(codes.Unimplemented, "not implemented")
|
||||
func (s *Service) Abort(ctx context.Context, req *api.AbortRequest) (*empty.Empty, error) {
|
||||
if err := s.store.Abort(ctx, req.Ref); err != nil {
|
||||
return nil, serverErrorToGRPC(err, req.Ref)
|
||||
}
|
||||
|
||||
func maybeNotFoundGRPC(err error, id string) error {
|
||||
if content.IsNotFound(err) {
|
||||
return grpc.Errorf(codes.NotFound, "%v: not found", id)
|
||||
}
|
||||
|
||||
return err
|
||||
return &empty.Empty{}, nil
|
||||
}
|
||||
|
vendor/github.com/containerd/containerd/services/content/store.go (new file, generated, vendored, 155 lines)
@ -0,0 +1,155 @@
|
||||
package content
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
|
||||
contentapi "github.com/containerd/containerd/api/services/content"
|
||||
"github.com/containerd/containerd/content"
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
)
|
||||
|
||||
type remoteStore struct {
|
||||
client contentapi.ContentClient
|
||||
}
|
||||
|
||||
func NewStoreFromClient(client contentapi.ContentClient) content.Store {
|
||||
return &remoteStore{
|
||||
client: client,
|
||||
}
|
||||
}
|
||||
|
||||
func (rs *remoteStore) Info(ctx context.Context, dgst digest.Digest) (content.Info, error) {
|
||||
resp, err := rs.client.Info(ctx, &contentapi.InfoRequest{
|
||||
Digest: dgst,
|
||||
})
|
||||
if err != nil {
|
||||
return content.Info{}, rewriteGRPCError(err)
|
||||
}
|
||||
|
||||
return content.Info{
|
||||
Digest: resp.Info.Digest,
|
||||
Size: resp.Info.Size_,
|
||||
CommittedAt: resp.Info.CommittedAt,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (rs *remoteStore) Walk(ctx context.Context, fn content.WalkFunc) error {
|
||||
session, err := rs.client.List(ctx, &contentapi.ListContentRequest{})
|
||||
if err != nil {
|
||||
return rewriteGRPCError(err)
|
||||
}
|
||||
|
||||
for {
|
||||
msg, err := session.Recv()
|
||||
if err != nil {
|
||||
if err != io.EOF {
|
||||
return rewriteGRPCError(err)
|
||||
}
|
||||
|
||||
break
|
||||
}
|
||||
|
||||
for _, info := range msg.Info {
|
||||
if err := fn(content.Info{
|
||||
Digest: info.Digest,
|
||||
Size: info.Size_,
|
||||
CommittedAt: info.CommittedAt,
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (rs *remoteStore) Delete(ctx context.Context, dgst digest.Digest) error {
|
||||
if _, err := rs.client.Delete(ctx, &contentapi.DeleteContentRequest{
|
||||
Digest: dgst,
|
||||
}); err != nil {
|
||||
return rewriteGRPCError(err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (rs *remoteStore) Reader(ctx context.Context, dgst digest.Digest) (io.ReadCloser, error) {
|
||||
client, err := rs.client.Read(ctx, &contentapi.ReadRequest{Digest: dgst})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &remoteReader{
|
||||
client: client,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (rs *remoteStore) Status(ctx context.Context, re string) ([]content.Status, error) {
|
||||
resp, err := rs.client.Status(ctx, &contentapi.StatusRequest{
|
||||
Regexp: re,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, rewriteGRPCError(err)
|
||||
}
|
||||
|
||||
var statuses []content.Status
|
||||
for _, status := range resp.Statuses {
|
||||
statuses = append(statuses, content.Status{
|
||||
Ref: status.Ref,
|
||||
StartedAt: status.StartedAt,
|
||||
UpdatedAt: status.UpdatedAt,
|
||||
Offset: status.Offset,
|
||||
Total: status.Total,
|
||||
Expected: status.Expected,
|
||||
})
|
||||
}
|
||||
|
||||
return statuses, nil
|
||||
}
|
||||
|
||||
func (rs *remoteStore) Writer(ctx context.Context, ref string, size int64, expected digest.Digest) (content.Writer, error) {
|
||||
wrclient, offset, err := rs.negotiate(ctx, ref, size, expected)
|
||||
if err != nil {
|
||||
return nil, rewriteGRPCError(err)
|
||||
}
|
||||
|
||||
return &remoteWriter{
|
||||
client: wrclient,
|
||||
offset: offset,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Abort implements asynchronous abort. It starts a new write session on the ref l
|
||||
func (rs *remoteStore) Abort(ctx context.Context, ref string) error {
|
||||
if _, err := rs.client.Abort(ctx, &contentapi.AbortRequest{
|
||||
Ref: ref,
|
||||
}); err != nil {
|
||||
return rewriteGRPCError(err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (rs *remoteStore) negotiate(ctx context.Context, ref string, size int64, expected digest.Digest) (contentapi.Content_WriteClient, int64, error) {
|
||||
wrclient, err := rs.client.Write(ctx)
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
|
||||
if err := wrclient.Send(&contentapi.WriteRequest{
|
||||
Action: contentapi.WriteActionStat,
|
||||
Ref: ref,
|
||||
Total: size,
|
||||
Expected: expected,
|
||||
}); err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
|
||||
resp, err := wrclient.Recv()
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
|
||||
return wrclient, resp.Offset, nil
|
||||
}
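
negotiate opens the write stream with a Stat action and returns the offset the server already holds, which is what makes interrupted uploads resumable: the caller seeks its local source to that offset before copying. A generic sketch of that resume step, with no gRPC involved:

package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"
)

// resumeCopy copies src into dst starting at offset, the way a content
// writer resumes after negotiate() reports how much the server already has.
func resumeCopy(dst io.Writer, src io.ReadSeeker, offset int64) (int64, error) {
	if _, err := src.Seek(offset, io.SeekStart); err != nil {
		return 0, err
	}
	return io.Copy(dst, src)
}

func main() {
	blob := strings.NewReader("0123456789")

	// Pretend the server reported that 4 bytes were already written.
	var remote bytes.Buffer
	remote.WriteString("0123")

	n, err := resumeCopy(&remote, blob, int64(remote.Len()))
	if err != nil {
		panic(err)
	}
	fmt.Println(n, remote.String()) // 6 0123456789
}

The real remoteWriter then finishes with a Commit carrying the expected size and digest, so the server can reject a truncated or corrupted upload.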
|
@ -1,61 +1,14 @@
|
||||
package content
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
|
||||
contentapi "github.com/containerd/containerd/api/services/content"
|
||||
"github.com/containerd/containerd/content"
|
||||
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
func NewIngesterFromClient(client contentapi.ContentClient) content.Ingester {
|
||||
return &remoteIngester{
|
||||
client: client,
|
||||
}
|
||||
}
|
||||
|
||||
type remoteIngester struct {
|
||||
client contentapi.ContentClient
|
||||
}
|
||||
|
||||
func (ri *remoteIngester) Writer(ctx context.Context, ref string, size int64, expected digest.Digest) (content.Writer, error) {
|
||||
wrclient, offset, err := ri.negotiate(ctx, ref, size, expected)
|
||||
if err != nil {
|
||||
return nil, rewriteGRPCError(err)
|
||||
}
|
||||
|
||||
return &remoteWriter{
|
||||
client: wrclient,
|
||||
offset: offset,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (ri *remoteIngester) negotiate(ctx context.Context, ref string, size int64, expected digest.Digest) (contentapi.Content_WriteClient, int64, error) {
|
||||
wrclient, err := ri.client.Write(ctx)
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
|
||||
if err := wrclient.Send(&contentapi.WriteRequest{
|
||||
Action: contentapi.WriteActionStat,
|
||||
Ref: ref,
|
||||
Total: size,
|
||||
Expected: expected,
|
||||
}); err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
|
||||
resp, err := wrclient.Recv()
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
|
||||
return wrclient, resp.Offset, nil
|
||||
}
|
||||
|
||||
type remoteWriter struct {
|
||||
ref string
|
||||
client contentapi.Content_WriteClient
|
||||
@ -127,6 +80,9 @@ func (rw *remoteWriter) Write(p []byte) (n int, err error) {
|
||||
}
|
||||
|
||||
rw.offset += int64(n)
|
||||
if resp.Digest != "" {
|
||||
rw.digest = resp.Digest
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
@ -149,6 +105,8 @@ func (rw *remoteWriter) Commit(size int64, expected digest.Digest) error {
|
||||
return errors.Errorf("unexpected digest: %v != %v", resp.Digest, expected)
|
||||
}
|
||||
|
||||
rw.digest = resp.Digest
|
||||
rw.offset = resp.Offset
|
||||
return nil
|
||||
}
|
||||
|
vendor/github.com/containerd/containerd/services/rootfs/service.go (generated, vendored, 4 changes)
@ -26,11 +26,11 @@ func init() {
|
||||
}
|
||||
|
||||
type Service struct {
|
||||
store *content.Store
|
||||
store content.Store
|
||||
snapshotter snapshot.Snapshotter
|
||||
}
|
||||
|
||||
func NewService(store *content.Store, snapshotter snapshot.Snapshotter) (*Service, error) {
|
||||
func NewService(store content.Store, snapshotter snapshot.Snapshotter) (*Service, error) {
|
||||
return &Service{
|
||||
store: store,
|
||||
snapshotter: snapshotter,
|
||||
|
vendor/github.com/containerd/containerd/snapshot/snapshotter.go (generated, vendored, 49 changes)
@ -23,6 +23,24 @@ type Info struct {
|
||||
Readonly bool // true if readonly, only valid for active
|
||||
}
|
||||
|
||||
// Usage defines statistics for disk resources consumed by the snapshot.
|
||||
//
|
||||
// These resources only include the resources consumed by the snapshot itself
|
||||
// and does not include resources usage by the parent.
|
||||
type Usage struct {
|
||||
Inodes int64 // number of inodes in use.
|
||||
Size int64 // provides usage, in bytes, of snapshot
|
||||
}
|
||||
|
||||
func (u *Usage) Add(other Usage) {
|
||||
u.Size += other.Size
|
||||
|
||||
// TODO(stevvooe): assumes independent inodes, but provides an upper
// bound. This should be pretty close, assuming the inodes for a
|
||||
// snapshot are roughly unique to it. Don't trust this assumption.
|
||||
u.Inodes += other.Inodes
|
||||
}
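
Usage.Add aggregates per-snapshot statistics, so summing a parent chain yields an upper bound on total disk use (shared inodes may be double counted, as the TODO warns). A tiny illustration with a local copy of the type:

package main

import "fmt"

// Usage mirrors the snapshot.Usage shape from the interface above.
type Usage struct {
	Inodes int64
	Size   int64
}

func (u *Usage) Add(other Usage) {
	u.Size += other.Size
	u.Inodes += other.Inodes // upper bound; shared inodes are counted twice
}

func main() {
	var total Usage
	for _, u := range []Usage{{Inodes: 10, Size: 4096}, {Inodes: 3, Size: 1024}} {
		total.Add(u)
	}
	fmt.Printf("%+v\n", total) // {Inodes:13 Size:5120}
}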
|
||||
|
||||
// Snapshotter defines the methods required to implement a snapshot snapshotter for
|
||||
// allocating, snapshotting and mounting filesystem changesets. The model works
|
||||
// by building up sets of changes with parent-child relationships.
|
||||
@ -45,6 +63,7 @@ type Info struct {
|
||||
// For consistency, we define the following terms to be used throughout this
|
||||
// interface for snapshotter implementations:
|
||||
//
|
||||
// `ctx` - refers to a context.Context
|
||||
// `key` - refers to an active snapshot
|
||||
// `name` - refers to a committed snapshot
|
||||
// `parent` - refers to the parent in relation
|
||||
@ -71,14 +90,14 @@ type Info struct {
|
||||
// We start by using a Snapshotter to Prepare a new snapshot transaction, using a
|
||||
// key and descending from the empty parent "":
|
||||
//
|
||||
// mounts, err := snapshotter.Prepare(key, "")
|
||||
// mounts, err := snapshotter.Prepare(ctx, key, "")
|
||||
// if err != nil { ... }
|
||||
//
|
||||
// We get back a list of mounts from Snapshotter.Prepare, with the key identifying
|
||||
// the active snapshot. Mount this to the temporary location with the
|
||||
// following:
|
||||
//
|
||||
// if err := MountAll(mounts, tmpDir); err != nil { ... }
|
||||
// if err := containerd.MountAll(mounts, tmpDir); err != nil { ... }
|
||||
//
|
||||
// Once the mounts are performed, our temporary location is ready to capture
|
||||
// a diff. In practice, this works similar to a filesystem transaction. The
|
||||
@ -102,21 +121,21 @@ type Info struct {
|
||||
// snapshot to a name. For this example, we are just going to use the layer
|
||||
// digest, but in practice, this will probably be the ChainID:
|
||||
//
|
||||
// if err := snapshotter.Commit(digest.String(), key); err != nil { ... }
|
||||
// if err := snapshotter.Commit(ctx, digest.String(), key); err != nil { ... }
|
||||
//
|
||||
// Now, we have a layer in the Snapshotter that can be accessed with the digest
|
||||
// provided during commit. Once you have committed the snapshot, the active
|
||||
// snapshot can be removed with the following:
|
||||
//
|
||||
// snapshotter.Remove(key)
|
||||
// snapshotter.Remove(ctx, key)
|
||||
//
|
||||
// Importing the Next Layer
|
||||
//
|
||||
// Making a layer depend on the above is identical to the process described
|
||||
// above except that the parent is provided as parent when calling
|
||||
// Manager.Prepare, assuming a clean tmpLocation:
|
||||
// Manager.Prepare, assuming a clean, unique key identifier:
|
||||
//
|
||||
// mounts, err := snapshotter.Prepare(tmpLocation, parentDigest)
|
||||
// mounts, err := snapshotter.Prepare(ctx, key, parentDigest)
|
||||
//
|
||||
// We then mount, apply and commit, as we did above. The new snapshot will be
|
||||
// based on the content of the previous one.
|
||||
@ -127,13 +146,13 @@ type Info struct {
|
||||
// snapshot as the parent. After mounting, the prepared path can
|
||||
// be used directly as the container's filesystem:
|
||||
//
|
||||
// mounts, err := snapshotter.Prepare(containerKey, imageRootFSChainID)
|
||||
// mounts, err := snapshotter.Prepare(ctx, containerKey, imageRootFSChainID)
|
||||
//
|
||||
// The returned mounts can then be passed directly to the container runtime. If
|
||||
// one would like to create a new image from the filesystem, Manager.Commit is
|
||||
// called:
|
||||
//
|
||||
// if err := snapshotter.Commit(newImageSnapshot, containerKey); err != nil { ... }
|
||||
// if err := snapshotter.Commit(ctx, newImageSnapshot, containerKey); err != nil { ... }
|
||||
//
|
||||
// Alternatively, for most container runs, Snapshotter.Remove will be called to
|
||||
// signal the Snapshotter to abandon the changes.
|
||||
@ -145,6 +164,16 @@ type Snapshotter interface {
|
||||
// the kind of snapshot.
|
||||
Stat(ctx context.Context, key string) (Info, error)
|
||||
|
||||
// Usage returns the resource usage of an active or committed snapshot
|
||||
// excluding the usage of parent snapshots.
|
||||
//
|
||||
// The running time of this call for active snapshots is dependent on
|
||||
// implementation, but may be proportional to the size of the resource.
|
||||
// Callers should take this into consideration. Implementations should
|
||||
// attempt to honor context cancellation and avoid taking locks when making
|
||||
// the calculation.
|
||||
Usage(ctx context.Context, key string) (Usage, error)
|
||||
|
||||
// Mounts returns the mounts for the active snapshot transaction identified
|
||||
// by key. Can be called on an read-write or readonly transaction. This is
|
||||
// available only for active snapshots.
|
||||
@ -203,7 +232,7 @@ type Snapshotter interface {
|
||||
// removed before proceeding.
|
||||
Remove(ctx context.Context, key string) error
|
||||
|
||||
// Walk the committed snapshots. For each snapshot in the snapshotter, the
|
||||
// function will be called.
|
||||
// Walk all snapshots in the snapshotter. For each snapshot in the
|
||||
// snapshotter, the function will be called.
|
||||
Walk(ctx context.Context, fn func(context.Context, Info) error) error
|
||||
}
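
The doc comment above walks through the Prepare, mount, apply, Commit lifecycle. Below is a compact, hedged sketch of that loop; the Mount and Snapshotter types are minimal local stand-ins for the containerd ones, and the mount/tar-apply step is left to a caller-supplied function since it is platform specific, so this shows the documented workflow rather than code from this diff.

package main

import (
	"context"
	"fmt"
)

// Minimal stand-ins for the containerd types used in the documented workflow.
type Mount struct {
	Type    string
	Source  string
	Options []string
}

type Snapshotter interface {
	Prepare(ctx context.Context, key, parent string) ([]Mount, error)
	Commit(ctx context.Context, name, key string) error
	Remove(ctx context.Context, key string) error
}

// unpackLayer follows the lifecycle described above: prepare an active
// snapshot, hand the mounts to the caller to mount and apply the layer tar,
// then commit the result under its chain ID.
func unpackLayer(ctx context.Context, sn Snapshotter, key, parent, chainID string,
	mountAndApply func([]Mount) error) error {

	mounts, err := sn.Prepare(ctx, key, parent)
	if err != nil {
		return err
	}

	if err := mountAndApply(mounts); err != nil {
		// Abandon the active snapshot on failure, as the doc comment suggests.
		sn.Remove(ctx, key)
		return err
	}

	// Committed snapshots are named after the chain ID so later layers and
	// container rootfs preparation can find them.
	return sn.Commit(ctx, chainID, key)
}

func main() {
	// Wiring up a real snapshotter is out of scope here; unpackLayer only
	// shows the shape of the loop.
	fmt.Println("see unpackLayer above")
}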
|
||||
|
vendor/github.com/containerd/containerd/sys/socket_unix.go (generated, vendored, 5 changes)
@ -28,6 +28,11 @@ func GetLocalListener(path string, uid, gid int) (net.Listener, error) {
|
||||
return l, err
|
||||
}
|
||||
|
||||
if err := os.Chmod(path, 0660); err != nil {
|
||||
l.Close()
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := os.Chown(path, uid, gid); err != nil {
|
||||
l.Close()
|
||||
return nil, err
|
||||
|
vendor/github.com/containerd/containerd/vendor.conf (generated, vendored, 18 changes)
@ -1,7 +1,9 @@
|
||||
github.com/crosbymichael/go-runc 65847bfc51952703ca24b564d10de50d3f2db6e7
|
||||
github.com/crosbymichael/console f13f890e20a94bdec6c328cdf9410b7158f0cfa4
|
||||
github.com/crosbymichael/cgroups a692a19766b072b86d89620c97a7916b2e2de3e7
|
||||
github.com/coreos/go-systemd 48702e0da86bd25e76cfef347e2adeb434a0d0a6
|
||||
github.com/containerd/go-runc 5fe4d8cb7fdc0fae5f5a7f4f1d65a565032401b2
|
||||
github.com/containerd/console a3863895279f5104533fd999c1babf80faffd98c
|
||||
github.com/containerd/cgroups 7b2d1a0f50963678d5799e29d17a4d611f5a5dee
|
||||
github.com/docker/go-metrics 8fd5772bf1584597834c6f7961a530f06cbfbb87
|
||||
github.com/godbus/dbus c7fdd8b5cd55e87b4e1f4e372cdb1db61dd6c66f
|
||||
github.com/prometheus/client_golang v0.8.0
|
||||
github.com/prometheus/client_model fa8ad6fec33561be4280a8f0514318c79d7f6cb6
|
||||
github.com/prometheus/common 195bde7883f7c39ea62b0d92ab7359b5327065cb
|
||||
@ -14,24 +16,24 @@ github.com/golang/protobuf 8ee79997227bf9b34611aee7946ae64735e6fd93
|
||||
github.com/opencontainers/runc 50401b5b4c2e01e4f1372b73a021742deeaf4e2d
|
||||
github.com/opencontainers/runtime-spec 035da1dca3dfbb00d752eb58b0b158d6129f3776
|
||||
github.com/Sirupsen/logrus v0.11.0
|
||||
github.com/stevvooe/go-btrfs ea304655a3ed8f00773db1844f921d12541ee0d1
|
||||
github.com/containerd/btrfs e9c546f46bccffefe71a6bc137e4c21b5503cc18
|
||||
github.com/stretchr/testify v1.1.4
|
||||
github.com/davecgh/go-spew v1.1.0
|
||||
github.com/pmezard/go-difflib v1.0.0
|
||||
github.com/tonistiigi/fifo fe870ccf293940774c2b44e23f6c71fff8f7547d
|
||||
github.com/containerd/fifo 1c36a62ed52ac0235d524d6371b746db4e4eef72
|
||||
github.com/urfave/cli 8ba6f23b6e36d03666a14bd9421f5e3efcb59aca
|
||||
golang.org/x/net 8b4af36cd21a1f85a7484b49feb7c79363106d8e
|
||||
google.golang.org/grpc v1.0.5
|
||||
github.com/pkg/errors v0.8.0
|
||||
github.com/nightlyone/lockfile 1d49c987357a327b5b03aa84cbddd582c328615d
|
||||
github.com/opencontainers/go-digest 21dfd564fd89c944783d00d069f33e3e7123c448
|
||||
golang.org/x/sys/unix f3918c30c5c2cb527c0b071a27c35120a6c0719a
|
||||
golang.org/x/sys f3918c30c5c2cb527c0b071a27c35120a6c0719a
|
||||
github.com/opencontainers/image-spec a431dbcf6a74fca2e0e040b819a836dbe3fb23ca
|
||||
github.com/stevvooe/continuity 577e137350afb00343495f55bb8671fe7e22b0bf
|
||||
github.com/containerd/continuity 6414d06cab9e2fe082ea29ff42aab627e740d00c
|
||||
golang.org/x/sync 450f422ab23cf9881c94e2db30cac0eb1b7cf80c
|
||||
github.com/BurntSushi/toml v0.2.0-21-g9906417
|
||||
github.com/grpc-ecosystem/go-grpc-prometheus 6b7015e65d366bf3f19b2b2a000a831940f0f7e0
|
||||
github.com/Microsoft/go-winio fff283ad5116362ca252298cfc9b95828956d85d
|
||||
github.com/boltdb/bolt e9cf4fae01b5a8ff89d0ec6b32f0d9c9f79aefdd
|
||||
github.com/Microsoft/hcsshim v0.5.15
|
||||
github.com/Azure/go-ansiterm/winterm fa152c58bc15761d0200cb75fe958b89a9d4888e
|
||||
github.com/Azure/go-ansiterm fa152c58bc15761d0200cb75fe958b89a9d4888e
|
||||
|
vendor/github.com/stretchr/testify/assert/assertion_forward.go (generated, vendored, 49 changes)
@ -6,16 +6,19 @@
|
||||
package assert
|
||||
|
||||
import (
|
||||
|
||||
http "net/http"
|
||||
url "net/url"
|
||||
time "time"
|
||||
)
|
||||
|
||||
|
||||
// Condition uses a Comparison to assert a complex condition.
|
||||
func (a *Assertions) Condition(comp Comparison, msgAndArgs ...interface{}) bool {
|
||||
return Condition(a.t, comp, msgAndArgs...)
|
||||
}
|
||||
|
||||
|
||||
// Contains asserts that the specified string, list(array, slice...) or map contains the
|
||||
// specified substring or element.
|
||||
//
|
||||
@ -28,6 +31,7 @@ func (a *Assertions) Contains(s interface{}, contains interface{}, msgAndArgs ..
|
||||
return Contains(a.t, s, contains, msgAndArgs...)
|
||||
}
|
||||
|
||||
|
||||
// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either
|
||||
// a slice or a channel with len == 0.
|
||||
//
|
||||
@ -38,6 +42,7 @@ func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) bool {
|
||||
return Empty(a.t, object, msgAndArgs...)
|
||||
}
|
||||
|
||||
|
||||
// Equal asserts that two objects are equal.
|
||||
//
|
||||
// a.Equal(123, 123, "123 and 123 should be equal")
|
||||
@ -47,17 +52,21 @@ func (a *Assertions) Equal(expected interface{}, actual interface{}, msgAndArgs
|
||||
return Equal(a.t, expected, actual, msgAndArgs...)
|
||||
}
|
||||
|
||||
|
||||
// EqualError asserts that a function returned an error (i.e. not `nil`)
|
||||
// and that it is equal to the provided error.
|
||||
//
|
||||
// actualObj, err := SomeFunction()
|
||||
// a.EqualError(err, expectedErrorString, "An error was expected")
|
||||
// if assert.Error(t, err, "An error was expected") {
|
||||
// assert.Equal(t, err, expectedError)
|
||||
// }
|
||||
//
|
||||
// Returns whether the assertion was successful (true) or not (false).
|
||||
func (a *Assertions) EqualError(theError error, errString string, msgAndArgs ...interface{}) bool {
|
||||
return EqualError(a.t, theError, errString, msgAndArgs...)
|
||||
}
|
||||
|
||||
|
||||
// EqualValues asserts that two objects are equal or convertable to the same types
|
||||
// and equal.
|
||||
//
|
||||
@ -68,6 +77,7 @@ func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAn
|
||||
return EqualValues(a.t, expected, actual, msgAndArgs...)
|
||||
}
|
||||
|
||||
|
||||
// Error asserts that a function returned an error (i.e. not `nil`).
|
||||
//
|
||||
// actualObj, err := SomeFunction()
|
||||
@ -80,6 +90,7 @@ func (a *Assertions) Error(err error, msgAndArgs ...interface{}) bool {
|
||||
return Error(a.t, err, msgAndArgs...)
|
||||
}
|
||||
|
||||
|
||||
// Exactly asserts that two objects are equal is value and type.
|
||||
//
|
||||
// a.Exactly(int32(123), int64(123), "123 and 123 should NOT be equal")
|
||||
@ -89,16 +100,19 @@ func (a *Assertions) Exactly(expected interface{}, actual interface{}, msgAndArg
|
||||
return Exactly(a.t, expected, actual, msgAndArgs...)
|
||||
}
|
||||
|
||||
|
||||
// Fail reports a failure through
|
||||
func (a *Assertions) Fail(failureMessage string, msgAndArgs ...interface{}) bool {
|
||||
return Fail(a.t, failureMessage, msgAndArgs...)
|
||||
}
|
||||
|
||||
|
||||
// FailNow fails test
|
||||
func (a *Assertions) FailNow(failureMessage string, msgAndArgs ...interface{}) bool {
|
||||
return FailNow(a.t, failureMessage, msgAndArgs...)
|
||||
}
|
||||
|
||||
|
||||
// False asserts that the specified value is false.
|
||||
//
|
||||
// a.False(myBool, "myBool should be false")
|
||||
@ -108,6 +122,7 @@ func (a *Assertions) False(value bool, msgAndArgs ...interface{}) bool {
|
||||
return False(a.t, value, msgAndArgs...)
|
||||
}
|
||||
|
||||
|
||||
// HTTPBodyContains asserts that a specified handler returns a
|
||||
// body that contains a string.
|
||||
//
|
||||
@ -118,6 +133,7 @@ func (a *Assertions) HTTPBodyContains(handler http.HandlerFunc, method string, u
|
||||
return HTTPBodyContains(a.t, handler, method, url, values, str)
|
||||
}
|
||||
|
||||
|
||||
// HTTPBodyNotContains asserts that a specified handler returns a
|
||||
// body that does not contain a string.
|
||||
//
|
||||
@ -128,6 +144,7 @@ func (a *Assertions) HTTPBodyNotContains(handler http.HandlerFunc, method string
|
||||
return HTTPBodyNotContains(a.t, handler, method, url, values, str)
|
||||
}
|
||||
|
||||
|
||||
// HTTPError asserts that a specified handler returns an error status code.
|
||||
//
|
||||
// a.HTTPError(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}}
|
||||
@ -137,6 +154,7 @@ func (a *Assertions) HTTPError(handler http.HandlerFunc, method string, url stri
|
||||
return HTTPError(a.t, handler, method, url, values)
|
||||
}
|
||||
|
||||
|
||||
// HTTPRedirect asserts that a specified handler returns a redirect status code.
|
||||
//
|
||||
// a.HTTPRedirect(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}}
|
||||
@ -146,6 +164,7 @@ func (a *Assertions) HTTPRedirect(handler http.HandlerFunc, method string, url s
|
||||
return HTTPRedirect(a.t, handler, method, url, values)
|
||||
}
|
||||
|
||||
|
||||
// HTTPSuccess asserts that a specified handler returns a success status code.
|
||||
//
|
||||
// a.HTTPSuccess(myHandler, "POST", "http://www.google.com", nil)
|
||||
@ -155,6 +174,7 @@ func (a *Assertions) HTTPSuccess(handler http.HandlerFunc, method string, url st
|
||||
return HTTPSuccess(a.t, handler, method, url, values)
|
||||
}
|
||||
|
||||
|
||||
// Implements asserts that an object is implemented by the specified interface.
|
||||
//
|
||||
// a.Implements((*MyInterface)(nil), new(MyObject), "MyObject")
|
||||
@ -162,6 +182,7 @@ func (a *Assertions) Implements(interfaceObject interface{}, object interface{},
|
||||
return Implements(a.t, interfaceObject, object, msgAndArgs...)
|
||||
}
|
||||
|
||||
|
||||
// InDelta asserts that the two numerals are within delta of each other.
|
||||
//
|
||||
// a.InDelta(math.Pi, (22 / 7.0), 0.01)
|
||||
@ -171,11 +192,13 @@ func (a *Assertions) InDelta(expected interface{}, actual interface{}, delta flo
|
||||
return InDelta(a.t, expected, actual, delta, msgAndArgs...)
|
||||
}
|
||||
|
||||
|
||||
// InDeltaSlice is the same as InDelta, except it compares two slices.
|
||||
func (a *Assertions) InDeltaSlice(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool {
|
||||
return InDeltaSlice(a.t, expected, actual, delta, msgAndArgs...)
|
||||
}
|
||||
|
||||
|
||||
// InEpsilon asserts that expected and actual have a relative error less than epsilon
|
||||
//
|
||||
// Returns whether the assertion was successful (true) or not (false).
|
||||
@ -183,16 +206,19 @@ func (a *Assertions) InEpsilon(expected interface{}, actual interface{}, epsilon
|
||||
return InEpsilon(a.t, expected, actual, epsilon, msgAndArgs...)
|
||||
}
|
||||
|
||||
// InEpsilonSlice is the same as InEpsilon, except it compares each value from two slices.
|
||||
func (a *Assertions) InEpsilonSlice(expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool {
|
||||
return InEpsilonSlice(a.t, expected, actual, epsilon, msgAndArgs...)
|
||||
|
||||
// InEpsilonSlice is the same as InEpsilon, except it compares two slices.
|
||||
func (a *Assertions) InEpsilonSlice(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool {
|
||||
return InEpsilonSlice(a.t, expected, actual, delta, msgAndArgs...)
|
||||
}
|
||||
|
||||
|
||||
// IsType asserts that the specified objects are of the same type.
|
||||
func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool {
|
||||
return IsType(a.t, expectedType, object, msgAndArgs...)
|
||||
}
|
||||
|
||||
|
||||
// JSONEq asserts that two JSON strings are equivalent.
|
||||
//
|
||||
// a.JSONEq(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`)
|
||||
@ -202,6 +228,7 @@ func (a *Assertions) JSONEq(expected string, actual string, msgAndArgs ...interf
|
||||
return JSONEq(a.t, expected, actual, msgAndArgs...)
|
||||
}
|
||||
|
||||
|
||||
// Len asserts that the specified object has specific length.
|
||||
// Len also fails if the object has a type that len() not accept.
|
||||
//
|
||||
@ -212,6 +239,7 @@ func (a *Assertions) Len(object interface{}, length int, msgAndArgs ...interface
|
||||
return Len(a.t, object, length, msgAndArgs...)
|
||||
}
|
||||
|
||||
|
||||
// Nil asserts that the specified object is nil.
|
||||
//
|
||||
// a.Nil(err, "err should be nothing")
|
||||
@ -221,6 +249,7 @@ func (a *Assertions) Nil(object interface{}, msgAndArgs ...interface{}) bool {
|
||||
return Nil(a.t, object, msgAndArgs...)
|
||||
}
|
||||
|
||||
|
||||
// NoError asserts that a function returned no error (i.e. `nil`).
|
||||
//
|
||||
// actualObj, err := SomeFunction()
|
||||
@ -233,6 +262,7 @@ func (a *Assertions) NoError(err error, msgAndArgs ...interface{}) bool {
|
||||
return NoError(a.t, err, msgAndArgs...)
|
||||
}
|
||||
|
||||
|
||||
// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the
|
||||
// specified substring or element.
|
||||
//
|
||||
@ -245,6 +275,7 @@ func (a *Assertions) NotContains(s interface{}, contains interface{}, msgAndArgs
|
||||
return NotContains(a.t, s, contains, msgAndArgs...)
|
||||
}
|
||||
|
||||
|
||||
// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either
|
||||
// a slice or a channel with len == 0.
|
||||
//
|
||||
@ -257,6 +288,7 @@ func (a *Assertions) NotEmpty(object interface{}, msgAndArgs ...interface{}) boo
|
||||
return NotEmpty(a.t, object, msgAndArgs...)
|
||||
}
|
||||
|
||||
|
||||
// NotEqual asserts that the specified values are NOT equal.
|
||||
//
|
||||
// a.NotEqual(obj1, obj2, "two objects shouldn't be equal")
|
||||
@ -266,6 +298,7 @@ func (a *Assertions) NotEqual(expected interface{}, actual interface{}, msgAndAr
|
||||
return NotEqual(a.t, expected, actual, msgAndArgs...)
|
||||
}
|
||||
|
||||
|
||||
// NotNil asserts that the specified object is not nil.
|
||||
//
|
||||
// a.NotNil(err, "err should be something")
|
||||
@ -275,6 +308,7 @@ func (a *Assertions) NotNil(object interface{}, msgAndArgs ...interface{}) bool
|
||||
return NotNil(a.t, object, msgAndArgs...)
|
||||
}
|
||||
|
||||
|
||||
// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic.
|
||||
//
|
||||
// a.NotPanics(func(){
|
||||
@ -286,6 +320,7 @@ func (a *Assertions) NotPanics(f PanicTestFunc, msgAndArgs ...interface{}) bool
|
||||
return NotPanics(a.t, f, msgAndArgs...)
|
||||
}
|
||||
|
||||
|
||||
// NotRegexp asserts that a specified regexp does not match a string.
|
||||
//
|
||||
// a.NotRegexp(regexp.MustCompile("starts"), "it's starting")
|
||||
@ -296,11 +331,13 @@ func (a *Assertions) NotRegexp(rx interface{}, str interface{}, msgAndArgs ...in
|
||||
return NotRegexp(a.t, rx, str, msgAndArgs...)
|
||||
}
|
||||
|
||||
|
||||
// NotZero asserts that i is not the zero value for its type and returns the truth.
|
||||
func (a *Assertions) NotZero(i interface{}, msgAndArgs ...interface{}) bool {
|
||||
return NotZero(a.t, i, msgAndArgs...)
|
||||
}
|
||||
|
||||
|
||||
// Panics asserts that the code inside the specified PanicTestFunc panics.
|
||||
//
|
||||
// a.Panics(func(){
|
||||
@ -312,6 +349,7 @@ func (a *Assertions) Panics(f PanicTestFunc, msgAndArgs ...interface{}) bool {
|
||||
return Panics(a.t, f, msgAndArgs...)
|
||||
}
|
||||
|
||||
|
||||
// Regexp asserts that a specified regexp matches a string.
|
||||
//
|
||||
// a.Regexp(regexp.MustCompile("start"), "it's starting")
|
||||
@ -322,6 +360,7 @@ func (a *Assertions) Regexp(rx interface{}, str interface{}, msgAndArgs ...inter
|
||||
return Regexp(a.t, rx, str, msgAndArgs...)
|
||||
}
|
||||
|
||||
|
||||
// True asserts that the specified value is true.
|
||||
//
|
||||
// a.True(myBool, "myBool should be true")
|
||||
@ -331,6 +370,7 @@ func (a *Assertions) True(value bool, msgAndArgs ...interface{}) bool {
|
||||
return True(a.t, value, msgAndArgs...)
|
||||
}
|
||||
|
||||
|
||||
// WithinDuration asserts that the two times are within duration delta of each other.
|
||||
//
|
||||
// a.WithinDuration(time.Now(), time.Now(), 10*time.Second, "The difference should not be more than 10s")
|
||||
@ -340,6 +380,7 @@ func (a *Assertions) WithinDuration(expected time.Time, actual time.Time, delta
|
||||
return WithinDuration(a.t, expected, actual, delta, msgAndArgs...)
|
||||
}
|
||||
|
||||
|
||||
// Zero asserts that i is the zero value for its type and returns the truth.
|
||||
func (a *Assertions) Zero(i interface{}, msgAndArgs ...interface{}) bool {
|
||||
return Zero(a.t, i, msgAndArgs...)
|
||||
|
vendor/github.com/stretchr/testify/assert/assertions.go (generated, vendored, 45 changes)
@ -18,6 +18,10 @@ import (
|
||||
"github.com/pmezard/go-difflib/difflib"
|
||||
)
|
||||
|
||||
func init() {
|
||||
spew.Config.SortKeys = true
|
||||
}
|
||||
|
||||
// TestingT is an interface wrapper around *testing.T
|
||||
type TestingT interface {
|
||||
Errorf(format string, args ...interface{})
|
||||
@ -275,9 +279,8 @@ func Equal(t TestingT, expected, actual interface{}, msgAndArgs ...interface{})
|
||||
if !ObjectsAreEqual(expected, actual) {
|
||||
diff := diff(expected, actual)
|
||||
expected, actual = formatUnequalValues(expected, actual)
|
||||
return Fail(t, fmt.Sprintf("Not equal: \n"+
|
||||
"expected: %s\n"+
|
||||
"received: %s%s", expected, actual, diff), msgAndArgs...)
|
||||
return Fail(t, fmt.Sprintf("Not equal: %s (expected)\n"+
|
||||
" != %s (actual)%s", expected, actual, diff), msgAndArgs...)
|
||||
}
|
||||
|
||||
return true
|
||||
@ -325,11 +328,8 @@ func isNumericType(t reflect.Type) bool {
|
||||
func EqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
|
||||
|
||||
if !ObjectsAreEqualValues(expected, actual) {
|
||||
diff := diff(expected, actual)
|
||||
expected, actual = formatUnequalValues(expected, actual)
|
||||
return Fail(t, fmt.Sprintf("Not equal: \n"+
|
||||
"expected: %s\n"+
|
||||
"received: %s%s", expected, actual, diff), msgAndArgs...)
|
||||
return Fail(t, fmt.Sprintf("Not equal: %#v (expected)\n"+
|
||||
" != %#v (actual)", expected, actual), msgAndArgs...)
|
||||
}
|
||||
|
||||
return true
|
||||
@ -882,7 +882,7 @@ func InEpsilonSlice(t TestingT, expected, actual interface{}, epsilon float64, m
|
||||
// Returns whether the assertion was successful (true) or not (false).
|
||||
func NoError(t TestingT, err error, msgAndArgs ...interface{}) bool {
|
||||
if err != nil {
|
||||
return Fail(t, fmt.Sprintf("Received unexpected error:\n%+v", err), msgAndArgs...)
|
||||
return Fail(t, fmt.Sprintf("Received unexpected error %+v", err), msgAndArgs...)
|
||||
}
|
||||
|
||||
return true
|
||||
@ -913,18 +913,14 @@ func Error(t TestingT, err error, msgAndArgs ...interface{}) bool {
|
||||
//
|
||||
// Returns whether the assertion was successful (true) or not (false).
|
||||
func EqualError(t TestingT, theError error, errString string, msgAndArgs ...interface{}) bool {
|
||||
if !Error(t, theError, msgAndArgs...) {
|
||||
|
||||
message := messageFromMsgAndArgs(msgAndArgs...)
|
||||
if !NotNil(t, theError, "An error is expected but got nil. %s", message) {
|
||||
return false
|
||||
}
|
||||
expected := errString
|
||||
actual := theError.Error()
|
||||
// don't need to use deep equals here, we know they are both strings
|
||||
if expected != actual {
|
||||
return Fail(t, fmt.Sprintf("Error message not equal:\n"+
|
||||
"expected: %q\n"+
|
||||
"received: %q", expected, actual), msgAndArgs...)
|
||||
}
|
||||
return true
|
||||
s := "An error with value \"%s\" is expected but got \"%s\". %s"
|
||||
return Equal(t, errString, theError.Error(),
|
||||
s, errString, theError.Error(), message)
|
||||
}
|
||||
|
||||
// matchRegexp return true if a specified regexp matches a string.
|
||||
@ -1039,8 +1035,8 @@ func diff(expected interface{}, actual interface{}) string {
|
||||
return ""
|
||||
}
|
||||
|
||||
e := spewConfig.Sdump(expected)
|
||||
a := spewConfig.Sdump(actual)
|
||||
e := spew.Sdump(expected)
|
||||
a := spew.Sdump(actual)
|
||||
|
||||
diff, _ := difflib.GetUnifiedDiffString(difflib.UnifiedDiff{
|
||||
A: difflib.SplitLines(e),
|
||||
@ -1054,10 +1050,3 @@ func diff(expected interface{}, actual interface{}) string {
|
||||
|
||||
return "\n\nDiff:\n" + diff
|
||||
}
|
||||
|
||||
var spewConfig = spew.ConfigState{
|
||||
Indent: " ",
|
||||
DisablePointerAddresses: true,
|
||||
DisableCapacities: true,
|
||||
SortKeys: true,
|
||||
}
|
||||
|
vendor/github.com/stretchr/testify/require/require.go (generated, vendored, 49 changes)
@ -6,12 +6,14 @@
|
||||
package require
|
||||
|
||||
import (
|
||||
|
||||
assert "github.com/stretchr/testify/assert"
|
||||
http "net/http"
|
||||
url "net/url"
|
||||
time "time"
|
||||
)
|
||||
|
||||
|
||||
// Condition uses a Comparison to assert a complex condition.
|
||||
func Condition(t TestingT, comp assert.Comparison, msgAndArgs ...interface{}) {
|
||||
if !assert.Condition(t, comp, msgAndArgs...) {
|
||||
@ -19,6 +21,7 @@ func Condition(t TestingT, comp assert.Comparison, msgAndArgs ...interface{}) {
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
// Contains asserts that the specified string, list(array, slice...) or map contains the
|
||||
// specified substring or element.
|
||||
//
|
||||
@ -33,6 +36,7 @@ func Contains(t TestingT, s interface{}, contains interface{}, msgAndArgs ...int
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either
|
||||
// a slice or a channel with len == 0.
|
||||
//
|
||||
@ -45,6 +49,7 @@ func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) {
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
// Equal asserts that two objects are equal.
|
||||
//
|
||||
// assert.Equal(t, 123, 123, "123 and 123 should be equal")
|
||||
@ -56,11 +61,14 @@ func Equal(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...i
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
// EqualError asserts that a function returned an error (i.e. not `nil`)
|
||||
// and that it is equal to the provided error.
|
||||
//
|
||||
// actualObj, err := SomeFunction()
|
||||
// assert.EqualError(t, err, expectedErrorString, "An error was expected")
|
||||
// if assert.Error(t, err, "An error was expected") {
|
||||
// assert.Equal(t, err, expectedError)
|
||||
// }
|
||||
//
|
||||
// Returns whether the assertion was successful (true) or not (false).
|
||||
func EqualError(t TestingT, theError error, errString string, msgAndArgs ...interface{}) {
|
||||
@@ -69,6 +77,7 @@ func EqualError(...)
@@ -81,6 +90,7 @@ func EqualValues(...)
@@ -95,6 +105,7 @@ func Error(...)
@@ -106,6 +117,7 @@ func Exactly(...)
@@ -113,6 +125,7 @@ func Fail(...)
@@ -120,6 +133,7 @@ func FailNow(...)
@@ -131,6 +145,7 @@ func False(...)
@@ -143,6 +158,7 @@ func HTTPBodyContains(...)
@@ -155,6 +171,7 @@ func HTTPBodyNotContains(...)
@@ -166,6 +183,7 @@ func HTTPError(...)
@@ -177,6 +195,7 @@ func HTTPRedirect(...)
@@ -188,6 +207,7 @@ func HTTPSuccess(...)
@@ -197,6 +217,7 @@ func Implements(...)
@@ -208,6 +229,7 @@ func InDelta(...)
@@ -215,6 +237,7 @@ func InDeltaSlice(...)
@@ -224,13 +247,15 @@ func InEpsilon(t TestingT, expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) {
 	}
 }
 
-// InEpsilonSlice is the same as InEpsilon, except it compares each value from two slices.
-func InEpsilonSlice(t TestingT, expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) {
-	if !assert.InEpsilonSlice(t, expected, actual, epsilon, msgAndArgs...) {
+// InEpsilonSlice is the same as InEpsilon, except it compares two slices.
+func InEpsilonSlice(t TestingT, expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) {
+	if !assert.InEpsilonSlice(t, expected, actual, delta, msgAndArgs...) {
 		t.FailNow()
 	}
 }
@@ -238,6 +263,7 @@ func IsType(...)
@@ -249,6 +275,7 @@ func JSONEq(...)
@@ -261,6 +288,7 @@ func Len(...)
@@ -272,6 +300,7 @@ func Nil(...)
@@ -286,6 +315,7 @@ func NoError(...)
@@ -300,6 +330,7 @@ func NotContains(...)
@@ -314,6 +345,7 @@ func NotEmpty(...)
@@ -325,6 +357,7 @@ func NotEqual(...)
@@ -336,6 +369,7 @@ func NotNil(...)
@@ -349,6 +383,7 @@ func NotPanics(...)
@@ -361,6 +396,7 @@ func NotRegexp(...)
@@ -368,6 +404,7 @@ func NotZero(...)
@@ -381,6 +418,7 @@ func Panics(...)
@@ -393,6 +431,7 @@ func Regexp(...)
@@ -404,6 +443,7 @@ func True(...)
@@ -415,6 +455,7 @@ func WithinDuration(...) / func Zero(...)
49  vendor/github.com/stretchr/testify/require/require_forward.go  generated  vendored

@@ -6,17 +6,20 @@
 package require
 
 import (
 	assert "github.com/stretchr/testify/assert"
 	http "net/http"
 	url "net/url"
 	time "time"
 )
 
 // Condition uses a Comparison to assert a complex condition.
 func (a *Assertions) Condition(comp assert.Comparison, msgAndArgs ...interface{}) {
 	Condition(a.t, comp, msgAndArgs...)
 }
@@ -29,6 +32,7 @@ func (a *Assertions) Contains(...)
@@ -39,6 +43,7 @@ func (a *Assertions) Empty(...)
@@ -48,17 +53,21 @@ func (a *Assertions) Equal(expected interface{}, actual interface{}, msgAndArgs ...interface{}) {
 	Equal(a.t, expected, actual, msgAndArgs...)
 }
 
 // EqualError asserts that a function returned an error (i.e. not `nil`)
 // and that it is equal to the provided error.
 //
 //   actualObj, err := SomeFunction()
-//   a.EqualError(err, expectedErrorString, "An error was expected")
+//   if assert.Error(t, err, "An error was expected") {
+//	   assert.Equal(t, err, expectedError)
+//   }
 //
 // Returns whether the assertion was successful (true) or not (false).
 func (a *Assertions) EqualError(theError error, errString string, msgAndArgs ...interface{}) {
 	EqualError(a.t, theError, errString, msgAndArgs...)
 }
@@ -69,6 +78,7 @@ func (a *Assertions) EqualValues(...)
@@ -81,6 +91,7 @@ func (a *Assertions) Error(...)
@@ -90,16 +101,19 @@ func (a *Assertions) Exactly(...) / Fail(...) / FailNow(...)
@@ -109,6 +123,7 @@ func (a *Assertions) False(...)
@@ -119,6 +134,7 @@ func (a *Assertions) HTTPBodyContains(...)
@@ -129,6 +145,7 @@ func (a *Assertions) HTTPBodyNotContains(...)
@@ -138,6 +155,7 @@ func (a *Assertions) HTTPError(...)
@@ -147,6 +165,7 @@ func (a *Assertions) HTTPRedirect(...)
@@ -156,6 +175,7 @@ func (a *Assertions) HTTPSuccess(...)
@@ -163,6 +183,7 @@ func (a *Assertions) Implements(...)
@@ -172,11 +193,13 @@ func (a *Assertions) InDelta(...) / InDeltaSlice(...)
@@ -184,16 +207,19 @@ func (a *Assertions) InEpsilon(expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) {
 	InEpsilon(a.t, expected, actual, epsilon, msgAndArgs...)
 }
 
-// InEpsilonSlice is the same as InEpsilon, except it compares each value from two slices.
-func (a *Assertions) InEpsilonSlice(expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) {
-	InEpsilonSlice(a.t, expected, actual, epsilon, msgAndArgs...)
+// InEpsilonSlice is the same as InEpsilon, except it compares two slices.
+func (a *Assertions) InEpsilonSlice(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) {
+	InEpsilonSlice(a.t, expected, actual, delta, msgAndArgs...)
 }
 
 // IsType asserts that the specified objects are of the same type.
 func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAndArgs ...interface{}) {
 	IsType(a.t, expectedType, object, msgAndArgs...)
 }
@@ -203,6 +229,7 @@ func (a *Assertions) JSONEq(...)
@@ -213,6 +240,7 @@ func (a *Assertions) Len(...)
@@ -222,6 +250,7 @@ func (a *Assertions) Nil(...)
@@ -234,6 +263,7 @@ func (a *Assertions) NoError(...)
@@ -246,6 +276,7 @@ func (a *Assertions) NotContains(...)
@@ -258,6 +289,7 @@ func (a *Assertions) NotEmpty(...)
@@ -267,6 +299,7 @@ func (a *Assertions) NotEqual(...)
@@ -276,6 +309,7 @@ func (a *Assertions) NotNil(...)
@@ -287,6 +321,7 @@ func (a *Assertions) NotPanics(...)
@@ -297,11 +332,13 @@ func (a *Assertions) NotRegexp(...) / NotZero(...)
@@ -313,6 +350,7 @@ func (a *Assertions) Panics(...)
@@ -323,6 +361,7 @@ func (a *Assertions) Regexp(...)
@@ -332,6 +371,7 @@ func (a *Assertions) True(...)
@@ -341,6 +381,7 @@ func (a *Assertions) WithinDuration(...) / Zero(...)
12  vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go  generated  vendored

@@ -27,7 +27,17 @@ func Do(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) {
 	if client == nil {
 		client = http.DefaultClient
 	}
-	return client.Do(req.WithContext(ctx))
+	resp, err := client.Do(req.WithContext(ctx))
+	// If we got an error, and the context has been canceled,
+	// the context's error is probably more useful.
+	if err != nil {
+		select {
+		case <-ctx.Done():
+			err = ctx.Err()
+		default:
+		}
+	}
+	return resp, err
 }
 
 // Get issues a GET request via the Do function.
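The ctxhttp hunk above stops returning the raw transport error when the context has already been canceled and reports the context's error instead. A minimal stdlib-only sketch of the same pattern; the helper name doWithCtxErr is ours, not part of the vendored package:

package main

import (
	"context"
	"fmt"
	"net/http"
	"time"
)

// doWithCtxErr mirrors the vendored ctxhttp.Do change: when the request fails
// and the context is already done, report ctx.Err() instead of the lower-level
// transport error, which is usually less informative.
func doWithCtxErr(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) {
	resp, err := client.Do(req.WithContext(ctx))
	if err != nil {
		select {
		case <-ctx.Done():
			err = ctx.Err() // context.Canceled or context.DeadlineExceeded
		default:
		}
	}
	return resp, err
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Nanosecond)
	defer cancel()
	req, _ := http.NewRequest("GET", "http://example.com", nil)
	_, err := doWithCtxErr(ctx, http.DefaultClient, req)
	fmt.Println(err) // prints a context deadline exceeded error
}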
9  vendor/golang.org/x/net/http2/client_conn_pool.go  generated  vendored

@@ -53,13 +53,13 @@ const (
 )
 
 func (p *clientConnPool) getClientConn(req *http.Request, addr string, dialOnMiss bool) (*ClientConn, error) {
-	if req.Close && dialOnMiss {
+	if isConnectionCloseRequest(req) && dialOnMiss {
 		// It gets its own connection.
-		cc, err := p.t.dialClientConn(addr)
+		const singleUse = true
+		cc, err := p.t.dialClientConn(addr, singleUse)
 		if err != nil {
 			return nil, err
 		}
-		cc.singleUse = true
 		return cc, nil
 	}
 	p.mu.Lock()
@@ -104,7 +104,8 @@ func (p *clientConnPool) getStartDialLocked(addr string) *dialCall {
 
 // run in its own goroutine.
 func (c *dialCall) dial(addr string) {
-	c.res, c.err = c.p.t.dialClientConn(addr)
+	const singleUse = false // shared conn
+	c.res, c.err = c.p.t.dialClientConn(addr, singleUse)
 	close(c.done)
 
 	c.p.mu.Lock()
8  vendor/golang.org/x/net/http2/errors.go  generated  vendored

@@ -64,9 +64,17 @@ func (e ConnectionError) Error() string { return fmt.Sprintf("connection error:
 type StreamError struct {
 	StreamID uint32
 	Code     ErrCode
+	Cause    error // optional additional detail
 }
 
+func streamError(id uint32, code ErrCode) StreamError {
+	return StreamError{StreamID: id, Code: code}
+}
+
 func (e StreamError) Error() string {
+	if e.Cause != nil {
+		return fmt.Sprintf("stream error: stream ID %d; %v; %v", e.StreamID, e.Code, e.Cause)
+	}
 	return fmt.Sprintf("stream error: stream ID %d; %v", e.StreamID, e.Code)
 }

46  vendor/golang.org/x/net/http2/frame.go  generated  vendored

@@ -594,6 +594,7 @@ func parseDataFrame(fh FrameHeader, payload []byte) (Frame, error) {
 var (
 	errStreamID    = errors.New("invalid stream ID")
 	errDepStreamID = errors.New("invalid dependent stream ID")
+	errPadLength   = errors.New("pad length too large")
 )
@@ -607,18 +608,40 @@ func validStreamID(streamID uint32) bool {
 // WriteData writes a DATA frame.
 //
 // It will perform exactly one Write to the underlying Writer.
-// It is the caller's responsibility to not call other Write methods concurrently.
+// It is the caller's responsibility not to violate the maximum frame size
+// and to not call other Write methods concurrently.
 func (f *Framer) WriteData(streamID uint32, endStream bool, data []byte) error {
-	// TODO: ignoring padding for now. will add when somebody cares.
+	return f.WriteDataPadded(streamID, endStream, data, nil)
+}
+
+// WriteData writes a DATA frame with optional padding.
+//
+// If pad is nil, the padding bit is not sent.
+// The length of pad must not exceed 255 bytes.
+//
+// It will perform exactly one Write to the underlying Writer.
+// It is the caller's responsibility not to violate the maximum frame size
+// and to not call other Write methods concurrently.
+func (f *Framer) WriteDataPadded(streamID uint32, endStream bool, data, pad []byte) error {
 	if !validStreamID(streamID) && !f.AllowIllegalWrites {
 		return errStreamID
 	}
+	if len(pad) > 255 {
+		return errPadLength
+	}
 	var flags Flags
 	if endStream {
 		flags |= FlagDataEndStream
 	}
+	if pad != nil {
+		flags |= FlagDataPadded
+	}
 	f.startWrite(FrameData, flags, streamID)
+	if pad != nil {
+		f.wbuf = append(f.wbuf, byte(len(pad)))
+	}
 	f.wbuf = append(f.wbuf, data...)
+	f.wbuf = append(f.wbuf, pad...)
 	return f.endWrite()
 }
@@ -714,7 +737,7 @@ func (f *Framer) WriteSettings(settings ...Setting) error {
-// WriteSettings writes an empty SETTINGS frame with the ACK bit set.
+// WriteSettingsAck writes an empty SETTINGS frame with the ACK bit set.
 //
 // It will perform exactly one Write to the underlying Writer.
 // It is the caller's responsibility to not call other Write methods concurrently.
@@ -840,7 +863,7 @@ func parseWindowUpdateFrame(fh FrameHeader, p []byte) (Frame, error) {
 	if fh.StreamID == 0 {
 		return nil, ConnectionError(ErrCodeProtocol)
 	}
-	return nil, StreamError{fh.StreamID, ErrCodeProtocol}
+	return nil, streamError(fh.StreamID, ErrCodeProtocol)
@@ -921,7 +944,7 @@ func parseHeadersFrame(fh FrameHeader, p []byte) (_ Frame, err error) {
 	if len(p)-int(padLength) <= 0 {
-		return nil, StreamError{fh.StreamID, ErrCodeProtocol}
+		return nil, streamError(fh.StreamID, ErrCodeProtocol)
 	}
 	hf.headerFragBuf = p[:len(p)-int(padLength)]
@@ -1396,6 +1419,9 @@ func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) {
 	hdec.SetEmitFunc(func(hf hpack.HeaderField) {
+		if VerboseLogs && logFrameReads {
+			log.Printf("http2: decoded hpack field %+v", hf)
+		}
 		if !httplex.ValidHeaderFieldValue(hf.Value) {
 			invalid = headerFieldValueError(hf.Value)
 		}
@@ -1454,11 +1480,17 @@
 	if invalid != nil {
 		fr.errDetail = invalid
-		return nil, StreamError{mh.StreamID, ErrCodeProtocol}
+		if VerboseLogs {
+			log.Printf("http2: invalid header: %v", invalid)
+		}
+		return nil, StreamError{mh.StreamID, ErrCodeProtocol, invalid}
 	}
 	if err := mh.checkPseudos(); err != nil {
 		fr.errDetail = err
-		return nil, StreamError{mh.StreamID, ErrCodeProtocol}
+		if VerboseLogs {
+			log.Printf("http2: invalid pseudo headers: %v", err)
+		}
+		return nil, StreamError{mh.StreamID, ErrCodeProtocol, err}
 	}
 	return mh, nil
 }

12  vendor/golang.org/x/net/http2/go17.go  generated  vendored

@@ -39,6 +39,13 @@ type clientTrace httptrace.ClientTrace
 func reqContext(r *http.Request) context.Context { return r.Context() }
 
+func (t *Transport) idleConnTimeout() time.Duration {
+	if t.t1 != nil {
+		return t.t1.IdleConnTimeout
+	}
+	return 0
+}
+
 func setResponseUncompressed(res *http.Response) { res.Uncompressed = true }
 
 func traceGotConn(req *http.Request, cc *ClientConn) {
@@ -92,3 +99,8 @@ func requestTrace(req *http.Request) *clientTrace {
 	trace := httptrace.ContextClientTrace(req.Context())
 	return (*clientTrace)(trace)
 }
+
+// Ping sends a PING frame to the server and waits for the ack.
+func (cc *ClientConn) Ping(ctx context.Context) error {
+	return cc.ping(ctx)
+}
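The WriteDataPadded hunk in frame.go above appends an optional pad-length octet, the data, and then the padding bytes to the frame buffer. A standalone sketch of that byte layout, under the assumption that only the payload construction is of interest; buildDataPayload is our name and is not the vendored Framer API:

package main

import (
	"errors"
	"fmt"
)

// buildDataPayload mirrors the appends in WriteDataPadded: an optional
// pad-length byte (when the PADDED flag is set), the data, then the padding.
func buildDataPayload(data, pad []byte) ([]byte, error) {
	if len(pad) > 255 {
		return nil, errors.New("pad length too large") // errPadLength in the vendored code
	}
	var buf []byte
	if pad != nil {
		buf = append(buf, byte(len(pad))) // leading length octet
	}
	buf = append(buf, data...)
	buf = append(buf, pad...)
	return buf, nil
}

func main() {
	payload, _ := buildDataPayload([]byte("hello"), make([]byte, 3))
	fmt.Printf("% x\n", payload) // 03 68 65 6c 6c 6f 00 00 00
}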
36  vendor/golang.org/x/net/http2/go17_not18.go  generated  vendored  Normal file

@@ -0,0 +1,36 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.7,!go1.8
+
+package http2
+
+import "crypto/tls"
+
+// temporary copy of Go 1.7's private tls.Config.clone:
+func cloneTLSConfig(c *tls.Config) *tls.Config {
+	return &tls.Config{
+		Rand:                        c.Rand,
+		Time:                        c.Time,
+		Certificates:                c.Certificates,
+		NameToCertificate:           c.NameToCertificate,
+		GetCertificate:              c.GetCertificate,
+		RootCAs:                     c.RootCAs,
+		NextProtos:                  c.NextProtos,
+		ServerName:                  c.ServerName,
+		ClientAuth:                  c.ClientAuth,
+		ClientCAs:                   c.ClientCAs,
+		InsecureSkipVerify:          c.InsecureSkipVerify,
+		CipherSuites:                c.CipherSuites,
+		PreferServerCipherSuites:    c.PreferServerCipherSuites,
+		SessionTicketsDisabled:      c.SessionTicketsDisabled,
+		SessionTicketKey:            c.SessionTicketKey,
+		ClientSessionCache:          c.ClientSessionCache,
+		MinVersion:                  c.MinVersion,
+		MaxVersion:                  c.MaxVersion,
+		CurvePreferences:            c.CurvePreferences,
+		DynamicRecordSizingDisabled: c.DynamicRecordSizingDisabled,
+		Renegotiation:               c.Renegotiation,
+	}
+}
11  vendor/golang.org/x/net/http2/go18.go  generated  vendored  Normal file

@@ -0,0 +1,11 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.8
+
+package http2
+
+import "crypto/tls"
+
+func cloneTLSConfig(c *tls.Config) *tls.Config { return c.Clone() }
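The new go18.go file can be a one-liner because Go 1.8 exposes (*tls.Config).Clone, while the go1.7 and pre-1.7 build files above keep a field-by-field copy. A small usage sketch of the Go 1.8 path; the nil check is our addition and not in the vendored code:

package main

import (
	"crypto/tls"
	"fmt"
)

func cloneTLSConfig(c *tls.Config) *tls.Config {
	if c == nil {
		return &tls.Config{} // defensive; the vendored helper assumes non-nil
	}
	return c.Clone()
}

func main() {
	base := &tls.Config{ServerName: "example.com", InsecureSkipVerify: true}
	cp := cloneTLSConfig(base)
	cp.ServerName = "other.example.com"
	fmt.Println(base.ServerName, cp.ServerName) // example.com other.example.com
}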
2  vendor/golang.org/x/net/http2/hpack/hpack.go  generated  vendored

@@ -57,7 +57,7 @@ func (hf HeaderField) String() string {
 	return fmt.Sprintf("header field %q = %q%s", hf.Name, hf.Value, suffix)
 }
 
-// Size returns the size of an entry per RFC 7540 section 5.2.
+// Size returns the size of an entry per RFC 7541 section 4.1.
 func (hf HeaderField) Size() uint32 {
 	// http://http2.github.io/http2-spec/compression.html#rfc.section.4.1
 	// "The size of the dynamic table is the sum of the size of
16  vendor/golang.org/x/net/http2/http2.go  generated  vendored

@@ -13,6 +13,7 @@
 // See https://http2.github.io/ for more information on HTTP/2.
 //
 // See https://http2.golang.org/ for a test server running this code.
 //
 package http2
 
 import (
@@ -342,10 +343,23 @@ func (s *sorter) Keys(h http.Header) []string {
 }
 
 func (s *sorter) SortStrings(ss []string) {
-	// Our sorter works on s.v, which sorter owners, so
+	// Our sorter works on s.v, which sorter owns, so
 	// stash it away while we sort the user's buffer.
 	save := s.v
 	s.v = ss
 	sort.Sort(s)
 	s.v = save
 }
+
+// validPseudoPath reports whether v is a valid :path pseudo-header
+// value. It must be either:
+//
+//     *) a non-empty string starting with '/', but not with with "//",
+//     *) the string '*', for OPTIONS requests.
+//
+// For now this is only used a quick check for deciding when to clean
+// up Opaque URLs before sending requests from the Transport.
+// See golang.org/issue/16847
+func validPseudoPath(v string) bool {
+	return (len(v) > 0 && v[0] == '/' && (len(v) == 1 || v[1] != '/')) || v == "*"
+}
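The validPseudoPath helper added above is small enough to exercise on its own. A copy pulled out into a runnable sketch (the real function is unexported inside the vendored http2 package):

package main

import "fmt"

func validPseudoPath(v string) bool {
	return (len(v) > 0 && v[0] == '/' && (len(v) == 1 || v[1] != '/')) || v == "*"
}

func main() {
	for _, p := range []string{"/", "/index.html", "*", "", "//double", "relative"} {
		fmt.Printf("%q %v\n", p, validPseudoPath(p))
	}
	// "/" true, "/index.html" true, "*" true, "" false, "//double" false, "relative" false
}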
38  vendor/golang.org/x/net/http2/not_go17.go  generated  vendored

@@ -7,11 +7,16 @@
 package http2
 
 import (
+	"crypto/tls"
 	"net"
 	"net/http"
+	"time"
 )
 
-type contextContext interface{}
+type contextContext interface {
+	Done() <-chan struct{}
+	Err() error
+}
 
 type fakeContext struct{}
 
@@ -49,3 +54,34 @@ func contextWithCancel(ctx contextContext) (_ contextContext, cancel func()) {
 func requestWithContext(req *http.Request, ctx contextContext) *http.Request {
 	return req
 }
+
+// temporary copy of Go 1.6's private tls.Config.clone:
+func cloneTLSConfig(c *tls.Config) *tls.Config {
+	return &tls.Config{
+		Rand:                     c.Rand,
+		Time:                     c.Time,
+		Certificates:             c.Certificates,
+		NameToCertificate:        c.NameToCertificate,
+		GetCertificate:           c.GetCertificate,
+		RootCAs:                  c.RootCAs,
+		NextProtos:               c.NextProtos,
+		ServerName:               c.ServerName,
+		ClientAuth:               c.ClientAuth,
+		ClientCAs:                c.ClientCAs,
+		InsecureSkipVerify:       c.InsecureSkipVerify,
+		CipherSuites:             c.CipherSuites,
+		PreferServerCipherSuites: c.PreferServerCipherSuites,
+		SessionTicketsDisabled:   c.SessionTicketsDisabled,
+		SessionTicketKey:         c.SessionTicketKey,
+		ClientSessionCache:       c.ClientSessionCache,
+		MinVersion:               c.MinVersion,
+		MaxVersion:               c.MaxVersion,
+		CurvePreferences:         c.CurvePreferences,
+	}
+}
+
+func (cc *ClientConn) Ping(ctx contextContext) error {
+	return cc.ping(ctx)
+}
+
+func (t *Transport) idleConnTimeout() time.Duration { return 0 }
82  vendor/golang.org/x/net/http2/server.go  generated  vendored

@@ -922,7 +922,7 @@ func (sc *serverConn) wroteFrame(res frameWriteResult) {
 	st.state = stateHalfClosedLocal // won't last long, but necessary for closeStream via resetStream
-	errCancel := StreamError{st.id, ErrCodeCancel}
+	errCancel := streamError(st.id, ErrCodeCancel)
 	sc.resetStream(errCancel)
 case stateHalfClosedRemote:
 	sc.closeStream(st, errHandlerComplete)
@@ -1133,7 +1133,7 @@ func (sc *serverConn) processWindowUpdate(f *WindowUpdateFrame) error {
 	if !st.flow.add(int32(f.Increment)) {
-		return StreamError{f.StreamID, ErrCodeFlowControl}
+		return streamError(f.StreamID, ErrCodeFlowControl)
 	}
@@ -1159,7 +1159,7 @@ func (sc *serverConn) processResetStream(f *RSTStreamFrame) error {
 	st.gotReset = true
 	st.cancelCtx()
-	sc.closeStream(st, StreamError{f.StreamID, f.ErrCode})
+	sc.closeStream(st, streamError(f.StreamID, f.ErrCode))
@@ -1176,6 +1176,10 @@ func (sc *serverConn) closeStream(st *stream, err error) {
 	delete(sc.streams, st.id)
 	if p := st.body; p != nil {
+		// Return any buffered unread bytes worth of conn-level flow control.
+		// See golang.org/issue/16481
+		sc.sendWindowUpdate(nil, p.Len())
+
 		p.CloseWithError(err)
 	}
 	st.cw.Close() // signals Handler's CloseNotifier, unblocks writes, etc
@@ -1277,6 +1281,8 @@ func (sc *serverConn) processSettingInitialWindowSize(val uint32) error {
 func (sc *serverConn) processData(f *DataFrame) error {
 	sc.serveG.check()
-	data := f.Data()
 	// "If a DATA frame is received whose stream is not in "open"
 	// or "half closed (local)" state, the recipient MUST respond
 	// with a stream error (Section 5.4.2) of type STREAM_CLOSED."
@@ -1288,33 +1294,56 @@ func (sc *serverConn) processData(f *DataFrame) error {
 		// the http.Handler returned, so it's done reading &
 		// done writing). Try to stop the client from sending
 		// more DATA.
-		return StreamError{id, ErrCodeStreamClosed}
+
+		// But still enforce their connection-level flow control,
+		// and return any flow control bytes since we're not going
+		// to consume them.
+		if sc.inflow.available() < int32(f.Length) {
+			return streamError(id, ErrCodeFlowControl)
+		}
+		// Deduct the flow control from inflow, since we're
+		// going to immediately add it back in
+		// sendWindowUpdate, which also schedules sending the
+		// frames.
+		sc.inflow.take(int32(f.Length))
+		sc.sendWindowUpdate(nil, int(f.Length)) // conn-level
+
+		return streamError(id, ErrCodeStreamClosed)
 	}
 	if st.body == nil {
 		panic("internal error: should have a body in this state")
 	}
+	data := f.Data()
 
 	// Sender sending more than they'd declared?
 	if st.declBodyBytes != -1 && st.bodyBytes+int64(len(data)) > st.declBodyBytes {
 		st.body.CloseWithError(fmt.Errorf("sender tried to send more than declared Content-Length of %d bytes", st.declBodyBytes))
-		return StreamError{id, ErrCodeStreamClosed}
+		return streamError(id, ErrCodeStreamClosed)
 	}
-	if len(data) > 0 {
+	if f.Length > 0 {
 		// Check whether the client has flow control quota.
-		if int(st.inflow.available()) < len(data) {
-			return StreamError{id, ErrCodeFlowControl}
+		if st.inflow.available() < int32(f.Length) {
+			return streamError(id, ErrCodeFlowControl)
 		}
-		st.inflow.take(int32(len(data)))
+		st.inflow.take(int32(f.Length))
+
+		if len(data) > 0 {
 			wrote, err := st.body.Write(data)
 			if err != nil {
-				return StreamError{id, ErrCodeStreamClosed}
+				return streamError(id, ErrCodeStreamClosed)
 			}
 			if wrote != len(data) {
 				panic("internal error: bad Writer")
 			}
 			st.bodyBytes += int64(len(data))
+		}
+
+		// Return any padded flow control now, since we won't
+		// refund it later on body reads.
+		if pad := int32(f.Length) - int32(len(data)); pad > 0 {
+			sc.sendWindowUpdate32(nil, pad)
+			sc.sendWindowUpdate32(st, pad)
+		}
 	}
 	if f.StreamEnded() {
 		st.endStream()
 	}
@@ -1417,14 +1446,14 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error {
 	if sc.unackedSettings == 0 {
 		// They should know better.
-		return StreamError{st.id, ErrCodeProtocol}
+		return streamError(st.id, ErrCodeProtocol)
 	}
 	// Assume it's a network race, where they just haven't
 	// received our last SETTINGS update. But actually
 	// this can't happen yet, because we don't yet provide
 	// a way for users to adjust server parameters at
 	// runtime.
-	return StreamError{st.id, ErrCodeRefusedStream}
+	return streamError(st.id, ErrCodeRefusedStream)
 }
@@ -1446,6 +1475,19 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error {
 		handler = new400Handler(err)
 	}
 
+	// The net/http package sets the read deadline from the
+	// http.Server.ReadTimeout during the TLS handshake, but then
+	// passes the connection off to us with the deadline already
+	// set. Disarm it here after the request headers are read, similar
+	// to how the http1 server works.
+	// Unlike http1, though, we never re-arm it yet, though.
+	// TODO(bradfitz): figure out golang.org/issue/14204
+	// (IdleTimeout) and how this relates. Maybe the default
+	// IdleTimeout is ReadTimeout.
+	if sc.hs.ReadTimeout != 0 {
+		sc.conn.SetReadDeadline(time.Time{})
+	}
+
 	go sc.runHandler(rw, req, handler)
 	return nil
 }
@@ -1458,11 +1500,11 @@ func (st *stream) processTrailerHeaders(f *MetaHeadersFrame) error {
 	st.gotTrailerHeader = true
 	if !f.StreamEnded() {
-		return StreamError{st.id, ErrCodeProtocol}
+		return streamError(st.id, ErrCodeProtocol)
 	}
 	if len(f.PseudoFields()) > 0 {
-		return StreamError{st.id, ErrCodeProtocol}
+		return streamError(st.id, ErrCodeProtocol)
 	}
@@ -1471,7 +1513,7 @@
 		// TODO: send more details to the peer somehow. But http2 has
 		// no way to send debug data at a stream level. Discuss with
 		// HTTP folk.
-		return StreamError{st.id, ErrCodeProtocol}
+		return streamError(st.id, ErrCodeProtocol)
 	}
 	st.trailer[key] = append(st.trailer[key], hf.Value)
@@ -1532,7 +1574,7 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*responseWriter, *http.Request, error) {
 	isConnect := method == "CONNECT"
 	if isConnect {
 		if path != "" || scheme != "" || authority == "" {
-			return nil, nil, StreamError{f.StreamID, ErrCodeProtocol}
+			return nil, nil, streamError(f.StreamID, ErrCodeProtocol)
 		}
 	} else if method == "" || path == "" ||
 		(scheme != "https" && scheme != "http") {
@@ -1546,13 +1588,13 @@
 		// "All HTTP/2 requests MUST include exactly one valid
 		// value for the :method, :scheme, and :path
 		// pseudo-header fields"
-		return nil, nil, StreamError{f.StreamID, ErrCodeProtocol}
+		return nil, nil, streamError(f.StreamID, ErrCodeProtocol)
 	}
 
 	bodyOpen := !f.StreamEnded()
 	if method == "HEAD" && bodyOpen {
 		// HEAD requests can't have bodies
-		return nil, nil, StreamError{f.StreamID, ErrCodeProtocol}
+		return nil, nil, streamError(f.StreamID, ErrCodeProtocol)
 	}
 	var tlsState *tls.ConnectionState // nil if not scheme https
@@ -1610,7 +1652,7 @@
 	url_, err = url.ParseRequestURI(path)
 	if err != nil {
-		return nil, nil, StreamError{f.StreamID, ErrCodeProtocol}
+		return nil, nil, streamError(f.StreamID, ErrCodeProtocol)
 	}
 	requestURI = path
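The processData hunk above now charges the whole frame length (data plus padding) against the flow-control window and immediately refunds whatever it will not deliver to the handler. A minimal accounting sketch of that idea; the inflow type and refund helper are ours, standing in for the vendored flow type and sendWindowUpdate:

package main

import "fmt"

type inflow struct{ avail int32 }

func (f *inflow) available() int32 { return f.avail }
func (f *inflow) take(n int32)     { f.avail -= n }
func (f *inflow) refund(n int32)   { f.avail += n } // stands in for sendWindowUpdate

func main() {
	conn := &inflow{avail: 65535}
	frameLen := int32(1024) // DATA frame length, including 24 bytes of padding
	dataLen := int32(1000)

	if conn.available() < frameLen {
		fmt.Println("flow control error")
		return
	}
	conn.take(frameLen)
	conn.refund(frameLen - dataLen) // padding refunded right away
	fmt.Println(conn.available())   // 64535: only the 1000 delivered bytes stay charged
}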
404
vendor/golang.org/x/net/http2/transport.go
generated
vendored
404
vendor/golang.org/x/net/http2/transport.go
generated
vendored
@ -10,12 +10,14 @@ import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"crypto/rand"
|
||||
"crypto/tls"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"math"
|
||||
"net"
|
||||
"net/http"
|
||||
"sort"
|
||||
@ -25,6 +27,7 @@ import (
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/http2/hpack"
|
||||
"golang.org/x/net/idna"
|
||||
"golang.org/x/net/lex/httplex"
|
||||
)
|
||||
|
||||
@ -148,24 +151,29 @@ type ClientConn struct {
|
||||
readerDone chan struct{} // closed on error
|
||||
readerErr error // set before readerDone is closed
|
||||
|
||||
idleTimeout time.Duration // or 0 for never
|
||||
idleTimer *time.Timer
|
||||
|
||||
mu sync.Mutex // guards following
|
||||
cond *sync.Cond // hold mu; broadcast on flow/closed changes
|
||||
flow flow // our conn-level flow control quota (cs.flow is per stream)
|
||||
inflow flow // peer's conn-level flow control
|
||||
closed bool
|
||||
wantSettingsAck bool // we sent a SETTINGS frame and haven't heard back
|
||||
goAway *GoAwayFrame // if non-nil, the GoAwayFrame we received
|
||||
goAwayDebug string // goAway frame's debug data, retained as a string
|
||||
streams map[uint32]*clientStream // client-initiated
|
||||
nextStreamID uint32
|
||||
pings map[[8]byte]chan struct{} // in flight ping data to notification channel
|
||||
bw *bufio.Writer
|
||||
br *bufio.Reader
|
||||
fr *Framer
|
||||
lastActive time.Time
|
||||
|
||||
// Settings from peer:
|
||||
// Settings from peer: (also guarded by mu)
|
||||
maxFrameSize uint32
|
||||
maxConcurrentStreams uint32
|
||||
initialWindowSize uint32
|
||||
|
||||
hbuf bytes.Buffer // HPACK encoder writes into this
|
||||
henc *hpack.Encoder
|
||||
freeBuf [][]byte
|
||||
@ -283,14 +291,18 @@ func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
|
||||
// authorityAddr returns a given authority (a host/IP, or host:port / ip:port)
|
||||
// and returns a host:port. The port 443 is added if needed.
|
||||
func authorityAddr(scheme string, authority string) (addr string) {
|
||||
if _, _, err := net.SplitHostPort(authority); err == nil {
|
||||
return authority
|
||||
}
|
||||
port := "443"
|
||||
host, port, err := net.SplitHostPort(authority)
|
||||
if err != nil { // authority didn't have a port
|
||||
port = "443"
|
||||
if scheme == "http" {
|
||||
port = "80"
|
||||
}
|
||||
return net.JoinHostPort(authority, port)
|
||||
host = authority
|
||||
}
|
||||
if a, err := idna.ToASCII(host); err == nil {
|
||||
host = a
|
||||
}
|
||||
return net.JoinHostPort(host, port)
|
||||
}
|
||||
|
||||
// RoundTripOpt is like RoundTrip, but takes options.
|
||||
@ -339,7 +351,7 @@ func shouldRetryRequest(req *http.Request, err error) bool {
|
||||
return err == errClientConnUnusable
|
||||
}
|
||||
|
||||
func (t *Transport) dialClientConn(addr string) (*ClientConn, error) {
|
||||
func (t *Transport) dialClientConn(addr string, singleUse bool) (*ClientConn, error) {
|
||||
host, _, err := net.SplitHostPort(addr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@ -348,13 +360,13 @@ func (t *Transport) dialClientConn(addr string) (*ClientConn, error) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return t.NewClientConn(tconn)
|
||||
return t.newClientConn(tconn, singleUse)
|
||||
}
|
||||
|
||||
func (t *Transport) newTLSConfig(host string) *tls.Config {
|
||||
cfg := new(tls.Config)
|
||||
if t.TLSClientConfig != nil {
|
||||
*cfg = *t.TLSClientConfig
|
||||
*cfg = *cloneTLSConfig(t.TLSClientConfig)
|
||||
}
|
||||
if !strSliceContains(cfg.NextProtos, NextProtoTLS) {
|
||||
cfg.NextProtos = append([]string{NextProtoTLS}, cfg.NextProtos...)
|
||||
@ -409,14 +421,10 @@ func (t *Transport) expectContinueTimeout() time.Duration {
|
||||
}
|
||||
|
||||
func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) {
|
||||
if VerboseLogs {
|
||||
t.vlogf("http2: Transport creating client conn to %v", c.RemoteAddr())
|
||||
}
|
||||
if _, err := c.Write(clientPreface); err != nil {
|
||||
t.vlogf("client preface write error: %v", err)
|
||||
return nil, err
|
||||
return t.newClientConn(c, false)
|
||||
}
|
||||
|
||||
func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, error) {
|
||||
cc := &ClientConn{
|
||||
t: t,
|
||||
tconn: c,
|
||||
@ -426,7 +434,18 @@ func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) {
|
||||
initialWindowSize: 65535, // spec default
|
||||
maxConcurrentStreams: 1000, // "infinite", per spec. 1000 seems good enough.
|
||||
streams: make(map[uint32]*clientStream),
|
||||
singleUse: singleUse,
|
||||
wantSettingsAck: true,
|
||||
pings: make(map[[8]byte]chan struct{}),
|
||||
}
|
||||
if d := t.idleConnTimeout(); d != 0 {
|
||||
cc.idleTimeout = d
|
||||
cc.idleTimer = time.AfterFunc(d, cc.onIdleTimeout)
|
||||
}
|
||||
if VerboseLogs {
|
||||
t.vlogf("http2: Transport creating client conn %p to %v", cc, c.RemoteAddr())
|
||||
}
|
||||
|
||||
cc.cond = sync.NewCond(&cc.mu)
|
||||
cc.flow.add(int32(initialWindowSize))
|
||||
|
||||
@ -454,6 +473,8 @@ func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) {
|
||||
if max := t.maxHeaderListSize(); max != 0 {
|
||||
initialSettings = append(initialSettings, Setting{ID: SettingMaxHeaderListSize, Val: max})
|
||||
}
|
||||
|
||||
cc.bw.Write(clientPreface)
|
||||
cc.fr.WriteSettings(initialSettings...)
|
||||
cc.fr.WriteWindowUpdate(0, transportDefaultConnFlow)
|
||||
cc.inflow.add(transportDefaultConnFlow + initialWindowSize)
|
||||
@ -462,33 +483,6 @@ func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) {
|
||||
return nil, cc.werr
|
||||
}
|
||||
|
||||
// Read the obligatory SETTINGS frame
|
||||
f, err := cc.fr.ReadFrame()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
sf, ok := f.(*SettingsFrame)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("expected settings frame, got: %T", f)
|
||||
}
|
||||
cc.fr.WriteSettingsAck()
|
||||
cc.bw.Flush()
|
||||
|
||||
sf.ForeachSetting(func(s Setting) error {
|
||||
switch s.ID {
|
||||
case SettingMaxFrameSize:
|
||||
cc.maxFrameSize = s.Val
|
||||
case SettingMaxConcurrentStreams:
|
||||
cc.maxConcurrentStreams = s.Val
|
||||
case SettingInitialWindowSize:
|
||||
cc.initialWindowSize = s.Val
|
||||
default:
|
||||
// TODO(bradfitz): handle more; at least SETTINGS_HEADER_TABLE_SIZE?
|
||||
t.vlogf("Unhandled Setting: %v", s)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
|
||||
go cc.readLoop()
|
||||
return cc, nil
|
||||
}
|
||||
@ -521,7 +515,17 @@ func (cc *ClientConn) canTakeNewRequestLocked() bool {
|
||||
}
|
||||
return cc.goAway == nil && !cc.closed &&
|
||||
int64(len(cc.streams)+1) < int64(cc.maxConcurrentStreams) &&
|
||||
cc.nextStreamID < 2147483647
|
||||
cc.nextStreamID < math.MaxInt32
|
||||
}
|
||||
|
||||
// onIdleTimeout is called from a time.AfterFunc goroutine. It will
|
||||
// only be called when we're idle, but because we're coming from a new
|
||||
// goroutine, there could be a new request coming in at the same time,
|
||||
// so this simply calls the synchronized closeIfIdle to shut down this
|
||||
// connection. The timer could just call closeIfIdle, but this is more
|
||||
// clear.
|
||||
func (cc *ClientConn) onIdleTimeout() {
|
||||
cc.closeIfIdle()
|
||||
}
|
||||
|
||||
func (cc *ClientConn) closeIfIdle() {
|
||||
@ -531,9 +535,13 @@ func (cc *ClientConn) closeIfIdle() {
|
||||
return
|
||||
}
|
||||
cc.closed = true
|
||||
nextID := cc.nextStreamID
|
||||
// TODO: do clients send GOAWAY too? maybe? Just Close:
|
||||
cc.mu.Unlock()
|
||||
|
||||
if VerboseLogs {
|
||||
cc.vlogf("http2: Transport closing idle conn %p (forSingleUse=%v, maxStream=%v)", cc, cc.singleUse, nextID-2)
|
||||
}
|
||||
cc.tconn.Close()
|
||||
}
|
||||
|
||||
@ -616,13 +624,13 @@ func (cc *ClientConn) responseHeaderTimeout() time.Duration {
|
||||
// Certain headers are special-cased as okay but not transmitted later.
|
||||
func checkConnHeaders(req *http.Request) error {
|
||||
if v := req.Header.Get("Upgrade"); v != "" {
|
||||
return errors.New("http2: invalid Upgrade request header")
|
||||
return fmt.Errorf("http2: invalid Upgrade request header: %q", req.Header["Upgrade"])
|
||||
}
|
||||
if v := req.Header.Get("Transfer-Encoding"); (v != "" && v != "chunked") || len(req.Header["Transfer-Encoding"]) > 1 {
|
||||
return errors.New("http2: invalid Transfer-Encoding request header")
|
||||
if vv := req.Header["Transfer-Encoding"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "chunked") {
|
||||
return fmt.Errorf("http2: invalid Transfer-Encoding request header: %q", vv)
|
||||
}
|
||||
if v := req.Header.Get("Connection"); (v != "" && v != "close" && v != "keep-alive") || len(req.Header["Connection"]) > 1 {
|
||||
return errors.New("http2: invalid Connection request header")
|
||||
if vv := req.Header["Connection"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "close" && vv[0] != "keep-alive") {
|
||||
return fmt.Errorf("http2: invalid Connection request header: %q", vv)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@ -635,19 +643,27 @@ func bodyAndLength(req *http.Request) (body io.Reader, contentLen int64) {
|
||||
if req.ContentLength != 0 {
|
||||
return req.Body, req.ContentLength
|
||||
}
|
||||
// Don't try to sniff the size if they're doing an expect
|
||||
// request (Issue 16002):
|
||||
if req.Header.Get("Expect") == "100-continue" {
|
||||
return req.Body, -1
|
||||
}
|
||||
|
||||
// We have a body but a zero content length. Test to see if
|
||||
// it's actually zero or just unset.
|
||||
var buf [1]byte
|
||||
n, rerr := io.ReadFull(body, buf[:])
|
||||
n, rerr := body.Read(buf[:])
|
||||
if rerr != nil && rerr != io.EOF {
|
||||
return errorReader{rerr}, -1
|
||||
}
|
||||
if n == 1 {
|
||||
// Oh, guess there is data in this Body Reader after all.
|
||||
// The ContentLength field just wasn't set.
|
||||
// Stich the Body back together again, re-attaching our
|
||||
// Stitch the Body back together again, re-attaching our
|
||||
// consumed byte.
|
||||
if rerr == io.EOF {
|
||||
return bytes.NewReader(buf[:]), 1
|
||||
}
|
||||
return io.MultiReader(bytes.NewReader(buf[:]), body), -1
|
||||
}
|
||||
// Body is actually zero bytes.
|
||||
@ -658,6 +674,9 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) {
|
||||
if err := checkConnHeaders(req); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if cc.idleTimer != nil {
|
||||
cc.idleTimer.Stop()
|
||||
}
|
||||
|
||||
trailers, err := commaSeparatedTrailers(req)
|
||||
if err != nil {
|
||||
@ -665,9 +684,6 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) {
|
||||
}
|
||||
hasTrailers := trailers != ""
|
||||
|
||||
body, contentLen := bodyAndLength(req)
|
||||
hasBody := body != nil
|
||||
|
||||
cc.mu.Lock()
|
||||
cc.lastActive = time.Now()
|
||||
if cc.closed || !cc.canTakeNewRequestLocked() {
|
||||
@ -675,6 +691,9 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) {
|
||||
return nil, errClientConnUnusable
|
||||
}
|
||||
|
||||
body, contentLen := bodyAndLength(req)
|
||||
hasBody := body != nil
|
||||
|
||||
// TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere?
|
||||
var requestedGzip bool
|
||||
if !cc.t.disableCompression() &&
|
||||
@ -747,9 +766,7 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) {
|
||||
bodyWritten := false
|
||||
ctx := reqContext(req)
|
||||
|
||||
for {
|
||||
select {
|
||||
case re := <-readLoopResCh:
|
||||
handleReadLoopResponse := func(re resAndError) (*http.Response, error) {
|
||||
res := re.res
|
||||
if re.err != nil || res.StatusCode > 299 {
|
||||
// On error or status code 3xx, 4xx, 5xx, etc abort any
|
||||
@ -771,6 +788,12 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) {
|
||||
res.Request = req
|
||||
res.TLS = cc.tlsState
|
||||
return res, nil
|
||||
}
|
||||
|
||||
for {
|
||||
select {
|
||||
case re := <-readLoopResCh:
|
||||
return handleReadLoopResponse(re)
|
||||
case <-respHeaderTimer:
|
||||
cc.forgetStreamID(cs.ID)
|
||||
if !hasBody || bodyWritten {
|
||||
@ -804,6 +827,12 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) {
|
||||
// forgetStreamID.
|
||||
return nil, cs.resetErr
|
||||
case err := <-bodyWriter.resc:
|
||||
// Prefer the read loop's response, if available. Issue 16102.
|
||||
select {
|
||||
case re := <-readLoopResCh:
|
||||
return handleReadLoopResponse(re)
|
||||
default:
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -908,10 +937,11 @@ func (cs *clientStream) writeRequestBody(body io.Reader, bodyCloser io.Closer) (
|
||||
err = cc.fr.WriteData(cs.ID, sentEnd, data)
|
||||
if err == nil {
|
||||
// TODO(bradfitz): this flush is for latency, not bandwidth.
|
||||
// Most requests won't need this. Make this opt-in or opt-out?
|
||||
// Use some heuristic on the body type? Nagel-like timers?
|
||||
// Based on 'n'? Only last chunk of this for loop, unless flow control
|
||||
// tokens are low? For now, always:
|
||||
// Most requests won't need this. Make this opt-in or
|
||||
// opt-out? Use some heuristic on the body type? Nagel-like
|
||||
// timers? Based on 'n'? Only last chunk of this for loop,
|
||||
// unless flow control tokens are low? For now, always.
|
||||
// If we change this, see comment below.
|
||||
err = cc.bw.Flush()
|
||||
}
|
||||
cc.wmu.Unlock()
|
||||
@ -921,28 +951,33 @@ func (cs *clientStream) writeRequestBody(body io.Reader, bodyCloser io.Closer) (
|
||||
}
|
||||
}
|
||||
|
||||
cc.wmu.Lock()
|
||||
if !sentEnd {
|
||||
if sentEnd {
|
||||
// Already sent END_STREAM (which implies we have no
|
||||
// trailers) and flushed, because currently all
|
||||
// WriteData frames above get a flush. So we're done.
|
||||
return nil
|
||||
}
|
||||
|
||||
var trls []byte
|
||||
if hasTrailers {
|
||||
cc.mu.Lock()
|
||||
defer cc.mu.Unlock()
|
||||
trls = cc.encodeTrailers(req)
|
||||
cc.mu.Unlock()
|
||||
}
|
||||
|
||||
// Avoid forgetting to send an END_STREAM if the encoded
|
||||
// trailers are 0 bytes. Both results produce and END_STREAM.
|
||||
cc.wmu.Lock()
|
||||
defer cc.wmu.Unlock()
|
||||
|
||||
// Two ways to send END_STREAM: either with trailers, or
|
||||
// with an empty DATA frame.
|
||||
if len(trls) > 0 {
|
||||
err = cc.writeHeaders(cs.ID, true, trls)
|
||||
} else {
|
||||
err = cc.fr.WriteData(cs.ID, true, nil)
|
||||
}
|
||||
}
|
||||
if ferr := cc.bw.Flush(); ferr != nil && err == nil {
|
||||
err = ferr
|
||||
}
|
||||
cc.wmu.Unlock()
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -995,6 +1030,26 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail
if host == "" {
host = req.URL.Host
}
host, err := httplex.PunycodeHostPort(host)
if err != nil {
return nil, err
}

var path string
if req.Method != "CONNECT" {
path = req.URL.RequestURI()
if !validPseudoPath(path) {
orig := path
path = strings.TrimPrefix(path, req.URL.Scheme+"://"+host)
if !validPseudoPath(path) {
if req.URL.Opaque != "" {
return nil, fmt.Errorf("invalid request :path %q from URL.Opaque = %q", orig, req.URL.Opaque)
} else {
return nil, fmt.Errorf("invalid request :path %q", orig)
}
}
}
}

// Check for any invalid headers and return an error before we
// potentially pollute our hpack state. (We want to be able to
@@ -1018,8 +1073,8 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail
cc.writeHeader(":authority", host)
cc.writeHeader(":method", req.Method)
if req.Method != "CONNECT" {
cc.writeHeader(":path", req.URL.RequestURI())
cc.writeHeader(":scheme", "https")
cc.writeHeader(":path", path)
cc.writeHeader(":scheme", req.URL.Scheme)
}
if trailers != "" {
cc.writeHeader("trailer", trailers)
@@ -1146,6 +1201,9 @@ func (cc *ClientConn) streamByID(id uint32, andRemove bool) *clientStream {
if andRemove && cs != nil && !cc.closed {
cc.lastActive = time.Now()
delete(cc.streams, id)
if len(cc.streams) == 0 && cc.idleTimer != nil {
cc.idleTimer.Reset(cc.idleTimeout)
}
close(cs.done)
cc.cond.Broadcast() // wake up checkResetOrDone via clientStream.awaitFlowControl
}
@@ -1188,28 +1246,38 @@ func (e GoAwayError) Error() string {
e.LastStreamID, e.ErrCode, e.DebugData)
}

func isEOFOrNetReadError(err error) bool {
if err == io.EOF {
return true
}
ne, ok := err.(*net.OpError)
return ok && ne.Op == "read"
}

func (rl *clientConnReadLoop) cleanup() {
cc := rl.cc
defer cc.tconn.Close()
defer cc.t.connPool().MarkDead(cc)
defer close(cc.readerDone)

if cc.idleTimer != nil {
cc.idleTimer.Stop()
}

// Close any response bodies if the server closes prematurely.
// TODO: also do this if we've written the headers but not
// gotten a response yet.
err := cc.readerErr
cc.mu.Lock()
if err == io.EOF {
if cc.goAway != nil {
if cc.goAway != nil && isEOFOrNetReadError(err) {
err = GoAwayError{
LastStreamID: cc.goAway.LastStreamID,
ErrCode: cc.goAway.ErrCode,
DebugData: cc.goAwayDebug,
}
} else {
} else if err == io.EOF {
err = io.ErrUnexpectedEOF
}
}
for _, cs := range rl.activeRes {
cs.bufPipe.CloseWithError(err)
}
@@ -1228,15 +1296,20 @@ func (rl *clientConnReadLoop) cleanup() {
func (rl *clientConnReadLoop) run() error {
cc := rl.cc
rl.closeWhenIdle = cc.t.disableKeepAlives() || cc.singleUse
gotReply := false // ever saw a reply
gotReply := false // ever saw a HEADERS reply
gotSettings := false
for {
f, err := cc.fr.ReadFrame()
if err != nil {
cc.vlogf("Transport readFrame error: (%T) %v", err, err)
cc.vlogf("http2: Transport readFrame error on conn %p: (%T) %v", cc, err, err)
}
if se, ok := err.(StreamError); ok {
if cs := cc.streamByID(se.StreamID, true /*ended; remove it*/); cs != nil {
rl.endStreamError(cs, cc.fr.errDetail)
cs.cc.writeStreamReset(cs.ID, se.Code, err)
if se.Cause == nil {
se.Cause = cc.fr.errDetail
}
rl.endStreamError(cs, se)
}
continue
} else if err != nil {
@@ -1245,6 +1318,13 @@ func (rl *clientConnReadLoop) run() error {
if VerboseLogs {
cc.vlogf("http2: Transport received %s", summarizeFrame(f))
}
if !gotSettings {
if _, ok := f.(*SettingsFrame); !ok {
cc.logf("protocol error: received %T before a SETTINGS frame", f)
return ConnectionError(ErrCodeProtocol)
}
gotSettings = true
}
maybeIdle := false // whether frame might transition us to idle

switch f := f.(type) {
@@ -1273,6 +1353,9 @@ func (rl *clientConnReadLoop) run() error {
cc.logf("Transport: unhandled response frame type %T", f)
}
if err != nil {
if VerboseLogs {
cc.vlogf("http2: Transport conn %p received error from processing frame %v: %v", cc, summarizeFrame(f), err)
}
return err
}
if rl.closeWhenIdle && gotReply && maybeIdle && len(rl.activeRes) == 0 {
@@ -1522,10 +1605,27 @@ var errClosedResponseBody = errors.New("http2: response body closed")

func (b transportResponseBody) Close() error {
cs := b.cs
if cs.bufPipe.Err() != io.EOF {
// TODO: write test for this
cs.cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
cc := cs.cc

serverSentStreamEnd := cs.bufPipe.Err() == io.EOF
unread := cs.bufPipe.Len()

if unread > 0 || !serverSentStreamEnd {
cc.mu.Lock()
cc.wmu.Lock()
if !serverSentStreamEnd {
cc.fr.WriteRSTStream(cs.ID, ErrCodeCancel)
}
// Return connection-level flow control.
if unread > 0 {
cc.inflow.add(int32(unread))
cc.fr.WriteWindowUpdate(0, uint32(unread))
}
cc.bw.Flush()
cc.wmu.Unlock()
cc.mu.Unlock()
}

cs.bufPipe.BreakWithError(errClosedResponseBody)
return nil
}
@@ -1533,6 +1633,7 @@ func (b transportResponseBody) Close() error {
func (rl *clientConnReadLoop) processData(f *DataFrame) error {
cc := rl.cc
cs := cc.streamByID(f.StreamID, f.StreamEnded())
data := f.Data()
if cs == nil {
cc.mu.Lock()
neverSent := cc.nextStreamID
@@ -1546,10 +1647,22 @@ func (rl *clientConnReadLoop) processData(f *DataFrame) error {
// TODO: be stricter here? only silently ignore things which
// we canceled, but not things which were closed normally
// by the peer? Tough without accumulating too much state.

// But at least return their flow control:
if f.Length > 0 {
cc.mu.Lock()
cc.inflow.add(int32(f.Length))
cc.mu.Unlock()

cc.wmu.Lock()
cc.fr.WriteWindowUpdate(0, uint32(f.Length))
cc.bw.Flush()
cc.wmu.Unlock()
}
return nil
}
if data := f.Data(); len(data) > 0 {
if cs.bufPipe.b == nil {
if f.Length > 0 {
if len(data) > 0 && cs.bufPipe.b == nil {
// Data frame after it's already closed?
cc.logf("http2: Transport received DATA frame for closed stream; closing connection")
return ConnectionError(ErrCodeProtocol)
@@ -1557,19 +1670,32 @@ func (rl *clientConnReadLoop) processData(f *DataFrame) error {

// Check connection-level flow control.
cc.mu.Lock()
if cs.inflow.available() >= int32(len(data)) {
cs.inflow.take(int32(len(data)))
if cs.inflow.available() >= int32(f.Length) {
cs.inflow.take(int32(f.Length))
} else {
cc.mu.Unlock()
return ConnectionError(ErrCodeFlowControl)
}
// Return any padded flow control now, since we won't
// refund it later on body reads.
if pad := int32(f.Length) - int32(len(data)); pad > 0 {
cs.inflow.add(pad)
cc.inflow.add(pad)
cc.wmu.Lock()
cc.fr.WriteWindowUpdate(0, uint32(pad))
cc.fr.WriteWindowUpdate(cs.ID, uint32(pad))
cc.bw.Flush()
cc.wmu.Unlock()
}
cc.mu.Unlock()

if len(data) > 0 {
if _, err := cs.bufPipe.Write(data); err != nil {
rl.endStreamError(cs, err)
return err
}
}
}

if f.StreamEnded() {
rl.endStream(cs)
@@ -1593,9 +1719,14 @@ func (rl *clientConnReadLoop) endStreamError(cs *clientStream, err error) {
}
cs.bufPipe.closeWithErrorAndCode(err, code)
delete(rl.activeRes, cs.ID)
if cs.req.Close || cs.req.Header.Get("Connection") == "close" {
if isConnectionCloseRequest(cs.req) {
rl.closeWhenIdle = true
}

select {
case cs.resc <- resAndError{err: err}:
default:
}
}

func (cs *clientStream) copyTrailers() {
@@ -1623,18 +1754,39 @@ func (rl *clientConnReadLoop) processSettings(f *SettingsFrame) error {
cc := rl.cc
cc.mu.Lock()
defer cc.mu.Unlock()
return f.ForeachSetting(func(s Setting) error {

if f.IsAck() {
if cc.wantSettingsAck {
cc.wantSettingsAck = false
return nil
}
return ConnectionError(ErrCodeProtocol)
}

err := f.ForeachSetting(func(s Setting) error {
switch s.ID {
case SettingMaxFrameSize:
cc.maxFrameSize = s.Val
case SettingMaxConcurrentStreams:
cc.maxConcurrentStreams = s.Val
case SettingInitialWindowSize:
// TODO: error if this is too large.
// Values above the maximum flow-control
// window size of 2^31-1 MUST be treated as a
// connection error (Section 5.4.1) of type
// FLOW_CONTROL_ERROR.
if s.Val > math.MaxInt32 {
return ConnectionError(ErrCodeFlowControl)
}

// TODO: adjust flow control of still-open
// Adjust flow control of currently-open
// frames by the difference of the old initial
// window size and this one.
delta := int32(s.Val) - int32(cc.initialWindowSize)
for _, cs := range cc.streams {
cs.flow.add(delta)
}
cc.cond.Broadcast()

cc.initialWindowSize = s.Val
default:
// TODO(bradfitz): handle more settings? SETTINGS_HEADER_TABLE_SIZE probably.
@@ -1642,6 +1794,16 @@ func (rl *clientConnReadLoop) processSettings(f *SettingsFrame) error {
}
return nil
})
if err != nil {
return err
}

cc.wmu.Lock()
defer cc.wmu.Unlock()

cc.fr.WriteSettingsAck()
cc.bw.Flush()
return cc.werr
}

func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error {
@@ -1678,7 +1840,7 @@ func (rl *clientConnReadLoop) processResetStream(f *RSTStreamFrame) error {
// which closes this, so there
// isn't a race.
default:
err := StreamError{cs.ID, f.ErrCode}
err := streamError(cs.ID, f.ErrCode)
cs.resetErr = err
close(cs.peerReset)
cs.bufPipe.CloseWithError(err)
@@ -1688,10 +1850,56 @@ func (rl *clientConnReadLoop) processResetStream(f *RSTStreamFrame) error {
return nil
}

// Ping sends a PING frame to the server and waits for the ack.
// Public implementation is in go17.go and not_go17.go
func (cc *ClientConn) ping(ctx contextContext) error {
c := make(chan struct{})
// Generate a random payload
var p [8]byte
for {
if _, err := rand.Read(p[:]); err != nil {
return err
}
cc.mu.Lock()
// check for dup before insert
if _, found := cc.pings[p]; !found {
cc.pings[p] = c
cc.mu.Unlock()
break
}
cc.mu.Unlock()
}
cc.wmu.Lock()
if err := cc.fr.WritePing(false, p); err != nil {
cc.wmu.Unlock()
return err
}
if err := cc.bw.Flush(); err != nil {
cc.wmu.Unlock()
return err
}
cc.wmu.Unlock()
select {
case <-c:
return nil
case <-ctx.Done():
return ctx.Err()
case <-cc.readerDone:
// connection closed
return cc.readerErr
}
}

func (rl *clientConnReadLoop) processPing(f *PingFrame) error {
if f.IsAck() {
// 6.7 PING: " An endpoint MUST NOT respond to PING frames
// containing this flag."
cc := rl.cc
cc.mu.Lock()
defer cc.mu.Unlock()
// If ack, notify listener if any
if c, ok := cc.pings[f.Data]; ok {
close(c)
delete(cc.pings, f.Data)
}
return nil
}
cc := rl.cc
@@ -1715,8 +1923,10 @@ func (rl *clientConnReadLoop) processPushPromise(f *PushPromiseFrame) error {
}

func (cc *ClientConn) writeStreamReset(streamID uint32, code ErrCode, err error) {
// TODO: do something with err? send it as a debug frame to the peer?
// But that's only in GOAWAY. Invent a new frame type? Is there one already?
// TODO: map err to more interesting error codes, once the
// HTTP community comes up with some. But currently for
// RST_STREAM there's no equivalent to GOAWAY frame's debug
// data, and the error codes are all pretty vague ("cancel").
cc.wmu.Lock()
cc.fr.WriteRSTStream(streamID, code)
cc.bw.Flush()
@@ -1866,3 +2076,9 @@ func (s bodyWriterState) scheduleBodyWrite() {
s.timer.Reset(s.delay)
}
}

// isConnectionCloseRequest reports whether req should use its own
// connection for a single request and then close the connection.
func isConnectionCloseRequest(req *http.Request) bool {
return req.Close || httplex.HeaderValuesContainsToken(req.Header["Connection"], "close")
}
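Note (not part of the vendored diff): the processData hunks above charge the peer's flow-control window for the full frame length (payload plus padding) but deliver only the payload to the body reader, so the padding is refunded immediately via WINDOW_UPDATE. A minimal standalone sketch of that arithmetic, using a hypothetical paddingRefund helper and made-up sizes:

package main

import "fmt"

// paddingRefund returns the number of flow-control bytes to hand back
// right away for a DATA frame: the window is debited for the whole frame
// length, but only the payload bytes reach the response body.
func paddingRefund(frameLength uint32, payload []byte) int32 {
	pad := int32(frameLength) - int32(len(payload))
	if pad < 0 {
		pad = 0
	}
	return pad
}

func main() {
	// A 1024-byte DATA frame carrying 1000 payload bytes and 24 bytes of
	// padding: 24 bytes are returned via WINDOW_UPDATE on both the stream
	// and the connection, mirroring processData above.
	fmt.Println(paddingRefund(1024, make([]byte, 1000))) // 24
}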
39
vendor/golang.org/x/net/lex/httplex/httplex.go
generated
vendored
@@ -10,8 +10,11 @@
package httplex

import (
"net"
"strings"
"unicode/utf8"

"golang.org/x/net/idna"
)

var isTokenTable = [127]bool{
@@ -310,3 +313,39 @@ func ValidHeaderFieldValue(v string) bool {
}
return true
}

func isASCII(s string) bool {
for i := 0; i < len(s); i++ {
if s[i] >= utf8.RuneSelf {
return false
}
}
return true
}

// PunycodeHostPort returns the IDNA Punycode version
// of the provided "host" or "host:port" string.
func PunycodeHostPort(v string) (string, error) {
if isASCII(v) {
return v, nil
}

host, port, err := net.SplitHostPort(v)
if err != nil {
// The input 'v' argument was just a "host" argument,
// without a port. This error should not be returned
// to the caller.
host = v
port = ""
}
host, err = idna.ToASCII(host)
if err != nil {
// Non-UTF-8? Not representable in Punycode, in any
// case.
return "", err
}
if port == "" {
return host, nil
}
return net.JoinHostPort(host, port), nil
}
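Note (not part of the vendored diff): PunycodeHostPort is the helper that transport.go now calls before writing the :authority pseudo-header. A rough usage sketch; the hosts and expected outputs below are illustrative:

package main

import (
	"fmt"
	"log"

	"golang.org/x/net/lex/httplex"
)

func main() {
	// ASCII input is returned unchanged, skipping the idna conversion.
	h, err := httplex.PunycodeHostPort("example.com:8443")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(h) // example.com:8443

	// A non-ASCII host is converted to its Punycode (xn--...) form;
	// the port, when present, is split off and rejoined.
	h, err = httplex.PunycodeHostPort("bücher.example:443")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(h) // xn--bcher-kva.example:443
}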
22
vendor/golang.org/x/net/trace/trace.go
generated
vendored
@@ -91,7 +91,7 @@ var DebugUseAfterFinish = false
// It returns two bools; the first indicates whether the page may be viewed at all,
// and the second indicates whether sensitive events will be shown.
//
// AuthRequest may be replaced by a program to customise its authorisation requirements.
// AuthRequest may be replaced by a program to customize its authorization requirements.
//
// The default AuthRequest function returns (true, true) if and only if the request
// comes from localhost/127.0.0.1/[::1].
@@ -333,7 +333,8 @@ func New(family, title string) Trace {
tr.ref()
tr.Family, tr.Title = family, title
tr.Start = time.Now()
tr.events = make([]event, 0, maxEventsPerTrace)
tr.maxEvents = maxEventsPerTrace
tr.events = tr.eventsBuf[:0]

activeMu.RLock()
s := activeTraces[tr.Family]
@@ -650,8 +651,8 @@ type event struct {
Elapsed time.Duration // since previous event in trace
NewDay bool // whether this event is on a different day to the previous event
Recyclable bool // whether this event was passed via LazyLog
What interface{} // string or fmt.Stringer
Sensitive bool // whether this event contains sensitive information
What interface{} // string or fmt.Stringer
}

// WhenString returns a string representation of the elapsed time of the event.
@@ -694,12 +695,15 @@ type trace struct {
// Append-only sequence of events (modulo discards).
mu sync.RWMutex
events []event
maxEvents int

refs int32 // how many buckets this is in
recycler func(interface{})
disc discarded // scratch space to avoid allocation

finishStack []byte // where finish was called, if DebugUseAfterFinish is set

eventsBuf [4]event // preallocated buffer in case we only log a few events
}

func (tr *trace) reset() {
@@ -711,11 +715,15 @@ func (tr *trace) reset() {
tr.traceID = 0
tr.spanID = 0
tr.IsError = false
tr.maxEvents = 0
tr.events = nil
tr.refs = 0
tr.recycler = nil
tr.disc = 0
tr.finishStack = nil
for i := range tr.eventsBuf {
tr.eventsBuf[i] = event{}
}
}

// delta returns the elapsed time since the last event or the trace start,
@@ -753,11 +761,11 @@ func (tr *trace) addEvent(x interface{}, recyclable, sensitive bool) {
e := event{When: time.Now(), What: x, Recyclable: recyclable, Sensitive: sensitive}
tr.mu.Lock()
e.Elapsed, e.NewDay = tr.delta(e.When)
if len(tr.events) < cap(tr.events) {
if len(tr.events) < tr.maxEvents {
tr.events = append(tr.events, e)
} else {
// Discard the middle events.
di := int((cap(tr.events) - 1) / 2)
di := int((tr.maxEvents - 1) / 2)
if d, ok := tr.events[di].What.(*discarded); ok {
(*d)++
} else {
@@ -777,7 +785,7 @@ func (tr *trace) addEvent(x interface{}, recyclable, sensitive bool) {
go tr.recycler(tr.events[di+1].What)
}
copy(tr.events[di+1:], tr.events[di+2:])
tr.events[cap(tr.events)-1] = e
tr.events[tr.maxEvents-1] = e
}
tr.mu.Unlock()
}
@@ -803,7 +811,7 @@ func (tr *trace) SetTraceInfo(traceID, spanID uint64) {
func (tr *trace) SetMaxEvents(m int) {
// Always keep at least three events: first, discarded count, last.
if len(tr.events) == 0 && m > 3 {
tr.events = make([]event, 0, m)
tr.maxEvents = m
}
}
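Note (not part of the vendored diff): the trace.go change swaps the capacity-based event buffer for an explicit maxEvents counter backed by a small preallocated array. A rough sketch of the caller-side behavior this affects; the family/title strings and event count are made up:

package main

import (
	"golang.org/x/net/trace"
)

func main() {
	// New trace, visible on the /debug/requests page while active.
	tr := trace.New("example.Family", "demo request")
	defer tr.Finish()

	// Cap the per-trace event buffer; once more events arrive, the
	// middle ones are collapsed into a "discarded" placeholder by the
	// maxEvents bookkeeping shown above.
	tr.SetMaxEvents(10)

	for i := 0; i < 25; i++ {
		tr.LazyPrintf("step %d", i)
	}
}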