Merge pull request #46 from Random-Liu/wait-image-pulling

Wait and check image pulling progress.
This commit is contained in:
Lantao Liu 2017-05-26 17:24:47 -07:00 committed by GitHub
commit a49f66e0bb
101 changed files with 4384 additions and 1675 deletions

146
Godeps/Godeps.json generated
View File

@ -1,6 +1,6 @@
{ {
"ImportPath": "github.com/kubernetes-incubator/cri-containerd", "ImportPath": "github.com/kubernetes-incubator/cri-containerd",
"GoVersion": "go1.7", "GoVersion": "go1.8",
"GodepVersion": "v79", "GodepVersion": "v79",
"Packages": [ "Packages": [
"./..." "./..."
@ -28,123 +28,127 @@
}, },
{ {
"ImportPath": "github.com/containerd/containerd", "ImportPath": "github.com/containerd/containerd",
"Comment": "v0.2.3-745-gbbeaab5", "Comment": "v0.2.3-856-g193abed",
"Rev": "bbeaab5ee38568daa84b8c233a8067b524588355" "Rev": "193abed96e06a45fead2ed86cfbb11c2a0a001a4"
}, },
{ {
"ImportPath": "github.com/containerd/containerd/api/services/content", "ImportPath": "github.com/containerd/containerd/api/services/content",
"Comment": "v0.2.3-745-gbbeaab5", "Comment": "v0.2.3-856-g193abed",
"Rev": "bbeaab5ee38568daa84b8c233a8067b524588355" "Rev": "193abed96e06a45fead2ed86cfbb11c2a0a001a4"
}, },
{ {
"ImportPath": "github.com/containerd/containerd/api/services/execution", "ImportPath": "github.com/containerd/containerd/api/services/execution",
"Comment": "v0.2.3-745-gbbeaab5", "Comment": "v0.2.3-856-g193abed",
"Rev": "bbeaab5ee38568daa84b8c233a8067b524588355" "Rev": "193abed96e06a45fead2ed86cfbb11c2a0a001a4"
}, },
{ {
"ImportPath": "github.com/containerd/containerd/api/services/images", "ImportPath": "github.com/containerd/containerd/api/services/images",
"Comment": "v0.2.3-745-gbbeaab5", "Comment": "v0.2.3-856-g193abed",
"Rev": "bbeaab5ee38568daa84b8c233a8067b524588355" "Rev": "193abed96e06a45fead2ed86cfbb11c2a0a001a4"
}, },
{ {
"ImportPath": "github.com/containerd/containerd/api/services/rootfs", "ImportPath": "github.com/containerd/containerd/api/services/rootfs",
"Comment": "v0.2.3-745-gbbeaab5", "Comment": "v0.2.3-856-g193abed",
"Rev": "bbeaab5ee38568daa84b8c233a8067b524588355" "Rev": "193abed96e06a45fead2ed86cfbb11c2a0a001a4"
}, },
{ {
"ImportPath": "github.com/containerd/containerd/api/types/container", "ImportPath": "github.com/containerd/containerd/api/types/container",
"Comment": "v0.2.3-745-gbbeaab5", "Comment": "v0.2.3-856-g193abed",
"Rev": "bbeaab5ee38568daa84b8c233a8067b524588355" "Rev": "193abed96e06a45fead2ed86cfbb11c2a0a001a4"
}, },
{ {
"ImportPath": "github.com/containerd/containerd/api/types/descriptor", "ImportPath": "github.com/containerd/containerd/api/types/descriptor",
"Comment": "v0.2.3-745-gbbeaab5", "Comment": "v0.2.3-856-g193abed",
"Rev": "bbeaab5ee38568daa84b8c233a8067b524588355" "Rev": "193abed96e06a45fead2ed86cfbb11c2a0a001a4"
}, },
{ {
"ImportPath": "github.com/containerd/containerd/api/types/mount", "ImportPath": "github.com/containerd/containerd/api/types/mount",
"Comment": "v0.2.3-745-gbbeaab5", "Comment": "v0.2.3-856-g193abed",
"Rev": "bbeaab5ee38568daa84b8c233a8067b524588355" "Rev": "193abed96e06a45fead2ed86cfbb11c2a0a001a4"
}, },
{ {
"ImportPath": "github.com/containerd/containerd/archive", "ImportPath": "github.com/containerd/containerd/archive",
"Comment": "v0.2.3-745-gbbeaab5", "Comment": "v0.2.3-856-g193abed",
"Rev": "bbeaab5ee38568daa84b8c233a8067b524588355" "Rev": "193abed96e06a45fead2ed86cfbb11c2a0a001a4"
}, },
{ {
"ImportPath": "github.com/containerd/containerd/archive/compression", "ImportPath": "github.com/containerd/containerd/archive/compression",
"Comment": "v0.2.3-745-gbbeaab5", "Comment": "v0.2.3-856-g193abed",
"Rev": "bbeaab5ee38568daa84b8c233a8067b524588355" "Rev": "193abed96e06a45fead2ed86cfbb11c2a0a001a4"
}, },
{ {
"ImportPath": "github.com/containerd/containerd/content", "ImportPath": "github.com/containerd/containerd/content",
"Comment": "v0.2.3-745-gbbeaab5", "Comment": "v0.2.3-856-g193abed",
"Rev": "bbeaab5ee38568daa84b8c233a8067b524588355" "Rev": "193abed96e06a45fead2ed86cfbb11c2a0a001a4"
}, },
{ {
"ImportPath": "github.com/containerd/containerd/fs", "ImportPath": "github.com/containerd/containerd/fs",
"Comment": "v0.2.3-745-gbbeaab5", "Comment": "v0.2.3-856-g193abed",
"Rev": "bbeaab5ee38568daa84b8c233a8067b524588355" "Rev": "193abed96e06a45fead2ed86cfbb11c2a0a001a4"
}, },
{ {
"ImportPath": "github.com/containerd/containerd/images", "ImportPath": "github.com/containerd/containerd/images",
"Comment": "v0.2.3-745-gbbeaab5", "Comment": "v0.2.3-856-g193abed",
"Rev": "bbeaab5ee38568daa84b8c233a8067b524588355" "Rev": "193abed96e06a45fead2ed86cfbb11c2a0a001a4"
}, },
{ {
"ImportPath": "github.com/containerd/containerd/log", "ImportPath": "github.com/containerd/containerd/log",
"Comment": "v0.2.3-745-gbbeaab5", "Comment": "v0.2.3-856-g193abed",
"Rev": "bbeaab5ee38568daa84b8c233a8067b524588355" "Rev": "193abed96e06a45fead2ed86cfbb11c2a0a001a4"
}, },
{ {
"ImportPath": "github.com/containerd/containerd/plugin", "ImportPath": "github.com/containerd/containerd/plugin",
"Comment": "v0.2.3-745-gbbeaab5", "Comment": "v0.2.3-856-g193abed",
"Rev": "bbeaab5ee38568daa84b8c233a8067b524588355" "Rev": "193abed96e06a45fead2ed86cfbb11c2a0a001a4"
}, },
{ {
"ImportPath": "github.com/containerd/containerd/reference", "ImportPath": "github.com/containerd/containerd/reference",
"Comment": "v0.2.3-745-gbbeaab5", "Comment": "v0.2.3-856-g193abed",
"Rev": "bbeaab5ee38568daa84b8c233a8067b524588355" "Rev": "193abed96e06a45fead2ed86cfbb11c2a0a001a4"
}, },
{ {
"ImportPath": "github.com/containerd/containerd/remotes", "ImportPath": "github.com/containerd/containerd/remotes",
"Comment": "v0.2.3-745-gbbeaab5", "Comment": "v0.2.3-856-g193abed",
"Rev": "bbeaab5ee38568daa84b8c233a8067b524588355" "Rev": "193abed96e06a45fead2ed86cfbb11c2a0a001a4"
}, },
{ {
"ImportPath": "github.com/containerd/containerd/remotes/docker", "ImportPath": "github.com/containerd/containerd/remotes/docker",
"Comment": "v0.2.3-745-gbbeaab5", "Comment": "v0.2.3-856-g193abed",
"Rev": "bbeaab5ee38568daa84b8c233a8067b524588355" "Rev": "193abed96e06a45fead2ed86cfbb11c2a0a001a4"
}, },
{ {
"ImportPath": "github.com/containerd/containerd/rootfs", "ImportPath": "github.com/containerd/containerd/rootfs",
"Comment": "v0.2.3-745-gbbeaab5", "Comment": "v0.2.3-856-g193abed",
"Rev": "bbeaab5ee38568daa84b8c233a8067b524588355" "Rev": "193abed96e06a45fead2ed86cfbb11c2a0a001a4"
}, },
{ {
"ImportPath": "github.com/containerd/containerd/services/content", "ImportPath": "github.com/containerd/containerd/services/content",
"Comment": "v0.2.3-745-gbbeaab5", "Comment": "v0.2.3-856-g193abed",
"Rev": "bbeaab5ee38568daa84b8c233a8067b524588355" "Rev": "193abed96e06a45fead2ed86cfbb11c2a0a001a4"
}, },
{ {
"ImportPath": "github.com/containerd/containerd/services/images", "ImportPath": "github.com/containerd/containerd/services/images",
"Comment": "v0.2.3-745-gbbeaab5", "Comment": "v0.2.3-856-g193abed",
"Rev": "bbeaab5ee38568daa84b8c233a8067b524588355" "Rev": "193abed96e06a45fead2ed86cfbb11c2a0a001a4"
}, },
{ {
"ImportPath": "github.com/containerd/containerd/services/rootfs", "ImportPath": "github.com/containerd/containerd/services/rootfs",
"Comment": "v0.2.3-745-gbbeaab5", "Comment": "v0.2.3-856-g193abed",
"Rev": "bbeaab5ee38568daa84b8c233a8067b524588355" "Rev": "193abed96e06a45fead2ed86cfbb11c2a0a001a4"
}, },
{ {
"ImportPath": "github.com/containerd/containerd/snapshot", "ImportPath": "github.com/containerd/containerd/snapshot",
"Comment": "v0.2.3-745-gbbeaab5", "Comment": "v0.2.3-856-g193abed",
"Rev": "bbeaab5ee38568daa84b8c233a8067b524588355" "Rev": "193abed96e06a45fead2ed86cfbb11c2a0a001a4"
}, },
{ {
"ImportPath": "github.com/containerd/containerd/sys", "ImportPath": "github.com/containerd/containerd/sys",
"Comment": "v0.2.3-745-gbbeaab5", "Comment": "v0.2.3-856-g193abed",
"Rev": "bbeaab5ee38568daa84b8c233a8067b524588355" "Rev": "193abed96e06a45fead2ed86cfbb11c2a0a001a4"
},
{
"ImportPath": "github.com/containerd/continuity/sysx",
"Rev": "6414d06cab9e2fe082ea29ff42aab627e740d00c"
}, },
{ {
"ImportPath": "github.com/containernetworking/cni/libcni", "ImportPath": "github.com/containernetworking/cni/libcni",
@ -173,12 +177,12 @@
}, },
{ {
"ImportPath": "github.com/docker/distribution/digestset", "ImportPath": "github.com/docker/distribution/digestset",
"Comment": "v2.6.0-rc.1-130-gb38e583", "Comment": "v2.6.0-rc.1-130-gb38e5838",
"Rev": "b38e5838b7b2f2ad48e06ec4b500011976080621" "Rev": "b38e5838b7b2f2ad48e06ec4b500011976080621"
}, },
{ {
"ImportPath": "github.com/docker/distribution/reference", "ImportPath": "github.com/docker/distribution/reference",
"Comment": "v2.6.0-rc.1-130-gb38e583", "Comment": "v2.6.0-rc.1-130-gb38e5838",
"Rev": "b38e5838b7b2f2ad48e06ec4b500011976080621" "Rev": "b38e5838b7b2f2ad48e06ec4b500011976080621"
}, },
{ {
@ -203,27 +207,27 @@
}, },
{ {
"ImportPath": "github.com/gogo/protobuf/gogoproto", "ImportPath": "github.com/gogo/protobuf/gogoproto",
"Comment": "v0.3-150-gd2e1ade", "Comment": "v0.3-150-gd2e1ade2",
"Rev": "d2e1ade2d719b78fe5b061b4c18a9f7111b5bdc8" "Rev": "d2e1ade2d719b78fe5b061b4c18a9f7111b5bdc8"
}, },
{ {
"ImportPath": "github.com/gogo/protobuf/proto", "ImportPath": "github.com/gogo/protobuf/proto",
"Comment": "v0.3-150-gd2e1ade", "Comment": "v0.3-150-gd2e1ade2",
"Rev": "d2e1ade2d719b78fe5b061b4c18a9f7111b5bdc8" "Rev": "d2e1ade2d719b78fe5b061b4c18a9f7111b5bdc8"
}, },
{ {
"ImportPath": "github.com/gogo/protobuf/protoc-gen-gogo/descriptor", "ImportPath": "github.com/gogo/protobuf/protoc-gen-gogo/descriptor",
"Comment": "v0.3-150-gd2e1ade", "Comment": "v0.3-150-gd2e1ade2",
"Rev": "d2e1ade2d719b78fe5b061b4c18a9f7111b5bdc8" "Rev": "d2e1ade2d719b78fe5b061b4c18a9f7111b5bdc8"
}, },
{ {
"ImportPath": "github.com/gogo/protobuf/sortkeys", "ImportPath": "github.com/gogo/protobuf/sortkeys",
"Comment": "v0.3-150-gd2e1ade", "Comment": "v0.3-150-gd2e1ade2",
"Rev": "d2e1ade2d719b78fe5b061b4c18a9f7111b5bdc8" "Rev": "d2e1ade2d719b78fe5b061b4c18a9f7111b5bdc8"
}, },
{ {
"ImportPath": "github.com/gogo/protobuf/types", "ImportPath": "github.com/gogo/protobuf/types",
"Comment": "v0.3-150-gd2e1ade", "Comment": "v0.3-150-gd2e1ade2",
"Rev": "d2e1ade2d719b78fe5b061b4c18a9f7111b5bdc8" "Rev": "d2e1ade2d719b78fe5b061b4c18a9f7111b5bdc8"
}, },
{ {
@ -267,7 +271,7 @@
}, },
{ {
"ImportPath": "github.com/opencontainers/runc/libcontainer/system", "ImportPath": "github.com/opencontainers/runc/libcontainer/system",
"Comment": "v1.0.0-rc3-21-g50401b5", "Comment": "v1.0.0-rc3-21-g50401b5b",
"Rev": "50401b5b4c2e01e4f1372b73a021742deeaf4e2d" "Rev": "50401b5b4c2e01e4f1372b73a021742deeaf4e2d"
}, },
{ {
@ -301,19 +305,15 @@
"ImportPath": "github.com/spf13/pflag", "ImportPath": "github.com/spf13/pflag",
"Rev": "9ff6c6923cfffbcd502984b8e0c80539a94968b7" "Rev": "9ff6c6923cfffbcd502984b8e0c80539a94968b7"
}, },
{
"ImportPath": "github.com/stevvooe/continuity/sysx",
"Rev": "2abd81dfcc64c06e1eb16a081fd187798f4c2919"
},
{ {
"ImportPath": "github.com/stretchr/testify/assert", "ImportPath": "github.com/stretchr/testify/assert",
"Comment": "v1.1.4-6-g18a02ba", "Comment": "v1.1.4",
"Rev": "18a02ba4a312f95da08ff4cfc0055750ce50ae9e" "Rev": "69483b4bd14f5845b5a1e55bca19e954e827f1d0"
}, },
{ {
"ImportPath": "github.com/stretchr/testify/require", "ImportPath": "github.com/stretchr/testify/require",
"Comment": "v1.1.4-6-g18a02ba", "Comment": "v1.1.4",
"Rev": "18a02ba4a312f95da08ff4cfc0055750ce50ae9e" "Rev": "69483b4bd14f5845b5a1e55bca19e954e827f1d0"
}, },
{ {
"ImportPath": "github.com/syndtr/gocapability/capability", "ImportPath": "github.com/syndtr/gocapability/capability",
@ -330,35 +330,35 @@
}, },
{ {
"ImportPath": "golang.org/x/net/context", "ImportPath": "golang.org/x/net/context",
"Rev": "e90d6d0afc4c315a0d87a568ae68577cc15149a0" "Rev": "8b4af36cd21a1f85a7484b49feb7c79363106d8e"
}, },
{ {
"ImportPath": "golang.org/x/net/context/ctxhttp", "ImportPath": "golang.org/x/net/context/ctxhttp",
"Rev": "e90d6d0afc4c315a0d87a568ae68577cc15149a0" "Rev": "8b4af36cd21a1f85a7484b49feb7c79363106d8e"
}, },
{ {
"ImportPath": "golang.org/x/net/http2", "ImportPath": "golang.org/x/net/http2",
"Rev": "e90d6d0afc4c315a0d87a568ae68577cc15149a0" "Rev": "8b4af36cd21a1f85a7484b49feb7c79363106d8e"
}, },
{ {
"ImportPath": "golang.org/x/net/http2/hpack", "ImportPath": "golang.org/x/net/http2/hpack",
"Rev": "e90d6d0afc4c315a0d87a568ae68577cc15149a0" "Rev": "8b4af36cd21a1f85a7484b49feb7c79363106d8e"
}, },
{ {
"ImportPath": "golang.org/x/net/idna", "ImportPath": "golang.org/x/net/idna",
"Rev": "e90d6d0afc4c315a0d87a568ae68577cc15149a0" "Rev": "8b4af36cd21a1f85a7484b49feb7c79363106d8e"
}, },
{ {
"ImportPath": "golang.org/x/net/internal/timeseries", "ImportPath": "golang.org/x/net/internal/timeseries",
"Rev": "e90d6d0afc4c315a0d87a568ae68577cc15149a0" "Rev": "8b4af36cd21a1f85a7484b49feb7c79363106d8e"
}, },
{ {
"ImportPath": "golang.org/x/net/lex/httplex", "ImportPath": "golang.org/x/net/lex/httplex",
"Rev": "e90d6d0afc4c315a0d87a568ae68577cc15149a0" "Rev": "8b4af36cd21a1f85a7484b49feb7c79363106d8e"
}, },
{ {
"ImportPath": "golang.org/x/net/trace", "ImportPath": "golang.org/x/net/trace",
"Rev": "e90d6d0afc4c315a0d87a568ae68577cc15149a0" "Rev": "8b4af36cd21a1f85a7484b49feb7c79363106d8e"
}, },
{ {
"ImportPath": "golang.org/x/sync/errgroup", "ImportPath": "golang.org/x/sync/errgroup",

View File

@ -268,11 +268,11 @@ func (c *criContainerdService) getImageInfo(ctx context.Context, ref string) (
normalizedRef, err) normalizedRef, err)
} }
// Get image config // Get image config
desc, err := image.Config(ctx, c.contentProvider) desc, err := image.Config(ctx, c.contentStoreService)
if err != nil { if err != nil {
return "", 0, nil, fmt.Errorf("failed to get image config descriptor: %v", err) return "", 0, nil, fmt.Errorf("failed to get image config descriptor: %v", err)
} }
rc, err := c.contentProvider.Reader(ctx, desc.Digest) rc, err := c.contentStoreService.Reader(ctx, desc.Digest)
if err != nil { if err != nil {
return "", 0, nil, fmt.Errorf("failed to get image config reader: %v", err) return "", 0, nil, fmt.Errorf("failed to get image config reader: %v", err)
} }
@ -282,13 +282,13 @@ func (c *criContainerdService) getImageInfo(ctx context.Context, ref string) (
return "", 0, nil, fmt.Errorf("failed to decode image config: %v", err) return "", 0, nil, fmt.Errorf("failed to decode image config: %v", err)
} }
// Get image chainID // Get image chainID
diffIDs, err := image.RootFS(ctx, c.contentProvider) diffIDs, err := image.RootFS(ctx, c.contentStoreService)
if err != nil { if err != nil {
return "", 0, nil, fmt.Errorf("failed to get image diff ids: %v", err) return "", 0, nil, fmt.Errorf("failed to get image diff ids: %v", err)
} }
chainID := identity.ChainID(diffIDs) chainID := identity.ChainID(diffIDs)
// Get image size // Get image size
size, err := image.Size(ctx, c.contentProvider) size, err := image.Size(ctx, c.contentStoreService)
if err != nil { if err != nil {
return "", 0, nil, fmt.Errorf("failed to get image size: %v", err) return "", 0, nil, fmt.Errorf("failed to get image size: %v", err)
} }
@ -325,7 +325,7 @@ func (c *criContainerdService) localResolve(ctx context.Context, ref string) (st
return "", fmt.Errorf("an error occurred when getting image %q from containerd image store: %v", return "", fmt.Errorf("an error occurred when getting image %q from containerd image store: %v",
normalized.String(), err) normalized.String(), err)
} }
desc, err := image.Config(ctx, c.contentProvider) desc, err := image.Config(ctx, c.contentStoreService)
if err != nil { if err != nil {
return "", fmt.Errorf("failed to get image config descriptor: %v", err) return "", fmt.Errorf("failed to get image config descriptor: %v", err)
} }

View File

@ -17,13 +17,18 @@ limitations under the License.
package server package server
import ( import (
gocontext "context"
"encoding/json" "encoding/json"
"fmt" "fmt"
"net/http"
"sync"
"time"
"github.com/containerd/containerd/content" "github.com/containerd/containerd/content"
containerdimages "github.com/containerd/containerd/images" containerdimages "github.com/containerd/containerd/images"
"github.com/containerd/containerd/remotes" "github.com/containerd/containerd/remotes"
"github.com/containerd/containerd/remotes/docker" "github.com/containerd/containerd/remotes/docker"
rootfsservice "github.com/containerd/containerd/services/rootfs"
"github.com/golang/glog" "github.com/golang/glog"
imagedigest "github.com/opencontainers/go-digest" imagedigest "github.com/opencontainers/go-digest"
imagespec "github.com/opencontainers/image-spec/specs-go/v1" imagespec "github.com/opencontainers/image-spec/specs-go/v1"
@ -72,7 +77,6 @@ import (
// contents are missing but snapshots are ready, is the image still "READY"? // contents are missing but snapshots are ready, is the image still "READY"?
// PullImage pulls an image with authentication config. // PullImage pulls an image with authentication config.
// TODO(mikebrow): add authentication
// TODO(mikebrow): harden api (including figuring out at what layer we should be blocking on duplicate requests.) // TODO(mikebrow): harden api (including figuring out at what layer we should be blocking on duplicate requests.)
func (c *criContainerdService) PullImage(ctx context.Context, r *runtime.PullImageRequest) (retRes *runtime.PullImageResponse, retErr error) { func (c *criContainerdService) PullImage(ctx context.Context, r *runtime.PullImageRequest) (retRes *runtime.PullImageResponse, retErr error) {
glog.V(2).Infof("PullImage %q with auth config %+v", r.GetImage().GetImage(), r.GetAuth()) glog.V(2).Infof("PullImage %q with auth config %+v", r.GetImage().GetImage(), r.GetAuth())
@ -152,13 +156,46 @@ func (c *criContainerdService) PullImage(ctx context.Context, r *runtime.PullIma
return &runtime.PullImageResponse{ImageRef: imageID}, err return &runtime.PullImageResponse{ImageRef: imageID}, err
} }
// resourceSet is the helper struct to help tracking all resources associated
// with an image.
type resourceSet struct {
sync.Mutex
resources map[string]struct{}
}
func newResourceSet() *resourceSet {
return &resourceSet{resources: make(map[string]struct{})}
}
func (r *resourceSet) add(resource string) {
r.Lock()
defer r.Unlock()
r.resources[resource] = struct{}{}
}
// all returns an array of all resources added.
func (r *resourceSet) all() map[string]struct{} {
r.Lock()
defer r.Unlock()
resources := make(map[string]struct{})
for resource := range r.resources {
resources[resource] = struct{}{}
}
return resources
}
// pullImage pulls image and returns image id (config digest) and manifest digest. // pullImage pulls image and returns image id (config digest) and manifest digest.
// The ref should be normalized image reference. // The ref should be normalized image reference.
// TODO(random-liu): [P0] Wait for all downloadings to be done before return. // TODO(random-liu): [P0] Wait for all downloadings to be done before return.
func (c *criContainerdService) pullImage(ctx context.Context, ref string) ( func (c *criContainerdService) pullImage(ctx context.Context, ref string) (
imagedigest.Digest, imagedigest.Digest, error) { imagedigest.Digest, imagedigest.Digest, error) {
// Resolve the image reference to get descriptor and fetcher. // Resolve the image reference to get descriptor and fetcher.
resolver := docker.NewResolver() resolver := docker.NewResolver(docker.ResolverOptions{
// TODO(random-liu): Add authentication by setting credentials.
// TODO(random-liu): Handle https.
PlainHTTP: true,
Client: http.DefaultClient,
})
_, desc, fetcher, err := resolver.Resolve(ctx, ref) _, desc, fetcher, err := resolver.Resolve(ctx, ref)
if err != nil { if err != nil {
return "", "", fmt.Errorf("failed to resolve ref %q: %v", ref, err) return "", "", fmt.Errorf("failed to resolve ref %q: %v", ref, err)
@ -178,19 +215,37 @@ func (c *criContainerdService) pullImage(ctx context.Context, ref string) (
} }
// TODO(random-liu): What if following operations fail? Do we need to do cleanup? // TODO(random-liu): What if following operations fail? Do we need to do cleanup?
resources := newResourceSet()
glog.V(4).Infof("Start downloading resources for image %q", ref)
// Fetch all image resources into content store. // Fetch all image resources into content store.
// Dispatch a handler which will run a sequence of handlers to: // Dispatch a handler which will run a sequence of handlers to:
// 1) fetch the object using a FetchHandler; // 1) track all resources associated using a customized handler;
// 2) recurse through any sub-layers via a ChildrenHandler. // 2) fetch the object using a FetchHandler;
// 3) recurse through any sub-layers via a ChildrenHandler.
err = containerdimages.Dispatch( err = containerdimages.Dispatch(
ctx, ctx,
containerdimages.Handlers( containerdimages.Handlers(
remotes.FetchHandler(c.contentIngester, fetcher), containerdimages.HandlerFunc(func(ctx gocontext.Context, desc imagespec.Descriptor) (
containerdimages.ChildrenHandler(c.contentProvider)), []imagespec.Descriptor, error) {
resources.add(remotes.MakeRefKey(ctx, desc))
return nil, nil
}),
remotes.FetchHandler(c.contentStoreService, fetcher),
containerdimages.ChildrenHandler(c.contentStoreService)),
desc) desc)
if err != nil { if err != nil {
return "", "", fmt.Errorf("failed to fetch image %q desc %+v: %v", ref, desc, err) // Dispatch returns error when requested resources are locked.
// In that case, we should start waiting and checking the pulling
// progress.
// TODO(random-liu): Check specific resource locked error type.
glog.V(5).Infof("Dispatch for %q returns error: %v", ref, err)
} }
// Wait for the image pulling to finish
if err := c.waitForResourcesDownloading(ctx, resources.all()); err != nil {
return "", "", fmt.Errorf("failed to wait for image %q downloading: %v", ref, err)
}
glog.V(4).Infof("Finished downloading resources for image %q", ref)
image, err := c.imageStoreService.Get(ctx, ref) image, err := c.imageStoreService.Get(ctx, ref)
if err != nil { if err != nil {
@ -198,7 +253,7 @@ func (c *criContainerdService) pullImage(ctx context.Context, ref string) (
} }
// Read the image manifest from content store. // Read the image manifest from content store.
manifestDigest := image.Target.Digest manifestDigest := image.Target.Digest
p, err := content.ReadBlob(ctx, c.contentProvider, manifestDigest) p, err := content.ReadBlob(ctx, c.contentStoreService, manifestDigest)
if err != nil { if err != nil {
return "", "", fmt.Errorf("readblob failed for manifest digest %q: %v", manifestDigest, err) return "", "", fmt.Errorf("readblob failed for manifest digest %q: %v", manifestDigest, err)
} }
@ -209,18 +264,56 @@ func (c *criContainerdService) pullImage(ctx context.Context, ref string) (
} }
// Unpack the image layers into snapshots. // Unpack the image layers into snapshots.
if _, err = c.rootfsUnpacker.Unpack(ctx, manifest.Layers); err != nil { rootfsUnpacker := rootfsservice.NewUnpackerFromClient(c.rootfsService)
if _, err = rootfsUnpacker.Unpack(ctx, manifest.Layers); err != nil {
return "", "", fmt.Errorf("unpack failed for manifest layers %+v: %v", manifest.Layers, err) return "", "", fmt.Errorf("unpack failed for manifest layers %+v: %v", manifest.Layers, err)
} }
// TODO(random-liu): Considering how to deal with the disk usage of content. // TODO(random-liu): Considering how to deal with the disk usage of content.
configDesc, err := image.Config(ctx, c.contentProvider) configDesc, err := image.Config(ctx, c.contentStoreService)
if err != nil { if err != nil {
return "", "", fmt.Errorf("failed to get config descriptor for image %q: %v", ref, err) return "", "", fmt.Errorf("failed to get config descriptor for image %q: %v", ref, err)
} }
return configDesc.Digest, manifestDigest, nil return configDesc.Digest, manifestDigest, nil
} }
// waitDownloadingPollInterval is the interval to check resource downloading progress.
const waitDownloadingPollInterval = 200 * time.Millisecond
// waitForResourcesDownloading waits for all resource downloading to finish.
func (c *criContainerdService) waitForResourcesDownloading(ctx context.Context, resources map[string]struct{}) error {
ticker := time.NewTicker(waitDownloadingPollInterval)
defer ticker.Stop()
for {
select {
case <-ticker.C:
// TODO(random-liu): Use better regexp when containerd `MakeRefKey` contains more
// information.
statuses, err := c.contentStoreService.Status(ctx, "")
if err != nil {
return fmt.Errorf("failed to get content status: %v", err)
}
pulling := false
// TODO(random-liu): Move Dispatch into a separate goroutine, so that we could report
// image pulling progress concurrently.
for _, status := range statuses {
_, ok := resources[status.Ref]
if ok {
glog.V(5).Infof("Pulling resource %q with progress %d/%d",
status.Ref, status.Offset, status.Total)
pulling = true
}
}
if !pulling {
return nil
}
case <-ctx.Done():
// TODO(random-liu): Abort ongoing pulling if cancelled.
return fmt.Errorf("image resources pulling is cancelled")
}
}
}
// insertToStringSlice is a helper function to insert a string into the string slice // insertToStringSlice is a helper function to insert a string into the string slice
// if the string is not in the slice yet. // if the string is not in the slice yet.
func insertToStringSlice(ss []string, s string) []string { func insertToStringSlice(ss []string, s string) []string {

View File

@ -17,6 +17,8 @@ limitations under the License.
package server package server
import ( import (
"fmt"
"sync"
"testing" "testing"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
@ -72,3 +74,22 @@ func TestUpdateImageMetadata(t *testing.T) {
assert.Equal(t, test.expectedRepoDigests, m.RepoDigests) assert.Equal(t, test.expectedRepoDigests, m.RepoDigests)
} }
} }
func TestResources(t *testing.T) {
const threads = 10
var wg sync.WaitGroup
r := newResourceSet()
for i := 0; i < threads; i++ {
wg.Add(1)
go func(ref string) {
r.add(ref)
wg.Done()
}(fmt.Sprintf("sha256:%d", i))
}
wg.Wait()
refs := r.all()
for i := 0; i < threads; i++ {
_, ok := refs[fmt.Sprintf("sha256:%d", i)]
assert.True(t, ok)
}
}

View File

@ -29,10 +29,8 @@ import (
rootfsapi "github.com/containerd/containerd/api/services/rootfs" rootfsapi "github.com/containerd/containerd/api/services/rootfs"
"github.com/containerd/containerd/content" "github.com/containerd/containerd/content"
"github.com/containerd/containerd/images" "github.com/containerd/containerd/images"
"github.com/containerd/containerd/rootfs"
contentservice "github.com/containerd/containerd/services/content" contentservice "github.com/containerd/containerd/services/content"
imagesservice "github.com/containerd/containerd/services/images" imagesservice "github.com/containerd/containerd/services/images"
rootfsservice "github.com/containerd/containerd/services/rootfs"
"github.com/kubernetes-incubator/cri-containerd/pkg/metadata" "github.com/kubernetes-incubator/cri-containerd/pkg/metadata"
"github.com/kubernetes-incubator/cri-containerd/pkg/metadata/store" "github.com/kubernetes-incubator/cri-containerd/pkg/metadata/store"
@ -85,15 +83,10 @@ type criContainerdService struct {
containerNameIndex *registrar.Registrar containerNameIndex *registrar.Registrar
// containerService is containerd container service client. // containerService is containerd container service client.
containerService execution.ContainerServiceClient containerService execution.ContainerServiceClient
// contentIngester is the containerd service to ingest content into // contentStoreService is the containerd content service client..
// content store. contentStoreService content.Store
contentIngester content.Ingester // rootfsService is the containerd rootfs service client.
// contentProvider is the containerd service to get content from rootfsService rootfsapi.RootFSClient
// content store.
contentProvider content.Provider
// rootfsUnpacker is the containerd service to unpack image content
// into snapshots.
rootfsUnpacker rootfs.Unpacker
// imageStoreService is the containerd service to store and track // imageStoreService is the containerd service to store and track
// image metadata. // image metadata.
imageStoreService images.Store imageStoreService images.Store
@ -103,7 +96,6 @@ type criContainerdService struct {
// NewCRIContainerdService returns a new instance of CRIContainerdService // NewCRIContainerdService returns a new instance of CRIContainerdService
func NewCRIContainerdService(conn *grpc.ClientConn, rootDir, networkPluginBinDir, networkPluginConfDir string) (CRIContainerdService, error) { func NewCRIContainerdService(conn *grpc.ClientConn, rootDir, networkPluginBinDir, networkPluginConfDir string) (CRIContainerdService, error) {
// TODO: Initialize different containerd clients.
// TODO(random-liu): [P2] Recover from runtime state and metadata store. // TODO(random-liu): [P2] Recover from runtime state and metadata store.
c := &criContainerdService{ c := &criContainerdService{
os: osinterface.RealOS{}, os: osinterface.RealOS{},
@ -119,9 +111,8 @@ func NewCRIContainerdService(conn *grpc.ClientConn, rootDir, networkPluginBinDir
containerNameIndex: registrar.NewRegistrar(), containerNameIndex: registrar.NewRegistrar(),
containerService: execution.NewContainerServiceClient(conn), containerService: execution.NewContainerServiceClient(conn),
imageStoreService: imagesservice.NewStoreFromClient(imagesapi.NewImagesClient(conn)), imageStoreService: imagesservice.NewStoreFromClient(imagesapi.NewImagesClient(conn)),
contentIngester: contentservice.NewIngesterFromClient(contentapi.NewContentClient(conn)), contentStoreService: contentservice.NewStoreFromClient(contentapi.NewContentClient(conn)),
contentProvider: contentservice.NewProviderFromClient(contentapi.NewContentClient(conn)), rootfsService: rootfsapi.NewRootFSClient(conn),
rootfsUnpacker: rootfsservice.NewUnpackerFromClient(rootfsapi.NewRootFSClient(conn)),
} }
netPlugin, err := ocicni.InitCNI(networkPluginBinDir, networkPluginConfDir) netPlugin, err := ocicni.InitCNI(networkPluginBinDir, networkPluginConfDir)

View File

@ -359,3 +359,15 @@ func (f *FakeExecutionClient) CloseStdin(ctx context.Context, closeStdinOpts *ex
// TODO: implement CloseStdin() // TODO: implement CloseStdin()
return nil, nil return nil, nil
} }
// Pause is a test implementation of execution.Pause
func (f *FakeExecutionClient) Pause(ctx context.Context, in *execution.PauseRequest, opts ...grpc.CallOption) (*googleprotobuf.Empty, error) {
// TODO: implement Pause()
return nil, nil
}
// Resume is a test implementation of execution.Resume
func (f *FakeExecutionClient) Resume(ctx context.Context, in *execution.ResumeRequest, opts ...grpc.CallOption) (*googleprotobuf.Empty, error) {
// TODO: implement Resume()
return nil, nil
}

View File

@ -29,9 +29,10 @@ install:
script: script:
- export GOOS=$TRAVIS_GOOS - export GOOS=$TRAVIS_GOOS
- export CGO_ENABLED=$TRAVIS_CGO_ENABLED - export CGO_ENABLED=$TRAVIS_CGO_ENABLED
- GIT_CHECK_EXCLUDE="./vendor" TRAVIS_COMMIT_RANGE="${TRAVIS_COMMIT_RANGE/.../..}" make dco
- make fmt - make fmt
- make vet
- make binaries - make binaries
- TRAVIS_COMMIT_RANGE="${TRAVIS_COMMIT_RANGE/.../..}" make dco
- if [ "$GOOS" != "windows" ]; then make coverage ; fi - if [ "$GOOS" != "windows" ]; then make coverage ; fi
- if [ "$GOOS" != "windows" ]; then sudo PATH=$PATH GOPATH=$GOPATH make root-coverage ; fi - if [ "$GOOS" != "windows" ]; then sudo PATH=$PATH GOPATH=$GOPATH make root-coverage ; fi

View File

@ -59,6 +59,23 @@ If a candidate is approved, a maintainer will contact the candidate to invite
the candidate to open a pull request that adds the contributor to the the candidate to open a pull request that adds the contributor to the
MAINTAINERS file. The candidate becomes a maintainer once the pull request is MAINTAINERS file. The candidate becomes a maintainer once the pull request is
merged. merged.
"""
[Rules.adding-sub-projects]
title = "How are sub projects added?"
text = """
Similar to adding maintainers, new sub projects can be added to containerd
GitHub organization as long as they adhere to the CNCF
[charter](https://www.cncf.io/about/charter/) and mission. After a project
proposal has been announced on a public forum (GitHub issue or mailing list),
the existing maintainers are given five business days to discuss the new
project, raise objections and cast their vote. Projects must be approved by at
least 66% of the current maintainers by adding their vote.
If a project is approved, a maintainer will add the project to the containerd
GitHub organization, and make an announcement on a public forum.
""" """
[Rules.stepping-down-policy] [Rules.stepping-down-policy]

View File

@ -28,7 +28,7 @@ SNAPSHOT_PACKAGES=$(shell go list ./snapshot/...)
# Project binaries. # Project binaries.
COMMANDS=ctr containerd protoc-gen-gogoctrd dist ctrd-protobuild COMMANDS=ctr containerd protoc-gen-gogoctrd dist ctrd-protobuild
ifneq ("$(GOOS)", "windows") ifeq ("$(GOOS)", "linux")
COMMANDS += containerd-shim COMMANDS += containerd-shim
endif endif
BINARIES=$(addprefix bin/,$(COMMANDS)) BINARIES=$(addprefix bin/,$(COMMANDS))
@ -79,7 +79,7 @@ checkprotos: protos ## check if protobufs needs to be generated again
# imports # imports
vet: binaries ## run go vet vet: binaries ## run go vet
@echo "$(WHALE) $@" @echo "$(WHALE) $@"
@test -z "$$(go vet ${PACKAGES} 2>&1 | grep -v 'constant [0-9]* not a string in call to Errorf' | egrep -v '(timestamp_test.go|duration_test.go|exit status 1)' | tee /dev/stderr)" @test -z "$$(go vet ${PACKAGES} 2>&1 | grep -v 'constant [0-9]* not a string in call to Errorf' | grep -v 'unrecognized printf verb 'r'' | egrep -v '(timestamp_test.go|duration_test.go|fetch.go|exit status 1)' | tee /dev/stderr)"
fmt: ## run go fmt fmt: ## run go fmt
@echo "$(WHALE) $@" @echo "$(WHALE) $@"

View File

@ -1,6 +1,7 @@
![banner](/docs/images/containerd-dark.png?raw=true) ![banner](/docs/images/containerd-dark.png?raw=true)
[![Build Status](https://travis-ci.org/containerd/containerd.svg?branch=master)](https://travis-ci.org/containerd/containerd) [![Build Status](https://travis-ci.org/containerd/containerd.svg?branch=master)](https://travis-ci.org/containerd/containerd)
[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bhttps%3A%2F%2Fgithub.com%2Fcontainerd%2Fcontainerd.svg?type=shield)](https://app.fossa.io/projects/git%2Bhttps%3A%2F%2Fgithub.com%2Fcontainerd%2Fcontainerd?ref=badge_shield)
containerd is an industry-standard container runtime with an emphasis on simplicity, robustness and portability. It is available as a daemon for Linux and Windows, which can manage the complete container lifecycle of its host system: image transfer and storage, container execution and supervision, low-level storage and network attachments, etc.. containerd is an industry-standard container runtime with an emphasis on simplicity, robustness and portability. It is available as a daemon for Linux and Windows, which can manage the complete container lifecycle of its host system: image transfer and storage, container execution and supervision, low-level storage and network attachments, etc..

File diff suppressed because it is too large Load Diff

View File

@ -14,6 +14,14 @@ service Content {
// existence. // existence.
rpc Info(InfoRequest) returns (InfoResponse); rpc Info(InfoRequest) returns (InfoResponse);
// List streams the entire set of content as Info objects and closes the
// stream.
//
// Typically, this will yield a large response, chunked into messages.
// Clients should make provisions to ensure they can handle the entire data
// set.
rpc List(ListContentRequest) returns (stream ListContentResponse);
// Delete will delete the referenced object. // Delete will delete the referenced object.
rpc Delete(DeleteContentRequest) returns (google.protobuf.Empty); rpc Delete(DeleteContentRequest) returns (google.protobuf.Empty);
@ -25,9 +33,10 @@ service Content {
// Status returns the status of ongoing object ingestions, started via // Status returns the status of ongoing object ingestions, started via
// Write. // Write.
// //
// For active ingestions, the status will be streamed until the client // Only those matching the regular expression will be provided in the
// closes the connection or all matched ingestions are committed. // response. If the provided regular expression is empty, all ingestions
rpc Status(StatusRequest) returns (stream StatusResponse); // will be provided.
rpc Status(StatusRequest) returns (StatusResponse);
// Write begins or resumes writes to a resource identified by a unique ref. // Write begins or resumes writes to a resource identified by a unique ref.
// Only one active stream may exist at a time for each ref. // Only one active stream may exist at a time for each ref.
@ -46,13 +55,13 @@ service Content {
// When completed, the commit flag should be set to true. If expected size // When completed, the commit flag should be set to true. If expected size
// or digest is set, the content will be validated against those values. // or digest is set, the content will be validated against those values.
rpc Write(stream WriteRequest) returns (stream WriteResponse); rpc Write(stream WriteRequest) returns (stream WriteResponse);
// Abort cancels the ongoing write named in the request. Any resources
// associated with the write will be collected.
rpc Abort(AbortRequest) returns (google.protobuf.Empty);
} }
message InfoRequest { message Info {
string digest = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
}
message InfoResponse {
// Digest is the hash identity of the blob. // Digest is the hash identity of the blob.
string digest = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false]; string digest = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
@ -63,6 +72,20 @@ message InfoResponse {
google.protobuf.Timestamp committed_at = 3 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; google.protobuf.Timestamp committed_at = 3 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
} }
message InfoRequest {
string digest = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
}
message InfoResponse {
Info info = 1 [(gogoproto.nullable) = false];
}
message ListContentRequest {}
message ListContentResponse {
repeated Info info = 1 [(gogoproto.nullable) = false];
}
message DeleteContentRequest { message DeleteContentRequest {
// Digest specifies which content to delete. // Digest specifies which content to delete.
string digest = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false]; string digest = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
@ -90,6 +113,22 @@ message ReadResponse {
bytes data = 2; // actual data bytes data = 2; // actual data
} }
message StatusRequest {
string regexp = 1;
}
message Status {
google.protobuf.Timestamp started_at = 1 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
google.protobuf.Timestamp updated_at = 2 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
string ref = 3;
int64 offset = 4;
int64 total = 5;
string expected = 6 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
}
message StatusResponse {
repeated Status statuses = 1 [(gogoproto.nullable) = false];
}
// WriteAction defines the behavior of a WriteRequest. // WriteAction defines the behavior of a WriteRequest.
enum WriteAction { enum WriteAction {
@ -116,12 +155,6 @@ enum WriteAction {
// //
// This action will always terminate the write. // This action will always terminate the write.
COMMIT = 2 [(gogoproto.enumvalue_customname) = "WriteActionCommit"]; COMMIT = 2 [(gogoproto.enumvalue_customname) = "WriteActionCommit"];
// WriteActionAbort will release any resources associated with the write
// and free up the ref for a completely new set of writes.
//
// This action will always terminate the write.
ABORT = -1 [(gogoproto.enumvalue_customname) = "WriteActionAbort"];
} }
// WriteRequest writes data to the request ref at offset. // WriteRequest writes data to the request ref at offset.
@ -213,20 +246,10 @@ message WriteResponse {
// Digest, if present, includes the digest up to the currently committed // Digest, if present, includes the digest up to the currently committed
// bytes. If action is commit, this field will be set. It is implementation // bytes. If action is commit, this field will be set. It is implementation
// defined if this is set for other actions, except abort. On abort, this // defined if this is set for other actions.
// will be empty.
string digest = 6 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false]; string digest = 6 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
} }
message StatusRequest { message AbortRequest {
repeated string refs = 1; string ref = 1;
repeated string prefix = 2;
}
message StatusResponse {
google.protobuf.Timestamp started_at = 1 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
google.protobuf.Timestamp updated_at = 2 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
string ref = 3;
int64 offset = 4;
int64 total = 5;
} }

View File

@ -23,6 +23,8 @@
ExecResponse ExecResponse
PtyRequest PtyRequest
CloseStdinRequest CloseStdinRequest
PauseRequest
ResumeRequest
*/ */
package execution package execution
@ -193,6 +195,22 @@ func (m *CloseStdinRequest) Reset() { *m = CloseStdinRequest{
func (*CloseStdinRequest) ProtoMessage() {} func (*CloseStdinRequest) ProtoMessage() {}
func (*CloseStdinRequest) Descriptor() ([]byte, []int) { return fileDescriptorExecution, []int{13} } func (*CloseStdinRequest) Descriptor() ([]byte, []int) { return fileDescriptorExecution, []int{13} }
type PauseRequest struct {
ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
}
func (m *PauseRequest) Reset() { *m = PauseRequest{} }
func (*PauseRequest) ProtoMessage() {}
func (*PauseRequest) Descriptor() ([]byte, []int) { return fileDescriptorExecution, []int{14} }
type ResumeRequest struct {
ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
}
func (m *ResumeRequest) Reset() { *m = ResumeRequest{} }
func (*ResumeRequest) ProtoMessage() {}
func (*ResumeRequest) Descriptor() ([]byte, []int) { return fileDescriptorExecution, []int{15} }
func init() { func init() {
proto.RegisterType((*CreateRequest)(nil), "containerd.v1.services.CreateRequest") proto.RegisterType((*CreateRequest)(nil), "containerd.v1.services.CreateRequest")
proto.RegisterType((*CreateResponse)(nil), "containerd.v1.services.CreateResponse") proto.RegisterType((*CreateResponse)(nil), "containerd.v1.services.CreateResponse")
@ -208,6 +226,8 @@ func init() {
proto.RegisterType((*ExecResponse)(nil), "containerd.v1.services.ExecResponse") proto.RegisterType((*ExecResponse)(nil), "containerd.v1.services.ExecResponse")
proto.RegisterType((*PtyRequest)(nil), "containerd.v1.services.PtyRequest") proto.RegisterType((*PtyRequest)(nil), "containerd.v1.services.PtyRequest")
proto.RegisterType((*CloseStdinRequest)(nil), "containerd.v1.services.CloseStdinRequest") proto.RegisterType((*CloseStdinRequest)(nil), "containerd.v1.services.CloseStdinRequest")
proto.RegisterType((*PauseRequest)(nil), "containerd.v1.services.PauseRequest")
proto.RegisterType((*ResumeRequest)(nil), "containerd.v1.services.ResumeRequest")
} }
// Reference imports to suppress errors if they are not otherwise used. // Reference imports to suppress errors if they are not otherwise used.
@ -231,6 +251,8 @@ type ContainerServiceClient interface {
Exec(ctx context.Context, in *ExecRequest, opts ...grpc.CallOption) (*ExecResponse, error) Exec(ctx context.Context, in *ExecRequest, opts ...grpc.CallOption) (*ExecResponse, error)
Pty(ctx context.Context, in *PtyRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error) Pty(ctx context.Context, in *PtyRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error)
CloseStdin(ctx context.Context, in *CloseStdinRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error) CloseStdin(ctx context.Context, in *CloseStdinRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error)
Pause(ctx context.Context, in *PauseRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error)
Resume(ctx context.Context, in *ResumeRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error)
} }
type containerServiceClient struct { type containerServiceClient struct {
@ -354,6 +376,24 @@ func (c *containerServiceClient) CloseStdin(ctx context.Context, in *CloseStdinR
return out, nil return out, nil
} }
func (c *containerServiceClient) Pause(ctx context.Context, in *PauseRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error) {
out := new(google_protobuf.Empty)
err := grpc.Invoke(ctx, "/containerd.v1.services.ContainerService/Pause", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *containerServiceClient) Resume(ctx context.Context, in *ResumeRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error) {
out := new(google_protobuf.Empty)
err := grpc.Invoke(ctx, "/containerd.v1.services.ContainerService/Resume", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// Server API for ContainerService service // Server API for ContainerService service
type ContainerServiceServer interface { type ContainerServiceServer interface {
@ -367,6 +407,8 @@ type ContainerServiceServer interface {
Exec(context.Context, *ExecRequest) (*ExecResponse, error) Exec(context.Context, *ExecRequest) (*ExecResponse, error)
Pty(context.Context, *PtyRequest) (*google_protobuf.Empty, error) Pty(context.Context, *PtyRequest) (*google_protobuf.Empty, error)
CloseStdin(context.Context, *CloseStdinRequest) (*google_protobuf.Empty, error) CloseStdin(context.Context, *CloseStdinRequest) (*google_protobuf.Empty, error)
Pause(context.Context, *PauseRequest) (*google_protobuf.Empty, error)
Resume(context.Context, *ResumeRequest) (*google_protobuf.Empty, error)
} }
func RegisterContainerServiceServer(s *grpc.Server, srv ContainerServiceServer) { func RegisterContainerServiceServer(s *grpc.Server, srv ContainerServiceServer) {
@ -556,6 +598,42 @@ func _ContainerService_CloseStdin_Handler(srv interface{}, ctx context.Context,
return interceptor(ctx, in, info, handler) return interceptor(ctx, in, info, handler)
} }
func _ContainerService_Pause_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(PauseRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ContainerServiceServer).Pause(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/containerd.v1.services.ContainerService/Pause",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ContainerServiceServer).Pause(ctx, req.(*PauseRequest))
}
return interceptor(ctx, in, info, handler)
}
func _ContainerService_Resume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ResumeRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ContainerServiceServer).Resume(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/containerd.v1.services.ContainerService/Resume",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ContainerServiceServer).Resume(ctx, req.(*ResumeRequest))
}
return interceptor(ctx, in, info, handler)
}
var _ContainerService_serviceDesc = grpc.ServiceDesc{ var _ContainerService_serviceDesc = grpc.ServiceDesc{
ServiceName: "containerd.v1.services.ContainerService", ServiceName: "containerd.v1.services.ContainerService",
HandlerType: (*ContainerServiceServer)(nil), HandlerType: (*ContainerServiceServer)(nil),
@ -596,6 +674,14 @@ var _ContainerService_serviceDesc = grpc.ServiceDesc{
MethodName: "CloseStdin", MethodName: "CloseStdin",
Handler: _ContainerService_CloseStdin_Handler, Handler: _ContainerService_CloseStdin_Handler,
}, },
{
MethodName: "Pause",
Handler: _ContainerService_Pause_Handler,
},
{
MethodName: "Resume",
Handler: _ContainerService_Resume_Handler,
},
}, },
Streams: []grpc.StreamDesc{ Streams: []grpc.StreamDesc{
{ {
@ -1083,6 +1169,54 @@ func (m *CloseStdinRequest) MarshalTo(dAtA []byte) (int, error) {
return i, nil return i, nil
} }
func (m *PauseRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *PauseRequest) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if len(m.ID) > 0 {
dAtA[i] = 0xa
i++
i = encodeVarintExecution(dAtA, i, uint64(len(m.ID)))
i += copy(dAtA[i:], m.ID)
}
return i, nil
}
func (m *ResumeRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *ResumeRequest) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if len(m.ID) > 0 {
dAtA[i] = 0xa
i++
i = encodeVarintExecution(dAtA, i, uint64(len(m.ID)))
i += copy(dAtA[i:], m.ID)
}
return i, nil
}
func encodeFixed64Execution(dAtA []byte, offset int, v uint64) int { func encodeFixed64Execution(dAtA []byte, offset int, v uint64) int {
dAtA[offset] = uint8(v) dAtA[offset] = uint8(v)
dAtA[offset+1] = uint8(v >> 8) dAtA[offset+1] = uint8(v >> 8)
@ -1317,6 +1451,26 @@ func (m *CloseStdinRequest) Size() (n int) {
return n return n
} }
func (m *PauseRequest) Size() (n int) {
var l int
_ = l
l = len(m.ID)
if l > 0 {
n += 1 + l + sovExecution(uint64(l))
}
return n
}
func (m *ResumeRequest) Size() (n int) {
var l int
_ = l
l = len(m.ID)
if l > 0 {
n += 1 + l + sovExecution(uint64(l))
}
return n
}
func sovExecution(x uint64) (n int) { func sovExecution(x uint64) (n int) {
for { for {
n++ n++
@ -1489,6 +1643,26 @@ func (this *CloseStdinRequest) String() string {
}, "") }, "")
return s return s
} }
func (this *PauseRequest) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&PauseRequest{`,
`ID:` + fmt.Sprintf("%v", this.ID) + `,`,
`}`,
}, "")
return s
}
func (this *ResumeRequest) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&ResumeRequest{`,
`ID:` + fmt.Sprintf("%v", this.ID) + `,`,
`}`,
}, "")
return s
}
func valueToStringExecution(v interface{}) string { func valueToStringExecution(v interface{}) string {
rv := reflect.ValueOf(v) rv := reflect.ValueOf(v)
if rv.IsNil() { if rv.IsNil() {
@ -3060,6 +3234,164 @@ func (m *CloseStdinRequest) Unmarshal(dAtA []byte) error {
} }
return nil return nil
} }
func (m *PauseRequest) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowExecution
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: PauseRequest: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: PauseRequest: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowExecution
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthExecution
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.ID = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipExecution(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthExecution
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *ResumeRequest) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowExecution
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: ResumeRequest: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: ResumeRequest: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowExecution
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthExecution
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.ID = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipExecution(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthExecution
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func skipExecution(dAtA []byte) (n int, err error) { func skipExecution(dAtA []byte) (n int, err error) {
l := len(dAtA) l := len(dAtA)
iNdEx := 0 iNdEx := 0
@ -3170,56 +3502,59 @@ func init() {
} }
var fileDescriptorExecution = []byte{ var fileDescriptorExecution = []byte{
// 814 bytes of a gzipped FileDescriptorProto // 854 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x55, 0xcf, 0x6f, 0xe2, 0x46, 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x55, 0x4f, 0x8f, 0xdb, 0x44,
0x14, 0x8e, 0xf9, 0xe1, 0x90, 0x47, 0x48, 0xd3, 0x51, 0x84, 0x5c, 0x57, 0x02, 0xe4, 0x26, 0x29, 0x14, 0x5f, 0xe7, 0x8f, 0x37, 0x7d, 0xd9, 0x94, 0x32, 0x5a, 0xad, 0x8c, 0x91, 0x92, 0xc8, 0xb4,
0xbd, 0x98, 0x96, 0xde, 0xaa, 0xb6, 0x12, 0x21, 0xa8, 0x8a, 0xd2, 0x34, 0xa9, 0xa9, 0xd4, 0x63, 0x25, 0x5c, 0x1c, 0x58, 0x6e, 0x08, 0x90, 0xb2, 0xd9, 0xa8, 0xaa, 0x4a, 0xe9, 0xe2, 0x20, 0x71,
0xe4, 0xe0, 0x09, 0x8c, 0x64, 0x3c, 0xae, 0x67, 0x9c, 0x86, 0x5b, 0x7b, 0xef, 0xa1, 0x7f, 0xc5, 0xac, 0xbc, 0xf1, 0x6c, 0x32, 0x92, 0xe3, 0x31, 0x9e, 0xf1, 0xb2, 0xb9, 0xc1, 0x9d, 0x03, 0x5f,
0xde, 0xf7, 0xbf, 0xc8, 0x71, 0x8f, 0x7b, 0xca, 0x6e, 0xf8, 0x4b, 0x56, 0xe3, 0x1f, 0x60, 0x08, 0x85, 0x0b, 0x9f, 0x61, 0x8f, 0x1c, 0x39, 0x15, 0x9a, 0x4f, 0x82, 0x66, 0xc6, 0x4e, 0xec, 0x6c,
0xb3, 0xde, 0x5c, 0xd0, 0x7b, 0x8f, 0xf7, 0xc6, 0xdf, 0xfb, 0xe6, 0xfb, 0x6c, 0xf8, 0x65, 0x4c, 0xa6, 0xee, 0x5e, 0xa2, 0x79, 0x2f, 0xef, 0xbd, 0xf9, 0xbd, 0x37, 0xbf, 0xdf, 0x33, 0x3c, 0x9b,
0xf8, 0x24, 0xbc, 0x31, 0x47, 0x74, 0xda, 0x19, 0x51, 0x8f, 0xdb, 0xc4, 0xc3, 0x81, 0x93, 0x0d, 0x13, 0xbe, 0x48, 0x2f, 0xdd, 0x19, 0x5d, 0x0e, 0x67, 0x34, 0xe2, 0x3e, 0x89, 0x70, 0x12, 0x14,
0x6d, 0x9f, 0x74, 0x18, 0x0e, 0xee, 0xc8, 0x08, 0xb3, 0x0e, 0xbe, 0xc7, 0xa3, 0x90, 0x13, 0xea, 0x8f, 0x7e, 0x4c, 0x86, 0x0c, 0x27, 0xd7, 0x64, 0x86, 0xd9, 0x10, 0xdf, 0xe0, 0x59, 0xca, 0x09,
0x2d, 0x23, 0xd3, 0x0f, 0x28, 0xa7, 0xa8, 0xbe, 0x1c, 0x31, 0xef, 0xbe, 0x33, 0xd3, 0x09, 0xfd, 0x8d, 0xb6, 0x27, 0x37, 0x4e, 0x28, 0xa7, 0xe8, 0x64, 0x9b, 0xe2, 0x5e, 0x7f, 0xe1, 0xe6, 0x19,
0xcb, 0x31, 0xa5, 0x63, 0x17, 0x77, 0xa2, 0xae, 0x9b, 0xf0, 0xb6, 0x83, 0xa7, 0x3e, 0x9f, 0xc5, 0xf6, 0xc7, 0x73, 0x4a, 0xe7, 0x21, 0x1e, 0xca, 0xa8, 0xcb, 0xf4, 0x6a, 0x88, 0x97, 0x31, 0x5f,
0x43, 0xfa, 0x17, 0xeb, 0x7f, 0xda, 0x5e, 0xfa, 0xd7, 0xc1, 0x98, 0x8e, 0x69, 0x14, 0x76, 0x44, 0xa9, 0x24, 0xfb, 0xa3, 0xdd, 0x3f, 0xfd, 0x28, 0xff, 0xeb, 0x78, 0x4e, 0xe7, 0x54, 0x1e, 0x87,
0x94, 0x54, 0x7f, 0xfc, 0x24, 0xb8, 0x7c, 0xe6, 0x63, 0xd6, 0x99, 0xd2, 0xd0, 0xe3, 0xf1, 0x6f, 0xe2, 0x94, 0x79, 0xbf, 0x7e, 0x2f, 0xb8, 0x7c, 0x15, 0x63, 0x36, 0x5c, 0xd2, 0x34, 0xe2, 0xea,
0x32, 0x7d, 0xfa, 0x82, 0xe9, 0x45, 0x71, 0x19, 0x25, 0xa7, 0x34, 0xd7, 0x41, 0x73, 0x32, 0xc5, 0x37, 0xcb, 0x3e, 0xbf, 0x47, 0xf6, 0xc6, 0xb9, 0x3d, 0x65, 0x55, 0x7a, 0xbb, 0xa0, 0x39, 0x59,
0x8c, 0xdb, 0x53, 0x3f, 0x6e, 0x30, 0xfe, 0x2d, 0x40, 0xad, 0x1f, 0x60, 0x9b, 0x63, 0x0b, 0xff, 0x62, 0xc6, 0xfd, 0x65, 0xac, 0x02, 0x9c, 0xdf, 0x6a, 0xd0, 0x19, 0x27, 0xd8, 0xe7, 0xd8, 0xc3,
0x15, 0x62, 0xc6, 0x51, 0x1d, 0x0a, 0xc4, 0xd1, 0x94, 0x96, 0xd2, 0xde, 0x39, 0x51, 0xe7, 0x8f, 0x3f, 0xa7, 0x98, 0x71, 0x74, 0x02, 0x35, 0x12, 0x58, 0x46, 0xdf, 0x18, 0x3c, 0x38, 0x33, 0xd7,
0xcd, 0xc2, 0xd9, 0xa9, 0x55, 0x20, 0x0e, 0x6a, 0x43, 0x89, 0xf9, 0x78, 0xa4, 0x15, 0x5a, 0x4a, 0x6f, 0x7a, 0xb5, 0xe7, 0xe7, 0x5e, 0x8d, 0x04, 0x68, 0x00, 0x0d, 0x16, 0xe3, 0x99, 0x55, 0xeb,
0xbb, 0xda, 0x3d, 0x30, 0xe3, 0x93, 0xcd, 0xf4, 0x64, 0xb3, 0xe7, 0xcd, 0xac, 0xa8, 0x03, 0x75, 0x1b, 0x83, 0xf6, 0xe9, 0xb1, 0xab, 0x2a, 0xbb, 0x79, 0x65, 0x77, 0x14, 0xad, 0x3c, 0x19, 0x81,
0x41, 0x0d, 0x28, 0xe5, 0xb7, 0x4c, 0x2b, 0xb6, 0x8a, 0xed, 0x6a, 0x57, 0x37, 0x57, 0xf9, 0x8e, 0x4e, 0xc1, 0x4c, 0x28, 0xe5, 0x57, 0xcc, 0xaa, 0xf7, 0xeb, 0x83, 0xf6, 0xa9, 0xed, 0x96, 0xe7,
0x40, 0x9b, 0x17, 0x62, 0x59, 0x2b, 0xe9, 0x44, 0x1a, 0x6c, 0x07, 0xa1, 0x27, 0xd0, 0x69, 0x25, 0x2d, 0x41, 0xbb, 0x2f, 0x45, 0xb3, 0x5e, 0x16, 0x89, 0x2c, 0x38, 0x4c, 0xd2, 0x48, 0xa0, 0xb3,
0xf1, 0x68, 0x2b, 0x4d, 0xd1, 0x01, 0x94, 0x19, 0x77, 0x88, 0xa7, 0x95, 0xa3, 0x7a, 0x9c, 0xa0, 0x1a, 0xe2, 0x6a, 0x2f, 0x37, 0xd1, 0x31, 0x34, 0x19, 0x0f, 0x48, 0x64, 0x35, 0xa5, 0x5f, 0x19,
0x3a, 0xa8, 0x8c, 0x3b, 0x34, 0xe4, 0x9a, 0x1a, 0x95, 0x93, 0x2c, 0xa9, 0xe3, 0x20, 0xd0, 0xb6, 0xe8, 0x04, 0x4c, 0xc6, 0x03, 0x9a, 0x72, 0xcb, 0x94, 0xee, 0xcc, 0xca, 0xfc, 0x38, 0x49, 0xac,
0x17, 0x75, 0x1c, 0x04, 0x48, 0x87, 0x0a, 0xc7, 0xc1, 0x94, 0x78, 0xb6, 0xab, 0x55, 0x5a, 0x4a, 0xc3, 0x8d, 0x1f, 0x27, 0x09, 0xb2, 0xa1, 0xc5, 0x71, 0xb2, 0x24, 0x91, 0x1f, 0x5a, 0xad, 0xbe,
0xbb, 0x62, 0x2d, 0x72, 0xe3, 0x07, 0xd8, 0x4b, 0x29, 0x60, 0x3e, 0xf5, 0x18, 0x96, 0x72, 0xb0, 0x31, 0x68, 0x79, 0x1b, 0xdb, 0xf9, 0x0a, 0x1e, 0xe6, 0x23, 0x60, 0x31, 0x8d, 0x18, 0xd6, 0xce,
0x0f, 0x45, 0x9f, 0x38, 0x11, 0x05, 0x35, 0x4b, 0x84, 0xc6, 0x31, 0xec, 0x0e, 0xb9, 0x1d, 0xf0, 0xe0, 0x11, 0xd4, 0x63, 0x12, 0xc8, 0x11, 0x74, 0x3c, 0x71, 0x74, 0x9e, 0xc2, 0xd1, 0x94, 0xfb,
0x1c, 0xf6, 0x8c, 0xaf, 0xa1, 0x76, 0x8a, 0x5d, 0x9c, 0x4b, 0xb3, 0xf1, 0x9f, 0x02, 0x7b, 0x69, 0x09, 0xaf, 0x98, 0x9e, 0xf3, 0x29, 0x74, 0xce, 0x71, 0x88, 0x2b, 0xc7, 0xec, 0xfc, 0x6e, 0xc0,
0x67, 0x0e, 0x9a, 0x26, 0x54, 0xf1, 0x3d, 0xe1, 0xd7, 0x8c, 0xdb, 0x3c, 0x64, 0x09, 0x2a, 0x10, 0xc3, 0x3c, 0xb2, 0x02, 0x4d, 0x0f, 0xda, 0xf8, 0x86, 0xf0, 0xd7, 0x8c, 0xfb, 0x3c, 0x65, 0x19,
0xa5, 0x61, 0x54, 0x41, 0x3d, 0xd8, 0x11, 0x19, 0x76, 0xae, 0x6d, 0xae, 0x15, 0xa3, 0x7b, 0xd3, 0x2a, 0x10, 0xae, 0xa9, 0xf4, 0xa0, 0x11, 0x3c, 0x10, 0x16, 0x0e, 0x5e, 0xfb, 0xdc, 0xaa, 0xcb,
0x9f, 0xdd, 0xdb, 0x1f, 0xa9, 0x22, 0x4e, 0x2a, 0x0f, 0x8f, 0xcd, 0xad, 0xff, 0xdf, 0x35, 0x15, 0x77, 0xb3, 0xef, 0xbc, 0xdb, 0x8f, 0x39, 0x23, 0xce, 0x5a, 0xb7, 0x6f, 0x7a, 0x07, 0x7f, 0xfc,
0xab, 0x12, 0x8f, 0xf5, 0xb8, 0x71, 0x04, 0xd5, 0x33, 0xef, 0x96, 0xe6, 0xa1, 0xae, 0x41, 0xf5, 0xdb, 0x33, 0xbc, 0x96, 0x4a, 0x1b, 0x71, 0xe7, 0x09, 0xb4, 0x9f, 0x47, 0x57, 0xb4, 0x0a, 0x75,
0x57, 0xc2, 0x52, 0x16, 0x8c, 0xdf, 0x60, 0x37, 0x4e, 0x93, 0x0d, 0x7e, 0x06, 0x58, 0x48, 0x80, 0x07, 0xda, 0xdf, 0x11, 0x96, 0x4f, 0xc1, 0xf9, 0x1e, 0x8e, 0x94, 0x99, 0x75, 0xf0, 0x2d, 0xc0,
0x69, 0x4a, 0xa4, 0x8a, 0xc6, 0x46, 0x55, 0xf4, 0xd3, 0x9a, 0x95, 0x99, 0x30, 0x2e, 0xa1, 0x7a, 0x86, 0x02, 0xcc, 0x32, 0x24, 0x2b, 0xba, 0x7b, 0x59, 0x31, 0xce, 0x7d, 0x5e, 0x21, 0xc3, 0x79,
0x4e, 0x5c, 0x37, 0x4f, 0xa2, 0xe2, 0xf2, 0xc9, 0x58, 0x5c, 0x71, 0xcc, 0x45, 0x92, 0x89, 0x6b, 0x05, 0xed, 0x17, 0x24, 0x0c, 0xab, 0x28, 0x2a, 0x1e, 0x9f, 0xcc, 0xc5, 0x13, 0xab, 0x59, 0x64,
0xb3, 0x5d, 0x37, 0x62, 0xa0, 0x62, 0x89, 0xd0, 0xf8, 0x0c, 0x6a, 0x83, 0x3b, 0xec, 0x71, 0x96, 0x96, 0x78, 0x36, 0x3f, 0x0c, 0xe5, 0x04, 0x5a, 0x9e, 0x38, 0x3a, 0x1f, 0x40, 0x67, 0x72, 0x8d,
0x22, 0x7e, 0xad, 0x40, 0x75, 0x70, 0x8f, 0x47, 0x79, 0x8f, 0xc8, 0xea, 0xa8, 0xb0, 0xaa, 0xa3, 0x23, 0xce, 0x72, 0xc4, 0x7f, 0x1a, 0xd0, 0x9e, 0xdc, 0xe0, 0x59, 0xd5, 0x15, 0x45, 0x1e, 0xd5,
0xa5, 0x52, 0x8b, 0x9b, 0x95, 0x5a, 0x92, 0x28, 0xb5, 0xbc, 0xa2, 0xd4, 0xd4, 0x67, 0x6a, 0x9e, 0xca, 0x3c, 0xda, 0x32, 0xb5, 0xbe, 0x9f, 0xa9, 0x0d, 0x0d, 0x53, 0x9b, 0x25, 0xa6, 0xe6, 0x3a,
0xcf, 0x8c, 0x16, 0xec, 0xc6, 0x90, 0x13, 0x96, 0x13, 0x75, 0x2a, 0x4b, 0x75, 0x3a, 0x00, 0x57, 0x33, 0xab, 0x74, 0xe6, 0xf4, 0xe1, 0x48, 0x41, 0xce, 0xa6, 0x9c, 0xb1, 0xd3, 0xd8, 0xb2, 0x33,
0x7c, 0x96, 0xb7, 0xd3, 0x33, 0x55, 0x8b, 0x4d, 0xfe, 0x26, 0x0e, 0x9f, 0x44, 0x9b, 0xd4, 0xac, 0x00, 0xb8, 0xe0, 0xab, 0xaa, 0x9e, 0xee, 0xb0, 0x5a, 0x74, 0xf2, 0x0b, 0x09, 0xf8, 0x42, 0x76,
0x38, 0x11, 0x88, 0x27, 0x98, 0x8c, 0x27, 0xf1, 0x26, 0x35, 0x2b, 0xc9, 0x8c, 0x9f, 0xe0, 0xf3, 0xd2, 0xf1, 0x94, 0x21, 0x10, 0x2f, 0x30, 0x99, 0x2f, 0x54, 0x27, 0x1d, 0x2f, 0xb3, 0x9c, 0x6f,
0xbe, 0x4b, 0x19, 0x1e, 0x8a, 0x7d, 0x5f, 0xfc, 0xb0, 0xee, 0x2b, 0x15, 0xf6, 0x17, 0xd7, 0x3e, 0xe0, 0xc3, 0x71, 0x48, 0x19, 0x9e, 0x8a, 0x7e, 0xef, 0x7d, 0x99, 0x90, 0xd0, 0x85, 0x9f, 0x32,
0x8c, 0xdf, 0xc5, 0xe8, 0x4f, 0x50, 0x63, 0x4f, 0xa2, 0x23, 0x73, 0xf3, 0xdb, 0xda, 0x5c, 0x79, 0xfc, 0x1e, 0x12, 0xf2, 0x30, 0x4b, 0x97, 0x55, 0x81, 0xa7, 0x7f, 0x1d, 0xc2, 0xa3, 0x0d, 0x8f,
0x6d, 0xe9, 0xc7, 0x79, 0x6d, 0x09, 0x49, 0x03, 0x28, 0x47, 0x86, 0x45, 0x87, 0xb2, 0x81, 0xac, 0xa6, 0x6a, 0xb9, 0xa3, 0x9f, 0xc0, 0x54, 0x22, 0x47, 0x4f, 0xdc, 0xfd, 0xeb, 0xdf, 0x2d, 0xed,
0x9f, 0xf5, 0xfa, 0x33, 0xfe, 0x07, 0xe2, 0x9b, 0x20, 0xf0, 0xc5, 0x2e, 0x95, 0xe3, 0x5b, 0xf1, 0x41, 0xfb, 0x69, 0x55, 0x58, 0x36, 0xf5, 0x09, 0x34, 0xe5, 0x06, 0x40, 0x8f, 0x75, 0x09, 0xc5,
0xbb, 0x1c, 0xdf, 0x9a, 0xd9, 0xcf, 0xa1, 0x24, 0x0c, 0x87, 0xbe, 0x92, 0xf5, 0x67, 0xec, 0xa8, 0x05, 0x61, 0x9f, 0xdc, 0x79, 0xd0, 0x89, 0xf8, 0xc8, 0x08, 0x7c, 0x4a, 0xf6, 0x7a, 0x7c, 0xa5,
0xe7, 0x78, 0x08, 0xfd, 0x0e, 0x25, 0xe1, 0x43, 0xf9, 0x61, 0x19, 0xd3, 0xea, 0x87, 0x1f, 0x6f, 0x05, 0xa2, 0xc7, 0xb7, 0xb3, 0x3d, 0x5e, 0x40, 0x43, 0x28, 0x18, 0x7d, 0xa2, 0x8b, 0x2f, 0xe8,
0x4a, 0xf0, 0xf5, 0xa1, 0x24, 0xac, 0x28, 0x3f, 0x32, 0x63, 0x54, 0x29, 0x7b, 0x17, 0xa0, 0xc6, 0xdb, 0xae, 0x10, 0x25, 0xfa, 0x01, 0x1a, 0x42, 0xd8, 0xfa, 0x62, 0x85, 0x2d, 0x60, 0x3f, 0x7e,
0xf6, 0x93, 0xb3, 0xb7, 0x62, 0x4f, 0x7d, 0xf3, 0x27, 0x24, 0xea, 0xf9, 0x56, 0x11, 0x6b, 0x0a, 0x77, 0x50, 0x86, 0x6f, 0x0c, 0x0d, 0xa1, 0x6d, 0x7d, 0xc9, 0x82, 0xf2, 0xb5, 0xd3, 0x7b, 0x09,
0x23, 0xc8, 0x31, 0x65, 0x9c, 0x2d, 0x5f, 0x73, 0xc5, 0x4b, 0x3d, 0x28, 0x5e, 0xf1, 0x19, 0x32, 0xa6, 0xd2, 0xb3, 0x7e, 0x7a, 0x25, 0xbd, 0xdb, 0xfb, 0xbf, 0x49, 0x32, 0xe6, 0x73, 0x43, 0xb4,
0x64, 0xcd, 0x4b, 0x5b, 0x49, 0x97, 0xbc, 0x04, 0x58, 0xda, 0x02, 0x7d, 0x23, 0xd5, 0xe7, 0xba, 0x29, 0x94, 0xa5, 0xc7, 0x54, 0x58, 0x15, 0xfa, 0x36, 0x4b, 0xe2, 0x1c, 0x41, 0xfd, 0x82, 0xaf,
0x75, 0x64, 0x07, 0x9e, 0x68, 0x0f, 0x4f, 0x8d, 0xad, 0xb7, 0x4f, 0x8d, 0xad, 0x7f, 0xe6, 0x0d, 0x90, 0xa3, 0x0b, 0xde, 0xea, 0x54, 0xdb, 0xe4, 0x2b, 0x80, 0xad, 0xce, 0xd0, 0x67, 0x5a, 0x7e,
0xe5, 0x61, 0xde, 0x50, 0xde, 0xcc, 0x1b, 0xca, 0xfb, 0x79, 0x43, 0xb9, 0x51, 0xa3, 0xce, 0xef, 0xee, 0x6a, 0x51, 0x5b, 0x70, 0x02, 0x4d, 0xa9, 0x3c, 0x3d, 0x75, 0x8b, 0xc2, 0xd4, 0x96, 0x79,
0x3f, 0x04, 0x00, 0x00, 0xff, 0xff, 0x74, 0xa6, 0x26, 0x26, 0x22, 0x09, 0x00, 0x00, 0x06, 0xa6, 0x12, 0xa6, 0x7e, 0xf8, 0x25, 0xe1, 0xea, 0x0a, 0x9d, 0x59, 0xb7, 0x6f, 0xbb, 0x07,
0xff, 0xbc, 0xed, 0x1e, 0xfc, 0xba, 0xee, 0x1a, 0xb7, 0xeb, 0xae, 0xf1, 0xf7, 0xba, 0x6b, 0xfc,
0xb7, 0xee, 0x1a, 0x97, 0xa6, 0x8c, 0xfc, 0xf2, 0xff, 0x00, 0x00, 0x00, 0xff, 0xff, 0x67, 0xba,
0xdb, 0xdc, 0x03, 0x0a, 0x00, 0x00,
} }

View File

@ -20,6 +20,8 @@ service ContainerService {
rpc Exec(ExecRequest) returns (ExecResponse); rpc Exec(ExecRequest) returns (ExecResponse);
rpc Pty(PtyRequest) returns (google.protobuf.Empty); rpc Pty(PtyRequest) returns (google.protobuf.Empty);
rpc CloseStdin(CloseStdinRequest) returns (google.protobuf.Empty); rpc CloseStdin(CloseStdinRequest) returns (google.protobuf.Empty);
rpc Pause(PauseRequest) returns (google.protobuf.Empty);
rpc Resume(ResumeRequest) returns (google.protobuf.Empty);
} }
message CreateRequest { message CreateRequest {
@ -96,3 +98,11 @@ message CloseStdinRequest {
string id = 1 [(gogoproto.customname) = "ID"]; string id = 1 [(gogoproto.customname) = "ID"];
uint32 pid = 2; uint32 pid = 2;
} }
message PauseRequest {
string id = 1 [(gogoproto.customname) = "ID"];
}
message ResumeRequest {
string id = 1 [(gogoproto.customname) = "ID"];
}

View File

@ -263,7 +263,8 @@ type changeWriter struct {
tw *tar.Writer tw *tar.Writer
source string source string
whiteoutT time.Time whiteoutT time.Time
inodeCache map[uint64]string inodeSrc map[uint64]string
inodeRefs map[uint64][]string
} }
func newChangeWriter(w io.Writer, source string) *changeWriter { func newChangeWriter(w io.Writer, source string) *changeWriter {
@ -271,7 +272,8 @@ func newChangeWriter(w io.Writer, source string) *changeWriter {
tw: tar.NewWriter(w), tw: tar.NewWriter(w),
source: source, source: source,
whiteoutT: time.Now(), whiteoutT: time.Now(),
inodeCache: map[uint64]string{}, inodeSrc: map[uint64]string{},
inodeRefs: map[uint64][]string{},
} }
} }
@ -334,15 +336,28 @@ func (cw *changeWriter) HandleChange(k fs.ChangeKind, p string, f os.FileInfo, e
return errors.Wrap(err, "failed to set device headers") return errors.Wrap(err, "failed to set device headers")
} }
linkname, err := fs.GetLinkSource(name, f, cw.inodeCache) // additionalLinks stores file names which must be linked to
if err != nil { // this file when this file is added
return errors.Wrap(err, "failed to get hardlink") var additionalLinks []string
} inode, isHardlink := fs.GetLinkInfo(f)
if isHardlink {
if linkname != "" { // If the inode has a source, always link to it
if source, ok := cw.inodeSrc[inode]; ok {
hdr.Typeflag = tar.TypeLink hdr.Typeflag = tar.TypeLink
hdr.Linkname = linkname hdr.Linkname = source
hdr.Size = 0 hdr.Size = 0
} else {
if k == fs.ChangeKindUnmodified {
cw.inodeRefs[inode] = append(cw.inodeRefs[inode], name)
return nil
}
cw.inodeSrc[inode] = name
additionalLinks = cw.inodeRefs[inode]
delete(cw.inodeRefs, inode)
}
} else if k == fs.ChangeKindUnmodified {
// Nothing to write to diff
return nil
} }
if capability, err := getxattr(source, "security.capability"); err != nil { if capability, err := getxattr(source, "security.capability"); err != nil {
@ -374,6 +389,19 @@ func (cw *changeWriter) HandleChange(k fs.ChangeKind, p string, f os.FileInfo, e
return errors.New("short write copying file") return errors.New("short write copying file")
} }
} }
if additionalLinks != nil {
source = hdr.Name
for _, extra := range additionalLinks {
hdr.Name = extra
hdr.Typeflag = tar.TypeLink
hdr.Linkname = source
hdr.Size = 0
if err := cw.tw.WriteHeader(hdr); err != nil {
return errors.Wrap(err, "failed to write file header")
}
}
}
} }
return nil return nil
} }

View File

@ -1,3 +1,5 @@
// +build !windows
package archive package archive
import ( import (
@ -6,9 +8,9 @@ import (
"sync" "sync"
"syscall" "syscall"
"github.com/containerd/continuity/sysx"
"github.com/opencontainers/runc/libcontainer/system" "github.com/opencontainers/runc/libcontainer/system"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/stevvooe/continuity/sysx"
) )
func tarName(p string) (string, error) { func tarName(p string) (string, error) {

View File

@ -0,0 +1,14 @@
package archive
import (
"time"
"github.com/pkg/errors"
)
// as at MacOS 10.12 there is apparently no way to set timestamps
// with nanosecond precision. We could fall back to utimes/lutimes
// and lose the precision as a temporary workaround.
func chtimes(path string, atime, mtime time.Time) error {
return errors.New("OSX missing UtimesNanoAt")
}

View File

@ -1,3 +1,5 @@
// +build linux freebsd
package archive package archive
import ( import (

View File

@ -14,6 +14,10 @@ type Container interface {
Start(context.Context) error Start(context.Context) error
// State returns the container's state // State returns the container's state
State(context.Context) (State, error) State(context.Context) (State, error)
// Pause pauses the container process
Pause(context.Context) error
// Resume unpauses the container process
Resume(context.Context) error
// Kill signals a container // Kill signals a container
Kill(context.Context, uint32, bool) error Kill(context.Context, uint32, bool) error
// Exec adds a process into the container // Exec adds a process into the container
@ -26,9 +30,6 @@ type Container interface {
type LinuxContainer interface { type LinuxContainer interface {
Container Container
Pause(context.Context) error
Resume(context.Context) error
} }
type ExecOpts struct { type ExecOpts struct {

View File

@ -28,24 +28,62 @@ var (
} }
) )
type Provider interface {
Reader(ctx context.Context, dgst digest.Digest) (io.ReadCloser, error)
}
type Ingester interface {
Writer(ctx context.Context, ref string, size int64, expected digest.Digest) (Writer, error)
}
// TODO(stevvooe): Consider a very different name for this struct. Info is way
// to general. It also reads very weird in certain context, like pluralization.
type Info struct { type Info struct {
Digest digest.Digest Digest digest.Digest
Size int64 Size int64
CommittedAt time.Time CommittedAt time.Time
} }
type Provider interface {
Reader(ctx context.Context, dgst digest.Digest) (io.ReadCloser, error)
}
type Status struct { type Status struct {
Ref string Ref string
Offset int64 Offset int64
Total int64 Total int64
Expected digest.Digest
StartedAt time.Time StartedAt time.Time
UpdatedAt time.Time UpdatedAt time.Time
} }
// WalkFunc defines the callback for a blob walk.
type WalkFunc func(Info) error
// Manager provides methods for inspecting, listing and removing content.
type Manager interface {
// Info will return metadata about content available in the content store.
//
// If the content is not present, ErrNotFound will be returned.
Info(ctx context.Context, dgst digest.Digest) (Info, error)
// Walk will call fn for each item in the content store.
Walk(ctx context.Context, fn WalkFunc) error
// Delete removes the content from the store.
Delete(ctx context.Context, dgst digest.Digest) error
// Status returns the status of any active ingestions whose ref match the
// provided regular expression. If empty, all active ingestions will be
// returned.
//
// TODO(stevvooe): Status may be slighly out of place here. If this remains
// here, we should remove Manager and just define these on store.
Status(ctx context.Context, re string) ([]Status, error)
// Abort completely cancels the ingest operation targeted by ref.
//
// TODO(stevvooe): Same consideration as above. This should really be
// restricted to an ingest management interface.
Abort(ctx context.Context, ref string) error
}
type Writer interface { type Writer interface {
io.WriteCloser io.WriteCloser
Status() (Status, error) Status() (Status, error)
@ -54,8 +92,12 @@ type Writer interface {
Truncate(size int64) error Truncate(size int64) error
} }
type Ingester interface { // Store combines the methods of content-oriented interfaces into a set that
Writer(ctx context.Context, ref string, size int64, expected digest.Digest) (Writer, error) // are commonly provided by complete implementations.
type Store interface {
Manager
Ingester
Provider
} }
func IsNotFound(err error) bool { func IsNotFound(err error) bool {

View File

@ -7,6 +7,7 @@ import (
"io/ioutil" "io/ioutil"
"os" "os"
"path/filepath" "path/filepath"
"regexp"
"strconv" "strconv"
"time" "time"
@ -21,21 +22,21 @@ import (
// //
// Store can generally support multi-reader, single-writer ingest of data, // Store can generally support multi-reader, single-writer ingest of data,
// including resumable ingest. // including resumable ingest.
type Store struct { type store struct {
root string root string
} }
func NewStore(root string) (*Store, error) { func NewStore(root string) (Store, error) {
if err := os.MkdirAll(filepath.Join(root, "ingest"), 0777); err != nil && !os.IsExist(err) { if err := os.MkdirAll(filepath.Join(root, "ingest"), 0777); err != nil && !os.IsExist(err) {
return nil, err return nil, err
} }
return &Store{ return &store{
root: root, root: root,
}, nil }, nil
} }
func (s *Store) Info(dgst digest.Digest) (Info, error) { func (s *store) Info(ctx context.Context, dgst digest.Digest) (Info, error) {
p := s.blobPath(dgst) p := s.blobPath(dgst)
fi, err := os.Stat(p) fi, err := os.Stat(p)
if err != nil { if err != nil {
@ -46,11 +47,15 @@ func (s *Store) Info(dgst digest.Digest) (Info, error) {
return Info{}, err return Info{}, err
} }
return s.info(dgst, fi), nil
}
func (s *store) info(dgst digest.Digest, fi os.FileInfo) Info {
return Info{ return Info{
Digest: dgst, Digest: dgst,
Size: fi.Size(), Size: fi.Size(),
CommittedAt: fi.ModTime(), CommittedAt: fi.ModTime(),
}, nil }
} }
// Open returns an io.ReadCloser for the blob. // Open returns an io.ReadCloser for the blob.
@ -58,7 +63,7 @@ func (s *Store) Info(dgst digest.Digest) (Info, error) {
// TODO(stevvooe): This would work much better as an io.ReaderAt in practice. // TODO(stevvooe): This would work much better as an io.ReaderAt in practice.
// Right now, we are doing type assertion to tease that out, but it won't scale // Right now, we are doing type assertion to tease that out, but it won't scale
// well. // well.
func (s *Store) Reader(ctx context.Context, dgst digest.Digest) (io.ReadCloser, error) { func (s *store) Reader(ctx context.Context, dgst digest.Digest) (io.ReadCloser, error) {
fp, err := os.Open(s.blobPath(dgst)) fp, err := os.Open(s.blobPath(dgst))
if err != nil { if err != nil {
if os.IsNotExist(err) { if os.IsNotExist(err) {
@ -74,7 +79,7 @@ func (s *Store) Reader(ctx context.Context, dgst digest.Digest) (io.ReadCloser,
// //
// While this is safe to do concurrently, safe exist-removal logic must hold // While this is safe to do concurrently, safe exist-removal logic must hold
// some global lock on the store. // some global lock on the store.
func (cs *Store) Delete(dgst digest.Digest) error { func (cs *store) Delete(ctx context.Context, dgst digest.Digest) error {
if err := os.RemoveAll(cs.blobPath(dgst)); err != nil { if err := os.RemoveAll(cs.blobPath(dgst)); err != nil {
if !os.IsNotExist(err) { if !os.IsNotExist(err) {
return err return err
@ -88,14 +93,7 @@ func (cs *Store) Delete(dgst digest.Digest) error {
// TODO(stevvooe): Allow querying the set of blobs in the blob store. // TODO(stevvooe): Allow querying the set of blobs in the blob store.
// WalkFunc defines the callback for a blob walk. func (cs *store) Walk(ctx context.Context, fn WalkFunc) error {
//
// TODO(stevvooe): Remove the file info. Just need size and modtime. Perhaps,
// not a huge deal, considering we have a path, but let's not just let this one
// go without scrutiny.
type WalkFunc func(path string, fi os.FileInfo, dgst digest.Digest) error
func (cs *Store) Walk(fn WalkFunc) error {
root := filepath.Join(cs.root, "blobs") root := filepath.Join(cs.root, "blobs")
var alg digest.Algorithm var alg digest.Algorithm
return filepath.Walk(root, func(path string, fi os.FileInfo, err error) error { return filepath.Walk(root, func(path string, fi os.FileInfo, err error) error {
@ -133,17 +131,60 @@ func (cs *Store) Walk(fn WalkFunc) error {
// store or extra paths not expected previously. // store or extra paths not expected previously.
} }
return fn(path, fi, dgst) return fn(cs.info(dgst, fi))
}) })
} }
// Status returns the current status of a blob by the ingest ref. func (s *store) Status(ctx context.Context, re string) ([]Status, error) {
func (s *Store) Status(ref string) (Status, error) { fp, err := os.Open(filepath.Join(s.root, "ingest"))
return s.status(s.ingestRoot(ref)) if err != nil {
return nil, err
}
defer fp.Close()
fis, err := fp.Readdir(-1)
if err != nil {
return nil, err
}
rec, err := regexp.Compile(re)
if err != nil {
return nil, err
}
var active []Status
for _, fi := range fis {
p := filepath.Join(s.root, "ingest", fi.Name())
stat, err := s.status(p)
if err != nil {
if !os.IsNotExist(err) {
return nil, err
}
// TODO(stevvooe): This is a common error if uploads are being
// completed while making this listing. Need to consider taking a
// lock on the whole store to coordinate this aspect.
//
// Another option is to cleanup downloads asynchronously and
// coordinate this method with the cleanup process.
//
// For now, we just skip them, as they really don't exist.
continue
}
if !rec.MatchString(stat.Ref) {
continue
}
active = append(active, stat)
}
return active, nil
} }
// status works like stat above except uses the path to the ingest. // status works like stat above except uses the path to the ingest.
func (s *Store) status(ingestPath string) (Status, error) { func (s *store) status(ingestPath string) (Status, error) {
dp := filepath.Join(ingestPath, "data") dp := filepath.Join(ingestPath, "data")
fi, err := os.Stat(dp) fi, err := os.Stat(dp)
if err != nil { if err != nil {
@ -165,7 +206,7 @@ func (s *Store) status(ingestPath string) (Status, error) {
} }
// total attempts to resolve the total expected size for the write. // total attempts to resolve the total expected size for the write.
func (s *Store) total(ingestPath string) int64 { func (s *store) total(ingestPath string) int64 {
totalS, err := readFileString(filepath.Join(ingestPath, "total")) totalS, err := readFileString(filepath.Join(ingestPath, "total"))
if err != nil { if err != nil {
return 0 return 0
@ -185,7 +226,10 @@ func (s *Store) total(ingestPath string) int64 {
// ref at a time. // ref at a time.
// //
// The argument `ref` is used to uniquely identify a long-lived writer transaction. // The argument `ref` is used to uniquely identify a long-lived writer transaction.
func (s *Store) Writer(ctx context.Context, ref string, total int64, expected digest.Digest) (Writer, error) { func (s *store) Writer(ctx context.Context, ref string, total int64, expected digest.Digest) (Writer, error) {
// TODO(stevvooe): Need to actually store and handle expected here. We have
// code in the service that shouldn't be dealing with this.
path, refp, data, lock, err := s.ingestPaths(ref) path, refp, data, lock, err := s.ingestPaths(ref)
if err != nil { if err != nil {
return nil, err return nil, err
@ -283,11 +327,11 @@ func (s *Store) Writer(ctx context.Context, ref string, total int64, expected di
// Abort an active transaction keyed by ref. If the ingest is active, it will // Abort an active transaction keyed by ref. If the ingest is active, it will
// be cancelled. Any resources associated with the ingest will be cleaned. // be cancelled. Any resources associated with the ingest will be cleaned.
func (s *Store) Abort(ref string) error { func (s *store) Abort(ctx context.Context, ref string) error {
root := s.ingestRoot(ref) root := s.ingestRoot(ref)
if err := os.RemoveAll(root); err != nil { if err := os.RemoveAll(root); err != nil {
if os.IsNotExist(err) { if os.IsNotExist(err) {
return nil return ErrNotFound
} }
return err return err
@ -296,50 +340,11 @@ func (s *Store) Abort(ref string) error {
return nil return nil
} }
func (s *Store) Active() ([]Status, error) { func (cs *store) blobPath(dgst digest.Digest) string {
fp, err := os.Open(filepath.Join(s.root, "ingest"))
if err != nil {
return nil, err
}
defer fp.Close()
fis, err := fp.Readdir(-1)
if err != nil {
return nil, err
}
var active []Status
for _, fi := range fis {
p := filepath.Join(s.root, "ingest", fi.Name())
stat, err := s.status(p)
if err != nil {
if !os.IsNotExist(err) {
return nil, err
}
// TODO(stevvooe): This is a common error if uploads are being
// completed while making this listing. Need to consider taking a
// lock on the whole store to coordinate this aspect.
//
// Another option is to cleanup downloads asynchronously and
// coordinate this method with the cleanup process.
//
// For now, we just skip them, as they really don't exist.
continue
}
active = append(active, stat)
}
return active, nil
}
func (cs *Store) blobPath(dgst digest.Digest) string {
return filepath.Join(cs.root, "blobs", dgst.Algorithm().String(), dgst.Hex()) return filepath.Join(cs.root, "blobs", dgst.Algorithm().String(), dgst.Hex())
} }
func (s *Store) ingestRoot(ref string) string { func (s *store) ingestRoot(ref string) string {
dgst := digest.FromString(ref) dgst := digest.FromString(ref)
return filepath.Join(s.root, "ingest", dgst.Hex()) return filepath.Join(s.root, "ingest", dgst.Hex())
} }
@ -351,7 +356,7 @@ func (s *Store) ingestRoot(ref string) string {
// - data: file where data is written // - data: file where data is written
// - lock: lock file location // - lock: lock file location
// //
func (s *Store) ingestPaths(ref string) (string, string, string, lockfile.Lockfile, error) { func (s *store) ingestPaths(ref string) (string, string, string, lockfile.Lockfile, error) {
var ( var (
fp = s.ingestRoot(ref) fp = s.ingestRoot(ref)
rp = filepath.Join(fp, "ref") rp = filepath.Join(fp, "ref")

View File

@ -0,0 +1,15 @@
package content
import (
"os"
"syscall"
"time"
)
func getStartTime(fi os.FileInfo) time.Time {
if st, ok := fi.Sys().(*syscall.Stat_t); ok {
return time.Unix(int64(st.Ctim.Sec), int64(st.Ctim.Nsec))
}
return fi.ModTime()
}

View File

@ -1,4 +1,4 @@
// +build linux // +build darwin freebsd
package content package content
@ -10,7 +10,7 @@ import (
func getStartTime(fi os.FileInfo) time.Time { func getStartTime(fi os.FileInfo) time.Time {
if st, ok := fi.Sys().(*syscall.Stat_t); ok { if st, ok := fi.Sys().(*syscall.Stat_t); ok {
return time.Unix(st.Ctim.Sec, st.Ctim.Nsec) return time.Unix(int64(st.Ctimespec.Sec), int64(st.Ctimespec.Nsec))
} }
return fi.ModTime() return fi.ModTime()

View File

@ -13,7 +13,7 @@ import (
// writer represents a write transaction against the blob store. // writer represents a write transaction against the blob store.
type writer struct { type writer struct {
s *Store s *store
fp *os.File // opened data file fp *os.File // opened data file
lock lockfile.Lockfile lock lockfile.Lockfile
path string // path to writer dir path string // path to writer dir

View File

@ -65,7 +65,7 @@ func copyDirectory(dst, src string, inodes map[uint64]string) error {
} }
continue continue
case (fi.Mode() & os.ModeType) == 0: case (fi.Mode() & os.ModeType) == 0:
link, err := GetLinkSource(target, fi, inodes) link, err := getLinkSource(target, fi, inodes)
if err != nil { if err != nil {
return errors.Wrap(err, "failed to get hardlink") return errors.Wrap(err, "failed to get hardlink")
} }

View File

@ -5,8 +5,9 @@ import (
"os" "os"
"syscall" "syscall"
"github.com/containerd/continuity/sysx"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/stevvooe/continuity/sysx" "golang.org/x/sys/unix"
) )
func copyFileInfo(fi os.FileInfo, name string) error { func copyFileInfo(fi os.FileInfo, name string) error {
@ -21,7 +22,8 @@ func copyFileInfo(fi os.FileInfo, name string) error {
} }
} }
if err := syscall.UtimesNano(name, []syscall.Timespec{st.Atim, st.Mtim}); err != nil { timespec := []unix.Timespec{unix.Timespec(st.Atim), unix.Timespec(st.Mtim)}
if err := unix.UtimesNanoAt(unix.AT_FDCWD, name, timespec, unix.AT_SYMLINK_NOFOLLOW); err != nil {
return errors.Wrapf(err, "failed to utime %s", name) return errors.Wrapf(err, "failed to utime %s", name)
} }

View File

@ -0,0 +1,65 @@
// +build darwin freebsd
package fs
import (
"io"
"os"
"syscall"
"github.com/containerd/continuity/sysx"
"github.com/pkg/errors"
)
func copyFileInfo(fi os.FileInfo, name string) error {
st := fi.Sys().(*syscall.Stat_t)
if err := os.Lchown(name, int(st.Uid), int(st.Gid)); err != nil {
return errors.Wrapf(err, "failed to chown %s", name)
}
if (fi.Mode() & os.ModeSymlink) != os.ModeSymlink {
if err := os.Chmod(name, fi.Mode()); err != nil {
return errors.Wrapf(err, "failed to chmod %s", name)
}
}
if err := syscall.UtimesNano(name, []syscall.Timespec{st.Atimespec, st.Mtimespec}); err != nil {
return errors.Wrapf(err, "failed to utime %s", name)
}
return nil
}
func copyFileContent(dst, src *os.File) error {
buf := bufferPool.Get().([]byte)
_, err := io.CopyBuffer(dst, src, buf)
bufferPool.Put(buf)
return err
}
func copyXAttrs(dst, src string) error {
xattrKeys, err := sysx.LListxattr(src)
if err != nil {
return errors.Wrapf(err, "failed to list xattrs on %s", src)
}
for _, xattr := range xattrKeys {
data, err := sysx.LGetxattr(src, xattr)
if err != nil {
return errors.Wrapf(err, "failed to get xattr %q on %s", xattr, src)
}
if err := sysx.LSetxattr(dst, xattr, data, 0); err != nil {
return errors.Wrapf(err, "failed to set xattr %q on %s", xattr, dst)
}
}
return nil
}
func copyDevice(dst string, fi os.FileInfo) error {
st, ok := fi.Sys().(*syscall.Stat_t)
if !ok {
return errors.New("unsupported stat type")
}
return syscall.Mknod(dst, uint32(fi.Mode()), int(st.Rdev))
}

View File

@ -16,9 +16,13 @@ import (
type ChangeKind int type ChangeKind int
const ( const (
// ChangeKindUnmodified represents an unmodified
// file
ChangeKindUnmodified = iota
// ChangeKindAdd represents an addition of // ChangeKindAdd represents an addition of
// a file // a file
ChangeKindAdd = iota ChangeKindAdd
// ChangeKindModify represents a change to // ChangeKindModify represents a change to
// an existing file // an existing file
@ -31,6 +35,8 @@ const (
func (k ChangeKind) String() string { func (k ChangeKind) String() string {
switch k { switch k {
case ChangeKindUnmodified:
return "unmodified"
case ChangeKindAdd: case ChangeKindAdd:
return "add" return "add"
case ChangeKindModify: case ChangeKindModify:
@ -287,8 +293,11 @@ func doubleWalkDiff(ctx context.Context, changeFn ChangeFunc, a, b string) (err
f1 = nil f1 = nil
f2 = nil f2 = nil
if same { if same {
if !isLinked(f) {
continue continue
} }
k = ChangeKindUnmodified
}
} }
if err := changeFn(k, p, f, nil); err != nil { if err := changeFn(k, p, f, nil); err != nil {
return err return err

View File

@ -1,3 +1,5 @@
// +build !windows
package fs package fs
import ( import (
@ -7,8 +9,8 @@ import (
"strings" "strings"
"syscall" "syscall"
"github.com/containerd/continuity/sysx"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/stevvooe/continuity/sysx"
) )
// whiteouts are files with a special meaning for the layered filesystem. // whiteouts are files with a special meaning for the layered filesystem.
@ -90,3 +92,11 @@ func compareCapabilities(p1, p2 string) (bool, error) {
} }
return bytes.Equal(c1, c2), nil return bytes.Equal(c1, c2), nil
} }
func isLinked(f os.FileInfo) bool {
s, ok := f.Sys().(*syscall.Stat_t)
if !ok {
return false
}
return !f.IsDir() && s.Nlink > 1
}

View File

@ -1,5 +1,7 @@
package fs package fs
import "os"
func detectDirDiff(upper, lower string) *diffDirOptions { func detectDirDiff(upper, lower string) *diffDirOptions {
return nil return nil
} }
@ -13,3 +15,7 @@ func compareCapabilities(p1, p2 string) (bool, error) {
// TODO: Use windows equivalent // TODO: Use windows equivalent
return true, nil return true, nil
} }
func isLinked(os.FileInfo) bool {
return false
}

12
vendor/github.com/containerd/containerd/fs/du.go generated vendored Normal file
View File

@ -0,0 +1,12 @@
package fs
type Usage struct {
Inodes int64
Size int64
}
// DiskUsage counts the number of inodes and disk usage for the resources under
// path.
func DiskUsage(roots ...string) (Usage, error) {
return diskUsage(roots...)
}

42
vendor/github.com/containerd/containerd/fs/du_unix.go generated vendored Normal file
View File

@ -0,0 +1,42 @@
// +build !windows
package fs
import (
"os"
"path/filepath"
"syscall"
)
func diskUsage(roots ...string) (Usage, error) {
type inode struct {
// TODO(stevvooe): Can probably reduce memory usage by not tracking
// device, but we can leave this right for now.
dev, ino uint64
}
var (
size int64
inodes = map[inode]struct{}{} // expensive!
)
for _, root := range roots {
if err := filepath.Walk(root, func(path string, fi os.FileInfo, err error) error {
if err != nil {
return err
}
stat := fi.Sys().(*syscall.Stat_t)
inodes[inode{dev: uint64(stat.Dev), ino: stat.Ino}] = struct{}{}
size += fi.Size()
return nil
}); err != nil {
return Usage{}, err
}
}
return Usage{
Inodes: int64(len(inodes)),
Size: size,
}, nil
}

View File

@ -0,0 +1,33 @@
// +build windows
package fs
import (
"os"
"path/filepath"
)
func diskUsage(roots ...string) (Usage, error) {
var (
size int64
)
// TODO(stevvooe): Support inodes (or equivalent) for windows.
for _, root := range roots {
if err := filepath.Walk(root, func(path string, fi os.FileInfo, err error) error {
if err != nil {
return err
}
size += fi.Size()
return nil
}); err != nil {
return Usage{}, err
}
}
return Usage{
Size: size,
}, nil
}

View File

@ -2,11 +2,26 @@ package fs
import "os" import "os"
// GetLinkSource returns a path for the given name and // GetLinkID returns an identifier representing the node a hardlink is pointing
// to. If the file is not hard linked then 0 will be returned.
func GetLinkInfo(fi os.FileInfo) (uint64, bool) {
return getLinkInfo(fi)
}
// getLinkSource returns a path for the given name and
// file info to its link source in the provided inode // file info to its link source in the provided inode
// map. If the given file name is not in the map and // map. If the given file name is not in the map and
// has other links, it is added to the inode map // has other links, it is added to the inode map
// to be a source for other link locations. // to be a source for other link locations.
func GetLinkSource(name string, fi os.FileInfo, inodes map[uint64]string) (string, error) { func getLinkSource(name string, fi os.FileInfo, inodes map[uint64]string) (string, error) {
return getHardLink(name, fi, inodes) inode, isHardlink := getLinkInfo(fi)
if !isHardlink {
return "", nil
}
path, ok := inodes[inode]
if !ok {
inodes[inode] = name
}
return path, nil
} }

View File

@ -3,31 +3,15 @@
package fs package fs
import ( import (
"errors"
"os" "os"
"syscall" "syscall"
) )
func getHardLink(name string, fi os.FileInfo, inodes map[uint64]string) (string, error) { func getLinkInfo(fi os.FileInfo) (uint64, bool) {
if fi.IsDir() {
return "", nil
}
s, ok := fi.Sys().(*syscall.Stat_t) s, ok := fi.Sys().(*syscall.Stat_t)
if !ok { if !ok {
return "", errors.New("unsupported stat type") return 0, false
} }
// If inode is not hardlinked, no reason to lookup or save inode return uint64(s.Ino), !fi.IsDir() && s.Nlink > 1
if s.Nlink == 1 {
return "", nil
}
inode := uint64(s.Ino)
path, ok := inodes[inode]
if !ok {
inodes[inode] = name
}
return path, nil
} }

View File

@ -2,6 +2,6 @@ package fs
import "os" import "os"
func getHardLink(string, os.FileInfo, map[uint64]string) (string, error) { func getLinkInfo(fi os.FileInfo) (uint64, bool) {
return "", nil return 0, false
} }

70
vendor/github.com/containerd/containerd/mount_linux.go generated vendored Normal file
View File

@ -0,0 +1,70 @@
package containerd
import (
"strings"
"golang.org/x/sys/unix"
)
func (m *Mount) Mount(target string) error {
flags, data := parseMountOptions(m.Options)
return unix.Mount(m.Source, target, m.Type, uintptr(flags), data)
}
func Unmount(mount string, flags int) error {
return unix.Unmount(mount, flags)
}
// parseMountOptions takes fstab style mount options and parses them for
// use with a standard mount() syscall
func parseMountOptions(options []string) (int, string) {
var (
flag int
data []string
)
flags := map[string]struct {
clear bool
flag int
}{
"async": {true, unix.MS_SYNCHRONOUS},
"atime": {true, unix.MS_NOATIME},
"bind": {false, unix.MS_BIND},
"defaults": {false, 0},
"dev": {true, unix.MS_NODEV},
"diratime": {true, unix.MS_NODIRATIME},
"dirsync": {false, unix.MS_DIRSYNC},
"exec": {true, unix.MS_NOEXEC},
"mand": {false, unix.MS_MANDLOCK},
"noatime": {false, unix.MS_NOATIME},
"nodev": {false, unix.MS_NODEV},
"nodiratime": {false, unix.MS_NODIRATIME},
"noexec": {false, unix.MS_NOEXEC},
"nomand": {true, unix.MS_MANDLOCK},
"norelatime": {true, unix.MS_RELATIME},
"nostrictatime": {true, unix.MS_STRICTATIME},
"nosuid": {false, unix.MS_NOSUID},
"rbind": {false, unix.MS_BIND | unix.MS_REC},
"relatime": {false, unix.MS_RELATIME},
"remount": {false, unix.MS_REMOUNT},
"ro": {false, unix.MS_RDONLY},
"rw": {true, unix.MS_RDONLY},
"strictatime": {false, unix.MS_STRICTATIME},
"suid": {true, unix.MS_NOSUID},
"sync": {false, unix.MS_SYNCHRONOUS},
}
for _, o := range options {
// If the option does not exist in the flags table or the flag
// is not supported on the platform,
// then it is a data value for a specific fs type
if f, exists := flags[o]; exists && f.flag != 0 {
if f.clear {
flag &^= f.flag
} else {
flag |= f.flag
}
} else {
data = append(data, o)
}
}
return flag, strings.Join(data, ",")
}

View File

@ -1,72 +1,17 @@
// +build linux // +build darwin freebsd
package containerd package containerd
import ( import "github.com/pkg/errors"
"strings"
"golang.org/x/sys/unix" var (
ErrNotImplementOnUnix = errors.New("not implemented under unix")
) )
func (m *Mount) Mount(target string) error { func (m *Mount) Mount(target string) error {
flags, data := parseMountOptions(m.Options) return ErrNotImplementOnUnix
return unix.Mount(m.Source, target, m.Type, uintptr(flags), data)
} }
func Unmount(mount string, flags int) error { func Unmount(mount string, flags int) error {
return unix.Unmount(mount, flags) return ErrNotImplementOnUnix
}
// parseMountOptions takes fstab style mount options and parses them for
// use with a standard mount() syscall
func parseMountOptions(options []string) (int, string) {
var (
flag int
data []string
)
flags := map[string]struct {
clear bool
flag int
}{
"async": {true, unix.MS_SYNCHRONOUS},
"atime": {true, unix.MS_NOATIME},
"bind": {false, unix.MS_BIND},
"defaults": {false, 0},
"dev": {true, unix.MS_NODEV},
"diratime": {true, unix.MS_NODIRATIME},
"dirsync": {false, unix.MS_DIRSYNC},
"exec": {true, unix.MS_NOEXEC},
"mand": {false, unix.MS_MANDLOCK},
"noatime": {false, unix.MS_NOATIME},
"nodev": {false, unix.MS_NODEV},
"nodiratime": {false, unix.MS_NODIRATIME},
"noexec": {false, unix.MS_NOEXEC},
"nomand": {true, unix.MS_MANDLOCK},
"norelatime": {true, unix.MS_RELATIME},
"nostrictatime": {true, unix.MS_STRICTATIME},
"nosuid": {false, unix.MS_NOSUID},
"rbind": {false, unix.MS_BIND | unix.MS_REC},
"relatime": {false, unix.MS_RELATIME},
"remount": {false, unix.MS_REMOUNT},
"ro": {false, unix.MS_RDONLY},
"rw": {true, unix.MS_RDONLY},
"strictatime": {false, unix.MS_STRICTATIME},
"suid": {true, unix.MS_NOSUID},
"sync": {false, unix.MS_SYNCHRONOUS},
}
for _, o := range options {
// If the option does not exist in the flags table or the flag
// is not supported on the platform,
// then it is a data value for a specific fs type
if f, exists := flags[o]; exists && f.flag != 0 {
if f.clear {
flag &^= f.flag
} else {
flag |= f.flag
}
} else {
data = append(data, o)
}
}
return flag, strings.Join(data, ",")
} }

View File

@ -8,6 +8,8 @@ type ContainerMonitor interface {
Monitor(containerd.Container) error Monitor(containerd.Container) error
// Stop stops and removes the provided container from the monitor // Stop stops and removes the provided container from the monitor
Stop(containerd.Container) error Stop(containerd.Container) error
// Events emits events from the monitor
Events(chan<- *containerd.Event)
} }
func NewMultiContainerMonitor(monitors ...ContainerMonitor) ContainerMonitor { func NewMultiContainerMonitor(monitors ...ContainerMonitor) ContainerMonitor {
@ -31,6 +33,9 @@ func (mm *noopContainerMonitor) Stop(c containerd.Container) error {
return nil return nil
} }
func (mm *noopContainerMonitor) Events(events chan<- *containerd.Event) {
}
type multiContainerMonitor struct { type multiContainerMonitor struct {
monitors []ContainerMonitor monitors []ContainerMonitor
} }
@ -52,3 +57,9 @@ func (mm *multiContainerMonitor) Stop(c containerd.Container) error {
} }
return nil return nil
} }
func (mm *multiContainerMonitor) Events(events chan<- *containerd.Event) {
for _, m := range mm.monitors {
m.Events(events)
}
}

View File

@ -33,7 +33,7 @@ type InitContext struct {
Root string Root string
State string State string
Runtimes map[string]containerd.Runtime Runtimes map[string]containerd.Runtime
Content *content.Store Content content.Store
Meta *bolt.DB Meta *bolt.DB
Snapshotter snapshot.Snapshotter Snapshotter snapshot.Snapshotter
Config interface{} Config interface{}

View File

@ -0,0 +1,182 @@
package docker
import (
"net/http"
"sort"
"strings"
)
// authenticationScheme identifies an HTTP authentication scheme offered in
// a WWW-Authenticate challenge. The values are ordered so that a larger
// value means a stronger/preferred scheme (see byScheme.Less).
type authenticationScheme byte

const (
	basicAuth  authenticationScheme = 1 << iota // Defined in RFC 7617
	digestAuth                                  // Defined in RFC 7616
	bearerAuth                                  // Defined in RFC 6750
)

// challenge carries information from a WWW-Authenticate response header.
// See RFC 2617.
type challenge struct {
	// scheme is the auth-scheme according to RFC 2617
	scheme authenticationScheme

	// parameters are the auth-params according to RFC 2617
	parameters map[string]string
}

// byScheme sorts challenges so that the preferred scheme comes first.
type byScheme []challenge

func (bs byScheme) Len() int      { return len(bs) }
func (bs byScheme) Swap(i, j int) { bs[i], bs[j] = bs[j], bs[i] }

// Sort in priority order: token > digest > basic
func (bs byScheme) Less(i, j int) bool { return bs[i].scheme > bs[j].scheme }
// Octet types from RFC 2616.
type octetType byte

// octetTypes classifies every possible byte value; it is filled in by init
// below and consulted by skipSpace and expectToken.
var octetTypes [256]octetType

const (
	isToken octetType = 1 << iota
	isSpace
)

func init() {
	// Classify each byte according to the RFC 2616 grammar:
	//   CTL        = <any US-ASCII control character (octets 0 - 31) and DEL (127)>
	//   CHAR       = <any US-ASCII character (octets 0 - 127)>
	//   separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <">
	//              | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT
	//   token      = 1*<any CHAR except CTLs or separators>
	// Linear whitespace bytes (SP, HT, CR, LF) are marked isSpace.
	for c := 0; c < 256; c++ {
		var t octetType
		if strings.ContainsRune(" \t\r\n", rune(c)) {
			t |= isSpace
		}
		ctl := c <= 31 || c == 127
		char := c <= 127
		sep := strings.ContainsRune(" \t\"(),/:;<=>?@[]\\{}", rune(c))
		if char && !ctl && !sep {
			t |= isToken
		}
		octetTypes[c] = t
	}
}
// parseAuthHeader extracts every recognized challenge from the
// WWW-Authenticate header(s) of a response, ordered strongest scheme first.
// Challenges with an unknown auth-scheme are dropped.
func parseAuthHeader(header http.Header) []challenge {
	challenges := []challenge{}
	for _, h := range header[http.CanonicalHeaderKey("WWW-Authenticate")] {
		value, params := parseValueAndParams(h)
		var scheme authenticationScheme
		switch value {
		case "basic":
			scheme = basicAuth
		case "digest":
			scheme = digestAuth
		case "bearer":
			scheme = bearerAuth
		default:
			// Unknown scheme; skip this challenge entirely.
			continue
		}
		challenges = append(challenges, challenge{scheme: scheme, parameters: params})
	}
	// Stable sort keeps the server's relative order for equal schemes.
	sort.Stable(byScheme(challenges))
	return challenges
}
// parseValueAndParams splits an auth header value into its lowercased
// leading token (the scheme) and a map of lowercased auth-param keys to
// values. Parsing stops silently at the first malformed element.
func parseValueAndParams(header string) (value string, params map[string]string) {
	params = make(map[string]string)
	value, rest := expectToken(header)
	if value == "" {
		return
	}
	value = strings.ToLower(value)
	for {
		var key string
		key, rest = expectToken(skipSpace(rest))
		if key == "" {
			return
		}
		if !strings.HasPrefix(rest, "=") {
			// A param must be key=value; bail out on anything else.
			return
		}
		var val string
		val, rest = expectTokenOrQuoted(rest[1:])
		if val == "" {
			return
		}
		params[strings.ToLower(key)] = val
		rest = skipSpace(rest)
		if !strings.HasPrefix(rest, ",") {
			// No comma means no further params.
			return
		}
		rest = rest[1:]
	}
}
// skipSpace returns s with all leading linear-whitespace bytes (per
// octetTypes) removed.
func skipSpace(s string) (rest string) {
	for len(s) > 0 && octetTypes[s[0]]&isSpace != 0 {
		s = s[1:]
	}
	return s
}
// expectToken consumes the longest token prefix of s, returning the token
// and the unconsumed remainder. The token is empty if s does not begin
// with a token character.
func expectToken(s string) (token, rest string) {
	n := len(s)
	i := 0
	for i < n && octetTypes[s[i]]&isToken != 0 {
		i++
	}
	return s[:i], s[i:]
}
// expectTokenOrQuoted consumes either a bare token or an RFC 2616
// quoted-string from the front of s, returning the (unescaped) value and
// the unconsumed remainder. A quoted-string with no closing quote yields
// ("", "").
func expectTokenOrQuoted(s string) (value string, rest string) {
	if !strings.HasPrefix(s, "\"") {
		return expectToken(s)
	}
	s = s[1:] // drop the opening quote
	for i := 0; i < len(s); i++ {
		switch s[i] {
		case '"':
			// Fast path: closing quote reached with no escapes.
			return s[:i], s[i+1:]
		case '\\':
			// Slow path: rebuild the value with backslash escapes removed.
			// p is large enough because each escape drops at least one
			// byte from the output.
			p := make([]byte, len(s)-1)
			j := copy(p, s[:i])
			escape := true
			for i = i + 1; i < len(s); i++ {
				b := s[i]
				switch {
				case escape:
					// Previous byte was a backslash; take b literally.
					escape = false
					p[j] = b
					j++
				case b == '\\':
					escape = true
				case b == '"':
					// Unescaped quote terminates the string.
					return string(p[:j]), s[i+1:]
				default:
					p[j] = b
					j++
				}
			}
			// Input ended inside the quoted-string.
			return "", ""
		}
	}
	// No closing quote was found.
	return "", ""
}

View File

@ -7,10 +7,12 @@ import (
"io" "io"
"io/ioutil" "io/ioutil"
"net/http" "net/http"
"net/textproto"
"net/url" "net/url"
"path" "path"
"strconv" "strconv"
"strings" "strings"
"time"
"github.com/Sirupsen/logrus" "github.com/Sirupsen/logrus"
"github.com/containerd/containerd/images" "github.com/containerd/containerd/images"
@ -23,15 +25,43 @@ import (
"golang.org/x/net/context/ctxhttp" "golang.org/x/net/context/ctxhttp"
) )
// NOTE(stevvooe): Most of the code below this point is prototype code to var (
// demonstrate a very simplified docker.io fetcher. We have a lot of hard coded // ErrNoToken is returned if a request is successful but the body does not
// values but we leave many of the details down to the fetcher, creating a lot // contain an authorization token.
// of room for ways to fetch content. ErrNoToken = errors.New("authorization server did not include a token in the response")
type dockerResolver struct{} // ErrInvalidAuthorization is used when credentials are passed to a server but
// those credentials are rejected.
ErrInvalidAuthorization = errors.New("authorization failed")
)
func NewResolver() remotes.Resolver { type dockerResolver struct {
return &dockerResolver{} credentials func(string) (string, string, error)
plainHTTP bool
client *http.Client
}
// ResolverOptions are used to configured a new Docker register resolver
type ResolverOptions struct {
// Credentials provides username and secret given a host.
// If username is empty but a secret is given, that secret
// is interpretted as a long lived token.
Credentials func(string) (string, string, error)
// PlainHTTP specifies to use plain http and not https
PlainHTTP bool
// Client is the http client to used when making registry requests
Client *http.Client
}
// NewResolver returns a new resolver to a Docker registry
func NewResolver(options ResolverOptions) remotes.Resolver {
return &dockerResolver{
credentials: options.Credentials,
plainHTTP: options.PlainHTTP,
client: options.Client,
}
} }
var _ remotes.Resolver = &dockerResolver{} var _ remotes.Resolver = &dockerResolver{}
@ -44,30 +74,37 @@ func (r *dockerResolver) Resolve(ctx context.Context, ref string) (string, ocisp
var ( var (
base url.URL base url.URL
token string username, secret string
) )
switch refspec.Hostname() { host := refspec.Hostname()
case "docker.io":
base.Scheme = "https" base.Scheme = "https"
if host == "docker.io" {
base.Host = "registry-1.docker.io" base.Host = "registry-1.docker.io"
prefix := strings.TrimPrefix(refspec.Locator, "docker.io/") } else {
base.Path = path.Join("/v2", prefix) base.Host = host
token, err = getToken(ctx, "repository:"+prefix+":pull")
if r.plainHTTP || strings.HasPrefix(host, "localhost:") {
base.Scheme = "http"
}
}
if r.credentials != nil {
username, secret, err = r.credentials(base.Host)
if err != nil { if err != nil {
return "", ocispec.Descriptor{}, nil, err return "", ocispec.Descriptor{}, nil, err
} }
case "localhost:5000":
base.Scheme = "http"
base.Host = "localhost:5000"
base.Path = path.Join("/v2", strings.TrimPrefix(refspec.Locator, "localhost:5000/"))
default:
return "", ocispec.Descriptor{}, nil, errors.Errorf("unsupported locator: %q", refspec.Locator)
} }
prefix := strings.TrimPrefix(refspec.Locator, host+"/")
base.Path = path.Join("/v2", prefix)
fetcher := &dockerFetcher{ fetcher := &dockerFetcher{
base: base, base: base,
token: token, client: r.client,
username: username,
secret: secret,
} }
var ( var (
@ -125,16 +162,13 @@ func (r *dockerResolver) Resolve(ctx context.Context, ref string) (string, ocisp
if dgstHeader != "" { if dgstHeader != "" {
if err := dgstHeader.Validate(); err != nil { if err := dgstHeader.Validate(); err != nil {
if err == nil {
return "", ocispec.Descriptor{}, nil, errors.Errorf("%q in header not a valid digest", dgstHeader)
}
return "", ocispec.Descriptor{}, nil, errors.Wrapf(err, "%q in header not a valid digest", dgstHeader) return "", ocispec.Descriptor{}, nil, errors.Wrapf(err, "%q in header not a valid digest", dgstHeader)
} }
dgst = dgstHeader dgst = dgstHeader
} }
if dgst == "" { if dgst == "" {
return "", ocispec.Descriptor{}, nil, errors.Wrapf(err, "could not resolve digest for %v", ref) return "", ocispec.Descriptor{}, nil, errors.Errorf("could not resolve digest for %v", ref)
} }
var ( var (
@ -143,8 +177,12 @@ func (r *dockerResolver) Resolve(ctx context.Context, ref string) (string, ocisp
) )
size, err = strconv.ParseInt(sizeHeader, 10, 64) size, err = strconv.ParseInt(sizeHeader, 10, 64)
if err != nil || size < 0 { if err != nil {
return "", ocispec.Descriptor{}, nil, errors.Wrapf(err, "%q in header not a valid size", sizeHeader)
return "", ocispec.Descriptor{}, nil, errors.Wrapf(err, "invalid size header: %q", sizeHeader)
}
if size < 0 {
return "", ocispec.Descriptor{}, nil, errors.Errorf("%q in header not a valid size", sizeHeader)
} }
desc := ocispec.Descriptor{ desc := ocispec.Descriptor{
@ -163,6 +201,11 @@ func (r *dockerResolver) Resolve(ctx context.Context, ref string) (string, ocisp
type dockerFetcher struct { type dockerFetcher struct {
base url.URL base url.URL
token string token string
client *http.Client
useBasic bool
username string
secret string
} }
func (r *dockerFetcher) Fetch(ctx context.Context, desc ocispec.Descriptor) (io.ReadCloser, error) { func (r *dockerFetcher) Fetch(ctx context.Context, desc ocispec.Descriptor) (io.ReadCloser, error) {
@ -213,13 +256,14 @@ func (r *dockerFetcher) url(ps ...string) string {
} }
func (r *dockerFetcher) doRequest(ctx context.Context, req *http.Request) (*http.Response, error) { func (r *dockerFetcher) doRequest(ctx context.Context, req *http.Request) (*http.Response, error) {
ctx = log.WithLogger(ctx, log.G(ctx).WithField("url", req.URL.String())) return r.doRequestWithRetries(ctx, req, nil)
log.G(ctx).WithField("request.headers", req.Header).Debug("fetch content")
if r.token != "" {
req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", r.token))
} }
resp, err := ctxhttp.Do(ctx, http.DefaultClient, req) func (r *dockerFetcher) doRequestWithRetries(ctx context.Context, req *http.Request, responses []*http.Response) (*http.Response, error) {
ctx = log.WithLogger(ctx, log.G(ctx).WithField("url", req.URL.String()))
log.G(ctx).WithField("request.headers", req.Header).Debug("fetch content")
r.authorize(req)
resp, err := ctxhttp.Do(ctx, r.client, req)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -228,50 +272,119 @@ func (r *dockerFetcher) doRequest(ctx context.Context, req *http.Request) (*http
"response.headers": resp.Header, "response.headers": resp.Header,
}).Debug("fetch response received") }).Debug("fetch response received")
responses = append(responses, resp)
req, err = r.retryRequest(ctx, req, responses)
if err != nil {
return nil, err
}
if req != nil {
return r.doRequestWithRetries(ctx, req, responses)
}
return resp, err return resp, err
} }
func getToken(ctx context.Context, scopes ...string) (string, error) { func (r *dockerFetcher) authorize(req *http.Request) {
var ( if r.useBasic {
u = url.URL{ req.SetBasicAuth(r.username, r.secret)
Scheme: "https", } else if r.token != "" {
Host: "auth.docker.io", req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", r.token))
Path: "/token", }
} }
q = url.Values{ func (r *dockerFetcher) retryRequest(ctx context.Context, req *http.Request, responses []*http.Response) (*http.Request, error) {
"scope": scopes, if len(responses) > 5 {
"service": []string{"registry.docker.io"}, // usually comes from auth challenge return nil, nil
}
last := responses[len(responses)-1]
if last.StatusCode == http.StatusUnauthorized {
log.G(ctx).WithField("header", last.Header.Get("WWW-Authenticate")).Debug("Unauthorized")
for _, c := range parseAuthHeader(last.Header) {
if c.scheme == bearerAuth {
if errStr := c.parameters["error"]; errStr != "" {
// TODO: handle expired case
return nil, errors.Wrapf(ErrInvalidAuthorization, "server message: %s", errStr)
}
if err := r.setTokenAuth(ctx, c.parameters); err != nil {
return nil, err
}
return req, nil
} else if c.scheme == basicAuth {
if r.username != "" && r.secret != "" {
r.useBasic = true
}
return req, nil
}
}
return nil, nil
} else if last.StatusCode == http.StatusMethodNotAllowed && req.Method == http.MethodHead {
// Support registries which have not properly implemented the HEAD method for
// manifests endpoint
if strings.Contains(req.URL.Path, "/manifests/") {
// TODO: copy request?
req.Method = http.MethodGet
return req, nil
}
} }
)
u.RawQuery = q.Encode() // TODO: Handle 50x errors accounting for attempt history
return nil, nil
}
log.G(ctx).WithField("token.url", u.String()).Debug("requesting token") func isManifestAccept(h http.Header) bool {
resp, err := ctxhttp.Get(ctx, http.DefaultClient, u.String()) for _, ah := range h[textproto.CanonicalMIMEHeaderKey("Accept")] {
switch ah {
case images.MediaTypeDockerSchema2Manifest:
fallthrough
case images.MediaTypeDockerSchema2ManifestList:
fallthrough
case ocispec.MediaTypeImageManifest:
fallthrough
case ocispec.MediaTypeImageIndex:
return true
}
}
return false
}
func (r *dockerFetcher) setTokenAuth(ctx context.Context, params map[string]string) error {
realm, ok := params["realm"]
if !ok {
return errors.New("no realm specified for token auth challenge")
}
realmURL, err := url.Parse(realm)
if err != nil { if err != nil {
return "", err return fmt.Errorf("invalid token auth challenge realm: %s", err)
}
defer resp.Body.Close()
if resp.StatusCode > 299 {
return "", errors.Errorf("unexpected status code: %v %v", resp.StatusCode, resp.Status)
} }
p, err := ioutil.ReadAll(resp.Body) to := tokenOptions{
realm: realmURL.String(),
service: params["service"],
}
scope, ok := params["scope"]
if !ok {
return errors.Errorf("no scope specified for token auth challenge")
}
// TODO: Get added scopes from context
to.scopes = []string{scope}
if r.secret != "" {
// Credential information is provided, use oauth POST endpoint
r.token, err = r.fetchTokenWithOAuth(ctx, to)
if err != nil { if err != nil {
return "", err return errors.Wrap(err, "failed to fetch oauth token")
}
} else {
// Do request anonymously
r.token, err = r.getToken(ctx, to)
if err != nil {
return errors.Wrap(err, "failed to fetch anonymous token")
}
} }
var tokenResponse struct { return nil
Token string `json:"token"`
}
if err := json.Unmarshal(p, &tokenResponse); err != nil {
return "", err
}
return tokenResponse.Token, nil
} }
// getV2URLPaths generates the candidate urls paths for the object based on the // getV2URLPaths generates the candidate urls paths for the object based on the
@ -291,3 +404,125 @@ func getV2URLPaths(desc ocispec.Descriptor) ([]string, error) {
return urls, nil return urls, nil
} }
type tokenOptions struct {
realm string
service string
scopes []string
}
type postTokenResponse struct {
AccessToken string `json:"access_token"`
RefreshToken string `json:"refresh_token"`
ExpiresIn int `json:"expires_in"`
IssuedAt time.Time `json:"issued_at"`
Scope string `json:"scope"`
}
func (r *dockerFetcher) fetchTokenWithOAuth(ctx context.Context, to tokenOptions) (string, error) {
form := url.Values{}
form.Set("scope", strings.Join(to.scopes, " "))
form.Set("service", to.service)
// TODO: Allow setting client_id
form.Set("client_id", "containerd-dist-tool")
if r.username == "" {
form.Set("grant_type", "refresh_token")
form.Set("refresh_token", r.secret)
} else {
form.Set("grant_type", "password")
form.Set("username", r.username)
form.Set("password", r.secret)
}
resp, err := ctxhttp.PostForm(ctx, r.client, to.realm, form)
if err != nil {
return "", err
}
defer resp.Body.Close()
if resp.StatusCode == 405 && r.username != "" {
// It would be nice if registries would implement the specifications
return r.getToken(ctx, to)
} else if resp.StatusCode < 200 || resp.StatusCode >= 400 {
b, _ := ioutil.ReadAll(resp.Body)
log.G(ctx).WithFields(logrus.Fields{
"status": resp.Status,
"body": string(b),
}).Debugf("token request failed")
// TODO: handle error body and write debug output
return "", errors.Errorf("unexpected status: %s", resp.Status)
}
decoder := json.NewDecoder(resp.Body)
var tr postTokenResponse
if err = decoder.Decode(&tr); err != nil {
return "", fmt.Errorf("unable to decode token response: %s", err)
}
return tr.AccessToken, nil
}
type getTokenResponse struct {
Token string `json:"token"`
AccessToken string `json:"access_token"`
ExpiresIn int `json:"expires_in"`
IssuedAt time.Time `json:"issued_at"`
RefreshToken string `json:"refresh_token"`
}
// getToken fetches a token using a GET request
func (r *dockerFetcher) getToken(ctx context.Context, to tokenOptions) (string, error) {
req, err := http.NewRequest("GET", to.realm, nil)
if err != nil {
return "", err
}
reqParams := req.URL.Query()
if to.service != "" {
reqParams.Add("service", to.service)
}
for _, scope := range to.scopes {
reqParams.Add("scope", scope)
}
if r.secret != "" {
req.SetBasicAuth(r.username, r.secret)
}
req.URL.RawQuery = reqParams.Encode()
resp, err := ctxhttp.Do(ctx, r.client, req)
if err != nil {
return "", err
}
defer resp.Body.Close()
if resp.StatusCode < 200 || resp.StatusCode >= 400 {
// TODO: handle error body and write debug output
return "", errors.Errorf("unexpected status: %s", resp.Status)
}
decoder := json.NewDecoder(resp.Body)
var tr getTokenResponse
if err = decoder.Decode(&tr); err != nil {
return "", fmt.Errorf("unable to decode token response: %s", err)
}
// `access_token` is equivalent to `token` and if both are specified
// the choice is undefined. Canonicalize `access_token` by sticking
// things in `token`.
if tr.AccessToken != "" {
tr.Token = tr.AccessToken
}
if tr.Token == "" {
return "", ErrNoToken
}
return tr.Token, nil
}

View File

@ -4,6 +4,7 @@ import (
"context" "context"
"io" "io"
"io/ioutil" "io/ioutil"
"os"
"github.com/containerd/containerd" "github.com/containerd/containerd"
"github.com/containerd/containerd/archive" "github.com/containerd/containerd/archive"
@ -41,6 +42,7 @@ func ApplyLayer(snapshots snapshot.Snapshotter, mounter Mounter, rd io.Reader, p
if err != nil { if err != nil {
return "", errors.Wrapf(err, "creating temporary directory failed") return "", errors.Wrapf(err, "creating temporary directory failed")
} }
defer os.RemoveAll(dir)
// TODO(stevvooe): Choose this key WAY more carefully. We should be able to // TODO(stevvooe): Choose this key WAY more carefully. We should be able to
// create collisions for concurrent, conflicting unpack processes but we // create collisions for concurrent, conflicting unpack processes but we
@ -69,7 +71,7 @@ func ApplyLayer(snapshots snapshot.Snapshotter, mounter Mounter, rd io.Reader, p
digester := digest.Canonical.Digester() // used to calculate diffID. digester := digest.Canonical.Digester() // used to calculate diffID.
rd = io.TeeReader(rd, digester.Hash()) rd = io.TeeReader(rd, digester.Hash())
if _, err := archive.Apply(context.Background(), key, rd); err != nil { if _, err := archive.Apply(context.Background(), dir, rd); err != nil {
return "", err return "", err
} }
@ -80,7 +82,7 @@ func ApplyLayer(snapshots snapshot.Snapshotter, mounter Mounter, rd io.Reader, p
chainID = identity.ChainID([]digest.Digest{parent, chainID}) chainID = identity.ChainID([]digest.Digest{parent, chainID})
} }
if _, err := snapshots.Stat(ctx, chainID.String()); err == nil { if _, err := snapshots.Stat(ctx, chainID.String()); err == nil {
return diffID, nil //TODO: call snapshots.Remove(ctx, key) once implemented return diffID, snapshots.Remove(ctx, key)
} }
return diffID, snapshots.Commit(ctx, chainID.String(), key) return diffID, snapshots.Commit(ctx, chainID.String(), key)
@ -107,14 +109,11 @@ func Prepare(ctx context.Context, snapshots snapshot.Snapshotter, mounter Mounte
) )
for _, layer := range layers { for _, layer := range layers {
// TODO: layer.Digest should not be string
// (https://github.com/opencontainers/image-spec/pull/514)
layerDigest := digest.Digest(layer.Digest)
// This will convert a possibly compressed layer hash to the // This will convert a possibly compressed layer hash to the
// uncompressed hash, if we know about it. If we don't, we unpack and // uncompressed hash, if we know about it. If we don't, we unpack and
// calculate it. If we do have it, we then calculate the chain id for // calculate it. If we do have it, we then calculate the chain id for
// the application and see if the snapshot is there. // the application and see if the snapshot is there.
diffID := resolveDiffID(layerDigest) diffID := resolveDiffID(layer.Digest)
if diffID != "" { if diffID != "" {
chainLocal := append(chain, diffID) chainLocal := append(chain, diffID)
chainID := identity.ChainID(chainLocal) chainID := identity.ChainID(chainLocal)
@ -124,7 +123,7 @@ func Prepare(ctx context.Context, snapshots snapshot.Snapshotter, mounter Mounte
} }
} }
rc, err := openBlob(ctx, layerDigest) rc, err := openBlob(ctx, layer.Digest)
if err != nil { if err != nil {
return "", err return "", err
} }
@ -139,7 +138,7 @@ func Prepare(ctx context.Context, snapshots snapshot.Snapshotter, mounter Mounte
// For uncompressed layers, this will be the same. For compressed // For uncompressed layers, this will be the same. For compressed
// layers, we can look up the diffID from the digest if we've already // layers, we can look up the diffID from the digest if we've already
// unpacked it. // unpacked it.
if err := registerDiffID(diffID, layerDigest); err != nil { if err := registerDiffID(diffID, layer.Digest); err != nil {
return "", err return "", err
} }

View File

@ -17,3 +17,14 @@ func rewriteGRPCError(err error) error {
return err return err
} }
// serverErrorToGRPC maps well-known content-store errors for object id onto
// the gRPC status codes clients expect; any other error passes through
// unchanged.
func serverErrorToGRPC(err error, id string) error {
	if content.IsNotFound(err) {
		return grpc.Errorf(codes.NotFound, "%v: not found", id)
	}
	if content.IsExists(err) {
		return grpc.Errorf(codes.AlreadyExists, "%v: exists", id)
	}
	return err
}

View File

@ -1,35 +1,9 @@
package content package content
import ( import (
"context"
"io"
contentapi "github.com/containerd/containerd/api/services/content" contentapi "github.com/containerd/containerd/api/services/content"
"github.com/containerd/containerd/content"
digest "github.com/opencontainers/go-digest"
) )
func NewProviderFromClient(client contentapi.ContentClient) content.Provider {
return &remoteProvider{
client: client,
}
}
type remoteProvider struct {
client contentapi.ContentClient
}
func (rp *remoteProvider) Reader(ctx context.Context, dgst digest.Digest) (io.ReadCloser, error) {
client, err := rp.client.Read(ctx, &contentapi.ReadRequest{Digest: dgst})
if err != nil {
return nil, err
}
return &remoteReader{
client: client,
}, nil
}
type remoteReader struct { type remoteReader struct {
client contentapi.Content_ReadClient client contentapi.Content_ReadClient
extra []byte extra []byte

View File

@ -18,7 +18,7 @@ import (
) )
type Service struct { type Service struct {
store *content.Store store content.Store
} }
var bufPool = sync.Pool{ var bufPool = sync.Pool{
@ -52,25 +52,68 @@ func (s *Service) Info(ctx context.Context, req *api.InfoRequest) (*api.InfoResp
return nil, grpc.Errorf(codes.InvalidArgument, "%q failed validation", req.Digest) return nil, grpc.Errorf(codes.InvalidArgument, "%q failed validation", req.Digest)
} }
bi, err := s.store.Info(req.Digest) bi, err := s.store.Info(ctx, req.Digest)
if err != nil { if err != nil {
return nil, maybeNotFoundGRPC(err, req.Digest.String()) return nil, serverErrorToGRPC(err, req.Digest.String())
} }
return &api.InfoResponse{ return &api.InfoResponse{
Digest: req.Digest, Info: api.Info{
Digest: bi.Digest,
Size_: bi.Size, Size_: bi.Size,
CommittedAt: bi.CommittedAt, CommittedAt: bi.CommittedAt,
},
}, nil }, nil
} }
// List streams metadata for every blob in the store back to the client,
// batching up to 100 entries per response message.
func (s *Service) List(req *api.ListContentRequest, session api.Content_ListServer) error {
	flush := func(batch []api.Info) error {
		return session.Send(&api.ListContentResponse{
			Info: batch,
		})
	}

	var batch []api.Info
	walker := func(info content.Info) error {
		batch = append(batch, api.Info{
			Digest:      info.Digest,
			Size_:       info.Size,
			CommittedAt: info.CommittedAt,
		})
		if len(batch) < 100 {
			return nil
		}
		// Batch is full: ship it and reuse the backing array.
		if err := flush(batch); err != nil {
			return err
		}
		batch = batch[:0]
		return nil
	}

	if err := s.store.Walk(session.Context(), walker); err != nil {
		return err
	}
	// Flush whatever remains after the walk completes.
	if len(batch) > 0 {
		return flush(batch)
	}
	return nil
}
func (s *Service) Delete(ctx context.Context, req *api.DeleteContentRequest) (*empty.Empty, error) { func (s *Service) Delete(ctx context.Context, req *api.DeleteContentRequest) (*empty.Empty, error) {
if err := req.Digest.Validate(); err != nil { if err := req.Digest.Validate(); err != nil {
return nil, grpc.Errorf(codes.InvalidArgument, err.Error()) return nil, grpc.Errorf(codes.InvalidArgument, err.Error())
} }
if err := s.store.Delete(req.Digest); err != nil { if err := s.store.Delete(ctx, req.Digest); err != nil {
return nil, maybeNotFoundGRPC(err, req.Digest.String()) return nil, serverErrorToGRPC(err, req.Digest.String())
} }
return &empty.Empty{}, nil return &empty.Empty{}, nil
@ -81,14 +124,14 @@ func (s *Service) Read(req *api.ReadRequest, session api.Content_ReadServer) err
return grpc.Errorf(codes.InvalidArgument, "%v: %v", req.Digest, err) return grpc.Errorf(codes.InvalidArgument, "%v: %v", req.Digest, err)
} }
oi, err := s.store.Info(req.Digest) oi, err := s.store.Info(session.Context(), req.Digest)
if err != nil { if err != nil {
return maybeNotFoundGRPC(err, req.Digest.String()) return serverErrorToGRPC(err, req.Digest.String())
} }
rc, err := s.store.Reader(session.Context(), req.Digest) rc, err := s.store.Reader(session.Context(), req.Digest)
if err != nil { if err != nil {
return maybeNotFoundGRPC(err, req.Digest.String()) return serverErrorToGRPC(err, req.Digest.String())
} }
defer rc.Close() // TODO(stevvooe): Cache these file descriptors for performance. defer rc.Close() // TODO(stevvooe): Cache these file descriptors for performance.
@ -132,6 +175,10 @@ func (s *Service) Read(req *api.ReadRequest, session api.Content_ReadServer) err
return nil return nil
} }
// readResponseWriter is a writer that places the output into ReadResponse messages.
//
// This allows io.CopyBuffer to do the heavy lifting of chunking the responses
// into the buffer size.
type readResponseWriter struct { type readResponseWriter struct {
offset int64 offset int64
session api.Content_ReadServer session api.Content_ReadServer
@ -149,6 +196,27 @@ func (rw *readResponseWriter) Write(p []byte) (n int, err error) {
return len(p), nil return len(p), nil
} }
// Status reports the progress of in-flight ingests whose refs match the
// regexp in the request.
func (s *Service) Status(ctx context.Context, req *api.StatusRequest) (*api.StatusResponse, error) {
	statuses, err := s.store.Status(ctx, req.Regexp)
	if err != nil {
		return nil, serverErrorToGRPC(err, req.Regexp)
	}
	resp := new(api.StatusResponse)
	for _, st := range statuses {
		resp.Statuses = append(resp.Statuses, api.Status{
			StartedAt: st.StartedAt,
			UpdatedAt: st.UpdatedAt,
			Ref:       st.Ref,
			Offset:    st.Offset,
			Total:     st.Total,
			Expected:  st.Expected,
		})
	}
	return resp, nil
}
func (s *Service) Write(session api.Content_WriteServer) (err error) { func (s *Service) Write(session api.Content_WriteServer) (err error) {
var ( var (
ctx = session.Context() ctx = session.Context()
@ -243,8 +311,8 @@ func (s *Service) Write(session api.Content_WriteServer) (err error) {
} }
expected = req.Expected expected = req.Expected
if _, err := s.store.Info(req.Expected); err == nil { if _, err := s.store.Info(session.Context(), req.Expected); err == nil {
if err := s.store.Abort(ref); err != nil { if err := s.store.Abort(session.Context(), ref); err != nil {
log.G(ctx).WithError(err).Error("failed to abort write") log.G(ctx).WithError(err).Error("failed to abort write")
} }
@ -304,12 +372,10 @@ func (s *Service) Write(session api.Content_WriteServer) (err error) {
if err := wr.Commit(total, expected); err != nil { if err := wr.Commit(total, expected); err != nil {
return err return err
} }
}
msg.Digest = wr.Digest() msg.Digest = wr.Digest()
} }
case api.WriteActionAbort:
return s.store.Abort(ref)
}
if err := session.Send(&msg); err != nil { if err := session.Send(&msg); err != nil {
return err return err
@ -324,18 +390,12 @@ func (s *Service) Write(session api.Content_WriteServer) (err error) {
return err return err
} }
} }
return nil
} }
func (s *Service) Status(*api.StatusRequest, api.Content_StatusServer) error { func (s *Service) Abort(ctx context.Context, req *api.AbortRequest) (*empty.Empty, error) {
return grpc.Errorf(codes.Unimplemented, "not implemented") if err := s.store.Abort(ctx, req.Ref); err != nil {
return nil, serverErrorToGRPC(err, req.Ref)
} }
func maybeNotFoundGRPC(err error, id string) error { return &empty.Empty{}, nil
if content.IsNotFound(err) {
return grpc.Errorf(codes.NotFound, "%v: not found", id)
}
return err
} }

View File

@ -0,0 +1,155 @@
package content
import (
"context"
"io"
contentapi "github.com/containerd/containerd/api/services/content"
"github.com/containerd/containerd/content"
digest "github.com/opencontainers/go-digest"
)
// remoteStore implements content.Store by forwarding every call over a
// ContentClient gRPC connection to a containerd content service.
type remoteStore struct {
	client contentapi.ContentClient
}

// NewStoreFromClient wraps an existing gRPC content client in the
// content.Store interface.
func NewStoreFromClient(client contentapi.ContentClient) content.Store {
	return &remoteStore{
		client: client,
	}
}
// Info fetches committed-blob metadata for dgst from the remote service.
func (rs *remoteStore) Info(ctx context.Context, dgst digest.Digest) (content.Info, error) {
	req := &contentapi.InfoRequest{
		Digest: dgst,
	}
	resp, err := rs.client.Info(ctx, req)
	if err != nil {
		return content.Info{}, rewriteGRPCError(err)
	}
	info := content.Info{
		Digest:      resp.Info.Digest,
		Size:        resp.Info.Size_,
		CommittedAt: resp.Info.CommittedAt,
	}
	return info, nil
}
// Walk streams the remote blob list and invokes fn once per blob, stopping
// early if fn returns an error.
func (rs *remoteStore) Walk(ctx context.Context, fn content.WalkFunc) error {
	session, err := rs.client.List(ctx, &contentapi.ListContentRequest{})
	if err != nil {
		return rewriteGRPCError(err)
	}
	for {
		msg, err := session.Recv()
		if err == io.EOF {
			// Clean end of stream: the server sent everything.
			return nil
		}
		if err != nil {
			return rewriteGRPCError(err)
		}
		for _, info := range msg.Info {
			converted := content.Info{
				Digest:      info.Digest,
				Size:        info.Size_,
				CommittedAt: info.CommittedAt,
			}
			if err := fn(converted); err != nil {
				return err
			}
		}
	}
}
// Delete removes the committed blob identified by dgst on the remote store.
func (rs *remoteStore) Delete(ctx context.Context, dgst digest.Digest) error {
	req := &contentapi.DeleteContentRequest{
		Digest: dgst,
	}
	_, err := rs.client.Delete(ctx, req)
	if err != nil {
		return rewriteGRPCError(err)
	}
	return nil
}
// Reader opens a streaming read of blob dgst and presents it as an
// io.ReadCloser.
func (rs *remoteStore) Reader(ctx context.Context, dgst digest.Digest) (io.ReadCloser, error) {
	rc, err := rs.client.Read(ctx, &contentapi.ReadRequest{Digest: dgst})
	if err != nil {
		return nil, err
	}
	return &remoteReader{client: rc}, nil
}
// Status lists in-flight ingests on the remote store whose refs match the
// regexp re.
func (rs *remoteStore) Status(ctx context.Context, re string) ([]content.Status, error) {
	resp, err := rs.client.Status(ctx, &contentapi.StatusRequest{
		Regexp: re,
	})
	if err != nil {
		return nil, rewriteGRPCError(err)
	}
	var statuses []content.Status
	for _, st := range resp.Statuses {
		statuses = append(statuses, content.Status{
			Ref:       st.Ref,
			StartedAt: st.StartedAt,
			UpdatedAt: st.UpdatedAt,
			Offset:    st.Offset,
			Total:     st.Total,
			Expected:  st.Expected,
		})
	}
	return statuses, nil
}
// Writer begins (or resumes) an ingest under ref, returning a content.Writer
// positioned at the offset the server reports as already written.
func (rs *remoteStore) Writer(ctx context.Context, ref string, size int64, expected digest.Digest) (content.Writer, error) {
	wc, offset, err := rs.negotiate(ctx, ref, size, expected)
	if err != nil {
		return nil, rewriteGRPCError(err)
	}
	w := &remoteWriter{
		client: wc,
		offset: offset,
	}
	return w, nil
}
// Abort cancels the in-flight ingest identified by ref via the remote Abort
// RPC so its partial data can be discarded.
func (rs *remoteStore) Abort(ctx context.Context, ref string) error {
	if _, err := rs.client.Abort(ctx, &contentapi.AbortRequest{
		Ref: ref,
	}); err != nil {
		return rewriteGRPCError(err)
	}
	return nil
}
// negotiate opens a bidirectional write stream and performs the initial
// handshake: it sends a WriteActionStat request carrying the ref, total
// size and expected digest, then reads the server's reply to learn the
// current offset of any partial write. It returns the open stream and
// that offset so the caller can resume writing from the right position.
func (rs *remoteStore) negotiate(ctx context.Context, ref string, size int64, expected digest.Digest) (contentapi.Content_WriteClient, int64, error) {
	stream, err := rs.client.Write(ctx)
	if err != nil {
		return nil, 0, err
	}
	stat := contentapi.WriteRequest{
		Action:   contentapi.WriteActionStat,
		Ref:      ref,
		Total:    size,
		Expected: expected,
	}
	if err := stream.Send(&stat); err != nil {
		return nil, 0, err
	}
	resp, err := stream.Recv()
	if err != nil {
		return nil, 0, err
	}
	return stream, resp.Offset, nil
}

View File

@ -1,61 +1,14 @@
package content package content
import ( import (
"context"
"io" "io"
contentapi "github.com/containerd/containerd/api/services/content" contentapi "github.com/containerd/containerd/api/services/content"
"github.com/containerd/containerd/content" "github.com/containerd/containerd/content"
digest "github.com/opencontainers/go-digest" digest "github.com/opencontainers/go-digest"
"github.com/pkg/errors" "github.com/pkg/errors"
) )
func NewIngesterFromClient(client contentapi.ContentClient) content.Ingester {
return &remoteIngester{
client: client,
}
}
type remoteIngester struct {
client contentapi.ContentClient
}
func (ri *remoteIngester) Writer(ctx context.Context, ref string, size int64, expected digest.Digest) (content.Writer, error) {
wrclient, offset, err := ri.negotiate(ctx, ref, size, expected)
if err != nil {
return nil, rewriteGRPCError(err)
}
return &remoteWriter{
client: wrclient,
offset: offset,
}, nil
}
func (ri *remoteIngester) negotiate(ctx context.Context, ref string, size int64, expected digest.Digest) (contentapi.Content_WriteClient, int64, error) {
wrclient, err := ri.client.Write(ctx)
if err != nil {
return nil, 0, err
}
if err := wrclient.Send(&contentapi.WriteRequest{
Action: contentapi.WriteActionStat,
Ref: ref,
Total: size,
Expected: expected,
}); err != nil {
return nil, 0, err
}
resp, err := wrclient.Recv()
if err != nil {
return nil, 0, err
}
return wrclient, resp.Offset, nil
}
type remoteWriter struct { type remoteWriter struct {
ref string ref string
client contentapi.Content_WriteClient client contentapi.Content_WriteClient
@ -127,6 +80,9 @@ func (rw *remoteWriter) Write(p []byte) (n int, err error) {
} }
rw.offset += int64(n) rw.offset += int64(n)
if resp.Digest != "" {
rw.digest = resp.Digest
}
return return
} }
@ -149,6 +105,8 @@ func (rw *remoteWriter) Commit(size int64, expected digest.Digest) error {
return errors.Errorf("unexpected digest: %v != %v", resp.Digest, expected) return errors.Errorf("unexpected digest: %v != %v", resp.Digest, expected)
} }
rw.digest = resp.Digest
rw.offset = resp.Offset
return nil return nil
} }

View File

@ -26,11 +26,11 @@ func init() {
} }
type Service struct { type Service struct {
store *content.Store store content.Store
snapshotter snapshot.Snapshotter snapshotter snapshot.Snapshotter
} }
func NewService(store *content.Store, snapshotter snapshot.Snapshotter) (*Service, error) { func NewService(store content.Store, snapshotter snapshot.Snapshotter) (*Service, error) {
return &Service{ return &Service{
store: store, store: store,
snapshotter: snapshotter, snapshotter: snapshotter,

View File

@ -23,6 +23,24 @@ type Info struct {
Readonly bool // true if readonly, only valid for active Readonly bool // true if readonly, only valid for active
} }
// Usage defines statistics for disk resources consumed by the snapshot.
//
// These resources only include the resources consumed by the snapshot itself
// and does not include resources usage by the parent.
type Usage struct {
Inodes int64 // number of inodes in use.
Size int64 // provides usage, in bytes, of snapshot
}
func (u *Usage) Add(other Usage) {
u.Size += other.Size
	// TODO(stevvooe): assumes independent inodes, but provides an upper
	// bound. This should be pretty close, assuming the inodes for a
// snapshot are roughly unique to it. Don't trust this assumption.
u.Inodes += other.Inodes
}
// Snapshotter defines the methods required to implement a snapshot snapshotter for // Snapshotter defines the methods required to implement a snapshot snapshotter for
// allocating, snapshotting and mounting filesystem changesets. The model works // allocating, snapshotting and mounting filesystem changesets. The model works
// by building up sets of changes with parent-child relationships. // by building up sets of changes with parent-child relationships.
@ -45,6 +63,7 @@ type Info struct {
// For consistency, we define the following terms to be used throughout this // For consistency, we define the following terms to be used throughout this
// interface for snapshotter implementations: // interface for snapshotter implementations:
// //
// `ctx` - refers to a context.Context
// `key` - refers to an active snapshot // `key` - refers to an active snapshot
// `name` - refers to a committed snapshot // `name` - refers to a committed snapshot
// `parent` - refers to the parent in relation // `parent` - refers to the parent in relation
@ -71,14 +90,14 @@ type Info struct {
// We start by using a Snapshotter to Prepare a new snapshot transaction, using a // We start by using a Snapshotter to Prepare a new snapshot transaction, using a
// key and descending from the empty parent "": // key and descending from the empty parent "":
// //
// mounts, err := snapshotter.Prepare(key, "") // mounts, err := snapshotter.Prepare(ctx, key, "")
// if err != nil { ... } // if err != nil { ... }
// //
// We get back a list of mounts from Snapshotter.Prepare, with the key identifying // We get back a list of mounts from Snapshotter.Prepare, with the key identifying
// the active snapshot. Mount this to the temporary location with the // the active snapshot. Mount this to the temporary location with the
// following: // following:
// //
// if err := MountAll(mounts, tmpDir); err != nil { ... } // if err := containerd.MountAll(mounts, tmpDir); err != nil { ... }
// //
// Once the mounts are performed, our temporary location is ready to capture // Once the mounts are performed, our temporary location is ready to capture
// a diff. In practice, this works similar to a filesystem transaction. The // a diff. In practice, this works similar to a filesystem transaction. The
@ -102,21 +121,21 @@ type Info struct {
// snapshot to a name. For this example, we are just going to use the layer // snapshot to a name. For this example, we are just going to use the layer
// digest, but in practice, this will probably be the ChainID: // digest, but in practice, this will probably be the ChainID:
// //
// if err := snapshotter.Commit(digest.String(), key); err != nil { ... } // if err := snapshotter.Commit(ctx, digest.String(), key); err != nil { ... }
// //
// Now, we have a layer in the Snapshotter that can be accessed with the digest // Now, we have a layer in the Snapshotter that can be accessed with the digest
// provided during commit. Once you have committed the snapshot, the active // provided during commit. Once you have committed the snapshot, the active
// snapshot can be removed with the following: // snapshot can be removed with the following:
// //
// snapshotter.Remove(key) // snapshotter.Remove(ctx, key)
// //
// Importing the Next Layer // Importing the Next Layer
// //
// Making a layer depend on the above is identical to the process described // Making a layer depend on the above is identical to the process described
// above except that the parent is provided as parent when calling // above except that the parent is provided as parent when calling
// Manager.Prepare, assuming a clean tmpLocation: // Manager.Prepare, assuming a clean, unique key identifier:
// //
// mounts, err := snapshotter.Prepare(tmpLocation, parentDigest) // mounts, err := snapshotter.Prepare(ctx, key, parentDigest)
// //
// We then mount, apply and commit, as we did above. The new snapshot will be // We then mount, apply and commit, as we did above. The new snapshot will be
// based on the content of the previous one. // based on the content of the previous one.
@ -127,13 +146,13 @@ type Info struct {
// snapshot as the parent. After mounting, the prepared path can // snapshot as the parent. After mounting, the prepared path can
// be used directly as the container's filesystem: // be used directly as the container's filesystem:
// //
// mounts, err := snapshotter.Prepare(containerKey, imageRootFSChainID) // mounts, err := snapshotter.Prepare(ctx, containerKey, imageRootFSChainID)
// //
// The returned mounts can then be passed directly to the container runtime. If // The returned mounts can then be passed directly to the container runtime. If
// one would like to create a new image from the filesystem, Manager.Commit is // one would like to create a new image from the filesystem, Manager.Commit is
// called: // called:
// //
// if err := snapshotter.Commit(newImageSnapshot, containerKey); err != nil { ... } // if err := snapshotter.Commit(ctx, newImageSnapshot, containerKey); err != nil { ... }
// //
// Alternatively, for most container runs, Snapshotter.Remove will be called to // Alternatively, for most container runs, Snapshotter.Remove will be called to
// signal the Snapshotter to abandon the changes. // signal the Snapshotter to abandon the changes.
@ -145,6 +164,16 @@ type Snapshotter interface {
// the kind of snapshot. // the kind of snapshot.
Stat(ctx context.Context, key string) (Info, error) Stat(ctx context.Context, key string) (Info, error)
// Usage returns the resource usage of an active or committed snapshot
// excluding the usage of parent snapshots.
//
// The running time of this call for active snapshots is dependent on
// implementation, but may be proportional to the size of the resource.
// Callers should take this into consideration. Implementations should
	// attempt to honor context cancellation and avoid taking locks when making
// the calculation.
Usage(ctx context.Context, key string) (Usage, error)
// Mounts returns the mounts for the active snapshot transaction identified // Mounts returns the mounts for the active snapshot transaction identified
// by key. Can be called on an read-write or readonly transaction. This is // by key. Can be called on an read-write or readonly transaction. This is
// available only for active snapshots. // available only for active snapshots.
@ -203,7 +232,7 @@ type Snapshotter interface {
// removed before proceeding. // removed before proceeding.
Remove(ctx context.Context, key string) error Remove(ctx context.Context, key string) error
// Walk the committed snapshots. For each snapshot in the snapshotter, the // Walk all snapshots in the snapshotter. For each snapshot in the
// function will be called. // snapshotter, the function will be called.
Walk(ctx context.Context, fn func(context.Context, Info) error) error Walk(ctx context.Context, fn func(context.Context, Info) error) error
} }

View File

@ -28,6 +28,11 @@ func GetLocalListener(path string, uid, gid int) (net.Listener, error) {
return l, err return l, err
} }
if err := os.Chmod(path, 0660); err != nil {
l.Close()
return nil, err
}
if err := os.Chown(path, uid, gid); err != nil { if err := os.Chown(path, uid, gid); err != nil {
l.Close() l.Close()
return nil, err return nil, err

View File

@ -1,7 +1,9 @@
github.com/crosbymichael/go-runc 65847bfc51952703ca24b564d10de50d3f2db6e7 github.com/coreos/go-systemd 48702e0da86bd25e76cfef347e2adeb434a0d0a6
github.com/crosbymichael/console f13f890e20a94bdec6c328cdf9410b7158f0cfa4 github.com/containerd/go-runc 5fe4d8cb7fdc0fae5f5a7f4f1d65a565032401b2
github.com/crosbymichael/cgroups a692a19766b072b86d89620c97a7916b2e2de3e7 github.com/containerd/console a3863895279f5104533fd999c1babf80faffd98c
github.com/containerd/cgroups 7b2d1a0f50963678d5799e29d17a4d611f5a5dee
github.com/docker/go-metrics 8fd5772bf1584597834c6f7961a530f06cbfbb87 github.com/docker/go-metrics 8fd5772bf1584597834c6f7961a530f06cbfbb87
github.com/godbus/dbus c7fdd8b5cd55e87b4e1f4e372cdb1db61dd6c66f
github.com/prometheus/client_golang v0.8.0 github.com/prometheus/client_golang v0.8.0
github.com/prometheus/client_model fa8ad6fec33561be4280a8f0514318c79d7f6cb6 github.com/prometheus/client_model fa8ad6fec33561be4280a8f0514318c79d7f6cb6
github.com/prometheus/common 195bde7883f7c39ea62b0d92ab7359b5327065cb github.com/prometheus/common 195bde7883f7c39ea62b0d92ab7359b5327065cb
@ -14,24 +16,24 @@ github.com/golang/protobuf 8ee79997227bf9b34611aee7946ae64735e6fd93
github.com/opencontainers/runc 50401b5b4c2e01e4f1372b73a021742deeaf4e2d github.com/opencontainers/runc 50401b5b4c2e01e4f1372b73a021742deeaf4e2d
github.com/opencontainers/runtime-spec 035da1dca3dfbb00d752eb58b0b158d6129f3776 github.com/opencontainers/runtime-spec 035da1dca3dfbb00d752eb58b0b158d6129f3776
github.com/Sirupsen/logrus v0.11.0 github.com/Sirupsen/logrus v0.11.0
github.com/stevvooe/go-btrfs ea304655a3ed8f00773db1844f921d12541ee0d1 github.com/containerd/btrfs e9c546f46bccffefe71a6bc137e4c21b5503cc18
github.com/stretchr/testify v1.1.4 github.com/stretchr/testify v1.1.4
github.com/davecgh/go-spew v1.1.0 github.com/davecgh/go-spew v1.1.0
github.com/pmezard/go-difflib v1.0.0 github.com/pmezard/go-difflib v1.0.0
github.com/tonistiigi/fifo fe870ccf293940774c2b44e23f6c71fff8f7547d github.com/containerd/fifo 1c36a62ed52ac0235d524d6371b746db4e4eef72
github.com/urfave/cli 8ba6f23b6e36d03666a14bd9421f5e3efcb59aca github.com/urfave/cli 8ba6f23b6e36d03666a14bd9421f5e3efcb59aca
golang.org/x/net 8b4af36cd21a1f85a7484b49feb7c79363106d8e golang.org/x/net 8b4af36cd21a1f85a7484b49feb7c79363106d8e
google.golang.org/grpc v1.0.5 google.golang.org/grpc v1.0.5
github.com/pkg/errors v0.8.0 github.com/pkg/errors v0.8.0
github.com/nightlyone/lockfile 1d49c987357a327b5b03aa84cbddd582c328615d github.com/nightlyone/lockfile 1d49c987357a327b5b03aa84cbddd582c328615d
github.com/opencontainers/go-digest 21dfd564fd89c944783d00d069f33e3e7123c448 github.com/opencontainers/go-digest 21dfd564fd89c944783d00d069f33e3e7123c448
golang.org/x/sys/unix f3918c30c5c2cb527c0b071a27c35120a6c0719a golang.org/x/sys f3918c30c5c2cb527c0b071a27c35120a6c0719a
github.com/opencontainers/image-spec a431dbcf6a74fca2e0e040b819a836dbe3fb23ca github.com/opencontainers/image-spec a431dbcf6a74fca2e0e040b819a836dbe3fb23ca
github.com/stevvooe/continuity 577e137350afb00343495f55bb8671fe7e22b0bf github.com/containerd/continuity 6414d06cab9e2fe082ea29ff42aab627e740d00c
golang.org/x/sync 450f422ab23cf9881c94e2db30cac0eb1b7cf80c golang.org/x/sync 450f422ab23cf9881c94e2db30cac0eb1b7cf80c
github.com/BurntSushi/toml v0.2.0-21-g9906417 github.com/BurntSushi/toml v0.2.0-21-g9906417
github.com/grpc-ecosystem/go-grpc-prometheus 6b7015e65d366bf3f19b2b2a000a831940f0f7e0 github.com/grpc-ecosystem/go-grpc-prometheus 6b7015e65d366bf3f19b2b2a000a831940f0f7e0
github.com/Microsoft/go-winio fff283ad5116362ca252298cfc9b95828956d85d github.com/Microsoft/go-winio fff283ad5116362ca252298cfc9b95828956d85d
github.com/boltdb/bolt e9cf4fae01b5a8ff89d0ec6b32f0d9c9f79aefdd github.com/boltdb/bolt e9cf4fae01b5a8ff89d0ec6b32f0d9c9f79aefdd
github.com/Microsoft/hcsshim v0.5.15 github.com/Microsoft/hcsshim v0.5.15
github.com/Azure/go-ansiterm/winterm fa152c58bc15761d0200cb75fe958b89a9d4888e github.com/Azure/go-ansiterm fa152c58bc15761d0200cb75fe958b89a9d4888e

View File

@ -6,16 +6,19 @@
package assert package assert
import ( import (
http "net/http" http "net/http"
url "net/url" url "net/url"
time "time" time "time"
) )
// Condition uses a Comparison to assert a complex condition. // Condition uses a Comparison to assert a complex condition.
func (a *Assertions) Condition(comp Comparison, msgAndArgs ...interface{}) bool { func (a *Assertions) Condition(comp Comparison, msgAndArgs ...interface{}) bool {
return Condition(a.t, comp, msgAndArgs...) return Condition(a.t, comp, msgAndArgs...)
} }
// Contains asserts that the specified string, list(array, slice...) or map contains the // Contains asserts that the specified string, list(array, slice...) or map contains the
// specified substring or element. // specified substring or element.
// //
@ -28,6 +31,7 @@ func (a *Assertions) Contains(s interface{}, contains interface{}, msgAndArgs ..
return Contains(a.t, s, contains, msgAndArgs...) return Contains(a.t, s, contains, msgAndArgs...)
} }
// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either // Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either
// a slice or a channel with len == 0. // a slice or a channel with len == 0.
// //
@ -38,6 +42,7 @@ func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) bool {
return Empty(a.t, object, msgAndArgs...) return Empty(a.t, object, msgAndArgs...)
} }
// Equal asserts that two objects are equal. // Equal asserts that two objects are equal.
// //
// a.Equal(123, 123, "123 and 123 should be equal") // a.Equal(123, 123, "123 and 123 should be equal")
@ -47,17 +52,21 @@ func (a *Assertions) Equal(expected interface{}, actual interface{}, msgAndArgs
return Equal(a.t, expected, actual, msgAndArgs...) return Equal(a.t, expected, actual, msgAndArgs...)
} }
// EqualError asserts that a function returned an error (i.e. not `nil`) // EqualError asserts that a function returned an error (i.e. not `nil`)
// and that it is equal to the provided error. // and that it is equal to the provided error.
// //
// actualObj, err := SomeFunction() // actualObj, err := SomeFunction()
// a.EqualError(err, expectedErrorString, "An error was expected") // if assert.Error(t, err, "An error was expected") {
// assert.Equal(t, err, expectedError)
// }
// //
// Returns whether the assertion was successful (true) or not (false). // Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) EqualError(theError error, errString string, msgAndArgs ...interface{}) bool { func (a *Assertions) EqualError(theError error, errString string, msgAndArgs ...interface{}) bool {
return EqualError(a.t, theError, errString, msgAndArgs...) return EqualError(a.t, theError, errString, msgAndArgs...)
} }
// EqualValues asserts that two objects are equal or convertable to the same types // EqualValues asserts that two objects are equal or convertable to the same types
// and equal. // and equal.
// //
@ -68,6 +77,7 @@ func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAn
return EqualValues(a.t, expected, actual, msgAndArgs...) return EqualValues(a.t, expected, actual, msgAndArgs...)
} }
// Error asserts that a function returned an error (i.e. not `nil`). // Error asserts that a function returned an error (i.e. not `nil`).
// //
// actualObj, err := SomeFunction() // actualObj, err := SomeFunction()
@ -80,6 +90,7 @@ func (a *Assertions) Error(err error, msgAndArgs ...interface{}) bool {
return Error(a.t, err, msgAndArgs...) return Error(a.t, err, msgAndArgs...)
} }
// Exactly asserts that two objects are equal is value and type. // Exactly asserts that two objects are equal is value and type.
// //
// a.Exactly(int32(123), int64(123), "123 and 123 should NOT be equal") // a.Exactly(int32(123), int64(123), "123 and 123 should NOT be equal")
@ -89,16 +100,19 @@ func (a *Assertions) Exactly(expected interface{}, actual interface{}, msgAndArg
return Exactly(a.t, expected, actual, msgAndArgs...) return Exactly(a.t, expected, actual, msgAndArgs...)
} }
// Fail reports a failure through // Fail reports a failure through
func (a *Assertions) Fail(failureMessage string, msgAndArgs ...interface{}) bool { func (a *Assertions) Fail(failureMessage string, msgAndArgs ...interface{}) bool {
return Fail(a.t, failureMessage, msgAndArgs...) return Fail(a.t, failureMessage, msgAndArgs...)
} }
// FailNow fails test // FailNow fails test
func (a *Assertions) FailNow(failureMessage string, msgAndArgs ...interface{}) bool { func (a *Assertions) FailNow(failureMessage string, msgAndArgs ...interface{}) bool {
return FailNow(a.t, failureMessage, msgAndArgs...) return FailNow(a.t, failureMessage, msgAndArgs...)
} }
// False asserts that the specified value is false. // False asserts that the specified value is false.
// //
// a.False(myBool, "myBool should be false") // a.False(myBool, "myBool should be false")
@ -108,6 +122,7 @@ func (a *Assertions) False(value bool, msgAndArgs ...interface{}) bool {
return False(a.t, value, msgAndArgs...) return False(a.t, value, msgAndArgs...)
} }
// HTTPBodyContains asserts that a specified handler returns a // HTTPBodyContains asserts that a specified handler returns a
// body that contains a string. // body that contains a string.
// //
@ -118,6 +133,7 @@ func (a *Assertions) HTTPBodyContains(handler http.HandlerFunc, method string, u
return HTTPBodyContains(a.t, handler, method, url, values, str) return HTTPBodyContains(a.t, handler, method, url, values, str)
} }
// HTTPBodyNotContains asserts that a specified handler returns a // HTTPBodyNotContains asserts that a specified handler returns a
// body that does not contain a string. // body that does not contain a string.
// //
@ -128,6 +144,7 @@ func (a *Assertions) HTTPBodyNotContains(handler http.HandlerFunc, method string
return HTTPBodyNotContains(a.t, handler, method, url, values, str) return HTTPBodyNotContains(a.t, handler, method, url, values, str)
} }
// HTTPError asserts that a specified handler returns an error status code. // HTTPError asserts that a specified handler returns an error status code.
// //
// a.HTTPError(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} // a.HTTPError(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}}
@ -137,6 +154,7 @@ func (a *Assertions) HTTPError(handler http.HandlerFunc, method string, url stri
return HTTPError(a.t, handler, method, url, values) return HTTPError(a.t, handler, method, url, values)
} }
// HTTPRedirect asserts that a specified handler returns a redirect status code. // HTTPRedirect asserts that a specified handler returns a redirect status code.
// //
// a.HTTPRedirect(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} // a.HTTPRedirect(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}}
@ -146,6 +164,7 @@ func (a *Assertions) HTTPRedirect(handler http.HandlerFunc, method string, url s
return HTTPRedirect(a.t, handler, method, url, values) return HTTPRedirect(a.t, handler, method, url, values)
} }
// HTTPSuccess asserts that a specified handler returns a success status code. // HTTPSuccess asserts that a specified handler returns a success status code.
// //
// a.HTTPSuccess(myHandler, "POST", "http://www.google.com", nil) // a.HTTPSuccess(myHandler, "POST", "http://www.google.com", nil)
@ -155,6 +174,7 @@ func (a *Assertions) HTTPSuccess(handler http.HandlerFunc, method string, url st
return HTTPSuccess(a.t, handler, method, url, values) return HTTPSuccess(a.t, handler, method, url, values)
} }
// Implements asserts that an object is implemented by the specified interface. // Implements asserts that an object is implemented by the specified interface.
// //
// a.Implements((*MyInterface)(nil), new(MyObject), "MyObject") // a.Implements((*MyInterface)(nil), new(MyObject), "MyObject")
@ -162,6 +182,7 @@ func (a *Assertions) Implements(interfaceObject interface{}, object interface{},
return Implements(a.t, interfaceObject, object, msgAndArgs...) return Implements(a.t, interfaceObject, object, msgAndArgs...)
} }
// InDelta asserts that the two numerals are within delta of each other. // InDelta asserts that the two numerals are within delta of each other.
// //
// a.InDelta(math.Pi, (22 / 7.0), 0.01) // a.InDelta(math.Pi, (22 / 7.0), 0.01)
@ -171,11 +192,13 @@ func (a *Assertions) InDelta(expected interface{}, actual interface{}, delta flo
return InDelta(a.t, expected, actual, delta, msgAndArgs...) return InDelta(a.t, expected, actual, delta, msgAndArgs...)
} }
// InDeltaSlice is the same as InDelta, except it compares two slices. // InDeltaSlice is the same as InDelta, except it compares two slices.
func (a *Assertions) InDeltaSlice(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { func (a *Assertions) InDeltaSlice(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool {
return InDeltaSlice(a.t, expected, actual, delta, msgAndArgs...) return InDeltaSlice(a.t, expected, actual, delta, msgAndArgs...)
} }
// InEpsilon asserts that expected and actual have a relative error less than epsilon // InEpsilon asserts that expected and actual have a relative error less than epsilon
// //
// Returns whether the assertion was successful (true) or not (false). // Returns whether the assertion was successful (true) or not (false).
@ -183,16 +206,19 @@ func (a *Assertions) InEpsilon(expected interface{}, actual interface{}, epsilon
return InEpsilon(a.t, expected, actual, epsilon, msgAndArgs...) return InEpsilon(a.t, expected, actual, epsilon, msgAndArgs...)
} }
// InEpsilonSlice is the same as InEpsilon, except it compares each value from two slices.
func (a *Assertions) InEpsilonSlice(expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool { // InEpsilonSlice is the same as InEpsilon, except it compares two slices.
return InEpsilonSlice(a.t, expected, actual, epsilon, msgAndArgs...) func (a *Assertions) InEpsilonSlice(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool {
return InEpsilonSlice(a.t, expected, actual, delta, msgAndArgs...)
} }
// IsType asserts that the specified objects are of the same type. // IsType asserts that the specified objects are of the same type.
func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool { func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool {
return IsType(a.t, expectedType, object, msgAndArgs...) return IsType(a.t, expectedType, object, msgAndArgs...)
} }
// JSONEq asserts that two JSON strings are equivalent. // JSONEq asserts that two JSON strings are equivalent.
// //
// a.JSONEq(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) // a.JSONEq(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`)
@ -202,6 +228,7 @@ func (a *Assertions) JSONEq(expected string, actual string, msgAndArgs ...interf
return JSONEq(a.t, expected, actual, msgAndArgs...) return JSONEq(a.t, expected, actual, msgAndArgs...)
} }
// Len asserts that the specified object has specific length. // Len asserts that the specified object has specific length.
// Len also fails if the object has a type that len() not accept. // Len also fails if the object has a type that len() not accept.
// //
@ -212,6 +239,7 @@ func (a *Assertions) Len(object interface{}, length int, msgAndArgs ...interface
return Len(a.t, object, length, msgAndArgs...) return Len(a.t, object, length, msgAndArgs...)
} }
// Nil asserts that the specified object is nil. // Nil asserts that the specified object is nil.
// //
// a.Nil(err, "err should be nothing") // a.Nil(err, "err should be nothing")
@ -221,6 +249,7 @@ func (a *Assertions) Nil(object interface{}, msgAndArgs ...interface{}) bool {
return Nil(a.t, object, msgAndArgs...) return Nil(a.t, object, msgAndArgs...)
} }
// NoError asserts that a function returned no error (i.e. `nil`). // NoError asserts that a function returned no error (i.e. `nil`).
// //
// actualObj, err := SomeFunction() // actualObj, err := SomeFunction()
@ -233,6 +262,7 @@ func (a *Assertions) NoError(err error, msgAndArgs ...interface{}) bool {
return NoError(a.t, err, msgAndArgs...) return NoError(a.t, err, msgAndArgs...)
} }
// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the // NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the
// specified substring or element. // specified substring or element.
// //
@ -245,6 +275,7 @@ func (a *Assertions) NotContains(s interface{}, contains interface{}, msgAndArgs
return NotContains(a.t, s, contains, msgAndArgs...) return NotContains(a.t, s, contains, msgAndArgs...)
} }
// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either // NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either
// a slice or a channel with len == 0. // a slice or a channel with len == 0.
// //
@ -257,6 +288,7 @@ func (a *Assertions) NotEmpty(object interface{}, msgAndArgs ...interface{}) boo
return NotEmpty(a.t, object, msgAndArgs...) return NotEmpty(a.t, object, msgAndArgs...)
} }
// NotEqual asserts that the specified values are NOT equal. // NotEqual asserts that the specified values are NOT equal.
// //
// a.NotEqual(obj1, obj2, "two objects shouldn't be equal") // a.NotEqual(obj1, obj2, "two objects shouldn't be equal")
@ -266,6 +298,7 @@ func (a *Assertions) NotEqual(expected interface{}, actual interface{}, msgAndAr
return NotEqual(a.t, expected, actual, msgAndArgs...) return NotEqual(a.t, expected, actual, msgAndArgs...)
} }
// NotNil asserts that the specified object is not nil. // NotNil asserts that the specified object is not nil.
// //
// a.NotNil(err, "err should be something") // a.NotNil(err, "err should be something")
@ -275,6 +308,7 @@ func (a *Assertions) NotNil(object interface{}, msgAndArgs ...interface{}) bool
return NotNil(a.t, object, msgAndArgs...) return NotNil(a.t, object, msgAndArgs...)
} }
// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic. // NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic.
// //
// a.NotPanics(func(){ // a.NotPanics(func(){
@ -286,6 +320,7 @@ func (a *Assertions) NotPanics(f PanicTestFunc, msgAndArgs ...interface{}) bool
return NotPanics(a.t, f, msgAndArgs...) return NotPanics(a.t, f, msgAndArgs...)
} }
// NotRegexp asserts that a specified regexp does not match a string. // NotRegexp asserts that a specified regexp does not match a string.
// //
// a.NotRegexp(regexp.MustCompile("starts"), "it's starting") // a.NotRegexp(regexp.MustCompile("starts"), "it's starting")
@ -296,11 +331,13 @@ func (a *Assertions) NotRegexp(rx interface{}, str interface{}, msgAndArgs ...in
return NotRegexp(a.t, rx, str, msgAndArgs...) return NotRegexp(a.t, rx, str, msgAndArgs...)
} }
// NotZero asserts that i is not the zero value for its type and returns the truth. // NotZero asserts that i is not the zero value for its type and returns the truth.
func (a *Assertions) NotZero(i interface{}, msgAndArgs ...interface{}) bool { func (a *Assertions) NotZero(i interface{}, msgAndArgs ...interface{}) bool {
return NotZero(a.t, i, msgAndArgs...) return NotZero(a.t, i, msgAndArgs...)
} }
// Panics asserts that the code inside the specified PanicTestFunc panics. // Panics asserts that the code inside the specified PanicTestFunc panics.
// //
// a.Panics(func(){ // a.Panics(func(){
@ -312,6 +349,7 @@ func (a *Assertions) Panics(f PanicTestFunc, msgAndArgs ...interface{}) bool {
return Panics(a.t, f, msgAndArgs...) return Panics(a.t, f, msgAndArgs...)
} }
// Regexp asserts that a specified regexp matches a string. // Regexp asserts that a specified regexp matches a string.
// //
// a.Regexp(regexp.MustCompile("start"), "it's starting") // a.Regexp(regexp.MustCompile("start"), "it's starting")
@ -322,6 +360,7 @@ func (a *Assertions) Regexp(rx interface{}, str interface{}, msgAndArgs ...inter
return Regexp(a.t, rx, str, msgAndArgs...) return Regexp(a.t, rx, str, msgAndArgs...)
} }
// True asserts that the specified value is true. // True asserts that the specified value is true.
// //
// a.True(myBool, "myBool should be true") // a.True(myBool, "myBool should be true")
@ -331,6 +370,7 @@ func (a *Assertions) True(value bool, msgAndArgs ...interface{}) bool {
return True(a.t, value, msgAndArgs...) return True(a.t, value, msgAndArgs...)
} }
// WithinDuration asserts that the two times are within duration delta of each other. // WithinDuration asserts that the two times are within duration delta of each other.
// //
// a.WithinDuration(time.Now(), time.Now(), 10*time.Second, "The difference should not be more than 10s") // a.WithinDuration(time.Now(), time.Now(), 10*time.Second, "The difference should not be more than 10s")
@ -340,6 +380,7 @@ func (a *Assertions) WithinDuration(expected time.Time, actual time.Time, delta
return WithinDuration(a.t, expected, actual, delta, msgAndArgs...) return WithinDuration(a.t, expected, actual, delta, msgAndArgs...)
} }
// Zero asserts that i is the zero value for its type and returns the truth. // Zero asserts that i is the zero value for its type and returns the truth.
func (a *Assertions) Zero(i interface{}, msgAndArgs ...interface{}) bool { func (a *Assertions) Zero(i interface{}, msgAndArgs ...interface{}) bool {
return Zero(a.t, i, msgAndArgs...) return Zero(a.t, i, msgAndArgs...)

View File

@ -18,6 +18,10 @@ import (
"github.com/pmezard/go-difflib/difflib" "github.com/pmezard/go-difflib/difflib"
) )
func init() {
spew.Config.SortKeys = true
}
// TestingT is an interface wrapper around *testing.T // TestingT is an interface wrapper around *testing.T
type TestingT interface { type TestingT interface {
Errorf(format string, args ...interface{}) Errorf(format string, args ...interface{})
@ -275,9 +279,8 @@ func Equal(t TestingT, expected, actual interface{}, msgAndArgs ...interface{})
if !ObjectsAreEqual(expected, actual) { if !ObjectsAreEqual(expected, actual) {
diff := diff(expected, actual) diff := diff(expected, actual)
expected, actual = formatUnequalValues(expected, actual) expected, actual = formatUnequalValues(expected, actual)
return Fail(t, fmt.Sprintf("Not equal: \n"+ return Fail(t, fmt.Sprintf("Not equal: %s (expected)\n"+
"expected: %s\n"+ " != %s (actual)%s", expected, actual, diff), msgAndArgs...)
"received: %s%s", expected, actual, diff), msgAndArgs...)
} }
return true return true
@ -325,11 +328,8 @@ func isNumericType(t reflect.Type) bool {
func EqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { func EqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
if !ObjectsAreEqualValues(expected, actual) { if !ObjectsAreEqualValues(expected, actual) {
diff := diff(expected, actual) return Fail(t, fmt.Sprintf("Not equal: %#v (expected)\n"+
expected, actual = formatUnequalValues(expected, actual) " != %#v (actual)", expected, actual), msgAndArgs...)
return Fail(t, fmt.Sprintf("Not equal: \n"+
"expected: %s\n"+
"received: %s%s", expected, actual, diff), msgAndArgs...)
} }
return true return true
@ -882,7 +882,7 @@ func InEpsilonSlice(t TestingT, expected, actual interface{}, epsilon float64, m
// Returns whether the assertion was successful (true) or not (false). // Returns whether the assertion was successful (true) or not (false).
func NoError(t TestingT, err error, msgAndArgs ...interface{}) bool { func NoError(t TestingT, err error, msgAndArgs ...interface{}) bool {
if err != nil { if err != nil {
return Fail(t, fmt.Sprintf("Received unexpected error:\n%+v", err), msgAndArgs...) return Fail(t, fmt.Sprintf("Received unexpected error %+v", err), msgAndArgs...)
} }
return true return true
@ -913,18 +913,14 @@ func Error(t TestingT, err error, msgAndArgs ...interface{}) bool {
// //
// Returns whether the assertion was successful (true) or not (false). // Returns whether the assertion was successful (true) or not (false).
func EqualError(t TestingT, theError error, errString string, msgAndArgs ...interface{}) bool { func EqualError(t TestingT, theError error, errString string, msgAndArgs ...interface{}) bool {
if !Error(t, theError, msgAndArgs...) {
message := messageFromMsgAndArgs(msgAndArgs...)
if !NotNil(t, theError, "An error is expected but got nil. %s", message) {
return false return false
} }
expected := errString s := "An error with value \"%s\" is expected but got \"%s\". %s"
actual := theError.Error() return Equal(t, errString, theError.Error(),
// don't need to use deep equals here, we know they are both strings s, errString, theError.Error(), message)
if expected != actual {
return Fail(t, fmt.Sprintf("Error message not equal:\n"+
"expected: %q\n"+
"received: %q", expected, actual), msgAndArgs...)
}
return true
} }
// matchRegexp return true if a specified regexp matches a string. // matchRegexp return true if a specified regexp matches a string.
@ -1039,8 +1035,8 @@ func diff(expected interface{}, actual interface{}) string {
return "" return ""
} }
e := spewConfig.Sdump(expected) e := spew.Sdump(expected)
a := spewConfig.Sdump(actual) a := spew.Sdump(actual)
diff, _ := difflib.GetUnifiedDiffString(difflib.UnifiedDiff{ diff, _ := difflib.GetUnifiedDiffString(difflib.UnifiedDiff{
A: difflib.SplitLines(e), A: difflib.SplitLines(e),
@ -1054,10 +1050,3 @@ func diff(expected interface{}, actual interface{}) string {
return "\n\nDiff:\n" + diff return "\n\nDiff:\n" + diff
} }
var spewConfig = spew.ConfigState{
Indent: " ",
DisablePointerAddresses: true,
DisableCapacities: true,
SortKeys: true,
}

View File

@ -6,12 +6,14 @@
package require package require
import ( import (
assert "github.com/stretchr/testify/assert" assert "github.com/stretchr/testify/assert"
http "net/http" http "net/http"
url "net/url" url "net/url"
time "time" time "time"
) )
// Condition uses a Comparison to assert a complex condition. // Condition uses a Comparison to assert a complex condition.
func Condition(t TestingT, comp assert.Comparison, msgAndArgs ...interface{}) { func Condition(t TestingT, comp assert.Comparison, msgAndArgs ...interface{}) {
if !assert.Condition(t, comp, msgAndArgs...) { if !assert.Condition(t, comp, msgAndArgs...) {
@ -19,6 +21,7 @@ func Condition(t TestingT, comp assert.Comparison, msgAndArgs ...interface{}) {
} }
} }
// Contains asserts that the specified string, list(array, slice...) or map contains the // Contains asserts that the specified string, list(array, slice...) or map contains the
// specified substring or element. // specified substring or element.
// //
@ -33,6 +36,7 @@ func Contains(t TestingT, s interface{}, contains interface{}, msgAndArgs ...int
} }
} }
// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either // Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either
// a slice or a channel with len == 0. // a slice or a channel with len == 0.
// //
@ -45,6 +49,7 @@ func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) {
} }
} }
// Equal asserts that two objects are equal. // Equal asserts that two objects are equal.
// //
// assert.Equal(t, 123, 123, "123 and 123 should be equal") // assert.Equal(t, 123, 123, "123 and 123 should be equal")
@ -56,11 +61,14 @@ func Equal(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...i
} }
} }
// EqualError asserts that a function returned an error (i.e. not `nil`) // EqualError asserts that a function returned an error (i.e. not `nil`)
// and that it is equal to the provided error. // and that it is equal to the provided error.
// //
// actualObj, err := SomeFunction() // actualObj, err := SomeFunction()
// assert.EqualError(t, err, expectedErrorString, "An error was expected") // if assert.Error(t, err, "An error was expected") {
// assert.Equal(t, err, expectedError)
// }
// //
// Returns whether the assertion was successful (true) or not (false). // Returns whether the assertion was successful (true) or not (false).
func EqualError(t TestingT, theError error, errString string, msgAndArgs ...interface{}) { func EqualError(t TestingT, theError error, errString string, msgAndArgs ...interface{}) {
@ -69,6 +77,7 @@ func EqualError(t TestingT, theError error, errString string, msgAndArgs ...inte
} }
} }
// EqualValues asserts that two objects are equal or convertable to the same types // EqualValues asserts that two objects are equal or convertable to the same types
// and equal. // and equal.
// //
@ -81,6 +90,7 @@ func EqualValues(t TestingT, expected interface{}, actual interface{}, msgAndArg
} }
} }
// Error asserts that a function returned an error (i.e. not `nil`). // Error asserts that a function returned an error (i.e. not `nil`).
// //
// actualObj, err := SomeFunction() // actualObj, err := SomeFunction()
@ -95,6 +105,7 @@ func Error(t TestingT, err error, msgAndArgs ...interface{}) {
} }
} }
// Exactly asserts that two objects are equal is value and type. // Exactly asserts that two objects are equal is value and type.
// //
// assert.Exactly(t, int32(123), int64(123), "123 and 123 should NOT be equal") // assert.Exactly(t, int32(123), int64(123), "123 and 123 should NOT be equal")
@ -106,6 +117,7 @@ func Exactly(t TestingT, expected interface{}, actual interface{}, msgAndArgs ..
} }
} }
// Fail reports a failure through // Fail reports a failure through
func Fail(t TestingT, failureMessage string, msgAndArgs ...interface{}) { func Fail(t TestingT, failureMessage string, msgAndArgs ...interface{}) {
if !assert.Fail(t, failureMessage, msgAndArgs...) { if !assert.Fail(t, failureMessage, msgAndArgs...) {
@ -113,6 +125,7 @@ func Fail(t TestingT, failureMessage string, msgAndArgs ...interface{}) {
} }
} }
// FailNow fails test // FailNow fails test
func FailNow(t TestingT, failureMessage string, msgAndArgs ...interface{}) { func FailNow(t TestingT, failureMessage string, msgAndArgs ...interface{}) {
if !assert.FailNow(t, failureMessage, msgAndArgs...) { if !assert.FailNow(t, failureMessage, msgAndArgs...) {
@ -120,6 +133,7 @@ func FailNow(t TestingT, failureMessage string, msgAndArgs ...interface{}) {
} }
} }
// False asserts that the specified value is false. // False asserts that the specified value is false.
// //
// assert.False(t, myBool, "myBool should be false") // assert.False(t, myBool, "myBool should be false")
@ -131,6 +145,7 @@ func False(t TestingT, value bool, msgAndArgs ...interface{}) {
} }
} }
// HTTPBodyContains asserts that a specified handler returns a // HTTPBodyContains asserts that a specified handler returns a
// body that contains a string. // body that contains a string.
// //
@ -143,6 +158,7 @@ func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method string, url s
} }
} }
// HTTPBodyNotContains asserts that a specified handler returns a // HTTPBodyNotContains asserts that a specified handler returns a
// body that does not contain a string. // body that does not contain a string.
// //
@ -155,6 +171,7 @@ func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method string, ur
} }
} }
// HTTPError asserts that a specified handler returns an error status code. // HTTPError asserts that a specified handler returns an error status code.
// //
// assert.HTTPError(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} // assert.HTTPError(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}}
@ -166,6 +183,7 @@ func HTTPError(t TestingT, handler http.HandlerFunc, method string, url string,
} }
} }
// HTTPRedirect asserts that a specified handler returns a redirect status code. // HTTPRedirect asserts that a specified handler returns a redirect status code.
// //
// assert.HTTPRedirect(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} // assert.HTTPRedirect(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}}
@ -177,6 +195,7 @@ func HTTPRedirect(t TestingT, handler http.HandlerFunc, method string, url strin
} }
} }
// HTTPSuccess asserts that a specified handler returns a success status code. // HTTPSuccess asserts that a specified handler returns a success status code.
// //
// assert.HTTPSuccess(t, myHandler, "POST", "http://www.google.com", nil) // assert.HTTPSuccess(t, myHandler, "POST", "http://www.google.com", nil)
@ -188,6 +207,7 @@ func HTTPSuccess(t TestingT, handler http.HandlerFunc, method string, url string
} }
} }
// Implements asserts that an object is implemented by the specified interface. // Implements asserts that an object is implemented by the specified interface.
// //
// assert.Implements(t, (*MyInterface)(nil), new(MyObject), "MyObject") // assert.Implements(t, (*MyInterface)(nil), new(MyObject), "MyObject")
@ -197,6 +217,7 @@ func Implements(t TestingT, interfaceObject interface{}, object interface{}, msg
} }
} }
// InDelta asserts that the two numerals are within delta of each other. // InDelta asserts that the two numerals are within delta of each other.
// //
// assert.InDelta(t, math.Pi, (22 / 7.0), 0.01) // assert.InDelta(t, math.Pi, (22 / 7.0), 0.01)
@ -208,6 +229,7 @@ func InDelta(t TestingT, expected interface{}, actual interface{}, delta float64
} }
} }
// InDeltaSlice is the same as InDelta, except it compares two slices. // InDeltaSlice is the same as InDelta, except it compares two slices.
func InDeltaSlice(t TestingT, expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) { func InDeltaSlice(t TestingT, expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) {
if !assert.InDeltaSlice(t, expected, actual, delta, msgAndArgs...) { if !assert.InDeltaSlice(t, expected, actual, delta, msgAndArgs...) {
@ -215,6 +237,7 @@ func InDeltaSlice(t TestingT, expected interface{}, actual interface{}, delta fl
} }
} }
// InEpsilon asserts that expected and actual have a relative error less than epsilon // InEpsilon asserts that expected and actual have a relative error less than epsilon
// //
// Returns whether the assertion was successful (true) or not (false). // Returns whether the assertion was successful (true) or not (false).
@ -224,13 +247,15 @@ func InEpsilon(t TestingT, expected interface{}, actual interface{}, epsilon flo
} }
} }
// InEpsilonSlice is the same as InEpsilon, except it compares each value from two slices.
func InEpsilonSlice(t TestingT, expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) { // InEpsilonSlice is the same as InEpsilon, except it compares two slices.
if !assert.InEpsilonSlice(t, expected, actual, epsilon, msgAndArgs...) { func InEpsilonSlice(t TestingT, expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) {
if !assert.InEpsilonSlice(t, expected, actual, delta, msgAndArgs...) {
t.FailNow() t.FailNow()
} }
} }
// IsType asserts that the specified objects are of the same type. // IsType asserts that the specified objects are of the same type.
func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs ...interface{}) { func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs ...interface{}) {
if !assert.IsType(t, expectedType, object, msgAndArgs...) { if !assert.IsType(t, expectedType, object, msgAndArgs...) {
@ -238,6 +263,7 @@ func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs
} }
} }
// JSONEq asserts that two JSON strings are equivalent. // JSONEq asserts that two JSON strings are equivalent.
// //
// assert.JSONEq(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) // assert.JSONEq(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`)
@ -249,6 +275,7 @@ func JSONEq(t TestingT, expected string, actual string, msgAndArgs ...interface{
} }
} }
// Len asserts that the specified object has specific length. // Len asserts that the specified object has specific length.
// Len also fails if the object has a type that len() not accept. // Len also fails if the object has a type that len() not accept.
// //
@ -261,6 +288,7 @@ func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{})
} }
} }
// Nil asserts that the specified object is nil. // Nil asserts that the specified object is nil.
// //
// assert.Nil(t, err, "err should be nothing") // assert.Nil(t, err, "err should be nothing")
@ -272,6 +300,7 @@ func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) {
} }
} }
// NoError asserts that a function returned no error (i.e. `nil`). // NoError asserts that a function returned no error (i.e. `nil`).
// //
// actualObj, err := SomeFunction() // actualObj, err := SomeFunction()
@ -286,6 +315,7 @@ func NoError(t TestingT, err error, msgAndArgs ...interface{}) {
} }
} }
// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the // NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the
// specified substring or element. // specified substring or element.
// //
@ -300,6 +330,7 @@ func NotContains(t TestingT, s interface{}, contains interface{}, msgAndArgs ...
} }
} }
// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either // NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either
// a slice or a channel with len == 0. // a slice or a channel with len == 0.
// //
@ -314,6 +345,7 @@ func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) {
} }
} }
// NotEqual asserts that the specified values are NOT equal. // NotEqual asserts that the specified values are NOT equal.
// //
// assert.NotEqual(t, obj1, obj2, "two objects shouldn't be equal") // assert.NotEqual(t, obj1, obj2, "two objects shouldn't be equal")
@ -325,6 +357,7 @@ func NotEqual(t TestingT, expected interface{}, actual interface{}, msgAndArgs .
} }
} }
// NotNil asserts that the specified object is not nil. // NotNil asserts that the specified object is not nil.
// //
// assert.NotNil(t, err, "err should be something") // assert.NotNil(t, err, "err should be something")
@ -336,6 +369,7 @@ func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) {
} }
} }
// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic. // NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic.
// //
// assert.NotPanics(t, func(){ // assert.NotPanics(t, func(){
@ -349,6 +383,7 @@ func NotPanics(t TestingT, f assert.PanicTestFunc, msgAndArgs ...interface{}) {
} }
} }
// NotRegexp asserts that a specified regexp does not match a string. // NotRegexp asserts that a specified regexp does not match a string.
// //
// assert.NotRegexp(t, regexp.MustCompile("starts"), "it's starting") // assert.NotRegexp(t, regexp.MustCompile("starts"), "it's starting")
@ -361,6 +396,7 @@ func NotRegexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interf
} }
} }
// NotZero asserts that i is not the zero value for its type and returns the truth. // NotZero asserts that i is not the zero value for its type and returns the truth.
func NotZero(t TestingT, i interface{}, msgAndArgs ...interface{}) { func NotZero(t TestingT, i interface{}, msgAndArgs ...interface{}) {
if !assert.NotZero(t, i, msgAndArgs...) { if !assert.NotZero(t, i, msgAndArgs...) {
@ -368,6 +404,7 @@ func NotZero(t TestingT, i interface{}, msgAndArgs ...interface{}) {
} }
} }
// Panics asserts that the code inside the specified PanicTestFunc panics. // Panics asserts that the code inside the specified PanicTestFunc panics.
// //
// assert.Panics(t, func(){ // assert.Panics(t, func(){
@ -381,6 +418,7 @@ func Panics(t TestingT, f assert.PanicTestFunc, msgAndArgs ...interface{}) {
} }
} }
// Regexp asserts that a specified regexp matches a string. // Regexp asserts that a specified regexp matches a string.
// //
// assert.Regexp(t, regexp.MustCompile("start"), "it's starting") // assert.Regexp(t, regexp.MustCompile("start"), "it's starting")
@ -393,6 +431,7 @@ func Regexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface
} }
} }
// True asserts that the specified value is true. // True asserts that the specified value is true.
// //
// assert.True(t, myBool, "myBool should be true") // assert.True(t, myBool, "myBool should be true")
@ -404,6 +443,7 @@ func True(t TestingT, value bool, msgAndArgs ...interface{}) {
} }
} }
// WithinDuration asserts that the two times are within duration delta of each other. // WithinDuration asserts that the two times are within duration delta of each other.
// //
// assert.WithinDuration(t, time.Now(), time.Now(), 10*time.Second, "The difference should not be more than 10s") // assert.WithinDuration(t, time.Now(), time.Now(), 10*time.Second, "The difference should not be more than 10s")
@ -415,6 +455,7 @@ func WithinDuration(t TestingT, expected time.Time, actual time.Time, delta time
} }
} }
// Zero asserts that i is the zero value for its type and returns the truth. // Zero asserts that i is the zero value for its type and returns the truth.
func Zero(t TestingT, i interface{}, msgAndArgs ...interface{}) { func Zero(t TestingT, i interface{}, msgAndArgs ...interface{}) {
if !assert.Zero(t, i, msgAndArgs...) { if !assert.Zero(t, i, msgAndArgs...) {

View File

@ -6,17 +6,20 @@
package require package require
import ( import (
assert "github.com/stretchr/testify/assert" assert "github.com/stretchr/testify/assert"
http "net/http" http "net/http"
url "net/url" url "net/url"
time "time" time "time"
) )
// Condition uses a Comparison to assert a complex condition. // Condition uses a Comparison to assert a complex condition.
func (a *Assertions) Condition(comp assert.Comparison, msgAndArgs ...interface{}) { func (a *Assertions) Condition(comp assert.Comparison, msgAndArgs ...interface{}) {
Condition(a.t, comp, msgAndArgs...) Condition(a.t, comp, msgAndArgs...)
} }
// Contains asserts that the specified string, list(array, slice...) or map contains the // Contains asserts that the specified string, list(array, slice...) or map contains the
// specified substring or element. // specified substring or element.
// //
@ -29,6 +32,7 @@ func (a *Assertions) Contains(s interface{}, contains interface{}, msgAndArgs ..
Contains(a.t, s, contains, msgAndArgs...) Contains(a.t, s, contains, msgAndArgs...)
} }
// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either // Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either
// a slice or a channel with len == 0. // a slice or a channel with len == 0.
// //
@ -39,6 +43,7 @@ func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) {
Empty(a.t, object, msgAndArgs...) Empty(a.t, object, msgAndArgs...)
} }
// Equal asserts that two objects are equal. // Equal asserts that two objects are equal.
// //
// a.Equal(123, 123, "123 and 123 should be equal") // a.Equal(123, 123, "123 and 123 should be equal")
@ -48,17 +53,21 @@ func (a *Assertions) Equal(expected interface{}, actual interface{}, msgAndArgs
Equal(a.t, expected, actual, msgAndArgs...) Equal(a.t, expected, actual, msgAndArgs...)
} }
// EqualError asserts that a function returned an error (i.e. not `nil`) // EqualError asserts that a function returned an error (i.e. not `nil`)
// and that it is equal to the provided error. // and that it is equal to the provided error.
// //
// actualObj, err := SomeFunction() // actualObj, err := SomeFunction()
// a.EqualError(err, expectedErrorString, "An error was expected") // if assert.Error(t, err, "An error was expected") {
// assert.Equal(t, err, expectedError)
// }
// //
// Returns whether the assertion was successful (true) or not (false). // Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) EqualError(theError error, errString string, msgAndArgs ...interface{}) { func (a *Assertions) EqualError(theError error, errString string, msgAndArgs ...interface{}) {
EqualError(a.t, theError, errString, msgAndArgs...) EqualError(a.t, theError, errString, msgAndArgs...)
} }
// EqualValues asserts that two objects are equal or convertable to the same types // EqualValues asserts that two objects are equal or convertable to the same types
// and equal. // and equal.
// //
@ -69,6 +78,7 @@ func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAn
EqualValues(a.t, expected, actual, msgAndArgs...) EqualValues(a.t, expected, actual, msgAndArgs...)
} }
// Error asserts that a function returned an error (i.e. not `nil`). // Error asserts that a function returned an error (i.e. not `nil`).
// //
// actualObj, err := SomeFunction() // actualObj, err := SomeFunction()
@ -81,6 +91,7 @@ func (a *Assertions) Error(err error, msgAndArgs ...interface{}) {
Error(a.t, err, msgAndArgs...) Error(a.t, err, msgAndArgs...)
} }
// Exactly asserts that two objects are equal is value and type. // Exactly asserts that two objects are equal is value and type.
// //
// a.Exactly(int32(123), int64(123), "123 and 123 should NOT be equal") // a.Exactly(int32(123), int64(123), "123 and 123 should NOT be equal")
@ -90,16 +101,19 @@ func (a *Assertions) Exactly(expected interface{}, actual interface{}, msgAndArg
Exactly(a.t, expected, actual, msgAndArgs...) Exactly(a.t, expected, actual, msgAndArgs...)
} }
// Fail reports a failure through // Fail reports a failure through
func (a *Assertions) Fail(failureMessage string, msgAndArgs ...interface{}) { func (a *Assertions) Fail(failureMessage string, msgAndArgs ...interface{}) {
Fail(a.t, failureMessage, msgAndArgs...) Fail(a.t, failureMessage, msgAndArgs...)
} }
// FailNow fails test // FailNow fails test
func (a *Assertions) FailNow(failureMessage string, msgAndArgs ...interface{}) { func (a *Assertions) FailNow(failureMessage string, msgAndArgs ...interface{}) {
FailNow(a.t, failureMessage, msgAndArgs...) FailNow(a.t, failureMessage, msgAndArgs...)
} }
// False asserts that the specified value is false. // False asserts that the specified value is false.
// //
// a.False(myBool, "myBool should be false") // a.False(myBool, "myBool should be false")
@ -109,6 +123,7 @@ func (a *Assertions) False(value bool, msgAndArgs ...interface{}) {
False(a.t, value, msgAndArgs...) False(a.t, value, msgAndArgs...)
} }
// HTTPBodyContains asserts that a specified handler returns a // HTTPBodyContains asserts that a specified handler returns a
// body that contains a string. // body that contains a string.
// //
@ -119,6 +134,7 @@ func (a *Assertions) HTTPBodyContains(handler http.HandlerFunc, method string, u
HTTPBodyContains(a.t, handler, method, url, values, str) HTTPBodyContains(a.t, handler, method, url, values, str)
} }
// HTTPBodyNotContains asserts that a specified handler returns a // HTTPBodyNotContains asserts that a specified handler returns a
// body that does not contain a string. // body that does not contain a string.
// //
@ -129,6 +145,7 @@ func (a *Assertions) HTTPBodyNotContains(handler http.HandlerFunc, method string
HTTPBodyNotContains(a.t, handler, method, url, values, str) HTTPBodyNotContains(a.t, handler, method, url, values, str)
} }
// HTTPError asserts that a specified handler returns an error status code. // HTTPError asserts that a specified handler returns an error status code.
// //
// a.HTTPError(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} // a.HTTPError(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}}
@ -138,6 +155,7 @@ func (a *Assertions) HTTPError(handler http.HandlerFunc, method string, url stri
HTTPError(a.t, handler, method, url, values) HTTPError(a.t, handler, method, url, values)
} }
// HTTPRedirect asserts that a specified handler returns a redirect status code. // HTTPRedirect asserts that a specified handler returns a redirect status code.
// //
// a.HTTPRedirect(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} // a.HTTPRedirect(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}}
@ -147,6 +165,7 @@ func (a *Assertions) HTTPRedirect(handler http.HandlerFunc, method string, url s
HTTPRedirect(a.t, handler, method, url, values) HTTPRedirect(a.t, handler, method, url, values)
} }
// HTTPSuccess asserts that a specified handler returns a success status code. // HTTPSuccess asserts that a specified handler returns a success status code.
// //
// a.HTTPSuccess(myHandler, "POST", "http://www.google.com", nil) // a.HTTPSuccess(myHandler, "POST", "http://www.google.com", nil)
@ -156,6 +175,7 @@ func (a *Assertions) HTTPSuccess(handler http.HandlerFunc, method string, url st
HTTPSuccess(a.t, handler, method, url, values) HTTPSuccess(a.t, handler, method, url, values)
} }
// Implements asserts that an object is implemented by the specified interface. // Implements asserts that an object is implemented by the specified interface.
// //
// a.Implements((*MyInterface)(nil), new(MyObject), "MyObject") // a.Implements((*MyInterface)(nil), new(MyObject), "MyObject")
@ -163,6 +183,7 @@ func (a *Assertions) Implements(interfaceObject interface{}, object interface{},
Implements(a.t, interfaceObject, object, msgAndArgs...) Implements(a.t, interfaceObject, object, msgAndArgs...)
} }
// InDelta asserts that the two numerals are within delta of each other. // InDelta asserts that the two numerals are within delta of each other.
// //
// a.InDelta(math.Pi, (22 / 7.0), 0.01) // a.InDelta(math.Pi, (22 / 7.0), 0.01)
@ -172,11 +193,13 @@ func (a *Assertions) InDelta(expected interface{}, actual interface{}, delta flo
InDelta(a.t, expected, actual, delta, msgAndArgs...) InDelta(a.t, expected, actual, delta, msgAndArgs...)
} }
// InDeltaSlice is the same as InDelta, except it compares two slices. // InDeltaSlice is the same as InDelta, except it compares two slices.
func (a *Assertions) InDeltaSlice(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) { func (a *Assertions) InDeltaSlice(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) {
InDeltaSlice(a.t, expected, actual, delta, msgAndArgs...) InDeltaSlice(a.t, expected, actual, delta, msgAndArgs...)
} }
// InEpsilon asserts that expected and actual have a relative error less than epsilon // InEpsilon asserts that expected and actual have a relative error less than epsilon
// //
// Returns whether the assertion was successful (true) or not (false). // Returns whether the assertion was successful (true) or not (false).
@ -184,16 +207,19 @@ func (a *Assertions) InEpsilon(expected interface{}, actual interface{}, epsilon
InEpsilon(a.t, expected, actual, epsilon, msgAndArgs...) InEpsilon(a.t, expected, actual, epsilon, msgAndArgs...)
} }
// InEpsilonSlice is the same as InEpsilon, except it compares each value from two slices.
func (a *Assertions) InEpsilonSlice(expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) { // InEpsilonSlice is the same as InEpsilon, except it compares two slices.
InEpsilonSlice(a.t, expected, actual, epsilon, msgAndArgs...) func (a *Assertions) InEpsilonSlice(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) {
InEpsilonSlice(a.t, expected, actual, delta, msgAndArgs...)
} }
// IsType asserts that the specified objects are of the same type. // IsType asserts that the specified objects are of the same type.
func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAndArgs ...interface{}) { func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAndArgs ...interface{}) {
IsType(a.t, expectedType, object, msgAndArgs...) IsType(a.t, expectedType, object, msgAndArgs...)
} }
// JSONEq asserts that two JSON strings are equivalent. // JSONEq asserts that two JSON strings are equivalent.
// //
// a.JSONEq(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) // a.JSONEq(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`)
@ -203,6 +229,7 @@ func (a *Assertions) JSONEq(expected string, actual string, msgAndArgs ...interf
JSONEq(a.t, expected, actual, msgAndArgs...) JSONEq(a.t, expected, actual, msgAndArgs...)
} }
// Len asserts that the specified object has specific length. // Len asserts that the specified object has specific length.
// Len also fails if the object has a type that len() not accept. // Len also fails if the object has a type that len() not accept.
// //
@ -213,6 +240,7 @@ func (a *Assertions) Len(object interface{}, length int, msgAndArgs ...interface
Len(a.t, object, length, msgAndArgs...) Len(a.t, object, length, msgAndArgs...)
} }
// Nil asserts that the specified object is nil. // Nil asserts that the specified object is nil.
// //
// a.Nil(err, "err should be nothing") // a.Nil(err, "err should be nothing")
@ -222,6 +250,7 @@ func (a *Assertions) Nil(object interface{}, msgAndArgs ...interface{}) {
Nil(a.t, object, msgAndArgs...) Nil(a.t, object, msgAndArgs...)
} }
// NoError asserts that a function returned no error (i.e. `nil`). // NoError asserts that a function returned no error (i.e. `nil`).
// //
// actualObj, err := SomeFunction() // actualObj, err := SomeFunction()
@ -234,6 +263,7 @@ func (a *Assertions) NoError(err error, msgAndArgs ...interface{}) {
NoError(a.t, err, msgAndArgs...) NoError(a.t, err, msgAndArgs...)
} }
// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the // NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the
// specified substring or element. // specified substring or element.
// //
@ -246,6 +276,7 @@ func (a *Assertions) NotContains(s interface{}, contains interface{}, msgAndArgs
NotContains(a.t, s, contains, msgAndArgs...) NotContains(a.t, s, contains, msgAndArgs...)
} }
// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either // NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either
// a slice or a channel with len == 0. // a slice or a channel with len == 0.
// //
@ -258,6 +289,7 @@ func (a *Assertions) NotEmpty(object interface{}, msgAndArgs ...interface{}) {
NotEmpty(a.t, object, msgAndArgs...) NotEmpty(a.t, object, msgAndArgs...)
} }
// NotEqual asserts that the specified values are NOT equal. // NotEqual asserts that the specified values are NOT equal.
// //
// a.NotEqual(obj1, obj2, "two objects shouldn't be equal") // a.NotEqual(obj1, obj2, "two objects shouldn't be equal")
@ -267,6 +299,7 @@ func (a *Assertions) NotEqual(expected interface{}, actual interface{}, msgAndAr
NotEqual(a.t, expected, actual, msgAndArgs...) NotEqual(a.t, expected, actual, msgAndArgs...)
} }
// NotNil asserts that the specified object is not nil. // NotNil asserts that the specified object is not nil.
// //
// a.NotNil(err, "err should be something") // a.NotNil(err, "err should be something")
@ -276,6 +309,7 @@ func (a *Assertions) NotNil(object interface{}, msgAndArgs ...interface{}) {
NotNil(a.t, object, msgAndArgs...) NotNil(a.t, object, msgAndArgs...)
} }
// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic. // NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic.
// //
// a.NotPanics(func(){ // a.NotPanics(func(){
@ -287,6 +321,7 @@ func (a *Assertions) NotPanics(f assert.PanicTestFunc, msgAndArgs ...interface{}
NotPanics(a.t, f, msgAndArgs...) NotPanics(a.t, f, msgAndArgs...)
} }
// NotRegexp asserts that a specified regexp does not match a string. // NotRegexp asserts that a specified regexp does not match a string.
// //
// a.NotRegexp(regexp.MustCompile("starts"), "it's starting") // a.NotRegexp(regexp.MustCompile("starts"), "it's starting")
@ -297,11 +332,13 @@ func (a *Assertions) NotRegexp(rx interface{}, str interface{}, msgAndArgs ...in
NotRegexp(a.t, rx, str, msgAndArgs...) NotRegexp(a.t, rx, str, msgAndArgs...)
} }
// NotZero asserts that i is not the zero value for its type and returns the truth. // NotZero asserts that i is not the zero value for its type and returns the truth.
func (a *Assertions) NotZero(i interface{}, msgAndArgs ...interface{}) { func (a *Assertions) NotZero(i interface{}, msgAndArgs ...interface{}) {
NotZero(a.t, i, msgAndArgs...) NotZero(a.t, i, msgAndArgs...)
} }
// Panics asserts that the code inside the specified PanicTestFunc panics. // Panics asserts that the code inside the specified PanicTestFunc panics.
// //
// a.Panics(func(){ // a.Panics(func(){
@ -313,6 +350,7 @@ func (a *Assertions) Panics(f assert.PanicTestFunc, msgAndArgs ...interface{}) {
Panics(a.t, f, msgAndArgs...) Panics(a.t, f, msgAndArgs...)
} }
// Regexp asserts that a specified regexp matches a string. // Regexp asserts that a specified regexp matches a string.
// //
// a.Regexp(regexp.MustCompile("start"), "it's starting") // a.Regexp(regexp.MustCompile("start"), "it's starting")
@ -323,6 +361,7 @@ func (a *Assertions) Regexp(rx interface{}, str interface{}, msgAndArgs ...inter
Regexp(a.t, rx, str, msgAndArgs...) Regexp(a.t, rx, str, msgAndArgs...)
} }
// True asserts that the specified value is true. // True asserts that the specified value is true.
// //
// a.True(myBool, "myBool should be true") // a.True(myBool, "myBool should be true")
@ -332,6 +371,7 @@ func (a *Assertions) True(value bool, msgAndArgs ...interface{}) {
True(a.t, value, msgAndArgs...) True(a.t, value, msgAndArgs...)
} }
// WithinDuration asserts that the two times are within duration delta of each other. // WithinDuration asserts that the two times are within duration delta of each other.
// //
// a.WithinDuration(time.Now(), time.Now(), 10*time.Second, "The difference should not be more than 10s") // a.WithinDuration(time.Now(), time.Now(), 10*time.Second, "The difference should not be more than 10s")
@ -341,6 +381,7 @@ func (a *Assertions) WithinDuration(expected time.Time, actual time.Time, delta
WithinDuration(a.t, expected, actual, delta, msgAndArgs...) WithinDuration(a.t, expected, actual, delta, msgAndArgs...)
} }
// Zero asserts that i is the zero value for its type and returns the truth. // Zero asserts that i is the zero value for its type and returns the truth.
func (a *Assertions) Zero(i interface{}, msgAndArgs ...interface{}) { func (a *Assertions) Zero(i interface{}, msgAndArgs ...interface{}) {
Zero(a.t, i, msgAndArgs...) Zero(a.t, i, msgAndArgs...)

View File

@ -27,7 +27,17 @@ func Do(ctx context.Context, client *http.Client, req *http.Request) (*http.Resp
if client == nil { if client == nil {
client = http.DefaultClient client = http.DefaultClient
} }
return client.Do(req.WithContext(ctx)) resp, err := client.Do(req.WithContext(ctx))
// If we got an error, and the context has been canceled,
// the context's error is probably more useful.
if err != nil {
select {
case <-ctx.Done():
err = ctx.Err()
default:
}
}
return resp, err
} }
// Get issues a GET request via the Do function. // Get issues a GET request via the Do function.

View File

@ -53,13 +53,13 @@ const (
) )
func (p *clientConnPool) getClientConn(req *http.Request, addr string, dialOnMiss bool) (*ClientConn, error) { func (p *clientConnPool) getClientConn(req *http.Request, addr string, dialOnMiss bool) (*ClientConn, error) {
if req.Close && dialOnMiss { if isConnectionCloseRequest(req) && dialOnMiss {
// It gets its own connection. // It gets its own connection.
cc, err := p.t.dialClientConn(addr) const singleUse = true
cc, err := p.t.dialClientConn(addr, singleUse)
if err != nil { if err != nil {
return nil, err return nil, err
} }
cc.singleUse = true
return cc, nil return cc, nil
} }
p.mu.Lock() p.mu.Lock()
@ -104,7 +104,8 @@ func (p *clientConnPool) getStartDialLocked(addr string) *dialCall {
// run in its own goroutine. // run in its own goroutine.
func (c *dialCall) dial(addr string) { func (c *dialCall) dial(addr string) {
c.res, c.err = c.p.t.dialClientConn(addr) const singleUse = false // shared conn
c.res, c.err = c.p.t.dialClientConn(addr, singleUse)
close(c.done) close(c.done)
c.p.mu.Lock() c.p.mu.Lock()

View File

@ -64,9 +64,17 @@ func (e ConnectionError) Error() string { return fmt.Sprintf("connection error:
type StreamError struct { type StreamError struct {
StreamID uint32 StreamID uint32
Code ErrCode Code ErrCode
Cause error // optional additional detail
}
func streamError(id uint32, code ErrCode) StreamError {
return StreamError{StreamID: id, Code: code}
} }
func (e StreamError) Error() string { func (e StreamError) Error() string {
if e.Cause != nil {
return fmt.Sprintf("stream error: stream ID %d; %v; %v", e.StreamID, e.Code, e.Cause)
}
return fmt.Sprintf("stream error: stream ID %d; %v", e.StreamID, e.Code) return fmt.Sprintf("stream error: stream ID %d; %v", e.StreamID, e.Code)
} }

View File

@ -594,6 +594,7 @@ func parseDataFrame(fh FrameHeader, payload []byte) (Frame, error) {
var ( var (
errStreamID = errors.New("invalid stream ID") errStreamID = errors.New("invalid stream ID")
errDepStreamID = errors.New("invalid dependent stream ID") errDepStreamID = errors.New("invalid dependent stream ID")
errPadLength = errors.New("pad length too large")
) )
func validStreamIDOrZero(streamID uint32) bool { func validStreamIDOrZero(streamID uint32) bool {
@ -607,18 +608,40 @@ func validStreamID(streamID uint32) bool {
// WriteData writes a DATA frame. // WriteData writes a DATA frame.
// //
// It will perform exactly one Write to the underlying Writer. // It will perform exactly one Write to the underlying Writer.
// It is the caller's responsibility to not call other Write methods concurrently. // It is the caller's responsibility not to violate the maximum frame size
// and to not call other Write methods concurrently.
func (f *Framer) WriteData(streamID uint32, endStream bool, data []byte) error { func (f *Framer) WriteData(streamID uint32, endStream bool, data []byte) error {
// TODO: ignoring padding for now. will add when somebody cares. return f.WriteDataPadded(streamID, endStream, data, nil)
}
// WriteData writes a DATA frame with optional padding.
//
// If pad is nil, the padding bit is not sent.
// The length of pad must not exceed 255 bytes.
//
// It will perform exactly one Write to the underlying Writer.
// It is the caller's responsibility not to violate the maximum frame size
// and to not call other Write methods concurrently.
func (f *Framer) WriteDataPadded(streamID uint32, endStream bool, data, pad []byte) error {
if !validStreamID(streamID) && !f.AllowIllegalWrites { if !validStreamID(streamID) && !f.AllowIllegalWrites {
return errStreamID return errStreamID
} }
if len(pad) > 255 {
return errPadLength
}
var flags Flags var flags Flags
if endStream { if endStream {
flags |= FlagDataEndStream flags |= FlagDataEndStream
} }
if pad != nil {
flags |= FlagDataPadded
}
f.startWrite(FrameData, flags, streamID) f.startWrite(FrameData, flags, streamID)
if pad != nil {
f.wbuf = append(f.wbuf, byte(len(pad)))
}
f.wbuf = append(f.wbuf, data...) f.wbuf = append(f.wbuf, data...)
f.wbuf = append(f.wbuf, pad...)
return f.endWrite() return f.endWrite()
} }
@ -714,7 +737,7 @@ func (f *Framer) WriteSettings(settings ...Setting) error {
return f.endWrite() return f.endWrite()
} }
// WriteSettings writes an empty SETTINGS frame with the ACK bit set. // WriteSettingsAck writes an empty SETTINGS frame with the ACK bit set.
// //
// It will perform exactly one Write to the underlying Writer. // It will perform exactly one Write to the underlying Writer.
// It is the caller's responsibility to not call other Write methods concurrently. // It is the caller's responsibility to not call other Write methods concurrently.
@ -840,7 +863,7 @@ func parseWindowUpdateFrame(fh FrameHeader, p []byte) (Frame, error) {
if fh.StreamID == 0 { if fh.StreamID == 0 {
return nil, ConnectionError(ErrCodeProtocol) return nil, ConnectionError(ErrCodeProtocol)
} }
return nil, StreamError{fh.StreamID, ErrCodeProtocol} return nil, streamError(fh.StreamID, ErrCodeProtocol)
} }
return &WindowUpdateFrame{ return &WindowUpdateFrame{
FrameHeader: fh, FrameHeader: fh,
@ -921,7 +944,7 @@ func parseHeadersFrame(fh FrameHeader, p []byte) (_ Frame, err error) {
} }
} }
if len(p)-int(padLength) <= 0 { if len(p)-int(padLength) <= 0 {
return nil, StreamError{fh.StreamID, ErrCodeProtocol} return nil, streamError(fh.StreamID, ErrCodeProtocol)
} }
hf.headerFragBuf = p[:len(p)-int(padLength)] hf.headerFragBuf = p[:len(p)-int(padLength)]
return hf, nil return hf, nil
@ -1396,6 +1419,9 @@ func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) {
hdec.SetEmitEnabled(true) hdec.SetEmitEnabled(true)
hdec.SetMaxStringLength(fr.maxHeaderStringLen()) hdec.SetMaxStringLength(fr.maxHeaderStringLen())
hdec.SetEmitFunc(func(hf hpack.HeaderField) { hdec.SetEmitFunc(func(hf hpack.HeaderField) {
if VerboseLogs && logFrameReads {
log.Printf("http2: decoded hpack field %+v", hf)
}
if !httplex.ValidHeaderFieldValue(hf.Value) { if !httplex.ValidHeaderFieldValue(hf.Value) {
invalid = headerFieldValueError(hf.Value) invalid = headerFieldValueError(hf.Value)
} }
@ -1454,11 +1480,17 @@ func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) {
} }
if invalid != nil { if invalid != nil {
fr.errDetail = invalid fr.errDetail = invalid
return nil, StreamError{mh.StreamID, ErrCodeProtocol} if VerboseLogs {
log.Printf("http2: invalid header: %v", invalid)
}
return nil, StreamError{mh.StreamID, ErrCodeProtocol, invalid}
} }
if err := mh.checkPseudos(); err != nil { if err := mh.checkPseudos(); err != nil {
fr.errDetail = err fr.errDetail = err
return nil, StreamError{mh.StreamID, ErrCodeProtocol} if VerboseLogs {
log.Printf("http2: invalid pseudo headers: %v", err)
}
return nil, StreamError{mh.StreamID, ErrCodeProtocol, err}
} }
return mh, nil return mh, nil
} }

View File

@ -39,6 +39,13 @@ type clientTrace httptrace.ClientTrace
func reqContext(r *http.Request) context.Context { return r.Context() } func reqContext(r *http.Request) context.Context { return r.Context() }
func (t *Transport) idleConnTimeout() time.Duration {
if t.t1 != nil {
return t.t1.IdleConnTimeout
}
return 0
}
func setResponseUncompressed(res *http.Response) { res.Uncompressed = true } func setResponseUncompressed(res *http.Response) { res.Uncompressed = true }
func traceGotConn(req *http.Request, cc *ClientConn) { func traceGotConn(req *http.Request, cc *ClientConn) {
@ -92,3 +99,8 @@ func requestTrace(req *http.Request) *clientTrace {
trace := httptrace.ContextClientTrace(req.Context()) trace := httptrace.ContextClientTrace(req.Context())
return (*clientTrace)(trace) return (*clientTrace)(trace)
} }
// Ping sends a PING frame to the server and waits for the ack.
func (cc *ClientConn) Ping(ctx context.Context) error {
return cc.ping(ctx)
}

36
vendor/golang.org/x/net/http2/go17_not18.go generated vendored Normal file
View File

@ -0,0 +1,36 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build go1.7,!go1.8
package http2
import "crypto/tls"
// temporary copy of Go 1.7's private tls.Config.clone:
func cloneTLSConfig(c *tls.Config) *tls.Config {
return &tls.Config{
Rand: c.Rand,
Time: c.Time,
Certificates: c.Certificates,
NameToCertificate: c.NameToCertificate,
GetCertificate: c.GetCertificate,
RootCAs: c.RootCAs,
NextProtos: c.NextProtos,
ServerName: c.ServerName,
ClientAuth: c.ClientAuth,
ClientCAs: c.ClientCAs,
InsecureSkipVerify: c.InsecureSkipVerify,
CipherSuites: c.CipherSuites,
PreferServerCipherSuites: c.PreferServerCipherSuites,
SessionTicketsDisabled: c.SessionTicketsDisabled,
SessionTicketKey: c.SessionTicketKey,
ClientSessionCache: c.ClientSessionCache,
MinVersion: c.MinVersion,
MaxVersion: c.MaxVersion,
CurvePreferences: c.CurvePreferences,
DynamicRecordSizingDisabled: c.DynamicRecordSizingDisabled,
Renegotiation: c.Renegotiation,
}
}

11
vendor/golang.org/x/net/http2/go18.go generated vendored Normal file
View File

@ -0,0 +1,11 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build go1.8
package http2
import "crypto/tls"
func cloneTLSConfig(c *tls.Config) *tls.Config { return c.Clone() }

View File

@ -57,7 +57,7 @@ func (hf HeaderField) String() string {
return fmt.Sprintf("header field %q = %q%s", hf.Name, hf.Value, suffix) return fmt.Sprintf("header field %q = %q%s", hf.Name, hf.Value, suffix)
} }
// Size returns the size of an entry per RFC 7540 section 5.2. // Size returns the size of an entry per RFC 7541 section 4.1.
func (hf HeaderField) Size() uint32 { func (hf HeaderField) Size() uint32 {
// http://http2.github.io/http2-spec/compression.html#rfc.section.4.1 // http://http2.github.io/http2-spec/compression.html#rfc.section.4.1
// "The size of the dynamic table is the sum of the size of // "The size of the dynamic table is the sum of the size of

View File

@ -13,6 +13,7 @@
// See https://http2.github.io/ for more information on HTTP/2. // See https://http2.github.io/ for more information on HTTP/2.
// //
// See https://http2.golang.org/ for a test server running this code. // See https://http2.golang.org/ for a test server running this code.
//
package http2 package http2
import ( import (
@ -342,10 +343,23 @@ func (s *sorter) Keys(h http.Header) []string {
} }
func (s *sorter) SortStrings(ss []string) { func (s *sorter) SortStrings(ss []string) {
// Our sorter works on s.v, which sorter owners, so // Our sorter works on s.v, which sorter owns, so
// stash it away while we sort the user's buffer. // stash it away while we sort the user's buffer.
save := s.v save := s.v
s.v = ss s.v = ss
sort.Sort(s) sort.Sort(s)
s.v = save s.v = save
} }
// validPseudoPath reports whether v is a valid :path pseudo-header
// value. It must be either:
//
// *) a non-empty string starting with '/', but not with with "//",
// *) the string '*', for OPTIONS requests.
//
// For now this is only used a quick check for deciding when to clean
// up Opaque URLs before sending requests from the Transport.
// See golang.org/issue/16847
func validPseudoPath(v string) bool {
return (len(v) > 0 && v[0] == '/' && (len(v) == 1 || v[1] != '/')) || v == "*"
}

View File

@ -7,11 +7,16 @@
package http2 package http2
import ( import (
"crypto/tls"
"net" "net"
"net/http" "net/http"
"time"
) )
type contextContext interface{} type contextContext interface {
Done() <-chan struct{}
Err() error
}
type fakeContext struct{} type fakeContext struct{}
@ -49,3 +54,34 @@ func contextWithCancel(ctx contextContext) (_ contextContext, cancel func()) {
func requestWithContext(req *http.Request, ctx contextContext) *http.Request { func requestWithContext(req *http.Request, ctx contextContext) *http.Request {
return req return req
} }
// temporary copy of Go 1.6's private tls.Config.clone:
func cloneTLSConfig(c *tls.Config) *tls.Config {
return &tls.Config{
Rand: c.Rand,
Time: c.Time,
Certificates: c.Certificates,
NameToCertificate: c.NameToCertificate,
GetCertificate: c.GetCertificate,
RootCAs: c.RootCAs,
NextProtos: c.NextProtos,
ServerName: c.ServerName,
ClientAuth: c.ClientAuth,
ClientCAs: c.ClientCAs,
InsecureSkipVerify: c.InsecureSkipVerify,
CipherSuites: c.CipherSuites,
PreferServerCipherSuites: c.PreferServerCipherSuites,
SessionTicketsDisabled: c.SessionTicketsDisabled,
SessionTicketKey: c.SessionTicketKey,
ClientSessionCache: c.ClientSessionCache,
MinVersion: c.MinVersion,
MaxVersion: c.MaxVersion,
CurvePreferences: c.CurvePreferences,
}
}
func (cc *ClientConn) Ping(ctx contextContext) error {
return cc.ping(ctx)
}
func (t *Transport) idleConnTimeout() time.Duration { return 0 }

View File

@ -922,7 +922,7 @@ func (sc *serverConn) wroteFrame(res frameWriteResult) {
// state here anyway, after telling the peer // state here anyway, after telling the peer
// we're hanging up on them. // we're hanging up on them.
st.state = stateHalfClosedLocal // won't last long, but necessary for closeStream via resetStream st.state = stateHalfClosedLocal // won't last long, but necessary for closeStream via resetStream
errCancel := StreamError{st.id, ErrCodeCancel} errCancel := streamError(st.id, ErrCodeCancel)
sc.resetStream(errCancel) sc.resetStream(errCancel)
case stateHalfClosedRemote: case stateHalfClosedRemote:
sc.closeStream(st, errHandlerComplete) sc.closeStream(st, errHandlerComplete)
@ -1133,7 +1133,7 @@ func (sc *serverConn) processWindowUpdate(f *WindowUpdateFrame) error {
return nil return nil
} }
if !st.flow.add(int32(f.Increment)) { if !st.flow.add(int32(f.Increment)) {
return StreamError{f.StreamID, ErrCodeFlowControl} return streamError(f.StreamID, ErrCodeFlowControl)
} }
default: // connection-level flow control default: // connection-level flow control
if !sc.flow.add(int32(f.Increment)) { if !sc.flow.add(int32(f.Increment)) {
@ -1159,7 +1159,7 @@ func (sc *serverConn) processResetStream(f *RSTStreamFrame) error {
if st != nil { if st != nil {
st.gotReset = true st.gotReset = true
st.cancelCtx() st.cancelCtx()
sc.closeStream(st, StreamError{f.StreamID, f.ErrCode}) sc.closeStream(st, streamError(f.StreamID, f.ErrCode))
} }
return nil return nil
} }
@ -1176,6 +1176,10 @@ func (sc *serverConn) closeStream(st *stream, err error) {
} }
delete(sc.streams, st.id) delete(sc.streams, st.id)
if p := st.body; p != nil { if p := st.body; p != nil {
// Return any buffered unread bytes worth of conn-level flow control.
// See golang.org/issue/16481
sc.sendWindowUpdate(nil, p.Len())
p.CloseWithError(err) p.CloseWithError(err)
} }
st.cw.Close() // signals Handler's CloseNotifier, unblocks writes, etc st.cw.Close() // signals Handler's CloseNotifier, unblocks writes, etc
@ -1277,6 +1281,8 @@ func (sc *serverConn) processSettingInitialWindowSize(val uint32) error {
func (sc *serverConn) processData(f *DataFrame) error { func (sc *serverConn) processData(f *DataFrame) error {
sc.serveG.check() sc.serveG.check()
data := f.Data()
// "If a DATA frame is received whose stream is not in "open" // "If a DATA frame is received whose stream is not in "open"
// or "half closed (local)" state, the recipient MUST respond // or "half closed (local)" state, the recipient MUST respond
// with a stream error (Section 5.4.2) of type STREAM_CLOSED." // with a stream error (Section 5.4.2) of type STREAM_CLOSED."
@ -1288,33 +1294,56 @@ func (sc *serverConn) processData(f *DataFrame) error {
// the http.Handler returned, so it's done reading & // the http.Handler returned, so it's done reading &
// done writing). Try to stop the client from sending // done writing). Try to stop the client from sending
// more DATA. // more DATA.
return StreamError{id, ErrCodeStreamClosed}
// But still enforce their connection-level flow control,
// and return any flow control bytes since we're not going
// to consume them.
if sc.inflow.available() < int32(f.Length) {
return streamError(id, ErrCodeFlowControl)
}
// Deduct the flow control from inflow, since we're
// going to immediately add it back in
// sendWindowUpdate, which also schedules sending the
// frames.
sc.inflow.take(int32(f.Length))
sc.sendWindowUpdate(nil, int(f.Length)) // conn-level
return streamError(id, ErrCodeStreamClosed)
} }
if st.body == nil { if st.body == nil {
panic("internal error: should have a body in this state") panic("internal error: should have a body in this state")
} }
data := f.Data()
// Sender sending more than they'd declared? // Sender sending more than they'd declared?
if st.declBodyBytes != -1 && st.bodyBytes+int64(len(data)) > st.declBodyBytes { if st.declBodyBytes != -1 && st.bodyBytes+int64(len(data)) > st.declBodyBytes {
st.body.CloseWithError(fmt.Errorf("sender tried to send more than declared Content-Length of %d bytes", st.declBodyBytes)) st.body.CloseWithError(fmt.Errorf("sender tried to send more than declared Content-Length of %d bytes", st.declBodyBytes))
return StreamError{id, ErrCodeStreamClosed} return streamError(id, ErrCodeStreamClosed)
} }
if len(data) > 0 { if f.Length > 0 {
// Check whether the client has flow control quota. // Check whether the client has flow control quota.
if int(st.inflow.available()) < len(data) { if st.inflow.available() < int32(f.Length) {
return StreamError{id, ErrCodeFlowControl} return streamError(id, ErrCodeFlowControl)
} }
st.inflow.take(int32(len(data))) st.inflow.take(int32(f.Length))
if len(data) > 0 {
wrote, err := st.body.Write(data) wrote, err := st.body.Write(data)
if err != nil { if err != nil {
return StreamError{id, ErrCodeStreamClosed} return streamError(id, ErrCodeStreamClosed)
} }
if wrote != len(data) { if wrote != len(data) {
panic("internal error: bad Writer") panic("internal error: bad Writer")
} }
st.bodyBytes += int64(len(data)) st.bodyBytes += int64(len(data))
} }
// Return any padded flow control now, since we won't
// refund it later on body reads.
if pad := int32(f.Length) - int32(len(data)); pad > 0 {
sc.sendWindowUpdate32(nil, pad)
sc.sendWindowUpdate32(st, pad)
}
}
if f.StreamEnded() { if f.StreamEnded() {
st.endStream() st.endStream()
} }
@ -1417,14 +1446,14 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error {
// REFUSED_STREAM." // REFUSED_STREAM."
if sc.unackedSettings == 0 { if sc.unackedSettings == 0 {
// They should know better. // They should know better.
return StreamError{st.id, ErrCodeProtocol} return streamError(st.id, ErrCodeProtocol)
} }
// Assume it's a network race, where they just haven't // Assume it's a network race, where they just haven't
// received our last SETTINGS update. But actually // received our last SETTINGS update. But actually
// this can't happen yet, because we don't yet provide // this can't happen yet, because we don't yet provide
// a way for users to adjust server parameters at // a way for users to adjust server parameters at
// runtime. // runtime.
return StreamError{st.id, ErrCodeRefusedStream} return streamError(st.id, ErrCodeRefusedStream)
} }
rw, req, err := sc.newWriterAndRequest(st, f) rw, req, err := sc.newWriterAndRequest(st, f)
@ -1446,6 +1475,19 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error {
handler = new400Handler(err) handler = new400Handler(err)
} }
// The net/http package sets the read deadline from the
// http.Server.ReadTimeout during the TLS handshake, but then
// passes the connection off to us with the deadline already
// set. Disarm it here after the request headers are read, similar
// to how the http1 server works.
// Unlike http1, though, we never re-arm it yet, though.
// TODO(bradfitz): figure out golang.org/issue/14204
// (IdleTimeout) and how this relates. Maybe the default
// IdleTimeout is ReadTimeout.
if sc.hs.ReadTimeout != 0 {
sc.conn.SetReadDeadline(time.Time{})
}
go sc.runHandler(rw, req, handler) go sc.runHandler(rw, req, handler)
return nil return nil
} }
@ -1458,11 +1500,11 @@ func (st *stream) processTrailerHeaders(f *MetaHeadersFrame) error {
} }
st.gotTrailerHeader = true st.gotTrailerHeader = true
if !f.StreamEnded() { if !f.StreamEnded() {
return StreamError{st.id, ErrCodeProtocol} return streamError(st.id, ErrCodeProtocol)
} }
if len(f.PseudoFields()) > 0 { if len(f.PseudoFields()) > 0 {
return StreamError{st.id, ErrCodeProtocol} return streamError(st.id, ErrCodeProtocol)
} }
if st.trailer != nil { if st.trailer != nil {
for _, hf := range f.RegularFields() { for _, hf := range f.RegularFields() {
@ -1471,7 +1513,7 @@ func (st *stream) processTrailerHeaders(f *MetaHeadersFrame) error {
// TODO: send more details to the peer somehow. But http2 has // TODO: send more details to the peer somehow. But http2 has
// no way to send debug data at a stream level. Discuss with // no way to send debug data at a stream level. Discuss with
// HTTP folk. // HTTP folk.
return StreamError{st.id, ErrCodeProtocol} return streamError(st.id, ErrCodeProtocol)
} }
st.trailer[key] = append(st.trailer[key], hf.Value) st.trailer[key] = append(st.trailer[key], hf.Value)
} }
@ -1532,7 +1574,7 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res
isConnect := method == "CONNECT" isConnect := method == "CONNECT"
if isConnect { if isConnect {
if path != "" || scheme != "" || authority == "" { if path != "" || scheme != "" || authority == "" {
return nil, nil, StreamError{f.StreamID, ErrCodeProtocol} return nil, nil, streamError(f.StreamID, ErrCodeProtocol)
} }
} else if method == "" || path == "" || } else if method == "" || path == "" ||
(scheme != "https" && scheme != "http") { (scheme != "https" && scheme != "http") {
@ -1546,13 +1588,13 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res
// "All HTTP/2 requests MUST include exactly one valid // "All HTTP/2 requests MUST include exactly one valid
// value for the :method, :scheme, and :path // value for the :method, :scheme, and :path
// pseudo-header fields" // pseudo-header fields"
return nil, nil, StreamError{f.StreamID, ErrCodeProtocol} return nil, nil, streamError(f.StreamID, ErrCodeProtocol)
} }
bodyOpen := !f.StreamEnded() bodyOpen := !f.StreamEnded()
if method == "HEAD" && bodyOpen { if method == "HEAD" && bodyOpen {
// HEAD requests can't have bodies // HEAD requests can't have bodies
return nil, nil, StreamError{f.StreamID, ErrCodeProtocol} return nil, nil, streamError(f.StreamID, ErrCodeProtocol)
} }
var tlsState *tls.ConnectionState // nil if not scheme https var tlsState *tls.ConnectionState // nil if not scheme https
@ -1610,7 +1652,7 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res
var err error var err error
url_, err = url.ParseRequestURI(path) url_, err = url.ParseRequestURI(path)
if err != nil { if err != nil {
return nil, nil, StreamError{f.StreamID, ErrCodeProtocol} return nil, nil, streamError(f.StreamID, ErrCodeProtocol)
} }
requestURI = path requestURI = path
} }

View File

@ -10,12 +10,14 @@ import (
"bufio" "bufio"
"bytes" "bytes"
"compress/gzip" "compress/gzip"
"crypto/rand"
"crypto/tls" "crypto/tls"
"errors" "errors"
"fmt" "fmt"
"io" "io"
"io/ioutil" "io/ioutil"
"log" "log"
"math"
"net" "net"
"net/http" "net/http"
"sort" "sort"
@ -25,6 +27,7 @@ import (
"time" "time"
"golang.org/x/net/http2/hpack" "golang.org/x/net/http2/hpack"
"golang.org/x/net/idna"
"golang.org/x/net/lex/httplex" "golang.org/x/net/lex/httplex"
) )
@ -148,24 +151,29 @@ type ClientConn struct {
readerDone chan struct{} // closed on error readerDone chan struct{} // closed on error
readerErr error // set before readerDone is closed readerErr error // set before readerDone is closed
idleTimeout time.Duration // or 0 for never
idleTimer *time.Timer
mu sync.Mutex // guards following mu sync.Mutex // guards following
cond *sync.Cond // hold mu; broadcast on flow/closed changes cond *sync.Cond // hold mu; broadcast on flow/closed changes
flow flow // our conn-level flow control quota (cs.flow is per stream) flow flow // our conn-level flow control quota (cs.flow is per stream)
inflow flow // peer's conn-level flow control inflow flow // peer's conn-level flow control
closed bool closed bool
wantSettingsAck bool // we sent a SETTINGS frame and haven't heard back
goAway *GoAwayFrame // if non-nil, the GoAwayFrame we received goAway *GoAwayFrame // if non-nil, the GoAwayFrame we received
goAwayDebug string // goAway frame's debug data, retained as a string goAwayDebug string // goAway frame's debug data, retained as a string
streams map[uint32]*clientStream // client-initiated streams map[uint32]*clientStream // client-initiated
nextStreamID uint32 nextStreamID uint32
pings map[[8]byte]chan struct{} // in flight ping data to notification channel
bw *bufio.Writer bw *bufio.Writer
br *bufio.Reader br *bufio.Reader
fr *Framer fr *Framer
lastActive time.Time lastActive time.Time
// Settings from peer: (also guarded by mu)
// Settings from peer:
maxFrameSize uint32 maxFrameSize uint32
maxConcurrentStreams uint32 maxConcurrentStreams uint32
initialWindowSize uint32 initialWindowSize uint32
hbuf bytes.Buffer // HPACK encoder writes into this hbuf bytes.Buffer // HPACK encoder writes into this
henc *hpack.Encoder henc *hpack.Encoder
freeBuf [][]byte freeBuf [][]byte
@ -283,14 +291,18 @@ func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
// authorityAddr returns a given authority (a host/IP, or host:port / ip:port) // authorityAddr returns a given authority (a host/IP, or host:port / ip:port)
// and returns a host:port. The port 443 is added if needed. // and returns a host:port. The port 443 is added if needed.
func authorityAddr(scheme string, authority string) (addr string) { func authorityAddr(scheme string, authority string) (addr string) {
if _, _, err := net.SplitHostPort(authority); err == nil { host, port, err := net.SplitHostPort(authority)
return authority if err != nil { // authority didn't have a port
} port = "443"
port := "443"
if scheme == "http" { if scheme == "http" {
port = "80" port = "80"
} }
return net.JoinHostPort(authority, port) host = authority
}
if a, err := idna.ToASCII(host); err == nil {
host = a
}
return net.JoinHostPort(host, port)
} }
// RoundTripOpt is like RoundTrip, but takes options. // RoundTripOpt is like RoundTrip, but takes options.
@ -339,7 +351,7 @@ func shouldRetryRequest(req *http.Request, err error) bool {
return err == errClientConnUnusable return err == errClientConnUnusable
} }
func (t *Transport) dialClientConn(addr string) (*ClientConn, error) { func (t *Transport) dialClientConn(addr string, singleUse bool) (*ClientConn, error) {
host, _, err := net.SplitHostPort(addr) host, _, err := net.SplitHostPort(addr)
if err != nil { if err != nil {
return nil, err return nil, err
@ -348,13 +360,13 @@ func (t *Transport) dialClientConn(addr string) (*ClientConn, error) {
if err != nil { if err != nil {
return nil, err return nil, err
} }
return t.NewClientConn(tconn) return t.newClientConn(tconn, singleUse)
} }
func (t *Transport) newTLSConfig(host string) *tls.Config { func (t *Transport) newTLSConfig(host string) *tls.Config {
cfg := new(tls.Config) cfg := new(tls.Config)
if t.TLSClientConfig != nil { if t.TLSClientConfig != nil {
*cfg = *t.TLSClientConfig *cfg = *cloneTLSConfig(t.TLSClientConfig)
} }
if !strSliceContains(cfg.NextProtos, NextProtoTLS) { if !strSliceContains(cfg.NextProtos, NextProtoTLS) {
cfg.NextProtos = append([]string{NextProtoTLS}, cfg.NextProtos...) cfg.NextProtos = append([]string{NextProtoTLS}, cfg.NextProtos...)
@ -409,14 +421,10 @@ func (t *Transport) expectContinueTimeout() time.Duration {
} }
func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) { func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) {
if VerboseLogs { return t.newClientConn(c, false)
t.vlogf("http2: Transport creating client conn to %v", c.RemoteAddr())
}
if _, err := c.Write(clientPreface); err != nil {
t.vlogf("client preface write error: %v", err)
return nil, err
} }
func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, error) {
cc := &ClientConn{ cc := &ClientConn{
t: t, t: t,
tconn: c, tconn: c,
@ -426,7 +434,18 @@ func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) {
initialWindowSize: 65535, // spec default initialWindowSize: 65535, // spec default
maxConcurrentStreams: 1000, // "infinite", per spec. 1000 seems good enough. maxConcurrentStreams: 1000, // "infinite", per spec. 1000 seems good enough.
streams: make(map[uint32]*clientStream), streams: make(map[uint32]*clientStream),
singleUse: singleUse,
wantSettingsAck: true,
pings: make(map[[8]byte]chan struct{}),
} }
if d := t.idleConnTimeout(); d != 0 {
cc.idleTimeout = d
cc.idleTimer = time.AfterFunc(d, cc.onIdleTimeout)
}
if VerboseLogs {
t.vlogf("http2: Transport creating client conn %p to %v", cc, c.RemoteAddr())
}
cc.cond = sync.NewCond(&cc.mu) cc.cond = sync.NewCond(&cc.mu)
cc.flow.add(int32(initialWindowSize)) cc.flow.add(int32(initialWindowSize))
@ -454,6 +473,8 @@ func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) {
if max := t.maxHeaderListSize(); max != 0 { if max := t.maxHeaderListSize(); max != 0 {
initialSettings = append(initialSettings, Setting{ID: SettingMaxHeaderListSize, Val: max}) initialSettings = append(initialSettings, Setting{ID: SettingMaxHeaderListSize, Val: max})
} }
cc.bw.Write(clientPreface)
cc.fr.WriteSettings(initialSettings...) cc.fr.WriteSettings(initialSettings...)
cc.fr.WriteWindowUpdate(0, transportDefaultConnFlow) cc.fr.WriteWindowUpdate(0, transportDefaultConnFlow)
cc.inflow.add(transportDefaultConnFlow + initialWindowSize) cc.inflow.add(transportDefaultConnFlow + initialWindowSize)
@ -462,33 +483,6 @@ func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) {
return nil, cc.werr return nil, cc.werr
} }
// Read the obligatory SETTINGS frame
f, err := cc.fr.ReadFrame()
if err != nil {
return nil, err
}
sf, ok := f.(*SettingsFrame)
if !ok {
return nil, fmt.Errorf("expected settings frame, got: %T", f)
}
cc.fr.WriteSettingsAck()
cc.bw.Flush()
sf.ForeachSetting(func(s Setting) error {
switch s.ID {
case SettingMaxFrameSize:
cc.maxFrameSize = s.Val
case SettingMaxConcurrentStreams:
cc.maxConcurrentStreams = s.Val
case SettingInitialWindowSize:
cc.initialWindowSize = s.Val
default:
// TODO(bradfitz): handle more; at least SETTINGS_HEADER_TABLE_SIZE?
t.vlogf("Unhandled Setting: %v", s)
}
return nil
})
go cc.readLoop() go cc.readLoop()
return cc, nil return cc, nil
} }
@ -521,7 +515,17 @@ func (cc *ClientConn) canTakeNewRequestLocked() bool {
} }
return cc.goAway == nil && !cc.closed && return cc.goAway == nil && !cc.closed &&
int64(len(cc.streams)+1) < int64(cc.maxConcurrentStreams) && int64(len(cc.streams)+1) < int64(cc.maxConcurrentStreams) &&
cc.nextStreamID < 2147483647 cc.nextStreamID < math.MaxInt32
}
// onIdleTimeout is called from a time.AfterFunc goroutine. It will
// only be called when we're idle, but because we're coming from a new
// goroutine, there could be a new request coming in at the same time,
// so this simply calls the synchronized closeIfIdle to shut down this
// connection. The timer could just call closeIfIdle, but this is more
// clear.
func (cc *ClientConn) onIdleTimeout() {
cc.closeIfIdle()
} }
func (cc *ClientConn) closeIfIdle() { func (cc *ClientConn) closeIfIdle() {
@ -531,9 +535,13 @@ func (cc *ClientConn) closeIfIdle() {
return return
} }
cc.closed = true cc.closed = true
nextID := cc.nextStreamID
// TODO: do clients send GOAWAY too? maybe? Just Close: // TODO: do clients send GOAWAY too? maybe? Just Close:
cc.mu.Unlock() cc.mu.Unlock()
if VerboseLogs {
cc.vlogf("http2: Transport closing idle conn %p (forSingleUse=%v, maxStream=%v)", cc, cc.singleUse, nextID-2)
}
cc.tconn.Close() cc.tconn.Close()
} }
@ -616,13 +624,13 @@ func (cc *ClientConn) responseHeaderTimeout() time.Duration {
// Certain headers are special-cased as okay but not transmitted later. // Certain headers are special-cased as okay but not transmitted later.
func checkConnHeaders(req *http.Request) error { func checkConnHeaders(req *http.Request) error {
if v := req.Header.Get("Upgrade"); v != "" { if v := req.Header.Get("Upgrade"); v != "" {
return errors.New("http2: invalid Upgrade request header") return fmt.Errorf("http2: invalid Upgrade request header: %q", req.Header["Upgrade"])
} }
if v := req.Header.Get("Transfer-Encoding"); (v != "" && v != "chunked") || len(req.Header["Transfer-Encoding"]) > 1 { if vv := req.Header["Transfer-Encoding"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "chunked") {
return errors.New("http2: invalid Transfer-Encoding request header") return fmt.Errorf("http2: invalid Transfer-Encoding request header: %q", vv)
} }
if v := req.Header.Get("Connection"); (v != "" && v != "close" && v != "keep-alive") || len(req.Header["Connection"]) > 1 { if vv := req.Header["Connection"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "close" && vv[0] != "keep-alive") {
return errors.New("http2: invalid Connection request header") return fmt.Errorf("http2: invalid Connection request header: %q", vv)
} }
return nil return nil
} }
@ -635,19 +643,27 @@ func bodyAndLength(req *http.Request) (body io.Reader, contentLen int64) {
if req.ContentLength != 0 { if req.ContentLength != 0 {
return req.Body, req.ContentLength return req.Body, req.ContentLength
} }
// Don't try to sniff the size if they're doing an expect
// request (Issue 16002):
if req.Header.Get("Expect") == "100-continue" {
return req.Body, -1
}
// We have a body but a zero content length. Test to see if // We have a body but a zero content length. Test to see if
// it's actually zero or just unset. // it's actually zero or just unset.
var buf [1]byte var buf [1]byte
n, rerr := io.ReadFull(body, buf[:]) n, rerr := body.Read(buf[:])
if rerr != nil && rerr != io.EOF { if rerr != nil && rerr != io.EOF {
return errorReader{rerr}, -1 return errorReader{rerr}, -1
} }
if n == 1 { if n == 1 {
// Oh, guess there is data in this Body Reader after all. // Oh, guess there is data in this Body Reader after all.
// The ContentLength field just wasn't set. // The ContentLength field just wasn't set.
// Stich the Body back together again, re-attaching our // Stitch the Body back together again, re-attaching our
// consumed byte. // consumed byte.
if rerr == io.EOF {
return bytes.NewReader(buf[:]), 1
}
return io.MultiReader(bytes.NewReader(buf[:]), body), -1 return io.MultiReader(bytes.NewReader(buf[:]), body), -1
} }
// Body is actually zero bytes. // Body is actually zero bytes.
@ -658,6 +674,9 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) {
if err := checkConnHeaders(req); err != nil { if err := checkConnHeaders(req); err != nil {
return nil, err return nil, err
} }
if cc.idleTimer != nil {
cc.idleTimer.Stop()
}
trailers, err := commaSeparatedTrailers(req) trailers, err := commaSeparatedTrailers(req)
if err != nil { if err != nil {
@ -665,9 +684,6 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) {
} }
hasTrailers := trailers != "" hasTrailers := trailers != ""
body, contentLen := bodyAndLength(req)
hasBody := body != nil
cc.mu.Lock() cc.mu.Lock()
cc.lastActive = time.Now() cc.lastActive = time.Now()
if cc.closed || !cc.canTakeNewRequestLocked() { if cc.closed || !cc.canTakeNewRequestLocked() {
@ -675,6 +691,9 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) {
return nil, errClientConnUnusable return nil, errClientConnUnusable
} }
body, contentLen := bodyAndLength(req)
hasBody := body != nil
// TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere? // TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere?
var requestedGzip bool var requestedGzip bool
if !cc.t.disableCompression() && if !cc.t.disableCompression() &&
@ -747,9 +766,7 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) {
bodyWritten := false bodyWritten := false
ctx := reqContext(req) ctx := reqContext(req)
for { handleReadLoopResponse := func(re resAndError) (*http.Response, error) {
select {
case re := <-readLoopResCh:
res := re.res res := re.res
if re.err != nil || res.StatusCode > 299 { if re.err != nil || res.StatusCode > 299 {
// On error or status code 3xx, 4xx, 5xx, etc abort any // On error or status code 3xx, 4xx, 5xx, etc abort any
@ -771,6 +788,12 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) {
res.Request = req res.Request = req
res.TLS = cc.tlsState res.TLS = cc.tlsState
return res, nil return res, nil
}
for {
select {
case re := <-readLoopResCh:
return handleReadLoopResponse(re)
case <-respHeaderTimer: case <-respHeaderTimer:
cc.forgetStreamID(cs.ID) cc.forgetStreamID(cs.ID)
if !hasBody || bodyWritten { if !hasBody || bodyWritten {
@ -804,6 +827,12 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) {
// forgetStreamID. // forgetStreamID.
return nil, cs.resetErr return nil, cs.resetErr
case err := <-bodyWriter.resc: case err := <-bodyWriter.resc:
// Prefer the read loop's response, if available. Issue 16102.
select {
case re := <-readLoopResCh:
return handleReadLoopResponse(re)
default:
}
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -908,10 +937,11 @@ func (cs *clientStream) writeRequestBody(body io.Reader, bodyCloser io.Closer) (
err = cc.fr.WriteData(cs.ID, sentEnd, data) err = cc.fr.WriteData(cs.ID, sentEnd, data)
if err == nil { if err == nil {
// TODO(bradfitz): this flush is for latency, not bandwidth. // TODO(bradfitz): this flush is for latency, not bandwidth.
// Most requests won't need this. Make this opt-in or opt-out? // Most requests won't need this. Make this opt-in or
// Use some heuristic on the body type? Nagel-like timers? // opt-out? Use some heuristic on the body type? Nagel-like
// Based on 'n'? Only last chunk of this for loop, unless flow control // timers? Based on 'n'? Only last chunk of this for loop,
// tokens are low? For now, always: // unless flow control tokens are low? For now, always.
// If we change this, see comment below.
err = cc.bw.Flush() err = cc.bw.Flush()
} }
cc.wmu.Unlock() cc.wmu.Unlock()
@ -921,28 +951,33 @@ func (cs *clientStream) writeRequestBody(body io.Reader, bodyCloser io.Closer) (
} }
} }
cc.wmu.Lock() if sentEnd {
if !sentEnd { // Already sent END_STREAM (which implies we have no
// trailers) and flushed, because currently all
// WriteData frames above get a flush. So we're done.
return nil
}
var trls []byte var trls []byte
if hasTrailers { if hasTrailers {
cc.mu.Lock() cc.mu.Lock()
defer cc.mu.Unlock()
trls = cc.encodeTrailers(req) trls = cc.encodeTrailers(req)
cc.mu.Unlock()
} }
// Avoid forgetting to send an END_STREAM if the encoded cc.wmu.Lock()
// trailers are 0 bytes. Both results produce and END_STREAM. defer cc.wmu.Unlock()
// Two ways to send END_STREAM: either with trailers, or
// with an empty DATA frame.
if len(trls) > 0 { if len(trls) > 0 {
err = cc.writeHeaders(cs.ID, true, trls) err = cc.writeHeaders(cs.ID, true, trls)
} else { } else {
err = cc.fr.WriteData(cs.ID, true, nil) err = cc.fr.WriteData(cs.ID, true, nil)
} }
}
if ferr := cc.bw.Flush(); ferr != nil && err == nil { if ferr := cc.bw.Flush(); ferr != nil && err == nil {
err = ferr err = ferr
} }
cc.wmu.Unlock()
return err return err
} }
@ -995,6 +1030,26 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail
if host == "" { if host == "" {
host = req.URL.Host host = req.URL.Host
} }
host, err := httplex.PunycodeHostPort(host)
if err != nil {
return nil, err
}
var path string
if req.Method != "CONNECT" {
path = req.URL.RequestURI()
if !validPseudoPath(path) {
orig := path
path = strings.TrimPrefix(path, req.URL.Scheme+"://"+host)
if !validPseudoPath(path) {
if req.URL.Opaque != "" {
return nil, fmt.Errorf("invalid request :path %q from URL.Opaque = %q", orig, req.URL.Opaque)
} else {
return nil, fmt.Errorf("invalid request :path %q", orig)
}
}
}
}
// Check for any invalid headers and return an error before we // Check for any invalid headers and return an error before we
// potentially pollute our hpack state. (We want to be able to // potentially pollute our hpack state. (We want to be able to
@ -1018,8 +1073,8 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail
cc.writeHeader(":authority", host) cc.writeHeader(":authority", host)
cc.writeHeader(":method", req.Method) cc.writeHeader(":method", req.Method)
if req.Method != "CONNECT" { if req.Method != "CONNECT" {
cc.writeHeader(":path", req.URL.RequestURI()) cc.writeHeader(":path", path)
cc.writeHeader(":scheme", "https") cc.writeHeader(":scheme", req.URL.Scheme)
} }
if trailers != "" { if trailers != "" {
cc.writeHeader("trailer", trailers) cc.writeHeader("trailer", trailers)
@ -1146,6 +1201,9 @@ func (cc *ClientConn) streamByID(id uint32, andRemove bool) *clientStream {
if andRemove && cs != nil && !cc.closed { if andRemove && cs != nil && !cc.closed {
cc.lastActive = time.Now() cc.lastActive = time.Now()
delete(cc.streams, id) delete(cc.streams, id)
if len(cc.streams) == 0 && cc.idleTimer != nil {
cc.idleTimer.Reset(cc.idleTimeout)
}
close(cs.done) close(cs.done)
cc.cond.Broadcast() // wake up checkResetOrDone via clientStream.awaitFlowControl cc.cond.Broadcast() // wake up checkResetOrDone via clientStream.awaitFlowControl
} }
@ -1188,28 +1246,38 @@ func (e GoAwayError) Error() string {
e.LastStreamID, e.ErrCode, e.DebugData) e.LastStreamID, e.ErrCode, e.DebugData)
} }
func isEOFOrNetReadError(err error) bool {
if err == io.EOF {
return true
}
ne, ok := err.(*net.OpError)
return ok && ne.Op == "read"
}
func (rl *clientConnReadLoop) cleanup() { func (rl *clientConnReadLoop) cleanup() {
cc := rl.cc cc := rl.cc
defer cc.tconn.Close() defer cc.tconn.Close()
defer cc.t.connPool().MarkDead(cc) defer cc.t.connPool().MarkDead(cc)
defer close(cc.readerDone) defer close(cc.readerDone)
if cc.idleTimer != nil {
cc.idleTimer.Stop()
}
// Close any response bodies if the server closes prematurely. // Close any response bodies if the server closes prematurely.
// TODO: also do this if we've written the headers but not // TODO: also do this if we've written the headers but not
// gotten a response yet. // gotten a response yet.
err := cc.readerErr err := cc.readerErr
cc.mu.Lock() cc.mu.Lock()
if err == io.EOF { if cc.goAway != nil && isEOFOrNetReadError(err) {
if cc.goAway != nil {
err = GoAwayError{ err = GoAwayError{
LastStreamID: cc.goAway.LastStreamID, LastStreamID: cc.goAway.LastStreamID,
ErrCode: cc.goAway.ErrCode, ErrCode: cc.goAway.ErrCode,
DebugData: cc.goAwayDebug, DebugData: cc.goAwayDebug,
} }
} else { } else if err == io.EOF {
err = io.ErrUnexpectedEOF err = io.ErrUnexpectedEOF
} }
}
for _, cs := range rl.activeRes { for _, cs := range rl.activeRes {
cs.bufPipe.CloseWithError(err) cs.bufPipe.CloseWithError(err)
} }
@ -1228,15 +1296,20 @@ func (rl *clientConnReadLoop) cleanup() {
func (rl *clientConnReadLoop) run() error { func (rl *clientConnReadLoop) run() error {
cc := rl.cc cc := rl.cc
rl.closeWhenIdle = cc.t.disableKeepAlives() || cc.singleUse rl.closeWhenIdle = cc.t.disableKeepAlives() || cc.singleUse
gotReply := false // ever saw a reply gotReply := false // ever saw a HEADERS reply
gotSettings := false
for { for {
f, err := cc.fr.ReadFrame() f, err := cc.fr.ReadFrame()
if err != nil { if err != nil {
cc.vlogf("Transport readFrame error: (%T) %v", err, err) cc.vlogf("http2: Transport readFrame error on conn %p: (%T) %v", cc, err, err)
} }
if se, ok := err.(StreamError); ok { if se, ok := err.(StreamError); ok {
if cs := cc.streamByID(se.StreamID, true /*ended; remove it*/); cs != nil { if cs := cc.streamByID(se.StreamID, true /*ended; remove it*/); cs != nil {
rl.endStreamError(cs, cc.fr.errDetail) cs.cc.writeStreamReset(cs.ID, se.Code, err)
if se.Cause == nil {
se.Cause = cc.fr.errDetail
}
rl.endStreamError(cs, se)
} }
continue continue
} else if err != nil { } else if err != nil {
@ -1245,6 +1318,13 @@ func (rl *clientConnReadLoop) run() error {
if VerboseLogs { if VerboseLogs {
cc.vlogf("http2: Transport received %s", summarizeFrame(f)) cc.vlogf("http2: Transport received %s", summarizeFrame(f))
} }
if !gotSettings {
if _, ok := f.(*SettingsFrame); !ok {
cc.logf("protocol error: received %T before a SETTINGS frame", f)
return ConnectionError(ErrCodeProtocol)
}
gotSettings = true
}
maybeIdle := false // whether frame might transition us to idle maybeIdle := false // whether frame might transition us to idle
switch f := f.(type) { switch f := f.(type) {
@ -1273,6 +1353,9 @@ func (rl *clientConnReadLoop) run() error {
cc.logf("Transport: unhandled response frame type %T", f) cc.logf("Transport: unhandled response frame type %T", f)
} }
if err != nil { if err != nil {
if VerboseLogs {
cc.vlogf("http2: Transport conn %p received error from processing frame %v: %v", cc, summarizeFrame(f), err)
}
return err return err
} }
if rl.closeWhenIdle && gotReply && maybeIdle && len(rl.activeRes) == 0 { if rl.closeWhenIdle && gotReply && maybeIdle && len(rl.activeRes) == 0 {
@ -1522,10 +1605,27 @@ var errClosedResponseBody = errors.New("http2: response body closed")
func (b transportResponseBody) Close() error { func (b transportResponseBody) Close() error {
cs := b.cs cs := b.cs
if cs.bufPipe.Err() != io.EOF { cc := cs.cc
// TODO: write test for this
cs.cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) serverSentStreamEnd := cs.bufPipe.Err() == io.EOF
unread := cs.bufPipe.Len()
if unread > 0 || !serverSentStreamEnd {
cc.mu.Lock()
cc.wmu.Lock()
if !serverSentStreamEnd {
cc.fr.WriteRSTStream(cs.ID, ErrCodeCancel)
} }
// Return connection-level flow control.
if unread > 0 {
cc.inflow.add(int32(unread))
cc.fr.WriteWindowUpdate(0, uint32(unread))
}
cc.bw.Flush()
cc.wmu.Unlock()
cc.mu.Unlock()
}
cs.bufPipe.BreakWithError(errClosedResponseBody) cs.bufPipe.BreakWithError(errClosedResponseBody)
return nil return nil
} }
@ -1533,6 +1633,7 @@ func (b transportResponseBody) Close() error {
func (rl *clientConnReadLoop) processData(f *DataFrame) error { func (rl *clientConnReadLoop) processData(f *DataFrame) error {
cc := rl.cc cc := rl.cc
cs := cc.streamByID(f.StreamID, f.StreamEnded()) cs := cc.streamByID(f.StreamID, f.StreamEnded())
data := f.Data()
if cs == nil { if cs == nil {
cc.mu.Lock() cc.mu.Lock()
neverSent := cc.nextStreamID neverSent := cc.nextStreamID
@ -1546,10 +1647,22 @@ func (rl *clientConnReadLoop) processData(f *DataFrame) error {
// TODO: be stricter here? only silently ignore things which // TODO: be stricter here? only silently ignore things which
// we canceled, but not things which were closed normally // we canceled, but not things which were closed normally
// by the peer? Tough without accumulating too much state. // by the peer? Tough without accumulating too much state.
// But at least return their flow control:
if f.Length > 0 {
cc.mu.Lock()
cc.inflow.add(int32(f.Length))
cc.mu.Unlock()
cc.wmu.Lock()
cc.fr.WriteWindowUpdate(0, uint32(f.Length))
cc.bw.Flush()
cc.wmu.Unlock()
}
return nil return nil
} }
if data := f.Data(); len(data) > 0 { if f.Length > 0 {
if cs.bufPipe.b == nil { if len(data) > 0 && cs.bufPipe.b == nil {
// Data frame after it's already closed? // Data frame after it's already closed?
cc.logf("http2: Transport received DATA frame for closed stream; closing connection") cc.logf("http2: Transport received DATA frame for closed stream; closing connection")
return ConnectionError(ErrCodeProtocol) return ConnectionError(ErrCodeProtocol)
@ -1557,19 +1670,32 @@ func (rl *clientConnReadLoop) processData(f *DataFrame) error {
// Check connection-level flow control. // Check connection-level flow control.
cc.mu.Lock() cc.mu.Lock()
if cs.inflow.available() >= int32(len(data)) { if cs.inflow.available() >= int32(f.Length) {
cs.inflow.take(int32(len(data))) cs.inflow.take(int32(f.Length))
} else { } else {
cc.mu.Unlock() cc.mu.Unlock()
return ConnectionError(ErrCodeFlowControl) return ConnectionError(ErrCodeFlowControl)
} }
// Return any padded flow control now, since we won't
// refund it later on body reads.
if pad := int32(f.Length) - int32(len(data)); pad > 0 {
cs.inflow.add(pad)
cc.inflow.add(pad)
cc.wmu.Lock()
cc.fr.WriteWindowUpdate(0, uint32(pad))
cc.fr.WriteWindowUpdate(cs.ID, uint32(pad))
cc.bw.Flush()
cc.wmu.Unlock()
}
cc.mu.Unlock() cc.mu.Unlock()
if len(data) > 0 {
if _, err := cs.bufPipe.Write(data); err != nil { if _, err := cs.bufPipe.Write(data); err != nil {
rl.endStreamError(cs, err) rl.endStreamError(cs, err)
return err return err
} }
} }
}
if f.StreamEnded() { if f.StreamEnded() {
rl.endStream(cs) rl.endStream(cs)
@ -1593,9 +1719,14 @@ func (rl *clientConnReadLoop) endStreamError(cs *clientStream, err error) {
} }
cs.bufPipe.closeWithErrorAndCode(err, code) cs.bufPipe.closeWithErrorAndCode(err, code)
delete(rl.activeRes, cs.ID) delete(rl.activeRes, cs.ID)
if cs.req.Close || cs.req.Header.Get("Connection") == "close" { if isConnectionCloseRequest(cs.req) {
rl.closeWhenIdle = true rl.closeWhenIdle = true
} }
select {
case cs.resc <- resAndError{err: err}:
default:
}
} }
func (cs *clientStream) copyTrailers() { func (cs *clientStream) copyTrailers() {
@ -1623,18 +1754,39 @@ func (rl *clientConnReadLoop) processSettings(f *SettingsFrame) error {
cc := rl.cc cc := rl.cc
cc.mu.Lock() cc.mu.Lock()
defer cc.mu.Unlock() defer cc.mu.Unlock()
return f.ForeachSetting(func(s Setting) error {
if f.IsAck() {
if cc.wantSettingsAck {
cc.wantSettingsAck = false
return nil
}
return ConnectionError(ErrCodeProtocol)
}
err := f.ForeachSetting(func(s Setting) error {
switch s.ID { switch s.ID {
case SettingMaxFrameSize: case SettingMaxFrameSize:
cc.maxFrameSize = s.Val cc.maxFrameSize = s.Val
case SettingMaxConcurrentStreams: case SettingMaxConcurrentStreams:
cc.maxConcurrentStreams = s.Val cc.maxConcurrentStreams = s.Val
case SettingInitialWindowSize: case SettingInitialWindowSize:
// TODO: error if this is too large. // Values above the maximum flow-control
// window size of 2^31-1 MUST be treated as a
// connection error (Section 5.4.1) of type
// FLOW_CONTROL_ERROR.
if s.Val > math.MaxInt32 {
return ConnectionError(ErrCodeFlowControl)
}
// TODO: adjust flow control of still-open // Adjust flow control of currently-open
// frames by the difference of the old initial // frames by the difference of the old initial
// window size and this one. // window size and this one.
delta := int32(s.Val) - int32(cc.initialWindowSize)
for _, cs := range cc.streams {
cs.flow.add(delta)
}
cc.cond.Broadcast()
cc.initialWindowSize = s.Val cc.initialWindowSize = s.Val
default: default:
// TODO(bradfitz): handle more settings? SETTINGS_HEADER_TABLE_SIZE probably. // TODO(bradfitz): handle more settings? SETTINGS_HEADER_TABLE_SIZE probably.
@ -1642,6 +1794,16 @@ func (rl *clientConnReadLoop) processSettings(f *SettingsFrame) error {
} }
return nil return nil
}) })
if err != nil {
return err
}
cc.wmu.Lock()
defer cc.wmu.Unlock()
cc.fr.WriteSettingsAck()
cc.bw.Flush()
return cc.werr
} }
func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error { func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error {
@ -1678,7 +1840,7 @@ func (rl *clientConnReadLoop) processResetStream(f *RSTStreamFrame) error {
// which closes this, so there // which closes this, so there
// isn't a race. // isn't a race.
default: default:
err := StreamError{cs.ID, f.ErrCode} err := streamError(cs.ID, f.ErrCode)
cs.resetErr = err cs.resetErr = err
close(cs.peerReset) close(cs.peerReset)
cs.bufPipe.CloseWithError(err) cs.bufPipe.CloseWithError(err)
@ -1688,10 +1850,56 @@ func (rl *clientConnReadLoop) processResetStream(f *RSTStreamFrame) error {
return nil return nil
} }
// Ping sends a PING frame to the server and waits for the ack.
// Public implementation is in go17.go and not_go17.go
func (cc *ClientConn) ping(ctx contextContext) error {
c := make(chan struct{})
// Generate a random payload
var p [8]byte
for {
if _, err := rand.Read(p[:]); err != nil {
return err
}
cc.mu.Lock()
// check for dup before insert
if _, found := cc.pings[p]; !found {
cc.pings[p] = c
cc.mu.Unlock()
break
}
cc.mu.Unlock()
}
cc.wmu.Lock()
if err := cc.fr.WritePing(false, p); err != nil {
cc.wmu.Unlock()
return err
}
if err := cc.bw.Flush(); err != nil {
cc.wmu.Unlock()
return err
}
cc.wmu.Unlock()
select {
case <-c:
return nil
case <-ctx.Done():
return ctx.Err()
case <-cc.readerDone:
// connection closed
return cc.readerErr
}
}
func (rl *clientConnReadLoop) processPing(f *PingFrame) error { func (rl *clientConnReadLoop) processPing(f *PingFrame) error {
if f.IsAck() { if f.IsAck() {
// 6.7 PING: " An endpoint MUST NOT respond to PING frames cc := rl.cc
// containing this flag." cc.mu.Lock()
defer cc.mu.Unlock()
// If ack, notify listener if any
if c, ok := cc.pings[f.Data]; ok {
close(c)
delete(cc.pings, f.Data)
}
return nil return nil
} }
cc := rl.cc cc := rl.cc
@ -1715,8 +1923,10 @@ func (rl *clientConnReadLoop) processPushPromise(f *PushPromiseFrame) error {
} }
func (cc *ClientConn) writeStreamReset(streamID uint32, code ErrCode, err error) { func (cc *ClientConn) writeStreamReset(streamID uint32, code ErrCode, err error) {
// TODO: do something with err? send it as a debug frame to the peer? // TODO: map err to more interesting error codes, once the
// But that's only in GOAWAY. Invent a new frame type? Is there one already? // HTTP community comes up with some. But currently for
// RST_STREAM there's no equivalent to GOAWAY frame's debug
// data, and the error codes are all pretty vague ("cancel").
cc.wmu.Lock() cc.wmu.Lock()
cc.fr.WriteRSTStream(streamID, code) cc.fr.WriteRSTStream(streamID, code)
cc.bw.Flush() cc.bw.Flush()
@ -1866,3 +2076,9 @@ func (s bodyWriterState) scheduleBodyWrite() {
s.timer.Reset(s.delay) s.timer.Reset(s.delay)
} }
} }
// isConnectionCloseRequest reports whether req should use its own
// connection for a single request and then close the connection.
func isConnectionCloseRequest(req *http.Request) bool {
return req.Close || httplex.HeaderValuesContainsToken(req.Header["Connection"], "close")
}

View File

@ -10,8 +10,11 @@
package httplex package httplex
import ( import (
"net"
"strings" "strings"
"unicode/utf8" "unicode/utf8"
"golang.org/x/net/idna"
) )
var isTokenTable = [127]bool{ var isTokenTable = [127]bool{
@ -310,3 +313,39 @@ func ValidHeaderFieldValue(v string) bool {
} }
return true return true
} }
func isASCII(s string) bool {
for i := 0; i < len(s); i++ {
if s[i] >= utf8.RuneSelf {
return false
}
}
return true
}
// PunycodeHostPort returns the IDNA Punycode version
// of the provided "host" or "host:port" string.
func PunycodeHostPort(v string) (string, error) {
if isASCII(v) {
return v, nil
}
host, port, err := net.SplitHostPort(v)
if err != nil {
// The input 'v' argument was just a "host" argument,
// without a port. This error should not be returned
// to the caller.
host = v
port = ""
}
host, err = idna.ToASCII(host)
if err != nil {
// Non-UTF-8? Not representable in Punycode, in any
// case.
return "", err
}
if port == "" {
return host, nil
}
return net.JoinHostPort(host, port), nil
}

View File

@ -91,7 +91,7 @@ var DebugUseAfterFinish = false
// It returns two bools; the first indicates whether the page may be viewed at all, // It returns two bools; the first indicates whether the page may be viewed at all,
// and the second indicates whether sensitive events will be shown. // and the second indicates whether sensitive events will be shown.
// //
// AuthRequest may be replaced by a program to customise its authorisation requirements. // AuthRequest may be replaced by a program to customize its authorization requirements.
// //
// The default AuthRequest function returns (true, true) if and only if the request // The default AuthRequest function returns (true, true) if and only if the request
// comes from localhost/127.0.0.1/[::1]. // comes from localhost/127.0.0.1/[::1].
@ -333,7 +333,8 @@ func New(family, title string) Trace {
tr.ref() tr.ref()
tr.Family, tr.Title = family, title tr.Family, tr.Title = family, title
tr.Start = time.Now() tr.Start = time.Now()
tr.events = make([]event, 0, maxEventsPerTrace) tr.maxEvents = maxEventsPerTrace
tr.events = tr.eventsBuf[:0]
activeMu.RLock() activeMu.RLock()
s := activeTraces[tr.Family] s := activeTraces[tr.Family]
@ -650,8 +651,8 @@ type event struct {
Elapsed time.Duration // since previous event in trace Elapsed time.Duration // since previous event in trace
NewDay bool // whether this event is on a different day to the previous event NewDay bool // whether this event is on a different day to the previous event
Recyclable bool // whether this event was passed via LazyLog Recyclable bool // whether this event was passed via LazyLog
What interface{} // string or fmt.Stringer
Sensitive bool // whether this event contains sensitive information Sensitive bool // whether this event contains sensitive information
What interface{} // string or fmt.Stringer
} }
// WhenString returns a string representation of the elapsed time of the event. // WhenString returns a string representation of the elapsed time of the event.
@ -694,12 +695,15 @@ type trace struct {
// Append-only sequence of events (modulo discards). // Append-only sequence of events (modulo discards).
mu sync.RWMutex mu sync.RWMutex
events []event events []event
maxEvents int
refs int32 // how many buckets this is in refs int32 // how many buckets this is in
recycler func(interface{}) recycler func(interface{})
disc discarded // scratch space to avoid allocation disc discarded // scratch space to avoid allocation
finishStack []byte // where finish was called, if DebugUseAfterFinish is set finishStack []byte // where finish was called, if DebugUseAfterFinish is set
eventsBuf [4]event // preallocated buffer in case we only log a few events
} }
func (tr *trace) reset() { func (tr *trace) reset() {
@ -711,11 +715,15 @@ func (tr *trace) reset() {
tr.traceID = 0 tr.traceID = 0
tr.spanID = 0 tr.spanID = 0
tr.IsError = false tr.IsError = false
tr.maxEvents = 0
tr.events = nil tr.events = nil
tr.refs = 0 tr.refs = 0
tr.recycler = nil tr.recycler = nil
tr.disc = 0 tr.disc = 0
tr.finishStack = nil tr.finishStack = nil
for i := range tr.eventsBuf {
tr.eventsBuf[i] = event{}
}
} }
// delta returns the elapsed time since the last event or the trace start, // delta returns the elapsed time since the last event or the trace start,
@ -753,11 +761,11 @@ func (tr *trace) addEvent(x interface{}, recyclable, sensitive bool) {
e := event{When: time.Now(), What: x, Recyclable: recyclable, Sensitive: sensitive} e := event{When: time.Now(), What: x, Recyclable: recyclable, Sensitive: sensitive}
tr.mu.Lock() tr.mu.Lock()
e.Elapsed, e.NewDay = tr.delta(e.When) e.Elapsed, e.NewDay = tr.delta(e.When)
if len(tr.events) < cap(tr.events) { if len(tr.events) < tr.maxEvents {
tr.events = append(tr.events, e) tr.events = append(tr.events, e)
} else { } else {
// Discard the middle events. // Discard the middle events.
di := int((cap(tr.events) - 1) / 2) di := int((tr.maxEvents - 1) / 2)
if d, ok := tr.events[di].What.(*discarded); ok { if d, ok := tr.events[di].What.(*discarded); ok {
(*d)++ (*d)++
} else { } else {
@ -777,7 +785,7 @@ func (tr *trace) addEvent(x interface{}, recyclable, sensitive bool) {
go tr.recycler(tr.events[di+1].What) go tr.recycler(tr.events[di+1].What)
} }
copy(tr.events[di+1:], tr.events[di+2:]) copy(tr.events[di+1:], tr.events[di+2:])
tr.events[cap(tr.events)-1] = e tr.events[tr.maxEvents-1] = e
} }
tr.mu.Unlock() tr.mu.Unlock()
} }
@ -803,7 +811,7 @@ func (tr *trace) SetTraceInfo(traceID, spanID uint64) {
func (tr *trace) SetMaxEvents(m int) { func (tr *trace) SetMaxEvents(m int) {
// Always keep at least three events: first, discarded count, last. // Always keep at least three events: first, discarded count, last.
if len(tr.events) == 0 && m > 3 { if len(tr.events) == 0 && m > 3 {
tr.events = make([]event, 0, m) tr.maxEvents = m
} }
} }