Add structcheck, unused, and varcheck linters.

Warn on unused and dead code

Signed-off-by: Daniel Nephin <dnephin@gmail.com>
Daniel Nephin 2017-11-13 16:21:26 -05:00
parent 96e2f30250
commit f74862a0dd
35 changed files with 29 additions and 444 deletions
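For reference, a minimal sketch (not taken from this commit) of the kinds of declarations the three newly enabled linters report: varcheck flags package-level variables and constants that are never referenced, structcheck flags struct fields that nothing reads, and unused flags dead functions and types. The identifiers below are hypothetical.

package example

// varcheck: package-level value that is never referenced.
var unusedColor = "\x1b[31m"

// structcheck: field that no code in the package reads or writes.
type settings struct {
	debug bool
}

// unused: function with no callers anywhere in the package.
func dumpState() string {
	return "never called"
}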


@ -10,6 +10,10 @@
"WarnUnmatchedDirective": true, "WarnUnmatchedDirective": true,
"Enable": [ "Enable": [
"structcheck",
"unused",
"varcheck",
"gofmt", "gofmt",
"goimports", "goimports",
"golint", "golint",


@ -1 +0,0 @@
package archive


@ -29,16 +29,6 @@ import (
"google.golang.org/grpc" "google.golang.org/grpc"
) )
const usage = `
__ _ __ __ _
_________ ____ / /_____ _(_)___ ___ _________/ / _____/ /_ (_)___ ___
/ ___/ __ \/ __ \/ __/ __ ` + "`" + `/ / __ \/ _ \/ ___/ __ /_____/ ___/ __ \/ / __ ` + "`" + `__ \
/ /__/ /_/ / / / / /_/ /_/ / / / / / __/ / / /_/ /_____(__ ) / / / / / / / / /
\___/\____/_/ /_/\__/\__,_/_/_/ /_/\___/_/ \__,_/ /____/_/ /_/_/_/ /_/ /_/
shim for container lifecycle and reconnection
`
var (
debugFlag bool
namespaceFlag string


@ -236,16 +236,6 @@ func (w *worker) getID() string {
return fmt.Sprintf("%d-%d", w.id, w.count)
}
func (w *worker) cleanup(ctx context.Context, c containerd.Container) {
if err := c.Delete(ctx, containerd.WithSnapshotCleanup); err != nil {
if err == context.DeadlineExceeded {
return
}
w.failures++
logrus.WithError(err).Errorf("delete container %s", c.ID())
}
}
// cleanup cleans up any containers in the "stress" namespace before the test run
func cleanup(ctx context.Context, client *containerd.Client) error {
containers, err := client.Containers(ctx)


@ -8,7 +8,6 @@ import (
"net" "net"
"os" "os"
"os/signal" "os/signal"
"runtime"
"time" "time"
"google.golang.org/grpc/grpclog" "google.golang.org/grpc/grpclog"
@ -196,18 +195,3 @@ func setLevel(context *cli.Context, config *server.Config) error {
}
return nil
}
func dumpStacks() {
var (
buf []byte
stackSize int
)
bufferLen := 16384
for stackSize == len(buf) {
buf = make([]byte, bufferLen)
stackSize = runtime.Stack(buf, true)
bufferLen *= 2
}
buf = buf[:stackSize]
logrus.Infof("=== BEGIN goroutine stack dump ===\n%s\n=== END goroutine stack dump ===", buf)
}


@ -5,7 +5,9 @@ package main
import (
"context"
"os"
"runtime"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
"github.com/containerd/containerd/log"
@ -55,3 +57,18 @@ func handleSignals(ctx context.Context, signals chan os.Signal, serverC chan *se
}()
return done
}
func dumpStacks() {
var (
buf []byte
stackSize int
)
bufferLen := 16384
for stackSize == len(buf) {
buf = make([]byte, bufferLen)
stackSize = runtime.Stack(buf, true)
bufferLen *= 2
}
buf = buf[:stackSize]
logrus.Infof("=== BEGIN goroutine stack dump ===\n%s\n=== END goroutine stack dump ===", buf)
}
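The dumpStacks helper above retries with a doubled buffer until runtime.Stack writes fewer bytes than the buffer holds, so the goroutine dump is never truncated. A minimal sketch of how such a helper could be triggered at runtime — the SIGUSR1 hook and the watchForStackDump name are assumptions for illustration, not part of this commit:

package main

import (
	"os"
	"os/signal"

	"golang.org/x/sys/unix"
)

// watchForStackDump (hypothetical) calls dumpStacks whenever SIGUSR1 is
// received, so a running daemon can be inspected without restarting it.
func watchForStackDump() {
	signals := make(chan os.Signal, 1)
	signal.Notify(signals, unix.SIGUSR1)
	go func() {
		for range signals {
			dumpStacks()
		}
	}()
}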


@ -133,13 +133,6 @@ var pushCommand = cli.Command{
},
}
type pushStatus struct {
name string
started bool
written int64
total int64
}
type pushjobs struct {
jobs map[string]struct{}
ordered []string


@ -14,8 +14,6 @@ import (
"github.com/urfave/cli" "github.com/urfave/cli"
) )
const pipeRoot = `\\.\pipe`
func init() {
Command.Flags = append(Command.Flags, cli.StringSliceFlag{
Name: "layer",
@ -50,10 +48,6 @@ func withTTY(terminal bool) containerd.SpecOpts {
return containerd.WithTTY(int(size.Width), int(size.Height))
}
func setHostNetworking() containerd.SpecOpts {
return nil
}
func newContainer(ctx gocontext.Context, client *containerd.Client, context *cli.Context) (containerd.Container, error) {
var (
// ref = context.Args().First()


@ -1,91 +0,0 @@
package shim
import (
"io"
"net"
"os"
"sync"
"github.com/Microsoft/go-winio"
clog "github.com/containerd/containerd/log"
"github.com/pkg/errors"
)
func prepareStdio(stdin, stdout, stderr string, console bool) (*sync.WaitGroup, error) {
var wg sync.WaitGroup
if stdin != "" {
l, err := winio.ListenPipe(stdin, nil)
if err != nil {
return nil, errors.Wrapf(err, "failed to create stdin pipe %s", stdin)
}
defer func(l net.Listener) {
if err != nil {
l.Close()
}
}(l)
go func() {
c, err := l.Accept()
if err != nil {
clog.L.WithError(err).Errorf("failed to accept stdin connection on %s", stdin)
return
}
io.Copy(c, os.Stdin)
c.Close()
l.Close()
}()
}
if stdout != "" {
l, err := winio.ListenPipe(stdout, nil)
if err != nil {
return nil, errors.Wrapf(err, "failed to create stdin pipe %s", stdout)
}
defer func(l net.Listener) {
if err != nil {
l.Close()
}
}(l)
wg.Add(1)
go func() {
defer wg.Done()
c, err := l.Accept()
if err != nil {
clog.L.WithError(err).Errorf("failed to accept stdout connection on %s", stdout)
return
}
io.Copy(os.Stdout, c)
c.Close()
l.Close()
}()
}
if !console && stderr != "" {
l, err := winio.ListenPipe(stderr, nil)
if err != nil {
return nil, errors.Wrapf(err, "failed to create stderr pipe %s", stderr)
}
defer func(l net.Listener) {
if err != nil {
l.Close()
}
}(l)
wg.Add(1)
go func() {
defer wg.Done()
c, err := l.Accept()
if err != nil {
clog.L.WithError(err).Errorf("failed to accept stderr connection on %s", stderr)
return
}
io.Copy(os.Stderr, c)
c.Close()
l.Close()
}()
}
return &wg, nil
}


@ -1028,9 +1028,9 @@ func testUserNamespaces(t *testing.T, readonlyRootFS bool) {
WithUserNamespace(0, 1000, 10000),
)}
if readonlyRootFS {
opts = append(opts, withRemappedSnapshotView(id, image, 1000, 1000))
opts = append(opts, WithRemappedSnapshotView(id, image, 1000, 1000))
} else {
opts = append(opts, withRemappedSnapshot(id, image, 1000, 1000))
opts = append(opts, WithRemappedSnapshot(id, image, 1000, 1000))
}
container, err := client.NewContainer(ctx, id, opts...)


@ -437,7 +437,6 @@ func TestContainerCloseIO(t *testing.T) {
}
defer container.Delete(ctx, WithSnapshotCleanup)
const expected = "hello" + newLine
stdout := bytes.NewBuffer(nil)
r, w, err := os.Pipe()


@ -10,15 +10,6 @@ import (
"github.com/containerd/containerd/sys" "github.com/containerd/containerd/sys"
) )
func getStartTime(fi os.FileInfo) time.Time {
if st, ok := fi.Sys().(*syscall.Stat_t); ok {
return time.Unix(int64(sys.StatCtime(st).Sec),
int64(sys.StatCtime(st).Nsec))
}
return fi.ModTime()
}
func getATime(fi os.FileInfo) time.Time {
if st, ok := fi.Sys().(*syscall.Stat_t); ok {
return time.Unix(int64(sys.StatAtime(st).Sec),


@ -5,10 +5,6 @@ import (
"time" "time"
) )
func getStartTime(fi os.FileInfo) time.Time {
return fi.ModTime()
}
func getATime(fi os.FileInfo) time.Time {
return fi.ModTime()
}


@ -4,7 +4,6 @@ import (
"bufio" "bufio"
"fmt" "fmt"
"os" "os"
"sort"
"strconv" "strconv"
"strings" "strings"
) )
@ -55,12 +54,3 @@ func getMaps(pid int) (map[string]int, error) {
}
return smaps, nil
}
func keys(smaps map[string]int) []string {
var o []string
for k := range smaps {
o = append(o, k)
}
sort.Strings(o)
return o
}


@ -5,36 +5,12 @@ package fs
import (
"bytes"
"os"
"path/filepath"
"strings"
"syscall"
"github.com/containerd/continuity/sysx"
"github.com/pkg/errors"
)
// whiteouts are files with a special meaning for the layered filesystem.
// Docker uses AUFS whiteout files inside exported archives. In other
// filesystems these files are generated/handled on tar creation/extraction.
// whiteoutPrefix prefix means file is a whiteout. If this is followed by a
// filename this means that file has been removed from the base layer.
const whiteoutPrefix = ".wh."
// whiteoutMetaPrefix prefix means whiteout has a special meaning and is not
// for removing an actual file. Normally these files are excluded from exported
// archives.
const whiteoutMetaPrefix = whiteoutPrefix + whiteoutPrefix
// whiteoutLinkDir is a directory AUFS uses for storing hardlink links to other
// layers. Normally these should not go into exported archives and all changed
// hardlinks should be copied to the top layer.
const whiteoutLinkDir = whiteoutMetaPrefix + "plnk"
// whiteoutOpaqueDir file means directory has been made opaque - meaning
// readdir calls to this directory do not follow to lower layers.
const whiteoutOpaqueDir = whiteoutMetaPrefix + ".opq"
// detectDirDiff returns diff dir options if a directory could
// be found in the mount info for upper which is the direct
// diff with the provided lower directory
@ -45,26 +21,6 @@ func detectDirDiff(upper, lower string) *diffDirOptions {
return nil
}
func aufsMetadataSkip(path string) (skip bool, err error) {
skip, err = filepath.Match(string(os.PathSeparator)+whiteoutMetaPrefix+"*", path)
if err != nil {
skip = true
}
return
}
func aufsDeletedFile(root, path string, fi os.FileInfo) (string, error) {
f := filepath.Base(path)
// If there is a whiteout, then the file was removed
if strings.HasPrefix(f, whiteoutPrefix) {
originalFile := f[len(whiteoutPrefix):]
return filepath.Join(filepath.Dir(path), originalFile), nil
}
return "", nil
}
// compareSysStat returns whether the stats are equivalent,
// whether the files are considered the same file, and
// an error


@ -24,7 +24,6 @@ func TestRootPath(t *testing.T) {
name string
apply fstest.Applier
checks []RootCheck
scope func(string) (string, error)
}{
{
name: "SymlinkAbsolute",


@ -108,47 +108,3 @@ func toNodes(s []string) []Node {
}
return n
}
func newScanner(refs []string) *stringScanner {
return &stringScanner{
i: -1,
s: refs,
}
}
type stringScanner struct {
i int
s []string
}
func (ss *stringScanner) Next() bool {
ss.i++
return ss.i < len(ss.s)
}
func (ss *stringScanner) Node() Node {
return Node{
Key: ss.s[ss.i],
}
}
func (ss *stringScanner) Cleanup() error {
ss.s[ss.i] = ""
return nil
}
func (ss *stringScanner) Err() error {
return nil
}
func (ss *stringScanner) All() []Node {
remaining := make([]Node, 0, len(ss.s))
for _, s := range ss.s {
if s != "" {
remaining = append(remaining, Node{
Key: s,
})
}
}
return remaining
}


@ -40,8 +40,6 @@ func withExecArgs(s *specs.Process, args ...string) {
}
var (
withRemappedSnapshot = WithRemappedSnapshot
withRemappedSnapshotView = WithRemappedSnapshotView
withNewSnapshot = WithNewSnapshot
withImageConfig = WithImageConfig
)


@ -52,19 +52,3 @@ func withNewSnapshot(id string, i Image) NewContainerOpts {
return nil
}
}
func withRemappedSnapshot(id string, i Image, u, g uint32) NewContainerOpts {
return func(ctx context.Context, client *Client, c *containers.Container) error {
return nil
}
}
func withRemappedSnapshotView(id string, i Image, u, g uint32) NewContainerOpts {
return func(ctx context.Context, client *Client, c *containers.Container) error {
return nil
}
}
func withNoop(_ context.Context, _ *Client, _ *containers.Container, _ *specs.Spec) error {
return nil
}


@ -9,44 +9,10 @@ import (
"sync" "sync"
"syscall" "syscall"
"github.com/containerd/console"
"github.com/containerd/fifo" "github.com/containerd/fifo"
runc "github.com/containerd/go-runc" runc "github.com/containerd/go-runc"
) )
func copyConsole(ctx context.Context, console console.Console, stdin, stdout, stderr string, wg, cwg *sync.WaitGroup) error {
if stdin != "" {
in, err := fifo.OpenFifo(ctx, stdin, syscall.O_RDONLY, 0)
if err != nil {
return err
}
cwg.Add(1)
go func() {
cwg.Done()
io.Copy(console, in)
}()
}
outw, err := fifo.OpenFifo(ctx, stdout, syscall.O_WRONLY, 0)
if err != nil {
return err
}
outr, err := fifo.OpenFifo(ctx, stdout, syscall.O_RDONLY, 0)
if err != nil {
return err
}
wg.Add(1)
cwg.Add(1)
go func() {
cwg.Done()
io.Copy(outw, console)
console.Close()
outr.Close()
outw.Close()
wg.Done()
}()
return nil
}
func copyPipes(ctx context.Context, rio runc.IO, stdin, stdout, stderr string, wg, cwg *sync.WaitGroup) error {
for name, dest := range map[string]func(wc io.WriteCloser, rc io.Closer){
stdout: func(wc io.WriteCloser, rc io.Closer) {


@ -154,11 +154,6 @@ func connect(address string, d func(string, time.Duration) (net.Conn, error)) (*
return conn, nil
}
func dialer(address string, timeout time.Duration) (net.Conn, error) {
address = strings.TrimPrefix(address, "unix://")
return net.DialTimeout("unix", address, timeout)
}
func annonDialer(address string, timeout time.Duration) (net.Conn, error) {
address = strings.TrimPrefix(address, "unix://")
return net.DialTimeout("unix", "\x00"+address, timeout)


@ -31,7 +31,6 @@ var (
bucketKeyVersion = []byte(schemaVersion)
bucketKeyDBVersion = []byte("version") // stores the version of the schema
bucketKeyObjectLabels = []byte("labels") // stores the labels for a namespace.
bucketKeyObjectIndexes = []byte("indexes") // reserved
bucketKeyObjectImages = []byte("images") // stores image objects
bucketKeyObjectContainers = []byte("containers") // stores container objects
bucketKeyObjectSnapshots = []byte("snapshots") // stores snapshot references


@ -402,22 +402,6 @@ func addLeaseContent(ns, lid string, dgst digest.Digest) alterFunc {
}
}
func addContainer(ns, name, snapshotter, snapshot string, labels map[string]string) alterFunc {
return func(bkt *bolt.Bucket) error {
cbkt, err := createBuckets(bkt, ns, string(bucketKeyObjectContainers), name)
if err != nil {
return err
}
if err := cbkt.Put(bucketKeySnapshotter, []byte(snapshotter)); err != nil {
return err
}
if err := cbkt.Put(bucketKeySnapshotKey, []byte(snapshot)); err != nil {
return err
}
return boltutil.WriteLabels(cbkt, labels)
}
}
func createBuckets(bkt *bolt.Bucket, names ...string) (*bolt.Bucket, error) {
for _, name := range names {
nbkt, err := bkt.CreateBucketIfNotExists([]byte(name))


@ -39,14 +39,6 @@ func createKey(id uint64, namespace, key string) string {
return fmt.Sprintf("%s/%d/%s", namespace, id, key)
}
func trimKey(key string) string {
parts := strings.SplitN(key, "/", 3)
if len(parts) < 3 {
return ""
}
return parts[2]
}
func getKey(tx *bolt.Tx, ns, name, key string) string {
bkt := getSnapshotterBucket(tx, ns, name)
if bkt == nil {


@ -3,6 +3,6 @@ package progress
const (
escape = "\x1b"
reset = escape + "[0m"
red = escape + "[31m"
red = escape + "[31m" // nolint: unused, varcheck
green = escape + "[32m"
)
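The red constant is kept but annotated instead of deleted; because the configuration in the first hunk sets "WarnUnmatchedDirective": true, gometalinter will also complain if this directive ever stops matching a real warning. A minimal sketch of the same pattern in a hypothetical file:

package progress

const escape = "\x1b"

// Deliberately kept for future use; the directive silences only the named linters.
const red = escape + "[31m" // nolint: unused, varcheck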


@ -7,7 +7,6 @@ import (
"io" "io"
"io/ioutil" "io/ioutil"
"net/http" "net/http"
"net/textproto"
"net/url" "net/url"
"path" "path"
"strconv" "strconv"
@ -405,22 +404,6 @@ func copyRequest(req *http.Request) (*http.Request, error) {
return &ireq, nil
}
func isManifestAccept(h http.Header) bool {
for _, ah := range h[textproto.CanonicalMIMEHeaderKey("Accept")] {
switch ah {
case images.MediaTypeDockerSchema2Manifest:
fallthrough
case images.MediaTypeDockerSchema2ManifestList:
fallthrough
case ocispec.MediaTypeImageManifest:
fallthrough
case ocispec.MediaTypeImageIndex:
return true
}
}
return false
}
func (r *dockerBase) setTokenAuth(ctx context.Context, params map[string]string) error {
realm, ok := params["realm"]
if !ok {


@ -29,10 +29,6 @@ import (
const manifestSizeLimit = 8e6 // 8MB
var (
mediaTypeManifest = "application/vnd.docker.distribution.manifest.v1+json"
)
type blobState struct {
diffID digest.Digest
empty bool


@ -17,16 +17,6 @@ func imagesToProto(images []images.Image) []imagesapi.Image {
return imagespb
}
func imagesFromProto(imagespb []imagesapi.Image) []images.Image {
var images []images.Image
for _, image := range imagespb {
images = append(images, imageFromProto(&image))
}
return images
}
func imageToProto(image *images.Image) imagesapi.Image {
return imagesapi.Image{
Name: image.Name,


@ -38,13 +38,6 @@ type snapshotter struct {
ms *storage.MetaStore
}
type activeSnapshot struct {
id string
name string
parentID interface{}
readonly bool
}
// NewSnapshotter returns a Snapshotter which uses overlayfs. The overlayfs
// diffs are stored under the provided root. A metadata file is stored under
// the root.


@ -444,11 +444,6 @@ func testCommit(ctx context.Context, t *testing.T, ms *MetaStore) {
assertNotActive(t, err)
}
func testCommitNotExist(ctx context.Context, t *testing.T, ms *MetaStore) {
_, err := CommitActive(ctx, "active-not-exist", "committed-1", snapshot.Usage{})
assertNotExist(t, err)
}
func testCommitExist(ctx context.Context, t *testing.T, ms *MetaStore) {
if err := basePopulate(ctx, ms); err != nil {
t.Fatalf("Populate failed: %+v", err)


@ -99,6 +99,7 @@ func checkChown(ctx context.Context, t *testing.T, sn snapshot.Snapshotter, work
// checkRename
// https://github.com/docker/docker/issues/25409
func checkRename(ctx context.Context, t *testing.T, sn snapshot.Snapshotter, work string) {
t.Skip("rename test still fails on some kernels with overlay")
l1Init := fstest.Apply(
fstest.CreateDir("/dir1", 0700),
fstest.CreateDir("/somefiles", 0700),


@ -38,8 +38,7 @@ func SnapshotterSuite(t *testing.T, name string, snapshotterFn func(ctx context.
t.Run("RemoveIntermediateSnapshot", makeTest(name, snapshotterFn, checkRemoveIntermediateSnapshot)) t.Run("RemoveIntermediateSnapshot", makeTest(name, snapshotterFn, checkRemoveIntermediateSnapshot))
t.Run("DeletedFilesInChildSnapshot", makeTest(name, snapshotterFn, checkDeletedFilesInChildSnapshot)) t.Run("DeletedFilesInChildSnapshot", makeTest(name, snapshotterFn, checkDeletedFilesInChildSnapshot))
t.Run("MoveFileFromLowerLayer", makeTest(name, snapshotterFn, checkFileFromLowerLayer)) t.Run("MoveFileFromLowerLayer", makeTest(name, snapshotterFn, checkFileFromLowerLayer))
// Rename test still fails on some kernels with overlay t.Run("Rename", makeTest(name, snapshotterFn, checkRename))
//t.Run("Rename", makeTest(name, snapshotterFn, checkRename))
t.Run("ViewReadonly", makeTest(name, snapshotterFn, checkSnapshotterViewReadonly)) t.Run("ViewReadonly", makeTest(name, snapshotterFn, checkSnapshotterViewReadonly))


@ -8,7 +8,6 @@ import (
"io" "io"
goruntime "runtime" goruntime "runtime"
"strings" "strings"
"sync"
"syscall" "syscall"
"time" "time"
@ -152,8 +151,6 @@ type task struct {
io cio.IO
id string
pid uint32
mu sync.Mutex
}
// Pid returns the pid or process id for the task // Pid returns the pid or process id for the task


@ -1,4 +1,4 @@
// +build !linux
// +build !linux,!windows
package testutil
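In a // +build line, comma-separated terms are ANDed and space-separated terms are ORed, so the new constraint limits this file to targets that are neither Linux nor Windows. A minimal sketch of such a file (contents hypothetical):

// +build !linux,!windows

// Stub compiled only when GOOS is neither linux nor windows.
package testutil

func platformSpecificSetup() {} // hypothetical no-op on other platforms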


@ -10,7 +10,6 @@ import (
"strings" "strings"
"github.com/Microsoft/hcsshim" "github.com/Microsoft/hcsshim"
"github.com/Microsoft/opengcs/client"
"github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/log" "github.com/containerd/containerd/log"
specs "github.com/opencontainers/runtime-spec/specs-go" specs "github.com/opencontainers/runtime-spec/specs-go"
@ -147,53 +146,6 @@ func newWindowsContainerConfig(ctx context.Context, owner, id string, spec *spec
return conf, nil
}
// newLinuxConfig generates a hcsshim Linux container configuration from the
// provided OCI Spec
func newLinuxConfig(ctx context.Context, owner, id string, spec *specs.Spec) (*hcsshim.ContainerConfig, error) {
conf, err := newContainerConfig(ctx, owner, id, spec)
if err != nil {
return nil, err
}
conf.ContainerType = "Linux"
conf.HvPartition = true
if len(spec.Windows.LayerFolders) < 1 {
return nil, errors.Wrap(errdefs.ErrInvalidArgument,
"spec.Windows.LayerFolders must have at least 1 layer")
}
var (
layerFolders = spec.Windows.LayerFolders
)
config := &client.Config{}
if err := config.GenerateDefault(nil); err != nil {
return nil, err
}
conf.HvRuntime = &hcsshim.HvRuntime{
ImagePath: config.KirdPath,
LinuxKernelFile: config.KernelFile,
LinuxInitrdFile: config.InitrdFile,
LinuxBootParameters: config.BootParameters,
}
// TODO: use the create request Mount for those
for _, layerPath := range layerFolders {
_, filename := filepath.Split(layerPath)
guid, err := hcsshim.NameToGuid(filename)
if err != nil {
return nil, errors.Wrapf(err, "unable to get GUID for %s", filename)
}
conf.Layers = append(conf.Layers, hcsshim.Layer{
ID: guid.ToString(),
Path: filepath.Join(layerPath, "layer.vhd"),
})
}
return conf, nil
}
// removeLayer deletes the given layer, all associated containers must have
// been shutdown for this to succeed.
func removeLayer(ctx context.Context, path string) error {
@ -260,9 +212,3 @@ func newWindowsProcessConfig(processSpec *specs.Process, pset *pipeSet) *hcsshim
conf.CommandLine = strings.Join(processSpec.Args, " ")
return conf
}
func newLinuxProcessConfig(processSpec *specs.Process, pset *pipeSet) (*hcsshim.ProcessConfig, error) {
conf := newProcessConfig(processSpec, pset)
conf.CommandArgs = processSpec.Args
return conf, nil
}