Merge pull request #1196 from mlaventure/update-windows-runtime

Update windows runtime
Stephen Day 2017-07-21 15:12:53 -07:00 committed by GitHub
commit dd7642fc1c
66 changed files with 2695 additions and 1605 deletions

.appveyor.yml Normal file

@@ -0,0 +1,30 @@
version: "{build}"

image: Visual Studio 2017

clone_folder: c:\gopath\src\github.com\containerd\containerd

environment:
  GOPATH: C:\gopath
  CGO_ENABLED: 1

before_build:
  - choco install -y mingw
  - choco install codecov

build_script:
  - bash.exe -lc "export PATH=/c/tools/mingw64/bin:/c/gopath/src/github.com/containerd/containerd/bin:$PATH ; mingw32-make.exe fmt"
  - bash.exe -lc "export PATH=/c/tools/mingw64/bin:/c/gopath/src/github.com/containerd/containerd/bin:$PATH ; mingw32-make.exe vet"
  - bash.exe -lc "export PATH=/c/tools/mingw64/bin:$PATH ; mingw32-make.exe build"
  - bash.exe -lc "export PATH=/c/tools/mingw64/bin:$PATH ; mingw32-make.exe binaries"

test_script:
  # TODO: need an equivalent of TRAVIS_COMMIT_RANGE
  # - GIT_CHECK_EXCLUDE="./vendor" TRAVIS_COMMIT_RANGE="${TRAVIS_COMMIT_RANGE/.../..}" C:\MinGW\bin\mingw32-make.exe dco
  - bash.exe -lc "export PATH=/c/tools/mingw64/bin:/c/gopath/src/github.com/containerd/containerd/bin:$PATH ; mingw32-make.exe integration"
  - bash.exe -lc "export PATH=/c/tools/mingw64/bin:/c/gopath/src/github.com/containerd/containerd/bin:$PATH ; mingw32-make.exe coverage"
  - bash.exe -lc "export PATH=/c/tools/mingw64/bin:/c/gopath/src/github.com/containerd/containerd/bin:$PATH ; mingw32-make.exe root-coverage"

on_success:
  # Note that a Codecov upload token is not required.
  - codecov -f coverage.txt

Makefile

@@ -9,15 +9,18 @@ VERSION=$(shell git describe --match 'v[0-9]*' --dirty='.m' --always)
 REVISION=$(shell git rev-parse HEAD)$(shell if ! git diff --no-ext-diff --quiet --exit-code; then echo .m; fi)
 ifneq "$(strip $(shell command -v go 2>/dev/null))" ""
 	GOOS ?= $(shell go env GOOS)
 else
 	GOOS ?= $$GOOS
 endif
 WHALE = "🇩"
 ONI = "👹"
+FIX_PATH = $1
 ifeq ("$(OS)", "Windows_NT")
 	WHALE="+"
 	ONI="-"
+	FIX_PATH = $(subst /,\,$1)
 endif
 GOARCH ?= $(shell go env GOARCH)
@@ -44,7 +47,7 @@ GO_TAGS=$(if $(BUILDTAGS),-tags "$(BUILDTAGS)",)
 GO_LDFLAGS=-ldflags "-X $(PKG)/version.Version=$(VERSION) -X $(PKG)/version.Revision=$(REVISION) -X $(PKG)/version.Package=$(PKG) $(EXTRA_LDFLAGS)"
 # Flags passed to `go test`
-TESTFLAGS ?=-parallel 8 -race
+TESTFLAGS ?=-parallel 8 -race -v
 .PHONY: clean all AUTHORS fmt vet lint dco build binaries test integration setup generate protos checkprotos coverage ci check help install uninstall vendor release
 .DEFAULT: default
@@ -88,7 +91,7 @@ vet: binaries ## run go vet
 fmt: ## run go fmt
 	@echo "$(WHALE) $@"
-	@test -z "$$(gofmt -s -l . | grep -v vendor/ | grep -v ".pb.go$$" | tee /dev/stderr)" || \
+	@test -z "$$(gofmt -s -l . | grep -Fv $(call FIX_PATH,'vendor/') | grep -v ".pb.go$$" | tee /dev/stderr)" || \
 		(echo "$(ONI) please format Go code with 'gofmt -s -w'" && false)
 	@test -z "$$(find . -path ./vendor -prune -o ! -name timestamp.proto ! -name duration.proto -name '*.proto' -type f -exec grep -Hn -e "^ " {} \; | tee /dev/stderr)" || \
 		(echo "$(ONI) please indent proto files with tabs only" && false)
@@ -97,7 +100,7 @@ fmt: ## run go fmt
 lint: ## run go lint
 	@echo "$(WHALE) $@"
-	@test -z "$$(golint ./... | grep -v vendor/ | grep -v ".pb.go:" | tee /dev/stderr)"
+	@test -z "$$(golint ./... | grep -Fv $(call FIX_PATH,'vendor/') | grep -v ".pb.go:" | tee /dev/stderr)"
 dco: ## dco check
 	@which git-validation > /dev/null 2>/dev/null || (echo "ERROR: git-validation not found" && false)
@@ -109,11 +112,11 @@ endif
 ineffassign: ## run ineffassign
 	@echo "$(WHALE) $@"
-	@test -z "$$(ineffassign . | grep -v vendor/ | grep -v ".pb.go:" | tee /dev/stderr)"
+	@test -z "$$(ineffassign . | grep -Fv $(call FIX_PATH,'vendor/') | grep -v ".pb.go:" | tee /dev/stderr)"
 #errcheck: ## run go errcheck
 #	@echo "$(WHALE) $@"
-#	@test -z "$$(errcheck ./... | grep -v vendor/ | grep -v ".pb.go:" | tee /dev/stderr)"
+#	@test -z "$$(errcheck ./... | grep -Fv $(call FIX_PATH,'vendor/') | grep -v ".pb.go:" | tee /dev/stderr)"
 build: ## build the go packages
 	@echo "$(WHALE) $@"


@@ -1,3 +1,5 @@
+// +build !windows
+
 package archive
 
 import (


@@ -1,3 +1,5 @@
+// +build !windows
+
 package archive
 
 import (


@@ -20,7 +20,7 @@ func BenchmarkContainerCreate(b *testing.B) {
 		b.Error(err)
 		return
 	}
-	spec, err := GenerateSpec(WithImageConfig(ctx, image), WithProcessArgs("true"))
+	spec, err := GenerateSpec(WithImageConfig(ctx, image), withTrue())
 	if err != nil {
 		b.Error(err)
 		return
@@ -63,7 +63,7 @@ func BenchmarkContainerStart(b *testing.B) {
 		b.Error(err)
 		return
 	}
-	spec, err := GenerateSpec(WithImageConfig(ctx, image), WithProcessArgs("true"))
+	spec, err := GenerateSpec(WithImageConfig(ctx, image), withTrue())
 	if err != nil {
 		b.Error(err)
 		return


@@ -1,3 +1,5 @@
+// +build !windows
+
 package containerd
 
 import (


@@ -7,7 +7,6 @@ import (
 	"log"
 	"net/http"
 	"runtime"
-	"strconv"
 	"sync"
 	"time"
@@ -34,11 +33,9 @@ import (
 	imagesservice "github.com/containerd/containerd/services/images"
 	snapshotservice "github.com/containerd/containerd/services/snapshot"
 	"github.com/containerd/containerd/snapshot"
-	"github.com/containerd/containerd/typeurl"
 	pempty "github.com/golang/protobuf/ptypes/empty"
 	"github.com/opencontainers/image-spec/identity"
 	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
-	specs "github.com/opencontainers/runtime-spec/specs-go"
 	"github.com/pkg/errors"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/grpclog"
@@ -48,13 +45,6 @@ import (
 func init() {
 	// reset the grpc logger so that it does not output in the STDIO of the calling process
 	grpclog.SetLogger(log.New(ioutil.Discard, "", log.LstdFlags))
-
-	// register TypeUrls for commonly marshaled external types
-	major := strconv.Itoa(specs.VersionMajor)
-	typeurl.Register(&specs.Spec{}, "opencontainers/runtime-spec", major, "Spec")
-	typeurl.Register(&specs.Process{}, "opencontainers/runtime-spec", major, "Process")
-	typeurl.Register(&specs.LinuxResources{}, "opencontainers/runtime-spec", major, "LinuxResources")
-	typeurl.Register(&specs.WindowsResources{}, "opencontainers/runtime-spec", major, "WindowsResources")
 }
 
 type clientOpts struct {


@@ -7,6 +7,7 @@ import (
 	"fmt"
 	"os"
 	"os/exec"
+	"runtime"
 	"syscall"
 	"testing"
 	"time"
@@ -17,11 +18,6 @@ import (
 	"github.com/containerd/containerd/testutil"
 )
 
-const (
-	defaultRoot = "/var/lib/containerd-test"
-	testImage   = "docker.io/library/alpine:latest"
-)
-
 var (
 	address  string
 	noDaemon bool
@@ -29,7 +25,7 @@ var (
 )
 
 func init() {
-	flag.StringVar(&address, "address", "/run/containerd-test/containerd.sock", "The address to the containerd socket for use in the tests")
+	flag.StringVar(&address, "address", defaultAddress, "The address to the containerd socket for use in the tests")
 	flag.BoolVar(&noDaemon, "no-daemon", false, "Do not start a dedicated daemon for the tests")
 	flag.Parse()
 }
@@ -57,11 +53,15 @@ func TestMain(m *testing.M) {
 	defer cancel()
 
 	if !noDaemon {
+		os.RemoveAll(defaultRoot)
+
 		// setup a new containerd daemon if !testing.Short
 		cmd = exec.Command("containerd",
 			"--root", defaultRoot,
 			"--address", address,
+			"--log-level", "debug",
 		)
+		cmd.Stdout = buf
 		cmd.Stderr = buf
 		if err := cmd.Start(); err != nil {
 			cmd.Wait()
@@ -94,14 +94,22 @@ func TestMain(m *testing.M) {
 	}).Info("running tests against containerd")
 
 	// pull a seed image
-	if _, err = client.Pull(ctx, testImage, WithPullUnpack); err != nil {
-		cmd.Process.Signal(syscall.SIGTERM)
-		cmd.Wait()
-		fmt.Fprintf(os.Stderr, "%s: %s", err, buf.String())
+	if runtime.GOOS != "windows" { // TODO: remove once pull is supported on windows
+		if _, err = client.Pull(ctx, testImage, WithPullUnpack); err != nil {
+			cmd.Process.Signal(syscall.SIGTERM)
+			cmd.Wait()
+			fmt.Fprintf(os.Stderr, "%s: %s", err, buf.String())
+			os.Exit(1)
+		}
+	}
+
+	if err := platformTestSetup(client); err != nil {
+		fmt.Fprintln(os.Stderr, "platform test setup failed", err)
 		os.Exit(1)
 	}
 
 	if err := client.Close(); err != nil {
-		fmt.Fprintln(os.Stderr, err)
+		fmt.Fprintln(os.Stderr, "failed to close client", err)
 	}
 
 	// run the test
@@ -110,13 +118,15 @@ func TestMain(m *testing.M) {
 	if !noDaemon {
 		// tear down the daemon and resources created
 		if err := cmd.Process.Signal(syscall.SIGTERM); err != nil {
-			fmt.Fprintln(os.Stderr, err)
+			if err := cmd.Process.Kill(); err != nil {
+				fmt.Fprintln(os.Stderr, "failed to signal containerd", err)
+			}
 		}
 		if err := cmd.Wait(); err != nil {
-			fmt.Fprintln(os.Stderr, err)
+			fmt.Fprintln(os.Stderr, "failed to wait for containerd", err)
 		}
 		if err := os.RemoveAll(defaultRoot); err != nil {
-			fmt.Fprintln(os.Stderr, err)
+			fmt.Fprintln(os.Stderr, "failed to remove test root dir", err)
 			os.Exit(1)
 		}
 		// only print containerd logs if the test failed
@@ -171,6 +181,11 @@ func TestNewClient(t *testing.T) {
 }
 
 func TestImagePull(t *testing.T) {
+	if runtime.GOOS == "windows" {
+		// TODO: remove once Windows has a snapshotter
+		t.Skip("Windows does not have a snapshotter yet")
+	}
+
 	client, err := newClient(t, address)
 	if err != nil {
 		t.Fatal(err)


@@ -9,9 +9,6 @@ import (
 	"time"
 )
 
-// DefaultAddress is the default unix socket address
-const DefaultAddress = "/run/containerd/containerd.sock"
-
 func dialer(address string, timeout time.Duration) (net.Conn, error) {
 	address = strings.TrimPrefix(address, "unix://")
 	return net.DialTimeout("unix", address, timeout)

client_unix_test.go Normal file

@@ -0,0 +1,13 @@
// +build !windows

package containerd

const (
	defaultRoot    = "/var/lib/containerd-test"
	defaultAddress = "/run/containerd-test/containerd.sock"
	testImage      = "docker.io/library/alpine:latest"
)

func platformTestSetup(client *Client) error {
	return nil
}


@@ -7,9 +7,6 @@ import (
 	winio "github.com/Microsoft/go-winio"
 )
 
-// DefaultAddress is the default unix socket address
-const DefaultAddress = `\\.\pipe\containerd-containerd`
-
 func dialer(address string, timeout time.Duration) (net.Conn, error) {
 	return winio.DialPipe(address, &timeout)
 }

client_windows_test.go Normal file

@@ -0,0 +1,87 @@
package containerd

import (
	"encoding/json"
	"fmt"
	"os"
	"path/filepath"

	"github.com/pkg/errors"
)

const (
	defaultAddress = `\\.\pipe\containerd-containerd-test`
	testImage      = "docker.io/library/go:nanoserver"
)

var (
	dockerLayerFolders []string

	defaultRoot = filepath.Join(os.Getenv("programfiles"), "containerd", "root-test")
)

func platformTestSetup(client *Client) error {
	var (
		roots       []string
		layerChains = make(map[string]string)
	)
	// Since we can't pull images yet, we'll piggyback on the default
	// docker images
	wfPath := `C:\ProgramData\docker\windowsfilter`
	wf, err := os.Open(wfPath)
	if err != nil {
		return errors.Wrapf(err, "failed to access docker layers @ %s", wfPath)
	}
	defer wf.Close()
	entries, err := wf.Readdirnames(0)
	if err != nil {
		return errors.Wrapf(err, "failed to read %s entries", wfPath)
	}

	for _, fn := range entries {
		layerChainPath := filepath.Join(wfPath, fn, "layerchain.json")
		lfi, err := os.Stat(layerChainPath)
		switch {
		case err == nil && lfi.Mode().IsRegular():
			f, err := os.OpenFile(layerChainPath, os.O_RDONLY, 0660)
			if err != nil {
				fmt.Fprintln(os.Stderr,
					errors.Wrapf(err, "failed to open %s", layerChainPath))
				continue
			}
			defer f.Close()
			l := make([]string, 0)
			if err := json.NewDecoder(f).Decode(&l); err != nil {
				fmt.Fprintln(os.Stderr,
					errors.Wrapf(err, "failed to decode %s", layerChainPath))
				continue
			}
			switch {
			case len(l) == 1:
				layerChains[l[0]] = filepath.Join(wfPath, fn)
			case len(l) > 1:
				fmt.Fprintf(os.Stderr, "Too many entries in %s: %d", layerChainPath, len(l))
			case len(l) == 0:
				roots = append(roots, filepath.Join(wfPath, fn))
			}
		case os.IsNotExist(err):
			// keep on going
		default:
			return errors.Wrapf(err, "error trying to access %s", layerChainPath)
		}
	}

	// There'll be 2 roots; just take the first one
	l := roots[0]
	dockerLayerFolders = append(dockerLayerFolders, l)
	for {
		l = layerChains[l]
		if l == "" {
			break
		}
		dockerLayerFolders = append([]string{l}, dockerLayerFolders...)
	}

	return nil
}


@@ -1,7 +1,6 @@
 package main
 
 import (
-	"github.com/containerd/containerd"
 	"github.com/containerd/containerd/server"
 )
 
@@ -9,7 +8,7 @@ func defaultConfig() *server.Config {
 	return &server.Config{
 		Root: "/var/lib/containerd",
 		GRPC: server.GRPCConfig{
-			Address: containerd.DefaultAddress,
+			Address: server.DefaultAddress,
 		},
 		Debug: server.Debug{
 			Level: "info",


@@ -8,7 +8,7 @@ func defaultConfig() *server.Config {
 	return &server.Config{
 		Root: "/var/lib/containerd",
 		GRPC: server.GRPCConfig{
-			Address: "/run/containerd/containerd.sock",
+			Address: server.DefaultAddress,
 		},
 		Debug: server.Debug{
 			Level: "info",


@@ -40,12 +40,12 @@ var deleteCommand = cli.Command{
 		if err != nil {
 			return err
 		}
-		if status == containerd.Stopped {
+		if status == containerd.Stopped || status == containerd.Created {
 			if _, err := task.Delete(ctx); err != nil {
 				return err
 			}
 			return container.Delete(ctx, deleteOpts...)
 		}
-		return fmt.Errorf("cannot delete a container with an existing task")
+		return fmt.Errorf("cannot delete a non stopped container: %v", status)
 	},
 }
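
The gate above now treats Created the same as Stopped: a task that was created but never started is as safe to delete as one that already exited. A minimal sketch of the same check from client code, assuming the Task and Container method signatures implied by their call sites in this PR:

package main

import (
	"context"
	"fmt"

	"github.com/containerd/containerd"
)

// deleteIfNotRunning deletes the task and its container only when the task
// never ran (Created) or already exited (Stopped), mirroring ctr's delete.
func deleteIfNotRunning(ctx context.Context, container containerd.Container, task containerd.Task) error {
	status, err := task.Status(ctx)
	if err != nil {
		return err
	}
	if status == containerd.Stopped || status == containerd.Created {
		if _, err := task.Delete(ctx); err != nil {
			return err
		}
		return container.Delete(ctx)
	}
	return fmt.Errorf("cannot delete a non stopped container: %v", status)
}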


@@ -5,8 +5,8 @@ import (
 	"os"
 
 	"github.com/Sirupsen/logrus"
-	"github.com/containerd/containerd"
 	"github.com/containerd/containerd/namespaces"
+	"github.com/containerd/containerd/server"
 	"github.com/containerd/containerd/version"
 	"github.com/urfave/cli"
 )
@@ -40,7 +40,7 @@ containerd CLI
 		cli.StringFlag{
 			Name:  "address, a",
 			Usage: "address for containerd's GRPC server",
-			Value: containerd.DefaultAddress,
+			Value: server.DefaultAddress,
 		},
 		cli.DurationFlag{
 			Name: "timeout",
@@ -58,31 +58,32 @@ containerd CLI
 		},
 	}
 	app.Commands = append([]cli.Command{
-		imageCommand,
-		pullCommand,
+		applyCommand,
+		attachCommand,
+		checkpointCommand,
+		containersCommand,
+		contentCommand,
+		deleteCommand,
+		eventsCommand,
+		execCommand,
 		fetchCommand,
 		fetchObjectCommand,
-		pushCommand,
-		pushObjectCommand,
-		containersCommand,
-		checkpointCommand,
-		runCommand,
-		attachCommand,
-		deleteCommand,
-		namespacesCommand,
-		eventsCommand,
-		taskListCommand,
+		imageCommand,
 		infoCommand,
 		killCommand,
-		pprofCommand,
-		execCommand,
+		namespacesCommand,
 		pauseCommand,
-		resumeCommand,
-		snapshotCommand,
-		versionCommand,
+		pprofCommand,
 		psCommand,
-		applyCommand,
+		pullCommand,
+		pushCommand,
+		pushObjectCommand,
+		resumeCommand,
 		rootfsCommand,
+		runCommand,
+		snapshotCommand,
+		taskListCommand,
+		versionCommand,
 	}, extraCmds...)
 	app.Before = func(context *cli.Context) error {
 		if context.GlobalBool("debug") {


@@ -3,13 +3,12 @@ package main
 import (
 	"fmt"
 	"io"
-	"net"
 	"net/http"
 	"os"
 	"time"
 
+	"github.com/containerd/containerd/server"
 	"github.com/pkg/errors"
 	"github.com/urfave/cli"
 )
@@ -25,7 +24,7 @@ var pprofCommand = cli.Command{
 		cli.StringFlag{
 			Name:  "debug-socket, d",
 			Usage: "socket path for containerd's debug server",
-			Value: "/run/containerd/debug.sock",
+			Value: server.DefaultDebugAddress,
 		},
 	},
 	Subcommands: []cli.Command{
@@ -143,13 +142,8 @@ var pprofThreadcreateCommand = cli.Command{
 	},
 }
 
-func (d *pprofDialer) pprofDial(proto, addr string) (conn net.Conn, err error) {
-	return net.Dial(d.proto, d.addr)
-}
-
 func getPProfClient(context *cli.Context) *http.Client {
-	addr := context.GlobalString("debug-socket")
-	dialer := pprofDialer{"unix", addr}
+	dialer := getPProfDialer(context.GlobalString("debug-socket"))
 
 	tr := &http.Transport{
 		Dial: dialer.pprofDial,

cmd/ctr/pprof_unix.go Normal file

@@ -0,0 +1,13 @@
// +build !windows

package main

import "net"

func (d *pprofDialer) pprofDial(proto, addr string) (conn net.Conn, err error) {
	return net.Dial(d.proto, d.addr)
}

func getPProfDialer(addr string) *pprofDialer {
	return &pprofDialer{"unix", addr}
}

cmd/ctr/pprof_windows.go Normal file

@@ -0,0 +1,15 @@
package main

import (
	"net"

	winio "github.com/Microsoft/go-winio"
)

func (d *pprofDialer) pprofDial(proto, addr string) (conn net.Conn, err error) {
	return winio.DialPipe(d.addr, nil)
}

func getPProfDialer(addr string) *pprofDialer {
	return &pprofDialer{"winpipe", addr}
}
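
With the dialer split per platform, getPProfClient works unchanged on both: the http.Transport's Dial is overridden, so the URL's host is effectively ignored and every request tunnels through the socket or named pipe. A sketch of such a client, assuming only the getPProfDialer/pprofDial shown above (the address value and URL path are illustrative):

package main

import (
	"io"
	"net/http"
	"os"
)

// fetchProfile dumps a pprof endpoint served over containerd's debug socket.
func fetchProfile(addr string) error {
	dialer := getPProfDialer(addr) // unix socket path, or winpipe on Windows
	client := &http.Client{
		Transport: &http.Transport{
			Dial: dialer.pprofDial, // all requests go through the socket/pipe
		},
	}
	resp, err := client.Get("http://localhost/debug/pprof/goroutine?debug=2")
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	_, err = io.Copy(os.Stdout, resp.Body)
	return err
}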


@@ -25,7 +25,7 @@ func withEnv(context *cli.Context) containerd.SpecOpts {
 	return func(s *specs.Spec) error {
 		env := context.StringSlice("env")
 		if len(env) > 0 {
-			s.Process.Env = append(s.Process.Env, env...)
+			s.Process.Env = replaceOrAppendEnvValues(s.Process.Env, env)
 		}
 		return nil
 	}
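
The change matters because a plain append can leave two entries for the same variable (e.g. the image's PATH and the user's --env PATH), and which one wins is runtime-dependent. replaceOrAppendEnvValues itself is not shown in this diff; the sketch below is only an illustration of the semantics the new call site relies on, not the PR's implementation:

package main

import "strings"

// replaceOrAppendEnvValuesSketch: an override whose KEY already exists in
// defaults replaces that entry; otherwise it is appended.
func replaceOrAppendEnvValuesSketch(defaults, overrides []string) []string {
	cache := make(map[string]int, len(defaults))
	for i, e := range defaults {
		if kv := strings.SplitN(e, "=", 2); len(kv) == 2 {
			cache[kv[0]] = i
		}
	}
	for _, value := range overrides {
		kv := strings.SplitN(value, "=", 2)
		if i, ok := cache[kv[0]]; ok {
			defaults[i] = value // replace the existing KEY=...
		} else {
			defaults = append(defaults, value) // new KEY, append
		}
	}
	return defaults
}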


@@ -2,22 +2,16 @@ package main
 
 import (
 	gocontext "context"
-	"encoding/json"
-	"fmt"
-	"io/ioutil"
 	"time"
 
 	"github.com/Sirupsen/logrus"
 	"github.com/containerd/console"
 	"github.com/containerd/containerd"
-	"github.com/containerd/containerd/api/services/tasks/v1"
+	"github.com/containerd/containerd/errdefs"
 	"github.com/containerd/containerd/log"
-	"github.com/containerd/containerd/mount"
-	"github.com/containerd/containerd/windows"
-	"github.com/containerd/containerd/windows/hcs"
 	digest "github.com/opencontainers/go-digest"
-	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
 	specs "github.com/opencontainers/runtime-spec/specs-go"
+	"github.com/pkg/errors"
 	"github.com/urfave/cli"
 )
 
@@ -25,126 +19,20 @@ const pipeRoot = `\\.\pipe`
 
 func init() {
 	runCommand.Flags = append(runCommand.Flags, cli.StringSliceFlag{
-		Name:  "layers",
+		Name:  "layer",
 		Usage: "HCSSHIM Layers to be used",
 	})
 }
 
-func spec(id string, config *ocispec.ImageConfig, context *cli.Context) *specs.Spec {
-	cmd := config.Cmd
-	if a := context.Args().First(); a != "" {
-		cmd = context.Args()
-	}
-	var (
-		// TODO: support overriding entrypoint
-		args = append(config.Entrypoint, cmd...)
-		tty  = context.Bool("tty")
-		cwd  = config.WorkingDir
-	)
-	if cwd == "" {
-		cwd = `C:\`
-	}
-	// Some sane defaults for console
-	w := 80
-	h := 20
-	if tty {
-		con := console.Current()
-		size, err := con.Size()
-		if err == nil {
-			w = int(size.Width)
-			h = int(size.Height)
-		}
-	}
-	env := replaceOrAppendEnvValues(config.Env, context.StringSlice("env"))
-	return &specs.Spec{
-		Version: specs.Version,
-		Root: &specs.Root{
-			Readonly: context.Bool("readonly"),
-		},
-		Process: &specs.Process{
-			Args:     args,
-			Terminal: tty,
-			Cwd:      cwd,
-			Env:      env,
-			User: specs.User{
-				Username: config.User,
-			},
-			ConsoleSize: &specs.Box{
-				Height: uint(w),
-				Width:  uint(h),
-			},
-		},
-		Hostname: id,
-	}
-}
-
-func customSpec(context *cli.Context, configPath, rootfs string) (*specs.Spec, error) {
-	b, err := ioutil.ReadFile(configPath)
-	if err != nil {
-		return nil, err
-	}
-	var s specs.Spec
-	if err := json.Unmarshal(b, &s); err != nil {
-		return nil, err
-	}
-	if rootfs != "" && s.Root.Path != rootfs {
-		logrus.Warnf("ignoring config Root.Path %q, setting %q forcibly", s.Root.Path, rootfs)
-		s.Root.Path = rootfs
-	}
-	return &s, nil
-}
-
-func getConfig(context *cli.Context, imageConfig *ocispec.ImageConfig, rootfs string) (*specs.Spec, error) {
-	if config := context.String("runtime-config"); config != "" {
-		return customSpec(context, config, rootfs)
-	}
-	s := spec(context.String("id"), imageConfig, context)
-	if rootfs != "" {
-		s.Root.Path = rootfs
-	}
-	return s, nil
-}
-
-func newContainerSpec(context *cli.Context, config *ocispec.ImageConfig, imageRef string) ([]byte, error) {
-	spec, err := getConfig(context, config, context.String("rootfs"))
-	if err != nil {
-		return nil, err
-	}
-	if spec.Annotations == nil {
-		spec.Annotations = make(map[string]string)
-	}
-	spec.Annotations["image"] = imageRef
-	rtSpec := windows.RuntimeSpec{
-		OCISpec: *spec,
-		Configuration: hcs.Configuration{
-			Layers:                   context.StringSlice("layers"),
-			IgnoreFlushesDuringBoot:  true,
-			AllowUnqualifiedDNSQuery: true},
-	}
-	return json.Marshal(rtSpec)
-}
-
-func newCreateTaskRequest(context *cli.Context, id, tmpDir string, checkpoint *ocispec.Descriptor, mounts []mount.Mount) (*tasks.CreateTaskRequest, error) {
-	create := &tasks.CreateTaskRequest{
-		ContainerID: id,
-		Terminal:    context.Bool("tty"),
-		Stdin:       fmt.Sprintf(`%s\ctr-%s-stdin`, pipeRoot, id),
-		Stdout:      fmt.Sprintf(`%s\ctr-%s-stdout`, pipeRoot, id),
-	}
-	if !create.Terminal {
-		create.Stderr = fmt.Sprintf(`%s\ctr-%s-stderr`, pipeRoot, id)
-	}
-	return create, nil
-}
+func withLayers(context *cli.Context) containerd.SpecOpts {
+	return func(s *specs.Spec) error {
+		l := context.StringSlice("layer")
+		if l == nil {
+			return errors.Wrap(errdefs.ErrInvalidArgument, "base layers must be specified with `--layer`")
+		}
+		s.Windows.LayerFolders = l
+		return nil
+	}
+}
 
@@ -175,7 +63,14 @@ func handleConsoleResize(ctx gocontext.Context, task resizer, con console.Console) error {
 	return nil
 }
 
-func withTTY() containerd.SpecOpts {
+func withTTY(terminal bool) containerd.SpecOpts {
+	if !terminal {
+		return func(s *specs.Spec) error {
+			s.Process.Terminal = false
+			return nil
+		}
+	}
+
 	con := console.Current()
 	size, err := con.Size()
 	if err != nil {
@@ -192,43 +87,37 @@ func newContainer(ctx gocontext.Context, client *containerd.Client, context *cli.Context) (containerd.Container, error) {
 	var (
 		err error
 
-		ref  = context.Args().First()
+		// ref = context.Args().First()
 		id   = context.Args().Get(1)
 		args = context.Args()[2:]
 		tty  = context.Bool("tty")
+		labelStrings = context.StringSlice("label")
 	)
 
-	image, err := client.GetImage(ctx, ref)
-	if err != nil {
-		return nil, err
-	}
+	labels := labelArgs(labelStrings)
+
+	// TODO(mlaventure): get base image once we have a snapshotter
 
 	opts := []containerd.SpecOpts{
-		containerd.WithImageConfig(ctx, image),
+		// TODO(mlaventure): use containerd.WithImageConfig once we have a snapshotter
+		withLayers(context),
 		withEnv(context),
 		withMounts(context),
+		withTTY(tty),
 	}
 	if len(args) > 0 {
 		opts = append(opts, containerd.WithProcessArgs(args...))
 	}
-	if tty {
-		opts = append(opts, withTTY())
-	}
-	if context.Bool("net-host") {
-		opts = append(opts, setHostNetworking())
-	}
 	spec, err := containerd.GenerateSpec(opts...)
 	if err != nil {
 		return nil, err
 	}
-	var rootfs containerd.NewContainerOpts
-	if context.Bool("readonly") {
-		rootfs = containerd.WithNewReadonlyRootFS(id, image)
-	} else {
-		rootfs = containerd.WithNewRootFS(id, image)
-	}
 	return client.NewContainer(ctx, id,
 		containerd.WithSpec(spec),
-		containerd.WithImage(image),
-		rootfs,
+		containerd.WithContainerLabels(labels),
+		// TODO(mlaventure): containerd.WithImage(image),
	)
 }


@@ -157,7 +157,7 @@ type NewTaskOpts func(context.Context, *Client, *TaskInfo) error
 func (c *container) NewTask(ctx context.Context, ioCreate IOCreation, opts ...NewTaskOpts) (Task, error) {
 	c.mu.Lock()
 	defer c.mu.Unlock()
-	i, err := ioCreate()
+	i, err := ioCreate(c.c.ID)
 	if err != nil {
 		return nil, err
 	}
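
NewTask now passes the container ID into the IOCreation callback, so IO implementations can derive per-container pipe and fifo names (the Windows backend needs this, since named pipes live in a flat namespace). A sketch of a custom IOCreation under the new func(id string) signature; it reuses NewIOWithTerminal from the io.go changes later in this diff, and the log-file naming is purely illustrative:

package main

import (
	"os"
	"path/filepath"

	"github.com/containerd/containerd"
)

// logIO writes a task's stdout/stderr to a log file named after the
// container ID it is created for.
func logIO(dir string) containerd.IOCreation {
	return func(id string) (*containerd.IO, error) {
		f, err := os.Create(filepath.Join(dir, id+".log"))
		if err != nil {
			return nil, err
		}
		return containerd.NewIOWithTerminal(nil, f, f, false)(id)
	}
}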

container_linux_test.go Normal file

@@ -0,0 +1,100 @@
// +build linux

package containerd

import (
	"syscall"
	"testing"

	"github.com/containerd/cgroups"
	specs "github.com/opencontainers/runtime-spec/specs-go"
)

func TestContainerUpdate(t *testing.T) {
	client, err := newClient(t, address)
	if err != nil {
		t.Fatal(err)
	}
	defer client.Close()

	var (
		ctx, cancel = testContext()
		id          = t.Name()
	)
	defer cancel()

	image, err := client.GetImage(ctx, testImage)
	if err != nil {
		t.Error(err)
		return
	}
	spec, err := generateSpec(WithImageConfig(ctx, image), withProcessArgs("sleep", "30"))
	if err != nil {
		t.Error(err)
		return
	}
	limit := int64(32 * 1024 * 1024)
	spec.Linux.Resources.Memory = &specs.LinuxMemory{
		Limit: &limit,
	}
	container, err := client.NewContainer(ctx, id, WithSpec(spec), WithNewRootFS(id, image))
	if err != nil {
		t.Error(err)
		return
	}
	defer container.Delete(ctx, WithRootFSDeletion)

	task, err := container.NewTask(ctx, empty())
	if err != nil {
		t.Error(err)
		return
	}
	defer task.Delete(ctx)

	statusC := make(chan uint32, 1)
	go func() {
		status, err := task.Wait(ctx)
		if err != nil {
			t.Error(err)
		}
		statusC <- status
	}()

	// check that the task has a limit of 32mb
	cgroup, err := cgroups.Load(cgroups.V1, cgroups.PidPath(int(task.Pid())))
	if err != nil {
		t.Error(err)
		return
	}
	stat, err := cgroup.Stat(cgroups.IgnoreNotExist)
	if err != nil {
		t.Error(err)
		return
	}
	if int64(stat.Memory.Usage.Limit) != limit {
		t.Errorf("expected memory limit to be set to %d but received %d", limit, stat.Memory.Usage.Limit)
		return
	}
	limit = 64 * 1024 * 1024
	if err := task.Update(ctx, WithResources(&specs.LinuxResources{
		Memory: &specs.LinuxMemory{
			Limit: &limit,
		},
	})); err != nil {
		t.Error(err)
	}
	// check that the task has a limit of 64mb
	if stat, err = cgroup.Stat(cgroups.IgnoreNotExist); err != nil {
		t.Error(err)
		return
	}
	if int64(stat.Memory.Usage.Limit) != limit {
		t.Errorf("expected memory limit to be set to %d but received %d", limit, stat.Memory.Usage.Limit)
	}
	if err := task.Kill(ctx, syscall.SIGKILL); err != nil {
		t.Error(err)
		return
	}
	<-statusC
}


@@ -6,13 +6,16 @@ import (
 	"io"
 	"io/ioutil"
 	"os"
+	"runtime"
+	"strings"
 	"sync"
 	"syscall"
 	"testing"
 
-	"github.com/containerd/cgroups"
+	// Register the typeurl
+	_ "github.com/containerd/containerd/runtime"
+
 	"github.com/containerd/containerd/errdefs"
-	specs "github.com/opencontainers/runtime-spec/specs-go"
 )
 
 func empty() IOCreation {
@@ -48,7 +51,7 @@ func TestNewContainer(t *testing.T) {
 	}
 	defer client.Close()
 
-	spec, err := GenerateSpec()
+	spec, err := generateSpec()
 	if err != nil {
 		t.Error(err)
 		return
@@ -84,22 +87,26 @@ func TestContainerStart(t *testing.T) {
 	defer client.Close()
 
 	var (
+		image       Image
 		ctx, cancel = testContext()
 		id          = t.Name()
 	)
 	defer cancel()
 
-	image, err := client.GetImage(ctx, testImage)
+	if runtime.GOOS != "windows" {
+		image, err = client.GetImage(ctx, testImage)
+		if err != nil {
+			t.Error(err)
+			return
+		}
+	}
+	spec, err := generateSpec(withImageConfig(ctx, image), withExitStatus(7))
 	if err != nil {
 		t.Error(err)
 		return
 	}
-	spec, err := GenerateSpec(WithImageConfig(ctx, image), WithProcessArgs("sh", "-c", "exit 7"))
-	if err != nil {
-		t.Error(err)
-		return
-	}
-	container, err := client.NewContainer(ctx, id, WithSpec(spec), WithNewRootFS(id, image))
+	container, err := client.NewContainer(ctx, id, WithSpec(spec), withNewRootFS(id, image))
 	if err != nil {
 		t.Error(err)
 		return
@@ -151,23 +158,27 @@ func TestContainerOutput(t *testing.T) {
 	defer client.Close()
 
 	var (
+		image       Image
 		ctx, cancel = testContext()
 		id          = t.Name()
 		expected    = "kingkoye"
 	)
 	defer cancel()
 
-	image, err := client.GetImage(ctx, testImage)
+	if runtime.GOOS != "windows" {
+		image, err = client.GetImage(ctx, testImage)
+		if err != nil {
+			t.Error(err)
+			return
+		}
+	}
+	spec, err := generateSpec(withImageConfig(ctx, image), withProcessArgs("echo", expected))
 	if err != nil {
 		t.Error(err)
 		return
 	}
-	spec, err := GenerateSpec(WithImageConfig(ctx, image), WithProcessArgs("echo", expected))
-	if err != nil {
-		t.Error(err)
-		return
-	}
-	container, err := client.NewContainer(ctx, id, WithSpec(spec), WithNewRootFS(id, image))
+	container, err := client.NewContainer(ctx, id, WithSpec(spec), withNewRootFS(id, image))
 	if err != nil {
 		t.Error(err)
 		return
@@ -207,7 +218,7 @@ func TestContainerOutput(t *testing.T) {
 	actual := stdout.String()
 	// echo adds a new line
-	expected = expected + "\n"
+	expected = expected + newLine
 	if actual != expected {
 		t.Errorf("expected output %q but received %q", expected, actual)
 	}
@@ -221,22 +232,26 @@ func TestContainerExec(t *testing.T) {
 	defer client.Close()
 
 	var (
+		image       Image
 		ctx, cancel = testContext()
 		id          = t.Name()
 	)
 	defer cancel()
 
-	image, err := client.GetImage(ctx, testImage)
+	if runtime.GOOS != "windows" {
+		image, err = client.GetImage(ctx, testImage)
+		if err != nil {
+			t.Error(err)
+			return
+		}
+	}
+	spec, err := generateSpec(withImageConfig(ctx, image), withProcessArgs("sleep", "100"))
 	if err != nil {
 		t.Error(err)
 		return
 	}
-	spec, err := GenerateSpec(WithImageConfig(ctx, image), WithProcessArgs("sleep", "100"))
-	if err != nil {
-		t.Error(err)
-		return
-	}
-	container, err := client.NewContainer(ctx, id, WithSpec(spec), WithNewRootFS(id, image))
+	container, err := client.NewContainer(ctx, id, WithSpec(spec), withNewRootFS(id, image))
 	if err != nil {
 		t.Error(err)
 		return
@@ -258,12 +273,14 @@ func TestContainerExec(t *testing.T) {
 		close(finished)
 	}()
 
+	if err := task.Start(ctx); err != nil {
+		t.Error(err)
+		return
+	}
+
 	// start an exec process without running the original container process info
 	processSpec := spec.Process
-	processSpec.Args = []string{
-		"sh", "-c",
-		"exit 6",
-	}
+	withExecExitStatus(processSpec, 6)
 	execID := t.Name() + "_exec"
 	process, err := task.Exec(ctx, execID, processSpec, empty())
 	if err != nil {
@@ -275,7 +292,6 @@ func TestContainerExec(t *testing.T) {
 		status, err := process.Wait(ctx)
 		if err != nil {
 			t.Error(err)
-			return
 		}
 		processStatusC <- status
 	}()
@@ -305,7 +321,7 @@ func TestContainerExec(t *testing.T) {
 	<-finished
 }
 
-func TestContainerProcesses(t *testing.T) {
+func TestContainerPids(t *testing.T) {
 	client, err := newClient(t, address)
 	if err != nil {
 		t.Fatal(err)
@@ -313,22 +329,26 @@ func TestContainerPids(t *testing.T) {
 	defer client.Close()
 
 	var (
+		image       Image
 		ctx, cancel = testContext()
 		id          = t.Name()
 	)
 	defer cancel()
 
-	image, err := client.GetImage(ctx, testImage)
+	if runtime.GOOS != "windows" {
+		image, err = client.GetImage(ctx, testImage)
+		if err != nil {
+			t.Error(err)
+			return
+		}
+	}
+	spec, err := generateSpec(withImageConfig(ctx, image), withProcessArgs("sleep", "100"))
 	if err != nil {
 		t.Error(err)
 		return
 	}
-	spec, err := GenerateSpec(WithImageConfig(ctx, image), WithProcessArgs("sleep", "100"))
-	if err != nil {
-		t.Error(err)
-		return
-	}
-	container, err := client.NewContainer(ctx, id, WithSpec(spec), WithNewRootFS(id, image))
+	container, err := client.NewContainer(ctx, id, WithSpec(spec), withNewRootFS(id, image))
 	if err != nil {
 		t.Error(err)
 		return
@@ -351,6 +371,11 @@ func TestContainerPids(t *testing.T) {
 		statusC <- status
 	}()
 
+	if err := task.Start(ctx); err != nil {
+		t.Error(err)
+		return
+	}
+
 	pid := task.Pid()
 	if pid <= 0 {
 		t.Errorf("invalid task pid %d", pid)
@@ -383,29 +408,33 @@ func TestContainerCloseIO(t *testing.T) {
 	defer client.Close()
 
 	var (
+		image       Image
 		ctx, cancel = testContext()
 		id          = t.Name()
 	)
 	defer cancel()
 
-	image, err := client.GetImage(ctx, testImage)
+	if runtime.GOOS != "windows" {
+		image, err = client.GetImage(ctx, testImage)
+		if err != nil {
+			t.Error(err)
+			return
+		}
+	}
+	spec, err := generateSpec(withImageConfig(ctx, image), withCat())
 	if err != nil {
 		t.Error(err)
 		return
 	}
-	spec, err := GenerateSpec(WithImageConfig(ctx, image), WithProcessArgs("cat"))
-	if err != nil {
-		t.Error(err)
-		return
-	}
-	container, err := client.NewContainer(ctx, id, WithSpec(spec), WithNewRootFS(id, image))
+	container, err := client.NewContainer(ctx, id, WithSpec(spec), withNewRootFS(id, image))
 	if err != nil {
 		t.Error(err)
 		return
 	}
 	defer container.Delete(ctx, WithRootFSDeletion)
 
-	const expected = "hello\n"
+	const expected = "hello" + newLine
 	stdout := bytes.NewBuffer(nil)
 
 	r, w, err := os.Pipe()
@@ -451,12 +480,25 @@ func TestContainerCloseIO(t *testing.T) {
 
 	output := stdout.String()
 
+	if runtime.GOOS == "windows" {
+		// On windows we use more and it always adds an extra newline
+		// remove it here
+		output = strings.TrimSuffix(output, newLine)
+	}
+
 	if output != expected {
 		t.Errorf("expected output %q but received %q", expected, output)
 	}
 }
 
 func TestContainerAttach(t *testing.T) {
+	if runtime.GOOS == "windows" {
+		// On windows, closing the write side of the pipe closes the read
+		// side, sending an EOF to it and preventing reopening it.
+		// Hence this test will always fail on windows
+		t.Skip("invalid logic on windows")
+	}
+
 	client, err := newClient(t, address)
 	if err != nil {
 		t.Fatal(err)
@@ -464,29 +506,33 @@ func TestContainerAttach(t *testing.T) {
 	defer client.Close()
 
 	var (
+		image       Image
 		ctx, cancel = testContext()
 		id          = t.Name()
 	)
 	defer cancel()
 
-	image, err := client.GetImage(ctx, testImage)
+	if runtime.GOOS != "windows" {
+		image, err = client.GetImage(ctx, testImage)
+		if err != nil {
+			t.Error(err)
+			return
+		}
+	}
+	spec, err := generateSpec(withImageConfig(ctx, image), withCat())
 	if err != nil {
 		t.Error(err)
 		return
 	}
-	spec, err := GenerateSpec(WithImageConfig(ctx, image), WithProcessArgs("cat"))
-	if err != nil {
-		t.Error(err)
-		return
-	}
-	container, err := client.NewContainer(ctx, id, WithSpec(spec), WithNewRootFS(id, image))
+	container, err := client.NewContainer(ctx, id, WithSpec(spec), withNewRootFS(id, image))
 	if err != nil {
 		t.Error(err)
 		return
 	}
 	defer container.Delete(ctx, WithRootFSDeletion)
 
-	expected := "hello\n"
+	expected := "hello" + newLine
 	stdout := bytes.NewBuffer(nil)
 
 	r, w, err := os.Pipe()
@@ -586,22 +632,26 @@ func TestDeleteRunningContainer(t *testing.T) {
 	defer client.Close()
 
 	var (
+		image       Image
 		ctx, cancel = testContext()
 		id          = t.Name()
 	)
 	defer cancel()
 
-	image, err := client.GetImage(ctx, testImage)
+	if runtime.GOOS != "windows" {
+		image, err = client.GetImage(ctx, testImage)
+		if err != nil {
+			t.Error(err)
+			return
+		}
+	}
+	spec, err := generateSpec(withImageConfig(ctx, image), withProcessArgs("sleep", "100"))
 	if err != nil {
 		t.Error(err)
 		return
 	}
-	spec, err := GenerateSpec(WithImageConfig(ctx, image), WithProcessArgs("sleep", "100"))
-	if err != nil {
-		t.Error(err)
-		return
-	}
-	container, err := client.NewContainer(ctx, id, WithSpec(spec), WithImage(image), WithNewRootFS(id, image))
+	container, err := client.NewContainer(ctx, id, WithSpec(spec), withNewRootFS(id, image))
 	if err != nil {
 		t.Error(err)
 		return
@@ -651,22 +701,26 @@ func TestContainerKill(t *testing.T) {
 	defer client.Close()
 
 	var (
+		image       Image
 		ctx, cancel = testContext()
 		id          = t.Name()
 	)
 	defer cancel()
 
-	image, err := client.GetImage(ctx, testImage)
+	if runtime.GOOS != "windows" {
+		image, err = client.GetImage(ctx, testImage)
+		if err != nil {
+			t.Error(err)
+			return
+		}
+	}
+	spec, err := generateSpec(withImageConfig(ctx, image), withCat())
 	if err != nil {
 		t.Error(err)
 		return
 	}
-	spec, err := GenerateSpec(WithImageConfig(ctx, image), WithProcessArgs("sh", "-c", "cat"))
-	if err != nil {
-		t.Error(err)
-		return
-	}
-	container, err := client.NewContainer(ctx, id, WithSpec(spec), WithImage(image), WithNewRootFS(id, image))
+	container, err := client.NewContainer(ctx, id, WithSpec(spec), withNewRootFS(id, image))
 	if err != nil {
 		t.Error(err)
 		return
@@ -709,95 +763,6 @@ func TestContainerKill(t *testing.T) {
 	}
 }
 
-func TestContainerUpdate(t *testing.T) {
-	client, err := newClient(t, address)
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer client.Close()
-
-	var (
-		ctx, cancel = testContext()
-		id          = t.Name()
-	)
-	defer cancel()
-
-	image, err := client.GetImage(ctx, testImage)
-	if err != nil {
-		t.Error(err)
-		return
-	}
-	spec, err := GenerateSpec(WithImageConfig(ctx, image), WithProcessArgs("sleep", "30"))
-	if err != nil {
-		t.Error(err)
-		return
-	}
-	limit := int64(32 * 1024 * 1024)
-	spec.Linux.Resources.Memory = &specs.LinuxMemory{
-		Limit: &limit,
-	}
-	container, err := client.NewContainer(ctx, id, WithSpec(spec), WithNewRootFS(id, image))
-	if err != nil {
-		t.Error(err)
-		return
-	}
-	defer container.Delete(ctx, WithRootFSDeletion)
-
-	task, err := container.NewTask(ctx, empty())
-	if err != nil {
-		t.Error(err)
-		return
-	}
-	defer task.Delete(ctx)
-
-	statusC := make(chan uint32, 1)
-	go func() {
-		status, err := task.Wait(ctx)
-		if err != nil {
-			t.Error(err)
-		}
-		statusC <- status
-	}()
-
-	// check that the task has a limit of 32mb
-	cgroup, err := cgroups.Load(cgroups.V1, cgroups.PidPath(int(task.Pid())))
-	if err != nil {
-		t.Error(err)
-		return
-	}
-	stat, err := cgroup.Stat(cgroups.IgnoreNotExist)
-	if err != nil {
-		t.Error(err)
-		return
-	}
-	if int64(stat.Memory.Usage.Limit) != limit {
-		t.Errorf("expected memory limit to be set to %d but received %d", limit, stat.Memory.Usage.Limit)
-		return
-	}
-	limit = 64 * 1024 * 1024
-	if err := task.Update(ctx, WithResources(&specs.LinuxResources{
-		Memory: &specs.LinuxMemory{
-			Limit: &limit,
-		},
-	})); err != nil {
-		t.Error(err)
-	}
-	// check that the task has a limit of 64mb
-	if stat, err = cgroup.Stat(cgroups.IgnoreNotExist); err != nil {
-		t.Error(err)
-		return
-	}
-	if int64(stat.Memory.Usage.Limit) != limit {
-		t.Errorf("expected memory limit to be set to %d but received %d", limit, stat.Memory.Usage.Limit)
-	}
-	if err := task.Kill(ctx, syscall.SIGKILL); err != nil {
-		t.Error(err)
-		return
-	}
-	<-statusC
-}
-
 func TestContainerNoBinaryExists(t *testing.T) {
 	client, err := newClient(t, address)
 	if err != nil {
@@ -806,30 +771,47 @@ func TestContainerNoBinaryExists(t *testing.T) {
 	defer client.Close()
 
 	var (
+		image       Image
 		ctx, cancel = testContext()
 		id          = t.Name()
 	)
 	defer cancel()
 
-	image, err := client.GetImage(ctx, testImage)
+	if runtime.GOOS != "windows" {
+		image, err = client.GetImage(ctx, testImage)
+		if err != nil {
+			t.Error(err)
+			return
+		}
+	}
+	spec, err := generateSpec(withImageConfig(ctx, image), withProcessArgs("nothing"))
 	if err != nil {
 		t.Error(err)
 		return
 	}
-	spec, err := GenerateSpec(WithImageConfig(ctx, image), WithProcessArgs("nothing"))
-	if err != nil {
-		t.Error(err)
-		return
-	}
-	container, err := client.NewContainer(ctx, id, WithSpec(spec), WithNewRootFS(id, image))
+	container, err := client.NewContainer(ctx, id, WithSpec(spec), withNewRootFS(id, image))
 	if err != nil {
 		t.Error(err)
 		return
 	}
 	defer container.Delete(ctx, WithRootFSDeletion)
 
-	if _, err := container.NewTask(ctx, Stdio); err == nil {
-		t.Error("NewTask should return an error when binary does not exist")
+	task, err := container.NewTask(ctx, Stdio)
+	switch runtime.GOOS {
+	case "windows":
+		if err != nil {
+			t.Errorf("failed to create task %v", err)
+		}
+		if err := task.Start(ctx); err == nil {
+			t.Error("task.Start() should return an error when binary does not exist")
+			task.Delete(ctx)
+		}
+	default:
+		if err == nil {
+			t.Error("NewTask should return an error when binary does not exist")
+			task.Delete(ctx)
+		}
 	}
 }
@@ -841,22 +823,26 @@ func TestContainerExecNoBinaryExists(t *testing.T) {
 	defer client.Close()
 
 	var (
+		image       Image
 		ctx, cancel = testContext()
 		id          = t.Name()
 	)
 	defer cancel()
 
-	image, err := client.GetImage(ctx, testImage)
+	if runtime.GOOS != "windows" {
+		image, err = client.GetImage(ctx, testImage)
+		if err != nil {
+			t.Error(err)
+			return
+		}
+	}
+	spec, err := generateSpec(withImageConfig(ctx, image), withProcessArgs("sleep", "100"))
 	if err != nil {
 		t.Error(err)
 		return
 	}
-	spec, err := GenerateSpec(WithImageConfig(ctx, image), WithProcessArgs("sleep", "100"))
-	if err != nil {
-		t.Error(err)
-		return
-	}
-	container, err := client.NewContainer(ctx, id, WithSpec(spec), WithNewRootFS(id, image))
+	container, err := client.NewContainer(ctx, id, WithSpec(spec), withNewRootFS(id, image))
 	if err != nil {
 		t.Error(err)
 		return
@@ -870,6 +856,11 @@ func TestContainerExecNoBinaryExists(t *testing.T) {
 	}
 	defer task.Delete(ctx)
 
+	if err := task.Start(ctx); err != nil {
+		t.Error(err)
+		return
+	}
+
 	finished := make(chan struct{}, 1)
 	go func() {
 		if _, err := task.Wait(ctx); err != nil {


@@ -258,9 +258,11 @@ func checkBlobPath(t *testing.T, cs Store, dgst digest.Digest) string {
 		t.Fatalf("error stating blob path: %v", err)
 	}
 
-	// ensure that only read bits are set.
-	if ((fi.Mode() & os.ModePerm) & 0333) != 0 {
-		t.Fatalf("incorrect permissions: %v", fi.Mode())
+	if runtime.GOOS != "windows" {
+		// ensure that only read bits are set.
+		if ((fi.Mode() & os.ModePerm) & 0333) != 0 {
+			t.Fatalf("incorrect permissions: %v", fi.Mode())
+		}
 	}
 
 	return path


@@ -3,6 +3,7 @@ package content
 import (
 	"os"
 	"path/filepath"
+	"runtime"
 	"time"
 
 	"github.com/containerd/containerd/errdefs"
@@ -67,8 +68,12 @@ func (w *writer) Commit(size int64, expected digest.Digest) error {
 	// only allowing reads honoring the umask on creation.
 	//
 	// This removes write and exec, only allowing read per the creation umask.
-	if err := w.fp.Chmod((fi.Mode() & os.ModePerm) &^ 0333); err != nil {
-		return errors.Wrap(err, "failed to change ingest file permissions")
+	//
+	// NOTE: Windows does not support this operation
+	if runtime.GOOS != "windows" {
+		if err := w.fp.Chmod((fi.Mode() & os.ModePerm) &^ 0333); err != nil {
+			return errors.Wrap(err, "failed to change ingest file permissions")
+		}
 	}
 
 	if size > 0 && size != fi.Size() {


@@ -21,7 +21,7 @@ import "github.com/pkg/errors"
 // map very well to those defined by grpc.
 var (
 	ErrUnknown            = errors.New("unknown") // used internally to represent a missed mapping.
-	ErrInvalidArgument    = errors.New("invalid")
+	ErrInvalidArgument    = errors.New("invalid argument")
 	ErrNotFound           = errors.New("not found")
 	ErrAlreadyExists      = errors.New("already exists")
 	ErrFailedPrecondition = errors.New("failed precondition")


@@ -47,7 +47,9 @@ func (e *Emitter) Events(ctx context.Context, clientID string) chan *events.Envelope {
 			ns: ns,
 		}
 		e.sinks[clientID] = s
+		e.m.Unlock()
 		e.broadcaster.Add(s)
+		return s.ch
 	}
 	ch := e.sinks[clientID].ch
 	e.m.Unlock()
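
Previously the new-sink branch called broadcaster.Add while still holding the emitter mutex and then fell through to re-read the map; the fix releases the lock first and returns the fresh channel directly. A self-contained sketch of the corrected ordering with generic stand-in types (registry, add), not the PR's actual types:

package events

import "sync"

type registry struct {
	mu    sync.Mutex
	sinks map[string]chan int
	add   func(chan int) // stands in for broadcaster.Add, which may block
}

// register releases the mutex before the potentially blocking add, and
// returns the new channel instead of re-reading the map under the lock.
func (r *registry) register(id string) chan int {
	r.mu.Lock()
	if ch, ok := r.sinks[id]; ok {
		r.mu.Unlock()
		return ch
	}
	ch := make(chan int)
	r.sinks[id] = ch
	r.mu.Unlock() // unlock first...
	r.add(ch)     // ...then hand the sink to the broadcaster
	return ch
}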


@@ -13,7 +13,7 @@ type resourceUpdate struct {
 }
 
 func (u resourceUpdate) String() string {
-	return fmt.Sprintf("%s(mode: %o, uid: %s, gid: %s) -> %s(mode: %o, uid: %s, gid: %s)",
+	return fmt.Sprintf("%s(mode: %o, uid: %d, gid: %d) -> %s(mode: %o, uid: %d, gid: %d)",
 		u.Original.Path(), u.Original.Mode(), u.Original.UID(), u.Original.GID(),
 		u.Updated.Path(), u.Updated.Mode(), u.Updated.UID(), u.Updated.GID(),
 	)

helpers_unix_test.go Normal file

@@ -0,0 +1,51 @@
// +build !windows

package containerd

import (
	"context"
	"fmt"

	specs "github.com/opencontainers/runtime-spec/specs-go"
)

const newLine = "\n"

func generateSpec(opts ...SpecOpts) (*specs.Spec, error) {
	return GenerateSpec(opts...)
}

func withExitStatus(es int) SpecOpts {
	return func(s *specs.Spec) error {
		s.Process.Args = []string{"sh", "-c", fmt.Sprintf("exit %d", es)}
		return nil
	}
}

func withProcessArgs(args ...string) SpecOpts {
	return WithProcessArgs(args...)
}

func withCat() SpecOpts {
	return WithProcessArgs("cat")
}

func withTrue() SpecOpts {
	return WithProcessArgs("true")
}

func withExecExitStatus(s *specs.Process, es int) {
	s.Args = []string{"sh", "-c", fmt.Sprintf("exit %d", es)}
}

func withExecArgs(s *specs.Process, args ...string) {
	s.Args = args
}

func withImageConfig(ctx context.Context, i Image) SpecOpts {
	return WithImageConfig(ctx, i)
}

func withNewRootFS(id string, i Image) NewContainerOpts {
	return WithNewRootFS(id, i)
}
helpers_windows_test.go Normal file

@@ -0,0 +1,65 @@
// +build windows

package containerd

import (
	"context"
	"strconv"

	"github.com/containerd/containerd/containers"
	specs "github.com/opencontainers/runtime-spec/specs-go"
)

const newLine = "\r\n"

func generateSpec(opts ...SpecOpts) (*specs.Spec, error) {
	spec, err := GenerateSpec(opts...)
	if err != nil {
		return nil, err
	}
	spec.Windows.LayerFolders = dockerLayerFolders
	return spec, nil
}

func withExitStatus(es int) SpecOpts {
	return func(s *specs.Spec) error {
		s.Process.Args = []string{"powershell", "-noprofile", "exit", strconv.Itoa(es)}
		return nil
	}
}

func withProcessArgs(args ...string) SpecOpts {
	return WithProcessArgs(append([]string{"powershell", "-noprofile"}, args...)...)
}

func withCat() SpecOpts {
	return WithProcessArgs("cmd", "/c", "more")
}

func withTrue() SpecOpts {
	return WithProcessArgs("cmd", "/c")
}

func withExecExitStatus(s *specs.Process, es int) {
	s.Args = []string{"powershell", "-noprofile", "exit", strconv.Itoa(es)}
}

func withExecArgs(s *specs.Process, args ...string) {
	s.Args = append([]string{"powershell", "-noprofile"}, args...)
}

func withImageConfig(ctx context.Context, i Image) SpecOpts {
	// TODO: when windows has a snapshotter remove the withImageConfig helper
	return func(s *specs.Spec) error {
		return nil
	}
}

func withNewRootFS(id string, i Image) NewContainerOpts {
	// TODO: when windows has a snapshotter remove the withNewRootFS helper
	return func(ctx context.Context, client *Client, c *containers.Container) error {
		return nil
	}
}

io.go

@@ -4,9 +4,7 @@ import (
 	"context"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"os"
-	"path/filepath"
 	"sync"
 )
@@ -40,7 +38,7 @@ func (i *IO) Close() error {
 	return i.closer.Close()
 }
 
-type IOCreation func() (*IO, error)
+type IOCreation func(id string) (*IO, error)
 
 type IOAttach func(*FIFOSet) (*IO, error)
@@ -49,8 +47,8 @@ func NewIO(stdin io.Reader, stdout, stderr io.Writer) IOCreation {
 }
 
 func NewIOWithTerminal(stdin io.Reader, stdout, stderr io.Writer, terminal bool) IOCreation {
-	return func() (*IO, error) {
-		paths, err := NewFifos()
+	return func(id string) (*IO, error) {
+		paths, err := NewFifos(id)
 		if err != nil {
 			return nil, err
 		}
@@ -72,7 +70,6 @@ func NewIOWithTerminal(stdin io.Reader, stdout, stderr io.Writer, terminal bool)
 		i.closer = closer
 		return i, nil
 	}
-
 }
 
 func WithAttach(stdin io.Reader, stdout, stderr io.Writer) IOAttach {
@@ -102,31 +99,13 @@ func WithAttach(stdin io.Reader, stdout, stderr io.Writer) IOAttach {
 // Stdio returns an IO implementation to be used for a task
 // that outputs the container's IO as the current processes Stdio
-func Stdio() (*IO, error) {
-	return NewIO(os.Stdin, os.Stdout, os.Stderr)()
+func Stdio(id string) (*IO, error) {
+	return NewIO(os.Stdin, os.Stdout, os.Stderr)(id)
 }
 
 // StdioTerminal will setup the IO for the task to use a terminal
-func StdioTerminal() (*IO, error) {
-	return NewIOWithTerminal(os.Stdin, os.Stdout, os.Stderr, true)()
-}
-
-// NewFifos returns a new set of fifos for the task
-func NewFifos() (*FIFOSet, error) {
-	root := filepath.Join(os.TempDir(), "containerd")
-	if err := os.MkdirAll(root, 0700); err != nil {
-		return nil, err
-	}
-	dir, err := ioutil.TempDir(root, "")
-	if err != nil {
-		return nil, err
-	}
-	return &FIFOSet{
-		Dir: dir,
-		In:  filepath.Join(dir, "stdin"),
-		Out: filepath.Join(dir, "stdout"),
-		Err: filepath.Join(dir, "stderr"),
-	}, nil
+func StdioTerminal(id string) (*IO, error) {
+	return NewIOWithTerminal(os.Stdin, os.Stdout, os.Stderr, true)(id)
 }
 
 type FIFOSet struct {


@@ -5,12 +5,33 @@ package containerd
 import (
 	"context"
 	"io"
+	"io/ioutil"
+	"os"
+	"path/filepath"
 	"sync"
 	"syscall"
 
 	"github.com/containerd/fifo"
 )
 
+// NewFifos returns a new set of fifos for the task
+func NewFifos(id string) (*FIFOSet, error) {
+	root := filepath.Join(os.TempDir(), "containerd")
+	if err := os.MkdirAll(root, 0700); err != nil {
+		return nil, err
+	}
+	dir, err := ioutil.TempDir(root, "")
+	if err != nil {
+		return nil, err
+	}
+	return &FIFOSet{
+		Dir: dir,
+		In:  filepath.Join(dir, id+"-stdin"),
+		Out: filepath.Join(dir, id+"-stdout"),
+		Err: filepath.Join(dir, id+"-stderr"),
+	}, nil
+}
+
 func copyIO(fifos *FIFOSet, ioset *ioSet, tty bool) (_ *wgCloser, err error) {
 	var (
 		f io.ReadWriteCloser
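
With the id threaded through, the fifo names are now unique per task instead of bare stdin/stdout/stderr. A quick check of the naming scheme, assuming only the NewFifos shown above (the printed paths are examples; the middle directory component is random, from ioutil.TempDir):

package main

import (
	"fmt"
	"log"

	"github.com/containerd/containerd"
)

func main() {
	set, err := containerd.NewFifos("redis")
	if err != nil {
		log.Fatal(err)
	}
	// prints e.g. /tmp/containerd/055331445/redis-stdin ... and so on
	fmt.Println(set.In, set.Out, set.Err)
}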


@ -1,6 +1,7 @@
package containerd package containerd
import ( import (
"fmt"
"io" "io"
"net" "net"
"sync" "sync"
@ -10,8 +11,22 @@ import (
"github.com/pkg/errors" "github.com/pkg/errors"
) )
const pipeRoot = `\\.\pipe`
// NewFifos returns a new set of fifos for the task
func NewFifos(id string) (*FIFOSet, error) {
return &FIFOSet{
In: fmt.Sprintf(`%s\ctr-%s-stdin`, pipeRoot, id),
Out: fmt.Sprintf(`%s\ctr-%s-stdout`, pipeRoot, id),
Err: fmt.Sprintf(`%s\ctr-%s-stderr`, pipeRoot, id),
}, nil
}
func copyIO(fifos *FIFOSet, ioset *ioSet, tty bool) (_ *wgCloser, err error) { func copyIO(fifos *FIFOSet, ioset *ioSet, tty bool) (_ *wgCloser, err error) {
var wg sync.WaitGroup var (
wg sync.WaitGroup
set []io.Closer
)
if fifos.In != "" { if fifos.In != "" {
l, err := winio.ListenPipe(fifos.In, nil) l, err := winio.ListenPipe(fifos.In, nil)
@ -23,6 +38,7 @@ func copyIO(fifos *FIFOSet, ioset *ioSet, tty bool) (_ *wgCloser, err error) {
l.Close() l.Close()
} }
}(l) }(l)
set = append(set, l)
go func() { go func() {
c, err := l.Accept() c, err := l.Accept()
@ -46,6 +62,7 @@ func copyIO(fifos *FIFOSet, ioset *ioSet, tty bool) (_ *wgCloser, err error) {
l.Close() l.Close()
} }
}(l) }(l)
set = append(set, l)
wg.Add(1) wg.Add(1)
go func() { go func() {
@ -71,6 +88,7 @@ func copyIO(fifos *FIFOSet, ioset *ioSet, tty bool) (_ *wgCloser, err error) {
l.Close() l.Close()
} }
}(l) }(l)
set = append(set, l)
wg.Add(1) wg.Add(1)
go func() { go func() {
@ -89,5 +107,11 @@ func copyIO(fifos *FIFOSet, ioset *ioSet, tty bool) (_ *wgCloser, err error) {
return &wgCloser{ return &wgCloser{
wg: &wg, wg: &wg,
dir: fifos.Dir, dir: fifos.Dir,
set: set,
cancel: func() {
for _, l := range set {
l.Close()
}
},
}, nil }, nil
} }
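On Windows, NewFifos produces named-pipe paths instead of filesystem fifos; the pipe namespace is global, so the id is what keeps concurrent tasks from colliding. A standalone sketch of the naming scheme above:

package main

import "fmt"

const pipeRoot = `\\.\pipe`

// pipePaths mirrors the commit's Windows naming for the three IO pipes.
func pipePaths(id string) (in, out, errPath string) {
	return fmt.Sprintf(`%s\ctr-%s-stdin`, pipeRoot, id),
		fmt.Sprintf(`%s\ctr-%s-stdout`, pipeRoot, id),
		fmt.Sprintf(`%s\ctr-%s-stderr`, pipeRoot, id)
}

func main() {
	in, out, errPath := pipePaths("example")
	fmt.Println(in) // \\.\pipe\ctr-example-stdin
	fmt.Println(out, errPath)
}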

View File

@ -42,7 +42,7 @@ type Task interface {
Pids(context.Context) ([]uint32, error) Pids(context.Context) ([]uint32, error)
// Checkpoint checkpoints a container to an image with live system data // Checkpoint checkpoints a container to an image with live system data
Checkpoint(context.Context, string, *types.Any) error Checkpoint(context.Context, string, *types.Any) error
// DeleteProcess deletes a specific exec process via the pid // DeleteProcess deletes a specific exec process via its id
DeleteProcess(context.Context, string) (*Exit, error) DeleteProcess(context.Context, string) (*Exit, error)
// Update sets the provided resources to a running task // Update sets the provided resources to a running task
Update(context.Context, *types.Any) error Update(context.Context, *types.Any) error

17
runtime/typeurl.go Normal file
View File

@ -0,0 +1,17 @@
package runtime
import (
"strconv"
"github.com/containerd/containerd/typeurl"
specs "github.com/opencontainers/runtime-spec/specs-go"
)
func init() {
// register TypeUrls for commonly marshaled external types
major := strconv.Itoa(specs.VersionMajor)
typeurl.Register(&specs.Spec{}, "opencontainers/runtime-spec", major, "Spec")
typeurl.Register(&specs.Process{}, "opencontainers/runtime-spec", major, "Process")
typeurl.Register(&specs.LinuxResources{}, "opencontainers/runtime-spec", major, "LinuxResources")
typeurl.Register(&specs.WindowsResources{}, "opencontainers/runtime-spec", major, "WindowsResources")
}
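Once registered, these types resolve through the typeurl package; a hedged sketch of the lookup (the printed URL shape is inferred from the Register arguments):

package main

import (
	"fmt"
	"log"
	"strconv"

	"github.com/containerd/containerd/typeurl"
	specs "github.com/opencontainers/runtime-spec/specs-go"
)

func main() {
	major := strconv.Itoa(specs.VersionMajor)
	typeurl.Register(&specs.Spec{}, "opencontainers/runtime-spec", major, "Spec")

	u, err := typeurl.TypeURL(&specs.Spec{})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(u) // expected to resemble opencontainers/runtime-spec/1/Spec
}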

View File

@ -8,6 +8,13 @@ import (
"github.com/containerd/containerd/sys" "github.com/containerd/containerd/sys"
) )
const (
// DefaultAddress is the default unix socket address
DefaultAddress = "/run/containerd/containerd.sock"
// DefaultDebugAddress is the default unix socket address for pprof data
DefaultDebugAddress = "/run/containerd/debug.sock"
)
// apply sets config settings on the server process // apply sets config settings on the server process
func apply(ctx context.Context, config *Config) error { func apply(ctx context.Context, config *Config) error {
if config.Subreaper { if config.Subreaper {

View File

@ -1,9 +1,16 @@
// +build !linux // +build !linux,!windows
package server package server
import "context" import "context"
const (
// DefaultAddress is the default unix socket address
DefaultAddress = "/run/containerd/containerd.sock"
// DefaultDebugAddress is the default unix socket address for pprof data
DefaultDebugAddress = "/run/containerd/debug.sock"
)
func apply(_ context.Context, _ *Config) error { func apply(_ context.Context, _ *Config) error {
return nil return nil
} }

16
server/server_windows.go Normal file
View File

@ -0,0 +1,16 @@
// +build windows
package server
import "context"
const (
// DefaultAddress is the default winpipe address
DefaultAddress = `\\.\pipe\containerd-containerd`
// DefaultDebugAddress is the default winpipe address for pprof data
DefaultDebugAddress = `\\.\pipe\containerd-debug`
)
func apply(_ context.Context, _ *Config) error {
return nil
}
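A client on Windows would reach the daemon by dialing this pipe rather than a unix socket; a hypothetical sketch using go-winio's DialPipe (the same call the IO code elsewhere in this commit uses):

//+build windows

package main

import (
	"log"
	"time"

	winio "github.com/Microsoft/go-winio"
)

func main() {
	timeout := 3 * time.Second
	// DefaultAddress from server_windows.go above, spelled out here.
	conn, err := winio.DialPipe(`\\.\pipe\containerd-containerd`, &timeout)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	log.Println("connected to", conn.RemoteAddr())
}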

View File

@ -12,18 +12,23 @@ import (
specs "github.com/opencontainers/runtime-spec/specs-go" specs "github.com/opencontainers/runtime-spec/specs-go"
) )
const pipeRoot = `\\.\pipe`
func createDefaultSpec() (*specs.Spec, error) { func createDefaultSpec() (*specs.Spec, error) {
return &specs.Spec{ return &specs.Spec{
Version: specs.Version, Version: specs.Version,
Root: &specs.Root{}, Root: &specs.Root{},
Process: &specs.Process{ Process: &specs.Process{
Cwd: `C:\`,
ConsoleSize: &specs.Box{ ConsoleSize: &specs.Box{
Width: 80, Width: 80,
Height: 20, Height: 20,
}, },
}, },
Windows: &specs.Windows{
IgnoreFlushesDuringBoot: true,
Network: &specs.WindowsNetwork{
AllowUnqualifiedDNSQuery: true,
},
},
}, nil }, nil
} }

View File

@ -188,6 +188,7 @@ func (t *task) Wait(ctx context.Context) (uint32, error) {
// during cleanup // during cleanup
func (t *task) Delete(ctx context.Context) (uint32, error) { func (t *task) Delete(ctx context.Context) (uint32, error) {
if t.io != nil { if t.io != nil {
t.io.Cancel()
t.io.Wait() t.io.Wait()
t.io.Close() t.io.Close()
} }
@ -204,7 +205,7 @@ func (t *task) Exec(ctx context.Context, id string, spec *specs.Process, ioCreat
if id == "" { if id == "" {
return nil, errors.Wrapf(errdefs.ErrInvalidArgument, "exec id must not be empty") return nil, errors.Wrapf(errdefs.ErrInvalidArgument, "exec id must not be empty")
} }
i, err := ioCreate() i, err := ioCreate(id)
if err != nil { if err != nil {
return nil, err return nil, err
} }

View File

@ -2,15 +2,11 @@ package testutil
import ( import (
"flag" "flag"
"fmt"
"io/ioutil" "io/ioutil"
"os" "os"
"path/filepath" "path/filepath"
"strconv" "strconv"
"testing" "testing"
"github.com/containerd/containerd/mount"
"github.com/stretchr/testify/assert"
) )
var rootEnabled bool var rootEnabled bool
@ -19,36 +15,6 @@ func init() {
flag.BoolVar(&rootEnabled, "test.root", false, "enable tests that require root") flag.BoolVar(&rootEnabled, "test.root", false, "enable tests that require root")
} }
// Unmount unmounts a given mountPoint and sets t.Error if it fails
func Unmount(t *testing.T, mountPoint string) {
t.Log("unmount", mountPoint)
if err := mount.Unmount(mountPoint, 0); err != nil {
t.Error("Could not umount", mountPoint, err)
}
}
// RequiresRoot skips tests that require root, unless the test.root flag has
// been set
func RequiresRoot(t testing.TB) {
if !rootEnabled {
t.Skip("skipping test that requires root")
return
}
assert.Equal(t, 0, os.Getuid(), "This test must be run as root.")
}
// RequiresRootM is similar to RequiresRoot but intended to be called from *testing.M.
func RequiresRootM() {
if !rootEnabled {
fmt.Fprintln(os.Stderr, "skipping test that requires root")
os.Exit(0)
}
if os.Getuid() != 0 {
fmt.Fprintln(os.Stderr, "This test must be run as root.")
os.Exit(1)
}
}
// DumpDir will log out all of the contents of the provided directory to // DumpDir will log out all of the contents of the provided directory to
// testing logger. // testing logger.
// //

42
testutil/helpers_unix.go Normal file
View File

@ -0,0 +1,42 @@
// +build !windows
package testutil
import (
"fmt"
"os"
"testing"
"github.com/containerd/containerd/mount"
"github.com/stretchr/testify/assert"
)
// Unmount unmounts a given mountPoint and sets t.Error if it fails
func Unmount(t *testing.T, mountPoint string) {
t.Log("unmount", mountPoint)
if err := mount.Unmount(mountPoint, 0); err != nil {
t.Error("Could not umount", mountPoint, err)
}
}
// RequiresRoot skips tests that require root, unless the test.root flag has
// been set
func RequiresRoot(t testing.TB) {
if !rootEnabled {
t.Skip("skipping test that requires root")
return
}
assert.Equal(t, 0, os.Getuid(), "This test must be run as root.")
}
// RequiresRootM is similar to RequiresRoot but intended to be called from *testing.M.
func RequiresRootM() {
if !rootEnabled {
fmt.Fprintln(os.Stderr, "skipping test that requires root")
os.Exit(0)
}
if os.Getuid() != 0 {
fmt.Fprintln(os.Stderr, "This test must be run as root.")
os.Exit(1)
}
}

View File

@ -0,0 +1,16 @@
package testutil
import "testing"
// RequiresRoot does nothing on Windows
func RequiresRoot(t testing.TB) {
}
// RequiresRootM is similar to RequiresRoot but intended to be called from *testing.M.
func RequiresRootM() {
}
// Unmount unmounts a given mountPoint and sets t.Error if it fails
// Does nothing on Windows
func Unmount(t *testing.T, mountPoint string) {
}
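With the helpers split per platform, the same privileged test compiles everywhere and the gate is simply a no-op on Windows; a sketch of typical usage (test name and package are placeholders):

package mypkg_test

import (
	"testing"

	"github.com/containerd/containerd/testutil"
)

func TestPrivilegedThing(t *testing.T) {
	// Skipped unless `go test` is run with -test.root; on Unix it also
	// asserts the process is really uid 0, on Windows it does nothing.
	testutil.RequiresRoot(t)
	// ... privileged assertions would go here ...
}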

View File

@ -10,6 +10,7 @@ import (
"github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/errdefs"
"github.com/gogo/protobuf/proto" "github.com/gogo/protobuf/proto"
"github.com/gogo/protobuf/types" "github.com/gogo/protobuf/types"
"github.com/pkg/errors"
) )
const Prefix = "types.containerd.io" const Prefix = "types.containerd.io"
@ -39,7 +40,7 @@ func TypeURL(v interface{}) (string, error) {
// fallback to the proto registry if it is a proto message // fallback to the proto registry if it is a proto message
pb, ok := v.(proto.Message) pb, ok := v.(proto.Message)
if !ok { if !ok {
return "", errdefs.ErrNotFound return "", errors.Wrapf(errdefs.ErrNotFound, "type %s", reflect.TypeOf(v))
} }
return path.Join(Prefix, proto.MessageName(pb)), nil return path.Join(Prefix, proto.MessageName(pb)), nil
} }
@ -116,7 +117,7 @@ func getTypeByUrl(url string) (urlType, error) {
isProto: true, isProto: true,
}, nil }, nil
} }
return urlType{}, errdefs.ErrNotFound return urlType{}, errors.Wrapf(errdefs.ErrNotFound, "type with url %s", url)
} }
func tryDereference(v interface{}) reflect.Type { func tryDereference(v interface{}) reflect.Type {
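Wrapping with errors.Wrapf keeps the errdefs sentinel reachable, so callers can still test for not-found while the message now names the offending type; a sketch of the caller side (pkg/errors semantics, as used throughout containerd):

package main

import (
	"fmt"

	"github.com/containerd/containerd/errdefs"
	"github.com/pkg/errors"
)

func main() {
	err := errors.Wrapf(errdefs.ErrNotFound, "type %s", "mytype")
	fmt.Println(errors.Cause(err) == errdefs.ErrNotFound) // true
	fmt.Println(err) // the wrapped message still names the type
}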

View File

@ -33,7 +33,7 @@ github.com/containerd/continuity 86cec1535a968310e7532819f699ff2830ed7463
golang.org/x/sync 450f422ab23cf9881c94e2db30cac0eb1b7cf80c golang.org/x/sync 450f422ab23cf9881c94e2db30cac0eb1b7cf80c
github.com/BurntSushi/toml v0.2.0-21-g9906417 github.com/BurntSushi/toml v0.2.0-21-g9906417
github.com/grpc-ecosystem/go-grpc-prometheus 6b7015e65d366bf3f19b2b2a000a831940f0f7e0 github.com/grpc-ecosystem/go-grpc-prometheus 6b7015e65d366bf3f19b2b2a000a831940f0f7e0
github.com/Microsoft/go-winio v0.4.1 github.com/Microsoft/go-winio v0.4.3
github.com/boltdb/bolt e9cf4fae01b5a8ff89d0ec6b32f0d9c9f79aefdd github.com/boltdb/bolt e9cf4fae01b5a8ff89d0ec6b32f0d9c9f79aefdd
github.com/Microsoft/hcsshim v0.5.15 github.com/Microsoft/hcsshim v0.5.15
github.com/Azure/go-ansiterm fa152c58bc15761d0200cb75fe958b89a9d4888e github.com/Azure/go-ansiterm fa152c58bc15761d0200cb75fe958b89a9d4888e

View File

@ -69,6 +69,7 @@ func initIo() {
// win32File implements Reader, Writer, and Closer on a Win32 handle without blocking in a syscall. // win32File implements Reader, Writer, and Closer on a Win32 handle without blocking in a syscall.
// It takes ownership of this handle and will close it if it is garbage collected. // It takes ownership of this handle and will close it if it is garbage collected.
type win32File struct { type win32File struct {
sync.Mutex
handle syscall.Handle handle syscall.Handle
wg sync.WaitGroup wg sync.WaitGroup
closing bool closing bool
@ -105,17 +106,28 @@ func MakeOpenFile(h syscall.Handle) (io.ReadWriteCloser, error) {
return makeWin32File(h) return makeWin32File(h)
} }
func (f *win32File) isClosing() bool {
f.Lock()
closing := f.closing
f.Unlock()
return closing
}
// closeHandle closes the resources associated with a Win32 handle // closeHandle closes the resources associated with a Win32 handle
func (f *win32File) closeHandle() { func (f *win32File) closeHandle() {
f.Lock()
if !f.closing { if !f.closing {
// cancel all IO and wait for it to complete // cancel all IO and wait for it to complete
f.closing = true f.closing = true
f.Unlock()
cancelIoEx(f.handle, nil) cancelIoEx(f.handle, nil)
f.wg.Wait() f.wg.Wait()
// at this point, no new IO can start // at this point, no new IO can start
syscall.Close(f.handle) syscall.Close(f.handle)
f.handle = 0 f.handle = 0
return
} }
f.Unlock()
} }
// Close closes a win32File. // Close closes a win32File.
@ -127,10 +139,10 @@ func (f *win32File) Close() error {
// prepareIo prepares for a new IO operation. // prepareIo prepares for a new IO operation.
// The caller must call f.wg.Done() when the IO is finished, prior to Close() returning. // The caller must call f.wg.Done() when the IO is finished, prior to Close() returning.
func (f *win32File) prepareIo() (*ioOperation, error) { func (f *win32File) prepareIo() (*ioOperation, error) {
f.wg.Add(1) if f.isClosing() {
if f.closing {
return nil, ErrFileClosed return nil, ErrFileClosed
} }
f.wg.Add(1)
c := &ioOperation{} c := &ioOperation{}
c.ch = make(chan ioResult) c.ch = make(chan ioResult)
return c, nil return c, nil
@ -159,7 +171,7 @@ func (f *win32File) asyncIo(c *ioOperation, d *deadlineHandler, bytes uint32, er
return int(bytes), err return int(bytes), err
} }
if f.closing { if f.isClosing() {
cancelIoEx(f.handle, &c.o) cancelIoEx(f.handle, &c.o)
} }
@ -175,7 +187,7 @@ func (f *win32File) asyncIo(c *ioOperation, d *deadlineHandler, bytes uint32, er
case r = <-c.ch: case r = <-c.ch:
err = r.err err = r.err
if err == syscall.ERROR_OPERATION_ABORTED { if err == syscall.ERROR_OPERATION_ABORTED {
if f.closing { if f.isClosing() {
err = ErrFileClosed err = ErrFileClosed
} }
} }
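The go-winio v0.4.3 bump above is largely this race fix: the closing flag is now read under a mutex, and prepareIo refuses new IO before touching the WaitGroup, so closeHandle's wg.Wait cannot block on an operation that was never admitted. A simplified sketch of the pattern, not the vendored code itself:

package main

import (
	"errors"
	"fmt"
	"sync"
)

type file struct {
	sync.Mutex
	wg      sync.WaitGroup
	closing bool
}

func (f *file) isClosing() bool {
	f.Lock()
	defer f.Unlock()
	return f.closing
}

// prepareIo admits a new operation only while the file is open, mirroring
// the reordered check in the hunk above.
func (f *file) prepareIo() error {
	if f.isClosing() {
		return errors.New("file closed")
	}
	f.wg.Add(1)
	return nil
}

func main() {
	f := &file{}
	if err := f.prepareIo(); err == nil {
		f.wg.Done()
		fmt.Println("io admitted")
	}
}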

View File

@ -13,19 +13,12 @@ import (
) )
//sys connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) = ConnectNamedPipe //sys connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) = ConnectNamedPipe
//sys createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *securityAttributes) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] = CreateNamedPipeW //sys createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] = CreateNamedPipeW
//sys createFile(name string, access uint32, mode uint32, sa *securityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] = CreateFileW //sys createFile(name string, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] = CreateFileW
//sys waitNamedPipe(name string, timeout uint32) (err error) = WaitNamedPipeW //sys waitNamedPipe(name string, timeout uint32) (err error) = WaitNamedPipeW
//sys getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) = GetNamedPipeInfo //sys getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) = GetNamedPipeInfo
//sys getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW //sys getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW
//sys localAlloc(uFlags uint32, length uint32) (ptr uintptr) = LocalAlloc //sys localAlloc(uFlags uint32, length uint32) (ptr uintptr) = LocalAlloc
//sys copyMemory(dst uintptr, src uintptr, length uint32) = RtlCopyMemory
type securityAttributes struct {
Length uint32
SecurityDescriptor uintptr
InheritHandle uint32
}
const ( const (
cERROR_PIPE_BUSY = syscall.Errno(231) cERROR_PIPE_BUSY = syscall.Errno(231)
@ -233,13 +226,13 @@ func makeServerPipeHandle(path string, securityDescriptor []byte, c *PipeConfig,
mode |= cPIPE_TYPE_MESSAGE mode |= cPIPE_TYPE_MESSAGE
} }
sa := &securityAttributes{} sa := &syscall.SecurityAttributes{}
sa.Length = uint32(unsafe.Sizeof(*sa)) sa.Length = uint32(unsafe.Sizeof(*sa))
if securityDescriptor != nil { if securityDescriptor != nil {
len := uint32(len(securityDescriptor)) len := uint32(len(securityDescriptor))
sa.SecurityDescriptor = localAlloc(0, len) sa.SecurityDescriptor = localAlloc(0, len)
defer localFree(sa.SecurityDescriptor) defer localFree(sa.SecurityDescriptor)
copyMemory(sa.SecurityDescriptor, uintptr(unsafe.Pointer(&securityDescriptor[0])), len) copy((*[0xffff]byte)(unsafe.Pointer(sa.SecurityDescriptor))[:], securityDescriptor)
} }
h, err := createNamedPipe(path, flags, mode, cPIPE_UNLIMITED_INSTANCES, uint32(c.OutputBufferSize), uint32(c.InputBufferSize), 0, sa) h, err := createNamedPipe(path, flags, mode, cPIPE_UNLIMITED_INSTANCES, uint32(c.OutputBufferSize), uint32(c.InputBufferSize), 0, sa)
if err != nil { if err != nil {
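Dropping RtlCopyMemory in favor of plain copy works by viewing the raw allocation as a large byte array; a standalone sketch of the idiom (the 0xffff bound comes from the vendored code above):

package main

import (
	"fmt"
	"unsafe"
)

func main() {
	// Stand-in for memory obtained from LocalAlloc as a raw pointer.
	backing := make([]byte, 0xffff)
	p := unsafe.Pointer(&backing[0])

	src := []byte("security descriptor bytes")
	// Reinterpret the pointer as a big byte array and let Go's copy do
	// the bounds-checked move, as in the hunk above.
	copy((*[0xffff]byte)(p)[:len(src)], src)

	fmt.Println(string(backing[:len(src)]))
}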

View File

@ -53,7 +53,6 @@ var (
procGetNamedPipeInfo = modkernel32.NewProc("GetNamedPipeInfo") procGetNamedPipeInfo = modkernel32.NewProc("GetNamedPipeInfo")
procGetNamedPipeHandleStateW = modkernel32.NewProc("GetNamedPipeHandleStateW") procGetNamedPipeHandleStateW = modkernel32.NewProc("GetNamedPipeHandleStateW")
procLocalAlloc = modkernel32.NewProc("LocalAlloc") procLocalAlloc = modkernel32.NewProc("LocalAlloc")
procRtlCopyMemory = modkernel32.NewProc("RtlCopyMemory")
procLookupAccountNameW = modadvapi32.NewProc("LookupAccountNameW") procLookupAccountNameW = modadvapi32.NewProc("LookupAccountNameW")
procConvertSidToStringSidW = modadvapi32.NewProc("ConvertSidToStringSidW") procConvertSidToStringSidW = modadvapi32.NewProc("ConvertSidToStringSidW")
procConvertStringSecurityDescriptorToSecurityDescriptorW = modadvapi32.NewProc("ConvertStringSecurityDescriptorToSecurityDescriptorW") procConvertStringSecurityDescriptorToSecurityDescriptorW = modadvapi32.NewProc("ConvertStringSecurityDescriptorToSecurityDescriptorW")
@ -141,7 +140,7 @@ func connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) {
return return
} }
func createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *securityAttributes) (handle syscall.Handle, err error) { func createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) {
var _p0 *uint16 var _p0 *uint16
_p0, err = syscall.UTF16PtrFromString(name) _p0, err = syscall.UTF16PtrFromString(name)
if err != nil { if err != nil {
@ -150,7 +149,7 @@ func createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances ui
return _createNamedPipe(_p0, flags, pipeMode, maxInstances, outSize, inSize, defaultTimeout, sa) return _createNamedPipe(_p0, flags, pipeMode, maxInstances, outSize, inSize, defaultTimeout, sa)
} }
func _createNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *securityAttributes) (handle syscall.Handle, err error) { func _createNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) {
r0, _, e1 := syscall.Syscall9(procCreateNamedPipeW.Addr(), 8, uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(pipeMode), uintptr(maxInstances), uintptr(outSize), uintptr(inSize), uintptr(defaultTimeout), uintptr(unsafe.Pointer(sa)), 0) r0, _, e1 := syscall.Syscall9(procCreateNamedPipeW.Addr(), 8, uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(pipeMode), uintptr(maxInstances), uintptr(outSize), uintptr(inSize), uintptr(defaultTimeout), uintptr(unsafe.Pointer(sa)), 0)
handle = syscall.Handle(r0) handle = syscall.Handle(r0)
if handle == syscall.InvalidHandle { if handle == syscall.InvalidHandle {
@ -163,7 +162,7 @@ func _createNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances
return return
} }
func createFile(name string, access uint32, mode uint32, sa *securityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) { func createFile(name string, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) {
var _p0 *uint16 var _p0 *uint16
_p0, err = syscall.UTF16PtrFromString(name) _p0, err = syscall.UTF16PtrFromString(name)
if err != nil { if err != nil {
@ -172,7 +171,7 @@ func createFile(name string, access uint32, mode uint32, sa *securityAttributes,
return _createFile(_p0, access, mode, sa, createmode, attrs, templatefile) return _createFile(_p0, access, mode, sa, createmode, attrs, templatefile)
} }
func _createFile(name *uint16, access uint32, mode uint32, sa *securityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) { func _createFile(name *uint16, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) {
r0, _, e1 := syscall.Syscall9(procCreateFileW.Addr(), 7, uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile), 0, 0) r0, _, e1 := syscall.Syscall9(procCreateFileW.Addr(), 7, uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile), 0, 0)
handle = syscall.Handle(r0) handle = syscall.Handle(r0)
if handle == syscall.InvalidHandle { if handle == syscall.InvalidHandle {
@ -236,11 +235,6 @@ func localAlloc(uFlags uint32, length uint32) (ptr uintptr) {
return return
} }
func copyMemory(dst uintptr, src uintptr, length uint32) {
syscall.Syscall(procRtlCopyMemory.Addr(), 3, uintptr(dst), uintptr(src), uintptr(length))
return
}
func lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) { func lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) {
var _p0 *uint16 var _p0 *uint16
_p0, err = syscall.UTF16PtrFromString(accountName) _p0, err = syscall.UTF16PtrFromString(accountName)

View File

@ -0,0 +1,146 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: github.com/golang/protobuf/ptypes/duration/duration.proto
/*
Package duration is a generated protocol buffer package.
It is generated from these files:
github.com/golang/protobuf/ptypes/duration/duration.proto
It has these top-level messages:
Duration
*/
package duration
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
// A Duration represents a signed, fixed-length span of time represented
// as a count of seconds and fractions of seconds at nanosecond
// resolution. It is independent of any calendar and concepts like "day"
// or "month". It is related to Timestamp in that the difference between
// two Timestamp values is a Duration and it can be added or subtracted
// from a Timestamp. Range is approximately +-10,000 years.
//
// # Examples
//
// Example 1: Compute Duration from two Timestamps in pseudo code.
//
// Timestamp start = ...;
// Timestamp end = ...;
// Duration duration = ...;
//
// duration.seconds = end.seconds - start.seconds;
// duration.nanos = end.nanos - start.nanos;
//
// if (duration.seconds < 0 && duration.nanos > 0) {
// duration.seconds += 1;
// duration.nanos -= 1000000000;
// } else if (duration.seconds > 0 && duration.nanos < 0) {
// duration.seconds -= 1;
// duration.nanos += 1000000000;
// }
//
// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code.
//
// Timestamp start = ...;
// Duration duration = ...;
// Timestamp end = ...;
//
// end.seconds = start.seconds + duration.seconds;
// end.nanos = start.nanos + duration.nanos;
//
// if (end.nanos < 0) {
// end.seconds -= 1;
// end.nanos += 1000000000;
// } else if (end.nanos >= 1000000000) {
// end.seconds += 1;
// end.nanos -= 1000000000;
// }
//
// Example 3: Compute Duration from datetime.timedelta in Python.
//
// td = datetime.timedelta(days=3, minutes=10)
// duration = Duration()
// duration.FromTimedelta(td)
//
// # JSON Mapping
//
// In JSON format, the Duration type is encoded as a string rather than an
// object, where the string ends in the suffix "s" (indicating seconds) and
// is preceded by the number of seconds, with nanoseconds expressed as
// fractional seconds. For example, 3 seconds with 0 nanoseconds should be
// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should
// be expressed in JSON format as "3.000000001s", and 3 seconds and 1
// microsecond should be expressed in JSON format as "3.000001s".
//
//
type Duration struct {
// Signed seconds of the span of time. Must be from -315,576,000,000
// to +315,576,000,000 inclusive. Note: these bounds are computed from:
// 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years
Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"`
// Signed fractions of a second at nanosecond resolution of the span
// of time. Durations less than one second are represented with a 0
// `seconds` field and a positive or negative `nanos` field. For durations
// of one second or more, a non-zero value for the `nanos` field must be
// of the same sign as the `seconds` field. Must be from -999,999,999
// to +999,999,999 inclusive.
Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"`
}
func (m *Duration) Reset() { *m = Duration{} }
func (m *Duration) String() string { return proto.CompactTextString(m) }
func (*Duration) ProtoMessage() {}
func (*Duration) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
func (*Duration) XXX_WellKnownType() string { return "Duration" }
func (m *Duration) GetSeconds() int64 {
if m != nil {
return m.Seconds
}
return 0
}
func (m *Duration) GetNanos() int32 {
if m != nil {
return m.Nanos
}
return 0
}
func init() {
proto.RegisterType((*Duration)(nil), "google.protobuf.Duration")
}
func init() {
proto.RegisterFile("github.com/golang/protobuf/ptypes/duration/duration.proto", fileDescriptor0)
}
var fileDescriptor0 = []byte{
// 189 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xb2, 0x4c, 0xcf, 0x2c, 0xc9,
0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0xd7, 0x2f, 0x28,
0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x28, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x4f, 0x29,
0x2d, 0x4a, 0x2c, 0xc9, 0xcc, 0xcf, 0x83, 0x33, 0xf4, 0xc0, 0x2a, 0x84, 0xf8, 0xd3, 0xf3, 0xf3,
0xd3, 0x73, 0x52, 0xf5, 0x60, 0xea, 0x95, 0xac, 0xb8, 0x38, 0x5c, 0xa0, 0x4a, 0x84, 0x24, 0xb8,
0xd8, 0x8b, 0x53, 0x93, 0xf3, 0xf3, 0x52, 0x8a, 0x25, 0x18, 0x15, 0x18, 0x35, 0x98, 0x83, 0x60,
0x5c, 0x21, 0x11, 0x2e, 0xd6, 0xbc, 0xc4, 0xbc, 0xfc, 0x62, 0x09, 0x26, 0x05, 0x46, 0x0d, 0xd6,
0x20, 0x08, 0xc7, 0xa9, 0x86, 0x4b, 0x38, 0x39, 0x3f, 0x57, 0x0f, 0xcd, 0x48, 0x27, 0x5e, 0x98,
0x81, 0x01, 0x20, 0x91, 0x00, 0xc6, 0x28, 0x2d, 0xe2, 0xdd, 0xfb, 0x83, 0x91, 0x71, 0x11, 0x13,
0xb3, 0x7b, 0x80, 0xd3, 0x2a, 0x26, 0x39, 0x77, 0x88, 0xb9, 0x01, 0x50, 0xa5, 0x7a, 0xe1, 0xa9,
0x39, 0x39, 0xde, 0x79, 0xf9, 0xe5, 0x79, 0x21, 0x20, 0x2d, 0x49, 0x6c, 0x60, 0x33, 0x8c, 0x01,
0x01, 0x00, 0x00, 0xff, 0xff, 0x45, 0x5a, 0x81, 0x3d, 0x0e, 0x01, 0x00, 0x00,
}

View File

@ -0,0 +1,117 @@
// Protocol Buffers - Google's data interchange format
// Copyright 2008 Google Inc. All rights reserved.
// https://developers.google.com/protocol-buffers/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
syntax = "proto3";
package google.protobuf;
option csharp_namespace = "Google.Protobuf.WellKnownTypes";
option cc_enable_arenas = true;
option go_package = "github.com/golang/protobuf/ptypes/duration";
option java_package = "com.google.protobuf";
option java_outer_classname = "DurationProto";
option java_multiple_files = true;
option objc_class_prefix = "GPB";
// A Duration represents a signed, fixed-length span of time represented
// as a count of seconds and fractions of seconds at nanosecond
// resolution. It is independent of any calendar and concepts like "day"
// or "month". It is related to Timestamp in that the difference between
// two Timestamp values is a Duration and it can be added or subtracted
// from a Timestamp. Range is approximately +-10,000 years.
//
// # Examples
//
// Example 1: Compute Duration from two Timestamps in pseudo code.
//
// Timestamp start = ...;
// Timestamp end = ...;
// Duration duration = ...;
//
// duration.seconds = end.seconds - start.seconds;
// duration.nanos = end.nanos - start.nanos;
//
// if (duration.seconds < 0 && duration.nanos > 0) {
// duration.seconds += 1;
// duration.nanos -= 1000000000;
// } else if (duration.seconds > 0 && duration.nanos < 0) {
// duration.seconds -= 1;
// duration.nanos += 1000000000;
// }
//
// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code.
//
// Timestamp start = ...;
// Duration duration = ...;
// Timestamp end = ...;
//
// end.seconds = start.seconds + duration.seconds;
// end.nanos = start.nanos + duration.nanos;
//
// if (end.nanos < 0) {
// end.seconds -= 1;
// end.nanos += 1000000000;
// } else if (end.nanos >= 1000000000) {
// end.seconds += 1;
// end.nanos -= 1000000000;
// }
//
// Example 3: Compute Duration from datetime.timedelta in Python.
//
// td = datetime.timedelta(days=3, minutes=10)
// duration = Duration()
// duration.FromTimedelta(td)
//
// # JSON Mapping
//
// In JSON format, the Duration type is encoded as a string rather than an
// object, where the string ends in the suffix "s" (indicating seconds) and
// is preceded by the number of seconds, with nanoseconds expressed as
// fractional seconds. For example, 3 seconds with 0 nanoseconds should be
// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should
// be expressed in JSON format as "3.000000001s", and 3 seconds and 1
// microsecond should be expressed in JSON format as "3.000001s".
//
//
message Duration {
// Signed seconds of the span of time. Must be from -315,576,000,000
// to +315,576,000,000 inclusive. Note: these bounds are computed from:
// 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years
int64 seconds = 1;
// Signed fractions of a second at nanosecond resolution of the span
// of time. Durations less than one second are represented with a 0
// `seconds` field and a positive or negative `nanos` field. For durations
// of one second or more, a non-zero value for the `nanos` field must be
// of the same sign as the `seconds` field. Must be from -999,999,999
// to +999,999,999 inclusive.
int32 nanos = 2;
}
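This vendored message backs the duration field imported by the generated hcsshimopts code further below; in Go it round-trips to time.Duration via the standard ptypes helpers:

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/golang/protobuf/ptypes"
)

func main() {
	pb := ptypes.DurationProto(90 * time.Second) // seconds: 90, nanos: 0
	d, err := ptypes.Duration(pb)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(pb.Seconds, pb.Nanos, d) // 90 0 1m30s
}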

View File

@ -1,237 +0,0 @@
// +build windows
package windows
import (
"context"
"encoding/json"
"fmt"
"sync"
"github.com/containerd/containerd/log"
"github.com/containerd/containerd/runtime"
"github.com/containerd/containerd/windows/hcs"
"github.com/gogo/protobuf/types"
specs "github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
winsys "golang.org/x/sys/windows"
)
var ErrLoadedContainer = errors.New("loaded container can only be terminated")
func loadContainers(ctx context.Context, h *hcs.HCS) ([]*container, error) {
hCtr, err := h.LoadContainers(ctx)
if err != nil {
return nil, err
}
containers := make([]*container, 0)
for _, c := range hCtr {
containers = append(containers, &container{
ctr: c,
status: runtime.RunningStatus,
})
}
return containers, nil
}
func newContainer(ctx context.Context, h *hcs.HCS, id string, spec *RuntimeSpec, io runtime.IO) (*container, error) {
cio, err := hcs.NewIO(io.Stdin, io.Stdout, io.Stderr, io.Terminal)
if err != nil {
return nil, err
}
hcsCtr, err := h.CreateContainer(ctx, id, spec.OCISpec, spec.Configuration, cio)
if err != nil {
return nil, err
}
//sendEvent(id, events.RuntimeEvent_CREATE, hcsCtr.Pid(), 0, time.Time{})
return &container{
ctr: hcsCtr,
status: runtime.CreatedStatus,
}, nil
}
type container struct {
sync.Mutex
ctr *hcs.Container
status runtime.Status
}
func (c *container) ID() string {
return c.ctr.ID()
}
func (c *container) Info() runtime.TaskInfo {
return runtime.TaskInfo{
ID: c.ctr.ID(),
Runtime: runtimeName,
}
}
func (c *container) Start(ctx context.Context) error {
if c.ctr.Pid() == 0 {
return ErrLoadedContainer
}
err := c.ctr.Start(ctx)
if err != nil {
return err
}
c.setStatus(runtime.RunningStatus)
// c.sendEvent(c.ctr.ID(), events.RuntimeEvent_START, c.ctr.Pid(), 0, time.Time{})
// Wait for our process to terminate
go func() {
_, err := c.ctr.ExitCode()
if err != nil {
log.G(ctx).Debug(err)
}
c.setStatus(runtime.StoppedStatus)
// c.sendEvent(c.ctr.ID(), events.RuntimeEvent_EXIT, c.ctr.Pid(), ec, c.ctr.Processes()[0].ExitedAt())
}()
return nil
}
func (c *container) Pause(ctx context.Context) error {
if !c.ctr.GetConfiguration().UseHyperV {
return fmt.Errorf("Windows non-HyperV containers do not support pause")
}
return c.ctr.Pause()
}
func (c *container) Resume(ctx context.Context) error {
if !c.ctr.GetConfiguration().UseHyperV {
return fmt.Errorf("Windows non-HyperV containers do not support resume")
}
return c.ctr.Resume()
}
func (c *container) State(ctx context.Context) (runtime.State, error) {
return runtime.State{
Pid: c.Pid(),
Status: c.Status(),
}, nil
}
func (c *container) Kill(ctx context.Context, signal uint32, all bool) error {
if winsys.Signal(signal) == winsys.SIGKILL {
return c.ctr.Kill(ctx)
}
return c.ctr.Stop(ctx)
}
func (c *container) Process(ctx context.Context, id string) (runtime.Process, error) {
for _, p := range c.ctr.Processes() {
if p.ID() == id {
return &process{p}, nil
}
}
return nil, errors.Errorf("process %s not found", id)
}
func (c *container) Exec(ctx context.Context, id string, opts runtime.ExecOpts) (runtime.Process, error) {
if c.ctr.Pid() == 0 {
return nil, ErrLoadedContainer
}
pio, err := hcs.NewIO(opts.IO.Stdin, opts.IO.Stdout, opts.IO.Stderr, opts.IO.Terminal)
if err != nil {
return nil, err
}
var procSpec specs.Process
if err := json.Unmarshal(opts.Spec.Value, &procSpec); err != nil {
return nil, errors.Wrap(err, "failed to unmarshal oci spec")
}
p, err := c.ctr.AddProcess(ctx, id, &procSpec, pio)
if err != nil {
return nil, err
}
go func() {
_, err := p.ExitCode()
if err != nil {
log.G(ctx).Debug(err)
}
//c.sendEvent(c.ctr.ID(), events.RuntimeEvent_EXEC_ADDED, p.Pid(), ec, p.ExitedAt())
}()
return &process{p}, nil
}
func (c *container) CloseIO(ctx context.Context) error {
return c.ctr.CloseIO(ctx)
}
func (c *container) ResizePty(ctx context.Context, size runtime.ConsoleSize) error {
return c.ctr.ResizePty(ctx, size)
}
func (c *container) Status() runtime.Status {
return c.getStatus()
}
func (c *container) Pid() uint32 {
return c.ctr.Pid()
}
func (c *container) Pids(ctx context.Context) ([]uint32, error) {
pl, err := c.ctr.ProcessList()
if err != nil {
return nil, err
}
pids := make([]uint32, 0, len(pl))
for _, p := range pl {
pids = append(pids, p.ProcessId)
}
return pids, nil
}
func (c *container) Checkpoint(ctx context.Context, _ string, _ *types.Any) error {
return fmt.Errorf("Windows containers do not support checkpoint")
}
func (c *container) DeleteProcess(ctx context.Context, id string) (*runtime.Exit, error) {
var process *hcs.Process
for _, p := range c.ctr.Processes() {
if p.ID() == id {
process = p
break
}
}
if process == nil {
return nil, fmt.Errorf("process %s not found", id)
}
ec, err := process.ExitCode()
if err != nil {
return nil, err
}
process.Delete()
return &runtime.Exit{
Status: ec,
Timestamp: process.ExitedAt(),
}, nil
}
func (c *container) Update(ctx context.Context, spec *types.Any) error {
return fmt.Errorf("Windows containers do not support update")
}
func (c *container) setStatus(status runtime.Status) {
c.Lock()
c.status = status
c.Unlock()
}
func (c *container) getStatus() runtime.Status {
c.Lock()
defer c.Unlock()
return c.status
}

View File

@ -1,572 +0,0 @@
// +build windows
package hcs
import (
"context"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"strings"
"sync"
"time"
"github.com/Microsoft/hcsshim"
"github.com/Sirupsen/logrus"
"github.com/containerd/containerd/log"
"github.com/containerd/containerd/runtime"
"github.com/containerd/containerd/windows/pid"
"github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
)
const (
layerFile = "layer"
defaultTerminateTimeout = 5 * time.Minute
)
func (s *HCS) LoadContainers(ctx context.Context) ([]*Container, error) {
ctrProps, err := hcsshim.GetContainers(hcsshim.ComputeSystemQuery{})
if err != nil {
return nil, errors.Wrap(err, "failed to retrieve running containers")
}
containers := make([]*Container, 0)
for _, p := range ctrProps {
select {
case <-ctx.Done():
return nil, ctx.Err()
default:
}
if p.Owner != s.owner || p.SystemType != "Container" {
continue
}
container, err := hcsshim.OpenContainer(p.ID)
if err != nil {
return nil, errors.Wrapf(err, "failed open container %s", p.ID)
}
stateDir := filepath.Join(s.stateDir, p.ID)
b, err := ioutil.ReadFile(filepath.Join(stateDir, layerFile))
containers = append(containers, &Container{
id: p.ID,
Container: container,
stateDir: stateDir,
hcs: s,
io: &IO{},
layerFolderPath: string(b),
conf: Configuration{
TerminateDuration: defaultTerminateTimeout,
},
})
}
return containers, nil
}
func New(owner, rootDir string) *HCS {
return &HCS{
stateDir: rootDir,
owner: owner,
pidPool: pid.NewPool(),
}
}
type HCS struct {
stateDir string
owner string
pidPool *pid.Pool
}
func (s *HCS) CreateContainer(ctx context.Context, id string, spec specs.Spec, conf Configuration, io *IO) (c *Container, err error) {
pid, err := s.pidPool.Get()
if err != nil {
return nil, err
}
defer func() {
if err != nil {
s.pidPool.Put(pid)
}
}()
stateDir := filepath.Join(s.stateDir, id)
if err := os.MkdirAll(stateDir, 0755); err != nil {
return nil, errors.Wrapf(err, "unable to create container state dir %s", stateDir)
}
defer func() {
if err != nil {
os.RemoveAll(stateDir)
}
}()
if conf.TerminateDuration == 0 {
conf.TerminateDuration = defaultTerminateTimeout
}
ctrConf, err := newContainerConfig(s.owner, id, spec, conf)
if err != nil {
return nil, err
}
layerPathFile := filepath.Join(stateDir, layerFile)
if err := ioutil.WriteFile(layerPathFile, []byte(ctrConf.LayerFolderPath), 0644); err != nil {
log.G(ctx).WithError(err).Warnf("failed to save active layer %s", ctrConf.LayerFolderPath)
}
ctr, err := hcsshim.CreateContainer(id, ctrConf)
if err != nil {
removeLayer(ctx, ctrConf.LayerFolderPath)
return nil, errors.Wrapf(err, "failed to create container %s", id)
}
err = ctr.Start()
if err != nil {
ctr.Terminate()
removeLayer(ctx, ctrConf.LayerFolderPath)
return nil, errors.Wrapf(err, "failed to start container %s", id)
}
return &Container{
Container: ctr,
id: id,
pid: pid,
spec: spec,
conf: conf,
stateDir: stateDir,
io: io,
hcs: s,
layerFolderPath: ctrConf.LayerFolderPath,
processes: make([]*Process, 0),
}, nil
}
type Container struct {
sync.Mutex
hcsshim.Container
id string
stateDir string
pid uint32
spec specs.Spec
conf Configuration
io *IO
hcs *HCS
layerFolderPath string
processes []*Process
}
func (c *Container) ID() string {
return c.id
}
func (c *Container) Pid() uint32 {
return c.pid
}
func (c *Container) Processes() []*Process {
return c.processes
}
func (c *Container) Start(ctx context.Context) error {
_, err := c.addProcess(ctx, c.id, c.spec.Process, c.io)
return err
}
func (c *Container) getDeathErr(err error) error {
switch {
case hcsshim.IsPending(err):
err = c.WaitTimeout(c.conf.TerminateDuration)
case hcsshim.IsAlreadyStopped(err):
err = nil
}
return err
}
func (c *Container) Kill(ctx context.Context) error {
return c.getDeathErr(c.Terminate())
}
func (c *Container) Stop(ctx context.Context) error {
err := c.getDeathErr(c.Shutdown())
if err != nil {
log.G(ctx).WithError(err).Debugf("failed to shutdown container %s, calling terminate", c.id)
return c.getDeathErr(c.Terminate())
}
return nil
}
func (c *Container) CloseIO(ctx context.Context) error {
var proc *Process
c.Lock()
for _, p := range c.processes {
if p.id == c.id {
proc = p
break
}
}
c.Unlock()
if proc == nil {
return errors.Errorf("no such process %s", c.id)
}
return proc.CloseStdin()
}
func (c *Container) ResizePty(ctx context.Context, size runtime.ConsoleSize) error {
var proc *Process
c.Lock()
for _, p := range c.processes {
if p.id == c.id {
proc = p
break
}
}
c.Unlock()
if proc == nil {
return errors.Errorf("no such process %s", c.id)
}
return proc.ResizeConsole(uint16(size.Width), uint16(size.Height))
}
func (c *Container) Delete(ctx context.Context) {
defer func() {
if err := c.Stop(ctx); err != nil {
log.G(ctx).WithError(err).WithField("id", c.id).
Errorf("failed to shutdown/terminate container")
}
c.Lock()
for _, p := range c.processes {
if err := p.Delete(); err != nil {
log.G(ctx).WithError(err).WithFields(logrus.Fields{"pid": p.Pid(), "id": c.id}).
Errorf("failed to clean process resources")
}
}
c.Unlock()
if err := c.Close(); err != nil {
log.G(ctx).WithError(err).WithField("id", c.id).Errorf("failed to clean container resources")
}
c.io.Close()
// Cleanup folder layer
if err := removeLayer(ctx, c.layerFolderPath); err == nil {
os.RemoveAll(c.stateDir)
}
}()
if update, err := c.HasPendingUpdates(); err != nil || !update {
return
}
serviceCtr, err := c.hcs.CreateContainer(ctx, c.id+"_servicing", c.spec, c.conf, &IO{})
if err != nil {
log.G(ctx).WithError(err).WithField("id", c.id).Warn("could not create servicing container")
return
}
defer serviceCtr.Close()
err = serviceCtr.Start(ctx)
if err != nil {
log.G(ctx).WithError(err).WithField("id", c.id).Warn("failed to start servicing container")
serviceCtr.Terminate()
return
}
err = serviceCtr.processes[0].Wait()
if err == nil {
_, err = serviceCtr.processes[0].ExitCode()
log.G(ctx).WithError(err).WithField("id", c.id).Errorf("failed to retrieve servicing container exit code")
}
if err != nil {
if err := serviceCtr.Terminate(); err != nil {
log.G(ctx).WithError(err).WithField("id", c.id).Errorf("failed to terminate servicing container")
}
}
}
func (c *Container) ExitCode() (uint32, error) {
if len(c.processes) == 0 {
return 255, errors.New("container not started")
}
return c.processes[0].ExitCode()
}
func (c *Container) GetConfiguration() Configuration {
return c.conf
}
func (c *Container) AddProcess(ctx context.Context, id string, spec *specs.Process, io *IO) (*Process, error) {
if len(c.processes) == 0 {
return nil, errors.New("container not started")
}
return c.addProcess(ctx, id, spec, io)
}
func (c *Container) addProcess(ctx context.Context, id string, spec *specs.Process, pio *IO) (*Process, error) {
// If we don't have a process yet, reuse the container pid
var pid uint32
if len(c.processes) == 0 {
pid = c.pid
} else {
pid, err := c.hcs.pidPool.Get()
if err != nil {
return nil, err
}
defer func() {
if err != nil {
c.hcs.pidPool.Put(pid)
}
}()
}
conf := hcsshim.ProcessConfig{
EmulateConsole: pio.terminal,
CreateStdInPipe: pio.stdin != nil,
CreateStdOutPipe: pio.stdout != nil,
CreateStdErrPipe: pio.stderr != nil,
User: spec.User.Username,
CommandLine: strings.Join(spec.Args, " "),
Environment: ociSpecEnvToHCSEnv(spec.Env),
WorkingDirectory: spec.Cwd,
ConsoleSize: [2]uint{spec.ConsoleSize.Height, spec.ConsoleSize.Width},
}
if conf.WorkingDirectory == "" {
conf.WorkingDirectory = c.spec.Process.Cwd
}
proc, err := c.CreateProcess(&conf)
if err != nil {
return nil, errors.Wrapf(err, "failed to create process")
}
stdin, stdout, stderr, err := proc.Stdio()
if err != nil {
proc.Kill()
return nil, errors.Wrapf(err, "failed to retrieve process stdio")
}
if pio.stdin != nil {
go func() {
log.G(ctx).WithFields(logrus.Fields{"id": c.id, "pid": pid}).Debug("stdin: copy started")
io.Copy(stdin, pio.stdin)
log.G(ctx).WithFields(logrus.Fields{"id": c.id, "pid": pid}).Debug("stdin: copy done")
stdin.Close()
pio.stdin.Close()
}()
} else {
proc.CloseStdin()
}
if pio.stdout != nil {
go func() {
log.G(ctx).WithFields(logrus.Fields{"id": c.id, "pid": pid}).Debug("stdout: copy started")
io.Copy(pio.stdout, stdout)
log.G(ctx).WithFields(logrus.Fields{"id": c.id, "pid": pid}).Debug("stdout: copy done")
stdout.Close()
pio.stdout.Close()
}()
}
if pio.stderr != nil {
go func() {
log.G(ctx).WithFields(logrus.Fields{"id": c.id, "pid": pid}).Debug("stderr: copy started")
io.Copy(pio.stderr, stderr)
log.G(ctx).WithFields(logrus.Fields{"id": c.id, "pid": pid}).Debug("stderr: copy done")
stderr.Close()
pio.stderr.Close()
}()
}
p := &Process{
id: id,
Process: proc,
pid: pid,
io: pio,
ecSync: make(chan struct{}),
}
c.Lock()
c.processes = append(c.processes, p)
idx := len(c.processes) - 1
c.Unlock()
go func() {
p.ec, p.ecErr = processExitCode(c.ID(), p)
close(p.ecSync)
c.Lock()
p.Delete()
// Remove process from slice (but keep the init one around)
if idx > 0 {
c.processes[idx] = c.processes[len(c.processes)-1]
c.processes[len(c.processes)-1] = nil
c.processes = c.processes[:len(c.processes)-1]
}
c.Unlock()
}()
return p, nil
}
// newContainerConfig generates a hcsshim configuration from the instance
// OCI Spec and hcs.Configuration.
func newContainerConfig(owner, id string, spec specs.Spec, conf Configuration) (*hcsshim.ContainerConfig, error) {
configuration := &hcsshim.ContainerConfig{
SystemType: "Container",
Name: id,
Owner: owner,
HostName: spec.Hostname,
IgnoreFlushesDuringBoot: conf.IgnoreFlushesDuringBoot,
HvPartition: conf.UseHyperV,
AllowUnqualifiedDNSQuery: conf.AllowUnqualifiedDNSQuery,
EndpointList: conf.NetworkEndpoints,
NetworkSharedContainerName: conf.NetworkSharedContainerID,
Credentials: conf.Credentials,
}
// TODO: use the create request Mount for those
for _, layerPath := range conf.Layers {
_, filename := filepath.Split(layerPath)
guid, err := hcsshim.NameToGuid(filename)
if err != nil {
return nil, err
}
configuration.Layers = append(configuration.Layers, hcsshim.Layer{
ID: guid.ToString(),
Path: layerPath,
})
}
if len(spec.Mounts) > 0 {
mds := make([]hcsshim.MappedDir, len(spec.Mounts))
for i, mount := range spec.Mounts {
mds[i] = hcsshim.MappedDir{
HostPath: mount.Source,
ContainerPath: mount.Destination,
ReadOnly: false,
}
for _, o := range mount.Options {
if strings.ToLower(o) == "ro" {
mds[i].ReadOnly = true
}
}
}
configuration.MappedDirectories = mds
}
if conf.DNSSearchList != nil {
configuration.DNSSearchList = strings.Join(conf.DNSSearchList, ",")
}
if configuration.HvPartition {
for _, layerPath := range conf.Layers {
utilityVMPath := filepath.Join(layerPath, "UtilityVM")
_, err := os.Stat(utilityVMPath)
if err == nil {
configuration.HvRuntime = &hcsshim.HvRuntime{ImagePath: utilityVMPath}
break
} else if !os.IsNotExist(err) {
return nil, errors.Wrapf(err, "failed to access layer %s", layerPath)
}
}
}
if len(configuration.Layers) == 0 {
// TODO: support starting with 0 layers, this means we need the "filter" directory as a parameter
return nil, errors.New("at least one layers must be provided")
}
di := hcsshim.DriverInfo{
Flavour: 1, // filter driver
}
if len(configuration.Layers) > 0 {
di.HomeDir = filepath.Dir(conf.Layers[0])
}
// Windows doesn't support creating a container with a readonly
// filesystem, so always create a RW one
if err := hcsshim.CreateSandboxLayer(di, id, conf.Layers[0], conf.Layers); err != nil {
return nil, errors.Wrapf(err, "failed to create sandbox layer for %s: layers: %#v, driverInfo: %#v",
id, configuration.Layers, di)
}
configuration.LayerFolderPath = filepath.Join(di.HomeDir, id)
err := hcsshim.ActivateLayer(di, id)
if err != nil {
removeLayer(context.TODO(), configuration.LayerFolderPath)
return nil, errors.Wrapf(err, "failed to active layer %s", configuration.LayerFolderPath)
}
err = hcsshim.PrepareLayer(di, id, conf.Layers)
if err != nil {
removeLayer(context.TODO(), configuration.LayerFolderPath)
return nil, errors.Wrapf(err, "failed to prepare layer %s", configuration.LayerFolderPath)
}
volumePath, err := hcsshim.GetLayerMountPath(di, id)
if err != nil {
if err := hcsshim.DestroyLayer(di, id); err != nil {
log.L.Warnf("failed to DestroyLayer %s: %s", id, err)
}
return nil, errors.Wrapf(err, "failed to getmount path for layer %s: driverInfo: %#v", id, di)
}
configuration.VolumePath = volumePath
return configuration, nil
}
// removeLayer deletes the given layer, all associated containers must have
// been shutdown for this to succeed.
func removeLayer(ctx context.Context, path string) error {
layerID := filepath.Base(path)
parentPath := filepath.Dir(path)
di := hcsshim.DriverInfo{
Flavour: 1, // filter driver
HomeDir: parentPath,
}
err := hcsshim.UnprepareLayer(di, layerID)
if err != nil {
log.G(ctx).WithError(err).Warnf("failed to unprepare layer %s for removal", path)
}
err = hcsshim.DeactivateLayer(di, layerID)
if err != nil {
log.G(ctx).WithError(err).Warnf("failed to deactivate layer %s for removal", path)
}
removePath := filepath.Join(parentPath, fmt.Sprintf("%s-removing", layerID))
err = os.Rename(path, removePath)
if err != nil {
log.G(ctx).WithError(err).Warnf("failed to rename container layer %s for removal", path)
removePath = path
}
if err := hcsshim.DestroyLayer(di, removePath); err != nil {
log.G(ctx).WithError(err).Errorf("failed to remove container layer %s", removePath)
return err
}
return nil
}
// ociSpecEnvToHCSEnv converts from the OCI Spec ENV format to the one
// expected by HCS.
func ociSpecEnvToHCSEnv(a []string) map[string]string {
env := make(map[string]string)
for _, s := range a {
arr := strings.SplitN(s, "=", 2)
if len(arr) == 2 {
env[arr[0]] = arr[1]
}
}
return env
}

View File

@ -1,76 +0,0 @@
// +build windows
package hcs
import (
"syscall"
"time"
"github.com/Microsoft/hcsshim"
"github.com/containerd/containerd/runtime"
"github.com/pkg/errors"
)
type Process struct {
hcsshim.Process
id string
pid uint32
io *IO
ec uint32
exitedAt time.Time
ecErr error
ecSync chan struct{}
}
func (p *Process) ID() string {
return p.id
}
func (p *Process) Pid() uint32 {
return p.pid
}
func (p *Process) ExitCode() (uint32, error) {
<-p.ecSync
return p.ec, p.ecErr
}
func (p *Process) ExitedAt() time.Time {
return p.exitedAt
}
func (p *Process) Status() runtime.Status {
select {
case <-p.ecSync:
return runtime.StoppedStatus
default:
}
return runtime.RunningStatus
}
func (p *Process) Delete() error {
p.io.Close()
return p.Close()
}
func processExitCode(containerID string, p *Process) (uint32, error) {
if err := p.Wait(); err != nil {
if herr, ok := err.(*hcsshim.ProcessError); ok && herr.Err != syscall.ERROR_BROKEN_PIPE {
return 255, errors.Wrapf(err, "failed to wait for container '%s' process %u", containerID, p.pid)
}
// process is probably dead, let's try to get its exit code
}
ec, err := p.Process.ExitCode()
if err != nil {
if herr, ok := err.(*hcsshim.ProcessError); ok && herr.Err != syscall.ERROR_BROKEN_PIPE {
return 255, errors.Wrapf(err, "failed to get container '%s' process %d exit code", containerID, p.pid)
}
// Well, unknown exit code it is
ec = 255
}
p.exitedAt = time.Now()
return uint32(ec), err
}

View File

@ -1,76 +0,0 @@
// +build windows
package hcs
import (
"net"
"time"
"github.com/Microsoft/go-winio"
"github.com/pkg/errors"
)
type IO struct {
stdin net.Conn
stdout net.Conn
stderr net.Conn
terminal bool
}
// NewIO connects to the provided pipe addresses
func NewIO(stdin, stdout, stderr string, terminal bool) (*IO, error) {
var (
c net.Conn
err error
io IO
)
defer func() {
if err != nil {
io.Close()
}
}()
for _, p := range []struct {
name string
open bool
conn *net.Conn
}{
{
name: stdin,
open: stdin != "",
conn: &io.stdin,
},
{
name: stdout,
open: stdout != "",
conn: &io.stdout,
},
{
name: stderr,
open: !terminal && stderr != "",
conn: &io.stderr,
},
} {
if p.open {
dialTimeout := 3 * time.Second
c, err = winio.DialPipe(p.name, &dialTimeout)
if err != nil {
return nil, errors.Wrapf(err, "failed to connect to %s", p.name)
}
*p.conn = c
}
}
return &io, nil
}
// Close terminates all successfully dialed IO connections
func (i *IO) Close() {
for _, cn := range []net.Conn{i.stdin, i.stdout, i.stderr} {
if cn != nil {
cn.Close()
cn = nil
}
}
}

View File

@ -1,22 +0,0 @@
// +build windows
package hcs
import "time"
type Configuration struct {
UseHyperV bool `json:"useHyperV,omitempty"`
Layers []string `json:"layers"`
TerminateDuration time.Duration `json:"terminateDuration,omitempty"`
IgnoreFlushesDuringBoot bool `json:"ignoreFlushesDuringBoot,omitempty"`
AllowUnqualifiedDNSQuery bool `json:"allowUnqualifiedDNSQuery,omitempty"`
DNSSearchList []string `json:"dnsSearchList,omitempty"`
NetworkEndpoints []string `json:"networkEndpoints,omitempty"`
NetworkSharedContainerID string
Credentials string `json:"credentials,omitempty"`
}

190
windows/hcsshim.go Normal file
View File

@ -0,0 +1,190 @@
//+build windows
package windows
import (
"context"
"fmt"
"os"
"path/filepath"
"strings"
"github.com/Microsoft/hcsshim"
"github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/log"
specs "github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
)
// newContainerConfig generates a hcsshim container configuration from the
// provided OCI Spec
func newContainerConfig(ctx context.Context, owner, id string, spec *specs.Spec) (*hcsshim.ContainerConfig, error) {
if len(spec.Windows.LayerFolders) == 0 {
return nil, errors.Wrap(errdefs.ErrInvalidArgument,
"spec.Windows.LayerFolders cannot be empty")
}
var (
layerFolders = spec.Windows.LayerFolders
conf = &hcsshim.ContainerConfig{
SystemType: "Container",
Name: id,
Owner: owner,
HostName: spec.Hostname,
IgnoreFlushesDuringBoot: spec.Windows.IgnoreFlushesDuringBoot,
AllowUnqualifiedDNSQuery: spec.Windows.Network.AllowUnqualifiedDNSQuery,
EndpointList: spec.Windows.Network.EndpointList,
NetworkSharedContainerName: spec.Windows.Network.NetworkSharedContainerName,
}
)
if spec.Windows.CredentialSpec != nil {
conf.Credentials = spec.Windows.CredentialSpec.(string)
}
// TODO: use the create request Mount for those
for _, layerPath := range layerFolders {
_, filename := filepath.Split(layerPath)
guid, err := hcsshim.NameToGuid(filename)
if err != nil {
return nil, errors.Wrapf(err, "unable to get GUID for %s", filename)
}
conf.Layers = append(conf.Layers, hcsshim.Layer{
ID: guid.ToString(),
Path: layerPath,
})
}
if len(spec.Mounts) > 0 {
mds := make([]hcsshim.MappedDir, len(spec.Mounts))
for i, mount := range spec.Mounts {
mds[i] = hcsshim.MappedDir{
HostPath: mount.Source,
ContainerPath: mount.Destination,
ReadOnly: false,
}
for _, o := range mount.Options {
if strings.ToLower(o) == "ro" {
mds[i].ReadOnly = true
}
}
}
conf.MappedDirectories = mds
}
if spec.Windows.Network.DNSSearchList != nil {
conf.DNSSearchList = strings.Join(spec.Windows.Network.DNSSearchList, ",")
}
if spec.Windows.HyperV != nil {
conf.HvPartition = true
for _, layerPath := range layerFolders {
utilityVMPath := spec.Windows.HyperV.UtilityVMPath
_, err := os.Stat(utilityVMPath)
if err == nil {
conf.HvRuntime = &hcsshim.HvRuntime{ImagePath: utilityVMPath}
break
} else if !os.IsNotExist(err) {
return nil, errors.Wrapf(err, "failed to access layer %s", layerPath)
}
}
}
var (
err error
di = hcsshim.DriverInfo{
Flavour: 1, // filter driver
HomeDir: filepath.Dir(layerFolders[0]),
}
)
// TODO: Once there is a snapshotter for windows, this can be deleted.
// The R/W Layer should come from the Rootfs Mounts provided
//
// Windows doesn't support creating a container with a readonly
// filesystem, so always create a RW one
if err = hcsshim.CreateSandboxLayer(di, id, layerFolders[0], layerFolders); err != nil {
return nil, errors.Wrapf(err, "failed to create sandbox layer for %s: layers: %#v, driverInfo: %#v",
id, layerFolders, di)
}
conf.LayerFolderPath = filepath.Join(di.HomeDir, id)
defer func() {
if err != nil {
removeLayer(ctx, conf.LayerFolderPath)
}
}()
if err = hcsshim.ActivateLayer(di, id); err != nil {
return nil, errors.Wrapf(err, "failed to activate layer %s", conf.LayerFolderPath)
}
if err = hcsshim.PrepareLayer(di, id, layerFolders); err != nil {
return nil, errors.Wrapf(err, "failed to prepare layer %s", conf.LayerFolderPath)
}
conf.VolumePath, err = hcsshim.GetLayerMountPath(di, id)
if err != nil {
return nil, errors.Wrapf(err, "failed to getmount path for layer %s: driverInfo: %#v", id, di)
}
return conf, nil
}
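Note: a minimal sketch (hypothetical paths and IDs) of the input newContainerConfig expects. Network must be non-nil since the function reads it unconditionally, and the call is only meaningful on a Windows host with HCS available, because it creates the sandbox layer as a side effect.

	func demoContainerConfig(ctx context.Context) (*hcsshim.ContainerConfig, error) {
		spec := &specs.Spec{
			Hostname: "demo",
			Windows: &specs.Windows{
				// Read-only image layers; newContainerConfig creates the
				// R/W sandbox layer itself
				LayerFolders: []string{`C:\layers\base`},
				Network: &specs.WindowsNetwork{
					DNSSearchList: []string{"example.local"},
				},
			},
		}
		return newContainerConfig(ctx, "containerd", "default-demo", spec)
	}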
// removeLayer deletes the given layer; all associated containers must have
// been shut down for this to succeed.
func removeLayer(ctx context.Context, path string) error {
var (
err error
layerID = filepath.Base(path)
parentPath = filepath.Dir(path)
di = hcsshim.DriverInfo{
Flavour: 1, // filter driver
HomeDir: parentPath,
}
)
if err = hcsshim.UnprepareLayer(di, layerID); err != nil {
log.G(ctx).WithError(err).Warnf("failed to unprepare layer %s for removal", path)
}
if err = hcsshim.DeactivateLayer(di, layerID); err != nil {
log.G(ctx).WithError(err).Warnf("failed to deactivate layer %s for removal", path)
}
removePath := filepath.Join(parentPath, fmt.Sprintf("%s-removing", layerID))
if err = os.Rename(path, removePath); err != nil {
log.G(ctx).WithError(err).Warnf("failed to rename container layer %s for removal", path)
removePath = path
}
if err = hcsshim.DestroyLayer(di, removePath); err != nil {
log.G(ctx).WithError(err).Errorf("failed to remove container layer %s", removePath)
return err
}
return nil
}
func newProcessConfig(spec *specs.Process, pset *pipeSet) *hcsshim.ProcessConfig {
conf := &hcsshim.ProcessConfig{
EmulateConsole: pset.src.Terminal,
CreateStdInPipe: pset.stdin != nil,
CreateStdOutPipe: pset.stdout != nil,
CreateStdErrPipe: pset.stderr != nil,
User: spec.User.Username,
CommandLine: strings.Join(spec.Args, " "),
Environment: make(map[string]string),
WorkingDirectory: spec.Cwd,
ConsoleSize: [2]uint{spec.ConsoleSize.Height, spec.ConsoleSize.Width},
}
// Convert OCI Env format to HCS's
for _, s := range spec.Env {
arr := strings.SplitN(s, "=", 2)
if len(arr) == 2 {
conf.Environment[arr[0]] = arr[1]
}
}
return conf
}
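Note: to illustrate the environment conversion above with made-up values: OCI carries the environment as "KEY=VALUE" strings, HCS wants a map, and entries without "=" are silently dropped.

	env := []string{`Path=C:\Windows\system32`, "FOO=bar", "malformed"}
	hcsEnv := make(map[string]string)
	for _, s := range env {
		if kv := strings.SplitN(s, "=", 2); len(kv) == 2 {
			hcsEnv[kv[0]] = kv[1]
		}
	}
	// hcsEnv now maps Path and FOO; "malformed" was skipped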

View File

@ -0,0 +1,2 @@
// hcsshimopts holds the windows runtime specific options
package hcsshimopts

View File

@ -0,0 +1,352 @@
// Code generated by protoc-gen-gogo.
// source: github.com/containerd/containerd/windows/hcsshimopts/hcsshim.proto
// DO NOT EDIT!
/*
Package hcsshimopts is a generated protocol buffer package.
It is generated from these files:
github.com/containerd/containerd/windows/hcsshimopts/hcsshim.proto
It has these top-level messages:
CreateOptions
*/
package hcsshimopts
import proto "github.com/gogo/protobuf/proto"
import fmt "fmt"
import math "math"
import _ "github.com/gogo/protobuf/gogoproto"
import _ "github.com/golang/protobuf/ptypes/duration"
import time "time"
import github_com_gogo_protobuf_types "github.com/gogo/protobuf/types"
import strings "strings"
import reflect "reflect"
import io "io"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
var _ = time.Kitchen
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
type CreateOptions struct {
TerminateDuration time.Duration `protobuf:"bytes,1,opt,name=terminate_duration,json=terminateDuration,stdduration" json:"terminate_duration"`
}
func (m *CreateOptions) Reset() { *m = CreateOptions{} }
func (*CreateOptions) ProtoMessage() {}
func (*CreateOptions) Descriptor() ([]byte, []int) { return fileDescriptorHcsshim, []int{0} }
func init() {
proto.RegisterType((*CreateOptions)(nil), "containerd.windows.hcsshim.CreateOptions")
}
func (m *CreateOptions) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *CreateOptions) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
dAtA[i] = 0xa
i++
i = encodeVarintHcsshim(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdDuration(m.TerminateDuration)))
n1, err := github_com_gogo_protobuf_types.StdDurationMarshalTo(m.TerminateDuration, dAtA[i:])
if err != nil {
return 0, err
}
i += n1
return i, nil
}
func encodeFixed64Hcsshim(dAtA []byte, offset int, v uint64) int {
dAtA[offset] = uint8(v)
dAtA[offset+1] = uint8(v >> 8)
dAtA[offset+2] = uint8(v >> 16)
dAtA[offset+3] = uint8(v >> 24)
dAtA[offset+4] = uint8(v >> 32)
dAtA[offset+5] = uint8(v >> 40)
dAtA[offset+6] = uint8(v >> 48)
dAtA[offset+7] = uint8(v >> 56)
return offset + 8
}
func encodeFixed32Hcsshim(dAtA []byte, offset int, v uint32) int {
dAtA[offset] = uint8(v)
dAtA[offset+1] = uint8(v >> 8)
dAtA[offset+2] = uint8(v >> 16)
dAtA[offset+3] = uint8(v >> 24)
return offset + 4
}
func encodeVarintHcsshim(dAtA []byte, offset int, v uint64) int {
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
return offset + 1
}
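Note: these helpers implement standard protobuf varints: seven payload bits per byte, least-significant group first, with the high bit flagging continuation. A worked example (values mine):

	buf := make([]byte, 2)
	n := encodeVarintHcsshim(buf, 0, 300)
	// 300 = 0b10_0101100: the low seven bits 0b0101100 plus the continuation
	// bit give 0xAC, the remaining 0b10 gives 0x02, so buf == {0xAC, 0x02}
	// and n == 2; sovHcsshim(300) likewise returns 2, the encoded width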
func (m *CreateOptions) Size() (n int) {
var l int
_ = l
l = github_com_gogo_protobuf_types.SizeOfStdDuration(m.TerminateDuration)
n += 1 + l + sovHcsshim(uint64(l))
return n
}
func sovHcsshim(x uint64) (n int) {
for {
n++
x >>= 7
if x == 0 {
break
}
}
return n
}
func sozHcsshim(x uint64) (n int) {
return sovHcsshim(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func (this *CreateOptions) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&CreateOptions{`,
`TerminateDuration:` + strings.Replace(strings.Replace(this.TerminateDuration.String(), "Duration", "google_protobuf1.Duration", 1), `&`, ``, 1) + `,`,
`}`,
}, "")
return s
}
func valueToStringHcsshim(v interface{}) string {
rv := reflect.ValueOf(v)
if rv.IsNil() {
return "nil"
}
pv := reflect.Indirect(rv).Interface()
return fmt.Sprintf("*%v", pv)
}
func (m *CreateOptions) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowHcsshim
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: CreateOptions: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: CreateOptions: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field TerminateDuration", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowHcsshim
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthHcsshim
}
postIndex := iNdEx + msglen
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := github_com_gogo_protobuf_types.StdDurationUnmarshal(&m.TerminateDuration, dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipHcsshim(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthHcsshim
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func skipHcsshim(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowHcsshim
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
wireType := int(wire & 0x7)
switch wireType {
case 0:
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowHcsshim
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
iNdEx++
if dAtA[iNdEx-1] < 0x80 {
break
}
}
return iNdEx, nil
case 1:
iNdEx += 8
return iNdEx, nil
case 2:
var length int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowHcsshim
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
length |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
iNdEx += length
if length < 0 {
return 0, ErrInvalidLengthHcsshim
}
return iNdEx, nil
case 3:
for {
var innerWire uint64
var start int = iNdEx
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowHcsshim
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
innerWire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
innerWireType := int(innerWire & 0x7)
if innerWireType == 4 {
break
}
next, err := skipHcsshim(dAtA[start:])
if err != nil {
return 0, err
}
iNdEx = start + next
}
return iNdEx, nil
case 4:
return iNdEx, nil
case 5:
iNdEx += 4
return iNdEx, nil
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
}
panic("unreachable")
}
var (
ErrInvalidLengthHcsshim = fmt.Errorf("proto: negative length found during unmarshaling")
ErrIntOverflowHcsshim = fmt.Errorf("proto: integer overflow")
)
func init() {
proto.RegisterFile("github.com/containerd/containerd/windows/hcsshimopts/hcsshim.proto", fileDescriptorHcsshim)
}
var fileDescriptorHcsshim = []byte{
// 227 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x72, 0x4a, 0xcf, 0x2c, 0xc9,
0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d,
0x4a, 0x41, 0x66, 0x96, 0x67, 0xe6, 0xa5, 0xe4, 0x97, 0x17, 0xeb, 0x67, 0x24, 0x17, 0x17, 0x67,
0x64, 0xe6, 0xe6, 0x17, 0x94, 0xc0, 0xd9, 0x7a, 0x05, 0x45, 0xf9, 0x25, 0xf9, 0x42, 0x52, 0x08,
0xd5, 0x7a, 0x50, 0xd5, 0x7a, 0x50, 0x15, 0x52, 0x22, 0xe9, 0xf9, 0xe9, 0xf9, 0x60, 0x65, 0xfa,
0x20, 0x16, 0x44, 0x87, 0x94, 0x5c, 0x7a, 0x7e, 0x7e, 0x7a, 0x4e, 0xaa, 0x3e, 0x98, 0x97, 0x54,
0x9a, 0xa6, 0x9f, 0x52, 0x5a, 0x94, 0x58, 0x92, 0x99, 0x9f, 0x07, 0x91, 0x57, 0x4a, 0xe6, 0xe2,
0x75, 0x2e, 0x4a, 0x4d, 0x2c, 0x49, 0xf5, 0x2f, 0x00, 0x89, 0x16, 0x0b, 0x05, 0x71, 0x09, 0x95,
0xa4, 0x16, 0xe5, 0x66, 0xe6, 0x25, 0x96, 0xa4, 0xc6, 0xc3, 0x14, 0x4b, 0x30, 0x2a, 0x30, 0x6a,
0x70, 0x1b, 0x49, 0xea, 0x41, 0x4c, 0xd3, 0x83, 0x99, 0xa6, 0xe7, 0x02, 0x55, 0xe0, 0xc4, 0x71,
0xe2, 0x9e, 0x3c, 0xc3, 0x8c, 0xfb, 0xf2, 0x8c, 0x41, 0x82, 0x70, 0xed, 0x70, 0xc9, 0xa8, 0x13,
0x0f, 0xe5, 0x18, 0x6e, 0x3c, 0x94, 0x63, 0x68, 0x78, 0x24, 0xc7, 0x78, 0xe2, 0x91, 0x1c, 0xe3,
0x85, 0x47, 0x72, 0x8c, 0x0f, 0x1e, 0xc9, 0x31, 0x46, 0x39, 0x90, 0x13, 0x28, 0xd6, 0x48, 0xec,
0x24, 0x36, 0xb0, 0x5b, 0x8c, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0x75, 0x31, 0x65, 0xd0, 0x5f,
0x01, 0x00, 0x00,
}

View File

@ -0,0 +1,12 @@
syntax = "proto3";
package containerd.windows.hcsshim;
import "gogoproto/gogo.proto";
import "google/protobuf/duration.proto";
option go_package = "github.com/containerd/containerd/windows/hcsshimopts;hcsshimopts";
message CreateOptions {
google.protobuf.Duration terminate_duration = 1 [(gogoproto.stdduration) = true, (gogoproto.nullable) = false];
}
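Note: a sketch of how a caller might fill runtime.CreateOpts.Options with these options; runtime.go below unmarshals them and falls back to defaultTerminateDuration when TerminateDuration is zero. specAny stands in for the marshaled OCI spec, prepared elsewhere.

	opts, err := typeurl.MarshalAny(&hcsshimopts.CreateOptions{
		TerminateDuration: 2 * time.Minute, // hypothetical override
	})
	if err != nil {
		return err
	}
	createOpts := runtime.CreateOpts{Spec: specAny, Options: opts}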

110
windows/io.go Normal file
View File

@ -0,0 +1,110 @@
// +build windows
package windows
import (
"context"
"net"
"sync"
"time"
"github.com/Microsoft/go-winio"
"github.com/containerd/containerd/runtime"
"github.com/pkg/errors"
)
type pipeSet struct {
src runtime.IO
stdin net.Conn
stdout net.Conn
stderr net.Conn
}
// newPipeSet connects to the provided pipe addresses
func newPipeSet(ctx context.Context, io runtime.IO) (*pipeSet, error) {
var (
err error
wg sync.WaitGroup
set = &pipeSet{src: io}
ch = make(chan error)
opened = 0
)
defer func() {
if err != nil {
go func() {
for i := 0; i < opened; i++ {
// Drain the channel to avoid leaking the goroutines
<-ch
}
close(ch)
wg.Wait()
set.Close()
}()
}
}()
for _, p := range [3]struct {
name string
open bool
conn *net.Conn
}{
{
name: io.Stdin,
open: io.Stdin != "",
conn: &set.stdin,
},
{
name: io.Stdout,
open: io.Stdout != "",
conn: &set.stdout,
},
{
name: io.Stderr,
open: !io.Terminal && io.Stderr != "",
conn: &set.stderr,
},
} {
if p.open {
wg.Add(1)
opened++
go func(name string, conn *net.Conn) {
defer wg.Done()
// Dial with locally scoped c/err: the goroutines must not race on the
// enclosing function's variables, and each must send exactly one
// message on ch
dialTimeout := 3 * time.Second
c, err := winio.DialPipe(name, &dialTimeout)
if err != nil {
ch <- errors.Wrapf(err, "failed to connect to %s", name)
return
}
*conn = c
ch <- nil
}(p.name, p.conn)
}
}
for i := 0; i < opened; i++ {
select {
case <-ctx.Done():
return nil, ctx.Err()
case e := <-ch:
if e != nil {
if err == nil {
err = e
} else {
err = errors.Wrap(err, e.Error())
}
}
}
}
return set, err
}
// Close terminates all successfully dialed IO connections
func (p *pipeSet) Close() {
for _, cn := range []net.Conn{p.stdin, p.stdout, p.stderr} {
if cn != nil {
cn.Close()
}
}
}
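Note: the dial side above assumes something is already serving the named pipes. A rough sketch of the other end, with a hypothetical pipe name; it assumes imports of io and os in addition to those above, and the current go-winio ListenPipe signature (the vendored version may differ).

	func demoPipeSet(ctx context.Context) (*pipeSet, error) {
		const name = `\\.\pipe\containerd-demo-stdout` // hypothetical pipe name
		l, err := winio.ListenPipe(name, nil)
		if err != nil {
			return nil, err
		}
		go func() {
			c, err := l.Accept() // newPipeSet's DialPipe connects here
			if err != nil {
				return
			}
			defer c.Close()
			io.Copy(os.Stdout, c) // relay the task's stdout
		}()
		return newPipeSet(ctx, runtime.IO{Stdout: name})
	}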

54
windows/meta.go Normal file
View File

@ -0,0 +1,54 @@
// +build windows
package windows
// TODO: remove this file (i.e. meta.go) once we have a snapshotter
import (
"github.com/boltdb/bolt"
"github.com/containerd/containerd/errdefs"
"github.com/pkg/errors"
)
func newLayerFolderStore(tx *bolt.Tx) *layerFolderStore {
return &layerFolderStore{tx}
}
type layerFolderStore struct {
tx *bolt.Tx
}
func (s *layerFolderStore) Create(id, layer string) error {
bkt, err := s.tx.CreateBucketIfNotExists([]byte(pluginID))
if err != nil {
return errors.Wrapf(err, "failed to create bucket %s", pluginID)
}
err = bkt.Put([]byte(id), []byte(layer))
if err != nil {
return errors.Wrapf(err, "failed to store entry %s:%s", id, layer)
}
return nil
}
func (s *layerFolderStore) Get(id string) (string, error) {
bkt := s.tx.Bucket([]byte(pluginID))
if bkt == nil {
return "", errors.Wrapf(errdefs.ErrNotFound, "bucket %s", pluginID)
}
return string(bkt.Get([]byte(id))), nil
}
func (s *layerFolderStore) Delete(id string) error {
bkt := s.tx.Bucket([]byte(pluginID))
if bkt == nil {
return errors.Wrapf(errdefs.ErrNotFound, "bucket %s", pluginID)
}
if err := bkt.Delete([]byte(id)); err != nil {
return errors.Wrapf(err, "failed to delete entry %s", id)
}
return nil
}
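Note: the store is only meaningful inside a bolt transaction; runtime.go below does exactly this when recording the R/W layer path. The shape of a typical call, with a hypothetical id and path:

	if err := db.Update(func(tx *bolt.Tx) error {
		s := newLayerFolderStore(tx)
		return s.Create("default-demo", `C:\layers\demo`)
	}); err != nil {
		return err
	}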

View File

@ -1,25 +1,25 @@
// +build windows // +build windows
package pid package windows
import ( import (
"errors" "errors"
"sync" "sync"
) )
type Pool struct { type pidPool struct {
sync.Mutex sync.Mutex
pool map[uint32]struct{} pool map[uint32]struct{}
cur uint32 cur uint32
} }
func NewPool() *Pool { func newPidPool() *pidPool {
return &Pool{ return &pidPool{
pool: make(map[uint32]struct{}), pool: make(map[uint32]struct{}),
} }
} }
func (p *Pool) Get() (uint32, error) { func (p *pidPool) Get() (uint32, error) {
p.Lock() p.Lock()
defer p.Unlock() defer p.Unlock()
@ -31,6 +31,7 @@ func (p *Pool) Get() (uint32, error) {
} }
if _, ok := p.pool[pid]; !ok { if _, ok := p.pool[pid]; !ok {
p.cur = pid p.cur = pid
p.pool[pid] = struct{}{}
return pid, nil return pid, nil
} }
pid++ pid++
@ -39,7 +40,7 @@ func (p *Pool) Get() (uint32, error) {
return 0, errors.New("pid pool exhausted") return 0, errors.New("pid pool exhausted")
} }
func (p *Pool) Put(pid uint32) { func (p *pidPool) Put(pid uint32) {
p.Lock() p.Lock()
delete(p.pool, pid) delete(p.pool, pid)
p.Unlock() p.Unlock()
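Note: usage of the pool is symmetric: Get reserves a free fake PID and Put releases it for reuse, as task.go does in removeProcessNL. A minimal sketch:

	pool := newPidPool()
	pid, err := pool.Get()
	if err != nil {
		return err // pool exhausted: every uint32 id is in use
	}
	defer pool.Put(pid) // release the id once the process is gone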

View File

@ -4,39 +4,80 @@ package windows
import ( import (
"context" "context"
"time"
"github.com/Microsoft/hcsshim"
"github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/runtime" "github.com/containerd/containerd/runtime"
"github.com/containerd/containerd/windows/hcs" "github.com/pkg/errors"
) )
// process implements containerd.Process and containerd.State // process implements containerd.Process and containerd.State
type process struct { type process struct {
*hcs.Process hcs hcsshim.Process
id string
pid uint32
io *pipeSet
status runtime.Status
task *task
exitCh chan struct{}
exitCode uint32
exitTime time.Time
}
func (p *process) ID() string {
return p.id
} }
func (p *process) State(ctx context.Context) (runtime.State, error) { func (p *process) State(ctx context.Context) (runtime.State, error) {
return runtime.State{ return runtime.State{
Pid: p.Pid(), Status: p.Status(),
Status: p.Status(), Pid: p.pid,
Stdin: p.io.src.Stdin,
Stdout: p.io.src.Stdout,
Stderr: p.io.src.Stderr,
Terminal: p.io.src.Terminal,
}, nil }, nil
} }
func (p *process) Kill(ctx context.Context, sig uint32, all bool) error {
return p.Process.Kill()
}
func (p *process) Status() runtime.Status { func (p *process) Status() runtime.Status {
return p.Process.Status() if p.task.getStatus() == runtime.PausedStatus {
return runtime.PausedStatus
}
var status runtime.Status
select {
case <-p.exitCh:
status = runtime.StoppedStatus
default:
status = runtime.RunningStatus
}
return status
} }
func (p *process) Pid() uint32 { func (p *process) Kill(ctx context.Context, sig uint32, all bool) error {
return p.Process.Pid() // On windows all signals kill the process
} return errors.Wrap(p.hcs.Kill(), "failed to kill process")
func (p *process) CloseIO(ctx context.Context) error {
return p.Process.CloseStdin()
} }
func (p *process) ResizePty(ctx context.Context, size runtime.ConsoleSize) error { func (p *process) ResizePty(ctx context.Context, size runtime.ConsoleSize) error {
return p.Process.ResizeConsole(uint16(size.Width), uint16(size.Height)) err := p.hcs.ResizeConsole(uint16(size.Width), uint16(size.Height))
return errors.Wrap(err, "failed to resize process console")
}
func (p *process) CloseIO(ctx context.Context) error {
return errors.Wrap(p.hcs.CloseStdin(), "failed to close stdin")
}
func (p *process) Pid() uint32 {
return p.pid
}
func (p *process) ExitCode() (uint32, time.Time, error) {
if p.Status() != runtime.StoppedStatus {
return 255, time.Time{}, errors.Wrap(errdefs.ErrFailedPrecondition, "process is not stopped")
}
return p.exitCode, p.exitTime, nil
} }
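Note: Status above distinguishes stopped from running with a non-blocking receive on exitCh; because the channel is closed rather than sent on, the receive succeeds for every caller once the process has exited. The pattern in isolation:

	select {
	case <-p.exitCh:
		// channel closed: the process has exited
	default:
		// still running
	}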

View File

@ -6,166 +6,407 @@ import (
"context" "context"
"fmt" "fmt"
"os" "os"
"path/filepath"
"sync" "sync"
"time"
"github.com/Microsoft/hcsshim"
"github.com/Sirupsen/logrus"
"github.com/boltdb/bolt"
eventsapi "github.com/containerd/containerd/api/services/events/v1"
containerdtypes "github.com/containerd/containerd/api/types"
"github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/events"
"github.com/containerd/containerd/log" "github.com/containerd/containerd/log"
"github.com/containerd/containerd/namespaces"
"github.com/containerd/containerd/plugin" "github.com/containerd/containerd/plugin"
"github.com/containerd/containerd/runtime" "github.com/containerd/containerd/runtime"
"github.com/containerd/containerd/typeurl" "github.com/containerd/containerd/typeurl"
"github.com/containerd/containerd/windows/hcs" "github.com/containerd/containerd/windows/hcsshimopts"
"github.com/containerd/containerd/windows/pid"
specs "github.com/opencontainers/runtime-spec/specs-go" specs "github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors" "github.com/pkg/errors"
) )
const ( const (
runtimeName = "windows" runtimeName = "windows"
owner = "containerd" hcsshimOwner = "containerd"
defaultTerminateDuration = 5 * time.Minute
) )
var _ = (runtime.Runtime)(&Runtime{}) var (
pluginID = fmt.Sprintf("%s.%s", plugin.RuntimePlugin, runtimeName)
)
var _ = (runtime.Runtime)(&windowsRuntime{})
func init() { func init() {
plugin.Register(&plugin.Registration{ plugin.Register(&plugin.Registration{
ID: "windows", ID: runtimeName,
Type: plugin.RuntimePlugin, Type: plugin.RuntimePlugin,
Init: New, Init: New,
Requires: []plugin.PluginType{
plugin.MetadataPlugin,
},
}) })
typeurl.Register(&RuntimeSpec{}, "windows/Spec")
} }
func New(ic *plugin.InitContext) (interface{}, error) { func New(ic *plugin.InitContext) (interface{}, error) {
rootDir := filepath.Join(ic.Root) if err := os.MkdirAll(ic.Root, 0700); err != nil {
if err := os.MkdirAll(rootDir, 0755); err != nil { return nil, errors.Wrapf(err, "could not create state directory at %s", ic.Root)
return nil, errors.Wrapf(err, "could not create state directory at %s", rootDir)
} }
c, cancel := context.WithCancel(ic.Context) m, err := ic.Get(plugin.MetadataPlugin)
r := &Runtime{
pidPool: pid.NewPool(),
containers: make(map[string]*container),
events: make(chan interface{}, 2048),
eventsContext: c,
eventsCancel: cancel,
rootDir: rootDir,
hcs: hcs.New(owner, rootDir),
}
// Terminate all previous containers that we may have started. We don't
// support restoring containers
ctrs, err := loadContainers(ic.Context, r.hcs)
if err != nil { if err != nil {
return nil, err return nil, err
} }
for _, c := range ctrs { r := &windowsRuntime{
c.ctr.Delete(ic.Context) root: ic.Root,
//r.sendEvent(c.ctr.ID(), events.RuntimeEvent_EXIT, c.ctr.Pid(), 255, time.Time{}) pidPool: newPidPool(),
events: make(chan interface{}, 4096),
emitter: ic.Emitter,
// TODO(mlaventure): windows needs a stat monitor
monitor: nil,
tasks: runtime.NewTaskList(),
db: m.(*bolt.DB),
} }
// Try to delete the old state dir and recreate it // Load our existing containers and kill/delete them. We don't support
stateDir := filepath.Join(ic.Root, "state") // reattaching to them
if err := os.RemoveAll(stateDir); err != nil { r.cleanup(ic.Context)
log.G(c).WithError(err).Warnf("failed to cleanup old state directory at %s", stateDir)
}
if err := os.MkdirAll(stateDir, 0755); err != nil {
return nil, errors.Wrapf(err, "could not create state directory at %s", stateDir)
}
r.stateDir = stateDir
return r, nil return r, nil
} }
type Runtime struct { type windowsRuntime struct {
sync.Mutex sync.Mutex
rootDir string root string
stateDir string pidPool *pidPool
pidPool *pid.Pool
hcs *hcs.HCS emitter *events.Emitter
events chan interface{}
containers map[string]*container monitor runtime.TaskMonitor
tasks *runtime.TaskList
events chan interface{} db *bolt.DB
eventsContext context.Context
eventsCancel func()
} }
type RuntimeSpec struct { func (r *windowsRuntime) ID() string {
// Spec is the OCI spec return pluginID
OCISpec specs.Spec
// HCS specific options
hcs.Configuration
} }
func (r *Runtime) ID() string { func (r *windowsRuntime) Create(ctx context.Context, id string, opts runtime.CreateOpts) (runtime.Task, error) {
return fmt.Sprintf("%s.%s", plugin.RuntimePlugin, runtimeName) namespace, err := namespaces.NamespaceRequired(ctx)
}
func (r *Runtime) Create(ctx context.Context, id string, opts runtime.CreateOpts) (runtime.Task, error) {
v, err := typeurl.UnmarshalAny(opts.Spec)
if err != nil {
return nil, err
}
rtSpec := v.(*RuntimeSpec)
ctr, err := newContainer(ctx, r.hcs, id, rtSpec, opts.IO)
if err != nil { if err != nil {
return nil, err return nil, err
} }
r.Lock() s, err := typeurl.UnmarshalAny(opts.Spec)
r.containers[id] = ctr if err != nil {
r.Unlock() return nil, err
}
spec := s.(*specs.Spec)
return ctr, nil var createOpts *hcsshimopts.CreateOptions
if opts.Options != nil {
o, err := typeurl.UnmarshalAny(opts.Options)
if err != nil {
return nil, err
}
createOpts = o.(*hcsshimopts.CreateOptions)
} else {
createOpts = &hcsshimopts.CreateOptions{}
}
if createOpts.TerminateDuration == 0 {
createOpts.TerminateDuration = defaultTerminateDuration
}
return r.newTask(ctx, namespace, id, spec, opts.IO, createOpts)
} }
func (r *Runtime) Delete(ctx context.Context, c runtime.Task) (*runtime.Exit, error) { func (r *windowsRuntime) Get(ctx context.Context, id string) (runtime.Task, error) {
wc, ok := c.(*container) return r.tasks.Get(ctx, id)
}
func (r *windowsRuntime) Tasks(ctx context.Context) ([]runtime.Task, error) {
return r.tasks.GetAll(ctx)
}
func (r *windowsRuntime) Delete(ctx context.Context, t runtime.Task) (*runtime.Exit, error) {
wt, ok := t.(*task)
if !ok { if !ok {
return nil, fmt.Errorf("container cannot be cast as *windows.container") return nil, errors.Wrap(errdefs.ErrInvalidArgument, "no a windows task")
}
ec, err := wc.ctr.ExitCode()
if err != nil {
log.G(ctx).WithError(err).Errorf("failed to retrieve exit code for container %s", wc.ctr.ID())
} }
wc.ctr.Delete(ctx) // TODO(mlaventure): stop monitor on this task
r.Lock() var (
delete(r.containers, wc.ctr.ID()) err error
r.Unlock() needServicing bool
state, _ = wt.State(ctx)
)
switch state.Status {
case runtime.StoppedStatus:
// Only try to service a container if it was started and it's not a
// servicing task itself
if !wt.servicing {
needServicing, err = wt.hcsContainer.HasPendingUpdates()
if err != nil {
needServicing = false
log.G(ctx).WithError(err).
WithFields(logrus.Fields{"id": wt.id, "pid": wt.pid}).
Error("failed to check if container needs servicing")
}
}
fallthrough
case runtime.CreatedStatus:
// if it's stopped or in created state, we need to shutdown the
// container before removing it
if err = wt.stop(ctx); err != nil {
return nil, err
}
default:
return nil, errors.Wrap(errdefs.ErrFailedPrecondition,
"cannot delete a non-stopped task")
}
return &runtime.Exit{ var rtExit *runtime.Exit
Status: ec, if p := wt.getProcess(t.ID()); p != nil {
Timestamp: wc.ctr.Processes()[0].ExitedAt(), ec, ea, err := p.ExitCode()
}, nil if err != nil {
} return nil, err
}
func (r *Runtime) Tasks(ctx context.Context) ([]runtime.Task, error) { rtExit = &runtime.Exit{
r.Lock() Pid: wt.pid,
defer r.Unlock() Status: ec,
list := make([]runtime.Task, len(r.containers)) Timestamp: ea,
for _, c := range r.containers { }
select { } else {
case <-ctx.Done(): rtExit = &runtime.Exit{
return nil, ctx.Err() Pid: wt.pid,
default: Status: 255,
list = append(list, c) Timestamp: time.Now(),
} }
} }
return list, nil
wt.cleanup()
r.tasks.Delete(ctx, t)
r.emitter.Post(events.WithTopic(ctx, runtime.TaskDeleteEventTopic),
&eventsapi.TaskDelete{
ContainerID: wt.id,
Pid: wt.pid,
ExitStatus: rtExit.Status,
ExitedAt: rtExit.Timestamp,
})
if needServicing {
ns, _ := namespaces.Namespace(ctx)
serviceCtx := log.WithLogger(context.Background(), log.GetLogger(ctx))
serviceCtx = namespaces.WithNamespace(serviceCtx, ns)
r.serviceTask(serviceCtx, ns, wt.id+"_servicing", wt.spec)
}
// If the process never started, rtExit above already carries the 255 failure status
return rtExit, nil
} }
func (r *Runtime) Get(ctx context.Context, id string) (runtime.Task, error) { func (r *windowsRuntime) newTask(ctx context.Context, namespace, id string, spec *specs.Spec, io runtime.IO, createOpts *hcsshimopts.CreateOptions) (*task, error) {
r.Lock() var (
defer r.Unlock() err error
c, ok := r.containers[id] pset *pipeSet
if !ok { )
return nil, fmt.Errorf("container %s does not exit", id)
if pset, err = newPipeSet(ctx, io); err != nil {
return nil, err
}
defer func() {
if err != nil {
pset.Close()
}
}()
var pid uint32
if pid, err = r.pidPool.Get(); err != nil {
return nil, err
}
defer func() {
if err != nil {
r.pidPool.Put(pid)
}
}()
var (
conf *hcsshim.ContainerConfig
nsid = namespace + "-" + id
)
if conf, err = newContainerConfig(ctx, hcsshimOwner, nsid, spec); err != nil {
return nil, err
}
defer func() {
if err != nil {
removeLayer(ctx, conf.LayerFolderPath)
}
}()
// TODO: remove this once we have a windows snapshotter
// Store the LayerFolder in the db so we can clean it if we die
if err = r.db.Update(func(tx *bolt.Tx) error {
s := newLayerFolderStore(tx)
return s.Create(nsid, conf.LayerFolderPath)
}); err != nil {
return nil, err
}
defer func() {
if err != nil {
if dbErr := r.db.Update(func(tx *bolt.Tx) error {
s := newLayerFolderStore(tx)
return s.Delete(nsid)
}); dbErr != nil {
log.G(ctx).WithField("id", id).
Error("failed to remove key from metadata")
}
}
}()
ctr, err := hcsshim.CreateContainer(nsid, conf)
if err != nil {
return nil, errors.Wrapf(err, "hcsshim failed to create task")
}
defer func() {
if err != nil {
ctr.Terminate()
ctr.Wait()
ctr.Close()
}
}()
if err = ctr.Start(); err != nil {
return nil, errors.Wrap(err, "hcsshim failed to spawn task")
}
t := &task{
id: id,
namespace: namespace,
pid: pid,
io: pset,
status: runtime.CreatedStatus,
spec: spec,
processes: make(map[string]*process),
hyperV: spec.Windows.HyperV != nil,
emitter: r.emitter,
rwLayer: conf.LayerFolderPath,
pidPool: r.pidPool,
hcsContainer: ctr,
terminateDuration: createOpts.TerminateDuration,
}
r.tasks.Add(ctx, t)
var rootfs []*containerdtypes.Mount
for _, l := range append([]string{t.rwLayer}, spec.Windows.LayerFolders...) {
rootfs = append(rootfs, &containerdtypes.Mount{
Type: "windows-layer",
Source: l,
})
}
r.emitter.Post(events.WithTopic(ctx, runtime.TaskCreateEventTopic),
&eventsapi.TaskCreate{
ContainerID: id,
IO: &eventsapi.TaskIO{
Stdin: io.Stdin,
Stdout: io.Stdout,
Stderr: io.Stderr,
Terminal: io.Terminal,
},
Pid: t.pid,
Rootfs: rootfs,
// TODO: what should be in Bundle for windows?
})
return t, nil
}
func (r *windowsRuntime) cleanup(ctx context.Context) {
cp, err := hcsshim.GetContainers(hcsshim.ComputeSystemQuery{
Types: []string{"Container"},
Owners: []string{hcsshimOwner},
})
if err != nil {
log.G(ctx).Warn("failed to retrieve running containers")
return
}
for _, p := range cp {
container, err := hcsshim.OpenContainer(p.ID)
if err != nil {
log.G(ctx).Warnf("failed open container %s", p.ID)
continue
}
err = container.Terminate()
if err == nil || hcsshim.IsPending(err) || hcsshim.IsAlreadyStopped(err) {
container.Wait()
}
container.Close()
// TODO: remove this once we have a windows snapshotter
var layerFolderPath string
if err := r.db.View(func(tx *bolt.Tx) error {
s := newLayerFolderStore(tx)
l, e := s.Get(p.ID)
if e == nil { // check the Get error, not the enclosing err
layerFolderPath = l
}
return e
}); err == nil && layerFolderPath != "" {
removeLayer(ctx, layerFolderPath)
if dbErr := r.db.Update(func(tx *bolt.Tx) error {
s := newLayerFolderStore(tx)
return s.Delete(p.ID)
}); dbErr != nil {
log.G(ctx).WithField("id", p.ID).
Error("failed to remove key from metadata")
}
} else {
log.G(ctx).WithField("id", p.ID).
Debug("key not found in metadata, R/W layer may be leaked")
}
}
}
func (r *windowsRuntime) serviceTask(ctx context.Context, namespace, id string, spec *specs.Spec) {
var (
err error
t *task
io runtime.IO
createOpts = &hcsshimopts.CreateOptions{
TerminateDuration: defaultTerminateDuration,
}
)
t, err = r.newTask(ctx, namespace, id, spec, io, createOpts)
if err != nil {
log.G(ctx).WithError(err).WithField("id", id).
Warn("failed to created servicing task")
return
}
t.servicing = true
err = t.Start(ctx)
switch err {
case nil:
<-t.getProcess(id).exitCh
default:
log.G(ctx).WithError(err).WithField("id", id).
Warn("failed to start servicing task")
}
if _, err = r.Delete(ctx, t); err != nil {
log.G(ctx).WithError(err).WithField("id", id).
Warn("failed to stop servicing task")
} }
return c, nil
} }

441
windows/task.go Normal file
View File

@ -0,0 +1,441 @@
// +build windows
package windows
import (
"context"
"io"
"sync"
"syscall"
"time"
"github.com/Microsoft/hcsshim"
"github.com/Sirupsen/logrus"
eventsapi "github.com/containerd/containerd/api/services/events/v1"
"github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/events"
"github.com/containerd/containerd/log"
"github.com/containerd/containerd/runtime"
"github.com/containerd/containerd/typeurl"
"github.com/gogo/protobuf/types"
specs "github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
)
type task struct {
sync.Mutex
id string
namespace string
pid uint32
io *pipeSet
status runtime.Status
spec *specs.Spec
processes map[string]*process
hyperV bool
emitter *events.Emitter
rwLayer string
pidPool *pidPool
hcsContainer hcsshim.Container
terminateDuration time.Duration
servicing bool
}
func (t *task) ID() string {
return t.id
}
func (t *task) State(ctx context.Context) (runtime.State, error) {
var status runtime.Status
if p := t.getProcess(t.id); p != nil {
status = p.Status()
} else {
status = t.getStatus()
}
return runtime.State{
Status: status,
Pid: t.pid,
Stdin: t.io.src.Stdin,
Stdout: t.io.src.Stdout,
Stderr: t.io.src.Stderr,
Terminal: t.io.src.Terminal,
}, nil
}
func (t *task) Kill(ctx context.Context, signal uint32, all bool) error {
p := t.getProcess(t.id)
if p == nil {
return errors.Wrapf(errdefs.ErrFailedPrecondition, "task is not running")
}
if p.Status() == runtime.StoppedStatus {
return errors.Wrapf(errdefs.ErrNotFound, "process is stopped")
}
return p.Kill(ctx, signal, all)
}
func (t *task) ResizePty(ctx context.Context, size runtime.ConsoleSize) error {
p := t.getProcess(t.id)
if p == nil {
return errors.Wrap(errdefs.ErrFailedPrecondition, "task not started")
}
return p.ResizePty(ctx, size)
}
func (t *task) CloseIO(ctx context.Context) error {
p := t.getProcess(t.id)
if p == nil {
return errors.Wrap(errdefs.ErrFailedPrecondition, "task not started")
}
return p.hcs.CloseStdin()
}
func (t *task) Info() runtime.TaskInfo {
return runtime.TaskInfo{
ID: t.id,
Runtime: pluginID,
Namespace: t.namespace,
// TODO(mlaventure): what about Spec? I think this could be removed from the info, the id is enough since it matches the one from the container
}
}
func (t *task) Start(ctx context.Context) error {
conf := newProcessConfig(t.spec.Process, t.io)
if _, err := t.newProcess(ctx, t.id, conf, t.io); err != nil {
return err
}
t.emitter.Post(events.WithTopic(ctx, runtime.TaskStartEventTopic),
&eventsapi.TaskStart{
ContainerID: t.id,
Pid: t.pid,
})
return nil
}
func (t *task) Pause(ctx context.Context) error {
if t.hyperV {
err := t.hcsContainer.Pause()
if err == nil {
t.Lock()
t.status = runtime.PausedStatus
t.Unlock()
}
if err == nil {
t.emitter.Post(events.WithTopic(ctx, runtime.TaskPausedEventTopic),
&eventsapi.TaskPaused{
ContainerID: t.id,
})
}
return errors.Wrap(err, "hcsshim failed to pause task")
}
return errors.Wrap(errdefs.ErrFailedPrecondition, "not an hyperV task")
}
func (t *task) Resume(ctx context.Context) error {
if t.hyperV {
err := t.hcsContainer.Resume()
if err == nil {
t.Lock()
t.status = runtime.RunningStatus
t.Unlock()
}
if err == nil {
t.emitter.Post(events.WithTopic(ctx, runtime.TaskResumedEventTopic),
&eventsapi.TaskResumed{
ContainerID: t.id,
})
}
return errors.Wrap(err, "hcsshim failed to resume task")
}
return errors.Wrap(errdefs.ErrFailedPrecondition, "not an hyperV task")
}
func (t *task) Exec(ctx context.Context, id string, opts runtime.ExecOpts) (runtime.Process, error) {
if p := t.getProcess(t.id); p == nil {
return nil, errors.Wrap(errdefs.ErrFailedPrecondition, "task not started")
}
if p := t.getProcess(id); p != nil {
return nil, errors.Wrap(errdefs.ErrAlreadyExists, "id already in use")
}
s, err := typeurl.UnmarshalAny(opts.Spec)
if err != nil {
return nil, err
}
spec := s.(*specs.Process)
if spec.Cwd == "" {
spec.Cwd = t.spec.Process.Cwd
}
var pset *pipeSet
if pset, err = newPipeSet(ctx, opts.IO); err != nil {
return nil, err
}
defer func() {
if err != nil {
pset.Close()
}
}()
conf := newProcessConfig(spec, pset)
p, err := t.newProcess(ctx, id, conf, pset)
if err != nil {
return nil, err
}
t.emitter.Post(events.WithTopic(ctx, runtime.TaskExecAddedEventTopic),
&eventsapi.TaskExecAdded{
ContainerID: t.id,
ExecID: id,
Pid: p.Pid(),
})
return p, nil
}
func (t *task) Pids(ctx context.Context) ([]uint32, error) {
t.Lock()
defer t.Unlock()
var (
pids = make([]uint32, len(t.processes))
idx = 0
)
for _, p := range t.processes {
pids[idx] = p.Pid()
idx++
}
return pids, nil
}
func (t *task) Checkpoint(_ context.Context, _ string, _ *types.Any) error {
return errors.Wrap(errdefs.ErrUnavailable, "not supported")
}
func (t *task) DeleteProcess(ctx context.Context, id string) (*runtime.Exit, error) {
if id == t.id {
return nil, errors.Wrapf(errdefs.ErrInvalidArgument,
"cannot delete init process")
}
if p := t.getProcess(id); p != nil {
ec, ea, err := p.ExitCode()
if err != nil {
return nil, err
}
t.removeProcess(id)
return &runtime.Exit{
Pid: p.pid,
Status: ec,
Timestamp: ea,
}, nil
}
return nil, errors.Wrapf(errdefs.ErrNotFound, "no such process %s", id)
}
func (t *task) Update(ctx context.Context, resources *types.Any) error {
return errors.Wrap(errdefs.ErrUnavailable, "not supported")
}
func (t *task) Process(ctx context.Context, id string) (p runtime.Process, err error) {
p = t.getProcess(id)
if p == nil {
err = errors.Wrapf(errdefs.ErrNotFound, "no such process %d", id)
}
return p, err
}
func (t *task) newProcess(ctx context.Context, id string, conf *hcsshim.ProcessConfig, pset *pipeSet) (*process, error) {
var (
err error
pid uint32
)
// If we fail, close the io right now
defer func() {
if err != nil {
pset.Close()
}
}()
t.Lock()
if len(t.processes) == 0 {
pid = t.pid
} else {
if pid, err = t.pidPool.Get(); err != nil {
t.Unlock()
return nil, err
}
defer func() {
if err != nil {
t.pidPool.Put(pid)
}
}()
}
t.Unlock()
var p hcsshim.Process
if p, err = t.hcsContainer.CreateProcess(conf); err != nil {
return nil, errors.Wrapf(err, "failed to create process")
}
stdin, stdout, stderr, err := p.Stdio()
if err != nil {
p.Kill()
return nil, errors.Wrapf(err, "failed to retrieve init process stdio")
}
ioCopy := func(name string, dst io.WriteCloser, src io.ReadCloser) {
log.G(ctx).WithFields(logrus.Fields{"id": id, "pid": pid}).
Debugf("%s: copy started", name)
io.Copy(dst, src)
log.G(ctx).WithFields(logrus.Fields{"id": id, "pid": pid}).
Debugf("%s: copy done", name)
dst.Close()
src.Close()
}
if pset.stdin != nil {
go ioCopy("stdin", stdin, pset.stdin)
}
if pset.stdout != nil {
go ioCopy("stdout", pset.stdout, stdout)
}
if pset.stderr != nil {
go ioCopy("stderr", pset.stderr, stderr)
}
t.Lock()
wp := &process{
id: id,
pid: pid,
io: pset,
status: runtime.RunningStatus,
task: t,
hcs: p,
exitCh: make(chan struct{}),
}
t.processes[id] = wp
t.Unlock()
// Wait for the process to exit to get the exit status
go func() {
if err := p.Wait(); err != nil {
herr, ok := err.(*hcsshim.ProcessError)
if ok && herr.Err != syscall.ERROR_BROKEN_PIPE {
log.G(ctx).
WithError(err).
WithFields(logrus.Fields{"id": id, "pid": pid}).
Warnf("hcsshim wait failed (process may have been killed)")
}
// Try to get the exit code nonetheless
}
wp.exitTime = time.Now()
ec, err := p.ExitCode()
if err != nil {
log.G(ctx).
WithError(err).
WithFields(logrus.Fields{"id": id, "pid": pid}).
Warnf("hcsshim could not retrieve exit code")
// Use the unknown exit code
ec = 255
}
wp.exitCode = uint32(ec)
t.emitter.Post(events.WithTopic(ctx, runtime.TaskExitEventTopic),
&eventsapi.TaskExit{
ContainerID: t.id,
ID: id,
Pid: pid,
ExitStatus: wp.exitCode,
ExitedAt: wp.exitTime,
})
close(wp.exitCh)
// Ensure io's are closed
pset.Close()
// Cleanup HCS resources
p.Close()
}()
return wp, nil
}
func (t *task) getProcess(id string) *process {
t.Lock()
p := t.processes[id]
t.Unlock()
return p
}
func (t *task) removeProcessNL(id string) {
if p, ok := t.processes[id]; ok {
if p.io != nil {
p.io.Close()
}
t.pidPool.Put(p.pid)
delete(t.processes, id)
}
}
func (t *task) removeProcess(id string) {
t.Lock()
t.removeProcessNL(id)
t.Unlock()
}
func (t *task) getStatus() runtime.Status {
t.Lock()
status := t.status
t.Unlock()
return status
}
// stop tries to shut down the task.
// It will do so by first calling Shutdown on the hcsshim.Container and, if
// that fails, by resorting to calling Terminate
func (t *task) stop(ctx context.Context) error {
if err := t.hcsStop(ctx, t.hcsContainer.Shutdown); err != nil {
return t.hcsStop(ctx, t.hcsContainer.Terminate)
}
t.hcsContainer.Close()
return nil
}
func (t *task) hcsStop(ctx context.Context, stop func() error) error {
err := stop()
switch {
case hcsshim.IsPending(err):
err = t.hcsContainer.WaitTimeout(t.terminateDuration)
case hcsshim.IsAlreadyStopped(err):
err = nil
}
return err
}
func (t *task) cleanup() {
t.Lock()
for _, p := range t.processes {
t.removeProcessNL(p.id)
}
removeLayer(context.Background(), t.rwLayer)
t.Unlock()
}
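Note: callers wait for process exit via exitCh, which the wait goroutine above closes only after exitCode and exitTime are recorded; serviceTask in runtime.go relies on this to block on the servicing container. In isolation:

	p := t.getProcess(t.id)
	if p == nil {
		return errors.Wrap(errdefs.ErrFailedPrecondition, "task not started")
	}
	<-p.exitCh // closed once exitCode/exitTime are populated
	code, exitedAt, err := p.ExitCode()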