Merge pull request #1196 from mlaventure/update-windows-runtime

Update windows runtime

commit dd7642fc1c
.appveyor.yml | 30 (new file)

@@ -0,0 +1,30 @@
+version: "{build}"
+
+image: Visual Studio 2017
+
+clone_folder: c:\gopath\src\github.com\containerd\containerd
+
+environment:
+  GOPATH: C:\gopath
+  CGO_ENABLED: 1
+
+before_build:
+  - choco install -y mingw
+  - choco install codecov
+
+build_script:
+  - bash.exe -lc "export PATH=/c/tools/mingw64/bin:/c/gopath/src/github.com/containerd/containerd/bin:$PATH ; mingw32-make.exe fmt"
+  - bash.exe -lc "export PATH=/c/tools/mingw64/bin:/c/gopath/src/github.com/containerd/containerd/bin:$PATH ; mingw32-make.exe vet"
+  - bash.exe -lc "export PATH=/c/tools/mingw64/bin:$PATH ; mingw32-make.exe build"
+  - bash.exe -lc "export PATH=/c/tools/mingw64/bin:$PATH ; mingw32-make.exe binaries"
+
+test_script:
+  # TODO: need an equivalent of TRAVIS_COMMIT_RANGE
+  # - GIT_CHECK_EXCLUDE="./vendor" TRAVIS_COMMIT_RANGE="${TRAVIS_COMMIT_RANGE/.../..}" C:\MinGW\bin\mingw32-make.exe dco
+  - bash.exe -lc "export PATH=/c/tools/mingw64/bin:/c/gopath/src/github.com/containerd/containerd/bin:$PATH ; mingw32-make.exe integration"
+  - bash.exe -lc "export PATH=/c/tools/mingw64/bin:/c/gopath/src/github.com/containerd/containerd/bin:$PATH ; mingw32-make.exe coverage"
+  - bash.exe -lc "export PATH=/c/tools/mingw64/bin:/c/gopath/src/github.com/containerd/containerd/bin:$PATH ; mingw32-make.exe root-coverage"
+
+on_success:
+  # Note that a Codecov upload token is not required.
+  - codecov -f coverage.txt
Makefile | 17

@@ -9,15 +9,18 @@ VERSION=$(shell git describe --match 'v[0-9]*' --dirty='.m' --always)
 REVISION=$(shell git rev-parse HEAD)$(shell if ! git diff --no-ext-diff --quiet --exit-code; then echo .m; fi)
 
 ifneq "$(strip $(shell command -v go 2>/dev/null))" ""
-GOOS ?= $(shell go env GOOS)
+	GOOS ?= $(shell go env GOOS)
 else
-GOOS ?= $$GOOS
+	GOOS ?= $$GOOS
 endif
 
 WHALE = "🇩"
 ONI = "👹"
+FIX_PATH = $1
 ifeq ("$(OS)", "Windows_NT")
 	WHALE="+"
 	ONI="-"
+	FIX_PATH = $(subst /,\,$1)
 endif
 GOARCH ?= $(shell go env GOARCH)
 
@@ -44,7 +47,7 @@ GO_TAGS=$(if $(BUILDTAGS),-tags "$(BUILDTAGS)",)
 GO_LDFLAGS=-ldflags "-X $(PKG)/version.Version=$(VERSION) -X $(PKG)/version.Revision=$(REVISION) -X $(PKG)/version.Package=$(PKG) $(EXTRA_LDFLAGS)"
 
 # Flags passed to `go test`
-TESTFLAGS ?=-parallel 8 -race
+TESTFLAGS ?=-parallel 8 -race -v
 
 .PHONY: clean all AUTHORS fmt vet lint dco build binaries test integration setup generate protos checkprotos coverage ci check help install uninstall vendor release
 .DEFAULT: default
@@ -88,7 +91,7 @@ vet: binaries ## run go vet
 
 fmt: ## run go fmt
 	@echo "$(WHALE) $@"
-	@test -z "$$(gofmt -s -l . | grep -v vendor/ | grep -v ".pb.go$$" | tee /dev/stderr)" || \
+	@test -z "$$(gofmt -s -l . | grep -Fv $(call FIX_PATH,'vendor/') | grep -v ".pb.go$$" | tee /dev/stderr)" || \
 		(echo "$(ONI) please format Go code with 'gofmt -s -w'" && false)
 	@test -z "$$(find . -path ./vendor -prune -o ! -name timestamp.proto ! -name duration.proto -name '*.proto' -type f -exec grep -Hn -e "^ " {} \; | tee /dev/stderr)" || \
 		(echo "$(ONI) please indent proto files with tabs only" && false)
@@ -97,7 +100,7 @@ fmt: ## run go fmt
 
 lint: ## run go lint
 	@echo "$(WHALE) $@"
-	@test -z "$$(golint ./... | grep -v vendor/ | grep -v ".pb.go:" | tee /dev/stderr)"
+	@test -z "$$(golint ./... | grep -Fv $(call FIX_PATH,'vendor/') | grep -v ".pb.go:" | tee /dev/stderr)"
 
 dco: ## dco check
 	@which git-validation > /dev/null 2>/dev/null || (echo "ERROR: git-validation not found" && false)
@@ -109,11 +112,11 @@ endif
 
 ineffassign: ## run ineffassign
 	@echo "$(WHALE) $@"
-	@test -z "$$(ineffassign . | grep -v vendor/ | grep -v ".pb.go:" | tee /dev/stderr)"
+	@test -z "$$(ineffassign . | grep -Fv $(call FIX_PATH,'vendor/') | grep -v ".pb.go:" | tee /dev/stderr)"
 
 #errcheck: ## run go errcheck
 #	@echo "$(WHALE) $@"
-#	@test -z "$$(errcheck ./... | grep -v vendor/ | grep -v ".pb.go:" | tee /dev/stderr)"
+#	@test -z "$$(errcheck ./... | grep -Fv $(call FIX_PATH,'vendor/') | grep -v ".pb.go:" | tee /dev/stderr)"
 
 build: ## build the go packages
 	@echo "$(WHALE) $@"
@@ -1,3 +1,5 @@
+// +build !windows
+
 package archive
 
 import (
@@ -1,3 +1,5 @@
+// +build !windows
+
 package archive
 
 import (
@@ -20,7 +20,7 @@ func BenchmarkContainerCreate(b *testing.B) {
 		b.Error(err)
 		return
 	}
-	spec, err := GenerateSpec(WithImageConfig(ctx, image), WithProcessArgs("true"))
+	spec, err := GenerateSpec(WithImageConfig(ctx, image), withTrue())
 	if err != nil {
 		b.Error(err)
 		return
@@ -63,7 +63,7 @@ func BenchmarkContainerStart(b *testing.B) {
 		b.Error(err)
 		return
 	}
-	spec, err := GenerateSpec(WithImageConfig(ctx, image), WithProcessArgs("true"))
+	spec, err := GenerateSpec(WithImageConfig(ctx, image), withTrue())
 	if err != nil {
 		b.Error(err)
 		return
@@ -1,3 +1,5 @@
+// +build !windows
+
 package containerd
 
 import (
client.go | 10

@@ -7,7 +7,6 @@ import (
 	"log"
 	"net/http"
 	"runtime"
-	"strconv"
 	"sync"
 	"time"
 
@@ -34,11 +33,9 @@ import (
 	imagesservice "github.com/containerd/containerd/services/images"
 	snapshotservice "github.com/containerd/containerd/services/snapshot"
 	"github.com/containerd/containerd/snapshot"
-	"github.com/containerd/containerd/typeurl"
 	pempty "github.com/golang/protobuf/ptypes/empty"
 	"github.com/opencontainers/image-spec/identity"
 	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
-	specs "github.com/opencontainers/runtime-spec/specs-go"
 	"github.com/pkg/errors"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/grpclog"
@@ -48,13 +45,6 @@ import (
 func init() {
 	// reset the grpc logger so that it does not output in the STDIO of the calling process
 	grpclog.SetLogger(log.New(ioutil.Discard, "", log.LstdFlags))
-
-	// register TypeUrls for commonly marshaled external types
-	major := strconv.Itoa(specs.VersionMajor)
-	typeurl.Register(&specs.Spec{}, "opencontainers/runtime-spec", major, "Spec")
-	typeurl.Register(&specs.Process{}, "opencontainers/runtime-spec", major, "Process")
-	typeurl.Register(&specs.LinuxResources{}, "opencontainers/runtime-spec", major, "LinuxResources")
-	typeurl.Register(&specs.WindowsResources{}, "opencontainers/runtime-spec", major, "WindowsResources")
 }
 
 type clientOpts struct {
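The registration block removed here is not dropped; it moves into the new runtime/typeurl.go file at the end of this diff, so client.go no longer needs the strconv, typeurl, and specs imports. As a rough, hypothetical sketch of what that registration enables (a standalone program; the typeurl API names are assumed to match the vendored package of this era):

package main

import (
	"fmt"

	"github.com/containerd/containerd/typeurl"
	specs "github.com/opencontainers/runtime-spec/specs-go"
)

func main() {
	// Registering binds the Go type to a stable URL so values can cross
	// the GRPC API inside protobuf Any fields.
	typeurl.Register(&specs.Spec{}, "opencontainers/runtime-spec", "1", "Spec")

	any, err := typeurl.MarshalAny(&specs.Spec{Version: specs.Version})
	if err != nil {
		fmt.Println("marshal:", err)
		return
	}
	v, err := typeurl.UnmarshalAny(any) // resolved back via the registered URL
	fmt.Printf("decoded %T, err=%v\n", v, err)
}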
@@ -7,6 +7,7 @@ import (
 	"fmt"
 	"os"
 	"os/exec"
+	"runtime"
 	"syscall"
 	"testing"
 	"time"
@@ -17,11 +18,6 @@ import (
 	"github.com/containerd/containerd/testutil"
 )
 
-const (
-	defaultRoot = "/var/lib/containerd-test"
-	testImage   = "docker.io/library/alpine:latest"
-)
-
 var (
 	address  string
 	noDaemon bool
@@ -29,7 +25,7 @@ var (
 )
 
 func init() {
-	flag.StringVar(&address, "address", "/run/containerd-test/containerd.sock", "The address to the containerd socket for use in the tests")
+	flag.StringVar(&address, "address", defaultAddress, "The address to the containerd socket for use in the tests")
 	flag.BoolVar(&noDaemon, "no-daemon", false, "Do not start a dedicated daemon for the tests")
 	flag.Parse()
 }
@@ -57,11 +53,15 @@ func TestMain(m *testing.M) {
 	defer cancel()
 
 	if !noDaemon {
+		os.RemoveAll(defaultRoot)
+
 		// setup a new containerd daemon if !testing.Short
 		cmd = exec.Command("containerd",
+			"--root", defaultRoot,
 			"--address", address,
 			"--log-level", "debug",
 		)
 		cmd.Stdout = buf
 		cmd.Stderr = buf
 		if err := cmd.Start(); err != nil {
 			cmd.Wait()
@@ -94,14 +94,22 @@ func TestMain(m *testing.M) {
 	}).Info("running tests against containerd")
 
 	// pull a seed image
-	if _, err = client.Pull(ctx, testImage, WithPullUnpack); err != nil {
-		cmd.Process.Signal(syscall.SIGTERM)
-		cmd.Wait()
-		fmt.Fprintf(os.Stderr, "%s: %s", err, buf.String())
+	if runtime.GOOS != "windows" { // TODO: remove once pull is supported on windows
+		if _, err = client.Pull(ctx, testImage, WithPullUnpack); err != nil {
+			cmd.Process.Signal(syscall.SIGTERM)
+			cmd.Wait()
+			fmt.Fprintf(os.Stderr, "%s: %s", err, buf.String())
+			os.Exit(1)
+		}
+	}
+
+	if err := platformTestSetup(client); err != nil {
+		fmt.Fprintln(os.Stderr, "platform test setup failed", err)
 		os.Exit(1)
 	}
 
 	if err := client.Close(); err != nil {
-		fmt.Fprintln(os.Stderr, err)
+		fmt.Fprintln(os.Stderr, "failed to close client", err)
 	}
 
 	// run the test
@@ -110,13 +118,15 @@ func TestMain(m *testing.M) {
 	if !noDaemon {
 		// tear down the daemon and resources created
 		if err := cmd.Process.Signal(syscall.SIGTERM); err != nil {
-			fmt.Fprintln(os.Stderr, err)
+			if err := cmd.Process.Kill(); err != nil {
+				fmt.Fprintln(os.Stderr, "failed to signal containerd", err)
+			}
 		}
 		if err := cmd.Wait(); err != nil {
-			fmt.Fprintln(os.Stderr, err)
+			fmt.Fprintln(os.Stderr, "failed to wait for containerd", err)
 		}
 		if err := os.RemoveAll(defaultRoot); err != nil {
-			fmt.Fprintln(os.Stderr, err)
+			fmt.Fprintln(os.Stderr, "failed to remove test root dir", err)
 			os.Exit(1)
 		}
 		// only print containerd logs if the test failed
@@ -171,6 +181,11 @@ func TestNewClient(t *testing.T) {
 }
 
 func TestImagePull(t *testing.T) {
+	if runtime.GOOS == "windows" {
+		// TODO: remove once Windows has a snapshotter
+		t.Skip("Windows does not have a snapshotter yet")
+	}
+
 	client, err := newClient(t, address)
 	if err != nil {
 		t.Fatal(err)
@@ -9,9 +9,6 @@ import (
 	"time"
 )
 
-// DefaultAddress is the default unix socket address
-const DefaultAddress = "/run/containerd/containerd.sock"
-
 func dialer(address string, timeout time.Duration) (net.Conn, error) {
 	address = strings.TrimPrefix(address, "unix://")
 	return net.DialTimeout("unix", address, timeout)
client_unix_test.go | 13 (new file)

@@ -0,0 +1,13 @@
+// +build !windows
+
+package containerd
+
+const (
+	defaultRoot    = "/var/lib/containerd-test"
+	defaultAddress = "/run/containerd-test/containerd.sock"
+	testImage      = "docker.io/library/alpine:latest"
+)
+
+func platformTestSetup(client *Client) error {
+	return nil
+}
@@ -7,9 +7,6 @@ import (
 	winio "github.com/Microsoft/go-winio"
 )
 
-// DefaultAddress is the default unix socket address
-const DefaultAddress = `\\.\pipe\containerd-containerd`
-
 func dialer(address string, timeout time.Duration) (net.Conn, error) {
 	return winio.DialPipe(address, &timeout)
 }
client_windows_test.go | 87 (new file)

@@ -0,0 +1,87 @@
+package containerd
+
+import (
+	"encoding/json"
+	"fmt"
+	"os"
+	"path/filepath"
+
+	"github.com/pkg/errors"
+)
+
+const (
+	defaultAddress = `\\.\pipe\containerd-containerd-test`
+	testImage      = "docker.io/library/go:nanoserver"
+)
+
+var (
+	dockerLayerFolders []string
+
+	defaultRoot = filepath.Join(os.Getenv("programfiles"), "containerd", "root-test")
+)
+
+func platformTestSetup(client *Client) error {
+	var (
+		roots       []string
+		layerChains = make(map[string]string)
+	)
+	// Since we can't pull images yet, we'll piggyback on the default
+	// docker images
+	wfPath := `C:\ProgramData\docker\windowsfilter`
+	wf, err := os.Open(wfPath)
+	if err != nil {
+		return errors.Wrapf(err, "failed to access docker layers @ %s", wfPath)
+	}
+	defer wf.Close()
+	entries, err := wf.Readdirnames(0)
+	if err != nil {
+		return errors.Wrapf(err, "failed to read %s entries", wfPath)
+	}
+
+	for _, fn := range entries {
+		layerChainPath := filepath.Join(wfPath, fn, "layerchain.json")
+		lfi, err := os.Stat(layerChainPath)
+		switch {
+		case err == nil && lfi.Mode().IsRegular():
+			f, err := os.OpenFile(layerChainPath, os.O_RDONLY, 0660)
+			if err != nil {
+				fmt.Fprintln(os.Stderr,
+					errors.Wrapf(err, "failed to open %s", layerChainPath))
+				continue
+			}
+			defer f.Close()
+			l := make([]string, 0)
+			if err := json.NewDecoder(f).Decode(&l); err != nil {
+				fmt.Fprintln(os.Stderr,
+					errors.Wrapf(err, "failed to decode %s", layerChainPath))
+				continue
+			}
+			switch {
+			case len(l) == 1:
+				layerChains[l[0]] = filepath.Join(wfPath, fn)
+			case len(l) > 1:
+				fmt.Fprintf(os.Stderr, "Too many entries in %s: %d", layerChainPath, len(l))
+			case len(l) == 0:
+				roots = append(roots, filepath.Join(wfPath, fn))
+			}
+		case os.IsNotExist(err):
+			// keep on going
+		default:
+			return errors.Wrapf(err, "error trying to access %s", layerChainPath)
+		}
+	}
+
+	// There'll be 2 roots, just take the first one
+	l := roots[0]
+	dockerLayerFolders = append(dockerLayerFolders, l)
+	for {
+		l = layerChains[l]
+		if l == "" {
+			break
+		}
+
+		dockerLayerFolders = append([]string{l}, dockerLayerFolders...)
+	}
+
+	return nil
+}
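platformTestSetup above classifies each windowsfilter directory by how many entries its layerchain.json holds: zero marks a base layer, one names the parent used to walk the chain upward. A hedged, self-contained illustration of the decode step (the path in the sample is invented for illustration):

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// layerchain.json is a JSON array of parent layer directories;
	// the test setup keys each directory by l[0], its immediate parent.
	data := []byte(`["C:\\ProgramData\\docker\\windowsfilter\\parent-layer-id"]`)
	var l []string
	if err := json.Unmarshal(data, &l); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(len(l), l[0])
}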
@@ -1,7 +1,6 @@
 package main
 
 import (
-	"github.com/containerd/containerd"
 	"github.com/containerd/containerd/server"
 )
 
@@ -9,7 +8,7 @@ func defaultConfig() *server.Config {
 	return &server.Config{
 		Root: "/var/lib/containerd",
 		GRPC: server.GRPCConfig{
-			Address: containerd.DefaultAddress,
+			Address: server.DefaultAddress,
 		},
 		Debug: server.Debug{
 			Level: "info",

@@ -8,7 +8,7 @@ func defaultConfig() *server.Config {
 	return &server.Config{
 		Root: "/var/lib/containerd",
 		GRPC: server.GRPCConfig{
-			Address: "/run/containerd/containerd.sock",
+			Address: server.DefaultAddress,
 		},
 		Debug: server.Debug{
 			Level: "info",
@@ -40,12 +40,12 @@ var deleteCommand = cli.Command{
 		if err != nil {
 			return err
 		}
-		if status == containerd.Stopped {
+		if status == containerd.Stopped || status == containerd.Created {
 			if _, err := task.Delete(ctx); err != nil {
 				return err
 			}
 			return container.Delete(ctx, deleteOpts...)
 		}
-		return fmt.Errorf("cannot delete a container with an existing task")
+		return fmt.Errorf("cannot delete a non stopped container: %v", status)
 	},
 }
@@ -5,8 +5,8 @@ import (
 	"os"
 
 	"github.com/Sirupsen/logrus"
-	"github.com/containerd/containerd"
 	"github.com/containerd/containerd/namespaces"
+	"github.com/containerd/containerd/server"
 	"github.com/containerd/containerd/version"
 	"github.com/urfave/cli"
 )
@@ -40,7 +40,7 @@ containerd CLI
 		cli.StringFlag{
 			Name:  "address, a",
 			Usage: "address for containerd's GRPC server",
-			Value: containerd.DefaultAddress,
+			Value: server.DefaultAddress,
 		},
 		cli.DurationFlag{
 			Name: "timeout",
@@ -58,31 +58,32 @@ containerd CLI
 		},
 	}
 	app.Commands = append([]cli.Command{
-		imageCommand,
-		pullCommand,
-		contentCommand,
-		fetchCommand,
-		fetchObjectCommand,
-		pushCommand,
-		pushObjectCommand,
-		containersCommand,
-		checkpointCommand,
-		runCommand,
-		attachCommand,
-		deleteCommand,
-		namespacesCommand,
-		eventsCommand,
-		taskListCommand,
-		infoCommand,
-		killCommand,
-		pprofCommand,
-		execCommand,
-		pauseCommand,
-		resumeCommand,
-		snapshotCommand,
-		versionCommand,
-		applyCommand,
-		rootfsCommand,
+		applyCommand,
+		attachCommand,
+		checkpointCommand,
+		containersCommand,
+		contentCommand,
+		deleteCommand,
+		eventsCommand,
+		execCommand,
+		fetchCommand,
+		fetchObjectCommand,
+		imageCommand,
+		infoCommand,
+		killCommand,
+		namespacesCommand,
+		pauseCommand,
+		pprofCommand,
+		psCommand,
+		pullCommand,
+		pushCommand,
+		pushObjectCommand,
+		resumeCommand,
+		rootfsCommand,
+		runCommand,
+		snapshotCommand,
+		taskListCommand,
+		versionCommand,
 	}, extraCmds...)
 	app.Before = func(context *cli.Context) error {
 		if context.GlobalBool("debug") {
@@ -3,13 +3,12 @@ package main
 import (
 	"fmt"
 	"io"
-	"net"
 	"net/http"
 	"os"
 	"time"
 
+	"github.com/containerd/containerd/server"
 	"github.com/pkg/errors"
-
 	"github.com/urfave/cli"
 )
 
@@ -25,7 +24,7 @@ var pprofCommand = cli.Command{
 		cli.StringFlag{
 			Name:  "debug-socket, d",
 			Usage: "socket path for containerd's debug server",
-			Value: "/run/containerd/debug.sock",
+			Value: server.DefaultDebugAddress,
 		},
 	},
 	Subcommands: []cli.Command{
@@ -143,13 +142,8 @@ var pprofThreadcreateCommand = cli.Command{
 	},
 }
 
-func (d *pprofDialer) pprofDial(proto, addr string) (conn net.Conn, err error) {
-	return net.Dial(d.proto, d.addr)
-}
-
 func getPProfClient(context *cli.Context) *http.Client {
-	addr := context.GlobalString("debug-socket")
-	dialer := pprofDialer{"unix", addr}
+	dialer := getPProfDialer(context.GlobalString("debug-socket"))
 
 	tr := &http.Transport{
 		Dial: dialer.pprofDial,
cmd/ctr/pprof_unix.go | 13 (new file)

@@ -0,0 +1,13 @@
+// +build !windows
+
+package main
+
+import "net"
+
+func (d *pprofDialer) pprofDial(proto, addr string) (conn net.Conn, err error) {
+	return net.Dial(d.proto, d.addr)
+}
+
+func getPProfDialer(addr string) *pprofDialer {
+	return &pprofDialer{"unix", addr}
+}
cmd/ctr/pprof_windows.go
Normal file
15
cmd/ctr/pprof_windows.go
Normal file
@ -0,0 +1,15 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"net"
|
||||
|
||||
winio "github.com/Microsoft/go-winio"
|
||||
)
|
||||
|
||||
func (d *pprofDialer) pprofDial(proto, addr string) (conn net.Conn, err error) {
|
||||
return winio.DialPipe(d.addr, nil)
|
||||
}
|
||||
|
||||
func getPProfDialer(addr string) *pprofDialer {
|
||||
return &pprofDialer{"winpipe", addr}
|
||||
}
|
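The pprofDial indirection exists so the same getPProfClient body can ride a unix socket on one platform and a named pipe on the other. A minimal sketch of the pattern, assuming the unix variant and a default debug socket path (both names are assumptions, not taken from the PR):

package main

import (
	"fmt"
	"io/ioutil"
	"net"
	"net/http"
)

// Stand-in for the pprofDialer defined above (unix variant).
type pprofDialer struct {
	proto string
	addr  string
}

func (d *pprofDialer) pprofDial(proto, addr string) (net.Conn, error) {
	// Ignore what the transport asked for; always dial the debug socket.
	return net.Dial(d.proto, d.addr)
}

func main() {
	dialer := &pprofDialer{"unix", "/run/containerd/debug.sock"}
	client := &http.Client{Transport: &http.Transport{Dial: dialer.pprofDial}}
	// The host part is a placeholder; the custom Dial ignores it.
	resp, err := client.Get("http://./debug/pprof/heap")
	if err != nil {
		fmt.Println(err)
		return
	}
	defer resp.Body.Close()
	b, _ := ioutil.ReadAll(resp.Body)
	fmt.Printf("read %d bytes of profile data\n", len(b))
}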
@@ -25,7 +25,7 @@ func withEnv(context *cli.Context) containerd.SpecOpts {
 	return func(s *specs.Spec) error {
 		env := context.StringSlice("env")
 		if len(env) > 0 {
-			s.Process.Env = append(s.Process.Env, env...)
+			s.Process.Env = replaceOrAppendEnvValues(s.Process.Env, env)
 		}
 		return nil
 	}
@@ -2,22 +2,16 @@ package main
 
 import (
 	gocontext "context"
-	"encoding/json"
 	"fmt"
-	"io/ioutil"
 	"time"
 
-	"github.com/Sirupsen/logrus"
 	"github.com/containerd/console"
 	"github.com/containerd/containerd"
 	"github.com/containerd/containerd/api/services/tasks/v1"
+	"github.com/containerd/containerd/errdefs"
 	"github.com/containerd/containerd/log"
 	"github.com/containerd/containerd/mount"
-	"github.com/containerd/containerd/windows"
-	"github.com/containerd/containerd/windows/hcs"
-	digest "github.com/opencontainers/go-digest"
 	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
 	specs "github.com/opencontainers/runtime-spec/specs-go"
 	"github.com/pkg/errors"
 	"github.com/urfave/cli"
 )
 
@@ -25,126 +19,20 @@ const pipeRoot = `\\.\pipe`
 
 func init() {
 	runCommand.Flags = append(runCommand.Flags, cli.StringSliceFlag{
-		Name:  "layers",
+		Name:  "layer",
 		Usage: "HCSSHIM Layers to be used",
 	})
 }
 
-func spec(id string, config *ocispec.ImageConfig, context *cli.Context) *specs.Spec {
-	cmd := config.Cmd
-	if a := context.Args().First(); a != "" {
-		cmd = context.Args()
-	}
-
-	var (
-		// TODO: support overriding entrypoint
-		args = append(config.Entrypoint, cmd...)
-		tty  = context.Bool("tty")
-		cwd  = config.WorkingDir
-	)
-
-	if cwd == "" {
-		cwd = `C:\`
-	}
-
-	// Some sane defaults for console
-	w := 80
-	h := 20
-
-	if tty {
-		con := console.Current()
-		size, err := con.Size()
-		if err == nil {
-			w = int(size.Width)
-			h = int(size.Height)
+func withLayers(context *cli.Context) containerd.SpecOpts {
+	return func(s *specs.Spec) error {
+		l := context.StringSlice("layer")
+		if l == nil {
+			return errors.Wrap(errdefs.ErrInvalidArgument, "base layers must be specified with `--layer`")
 		}
+		s.Windows.LayerFolders = l
+		return nil
 	}
-
-	env := replaceOrAppendEnvValues(config.Env, context.StringSlice("env"))
-
-	return &specs.Spec{
-		Version: specs.Version,
-		Root: &specs.Root{
-			Readonly: context.Bool("readonly"),
-		},
-		Process: &specs.Process{
-			Args:     args,
-			Terminal: tty,
-			Cwd:      cwd,
-			Env:      env,
-			User: specs.User{
-				Username: config.User,
-			},
-			ConsoleSize: &specs.Box{
-				Height: uint(w),
-				Width:  uint(h),
-			},
-		},
-		Hostname: id,
-	}
-}
-
-func customSpec(context *cli.Context, configPath, rootfs string) (*specs.Spec, error) {
-	b, err := ioutil.ReadFile(configPath)
-	if err != nil {
-		return nil, err
-	}
-
-	var s specs.Spec
-	if err := json.Unmarshal(b, &s); err != nil {
-		return nil, err
-	}
-
-	if rootfs != "" && s.Root.Path != rootfs {
-		logrus.Warnf("ignoring config Root.Path %q, setting %q forcibly", s.Root.Path, rootfs)
-		s.Root.Path = rootfs
-	}
-	return &s, nil
-}
-
-func getConfig(context *cli.Context, imageConfig *ocispec.ImageConfig, rootfs string) (*specs.Spec, error) {
-	if config := context.String("runtime-config"); config != "" {
-		return customSpec(context, config, rootfs)
-	}
-
-	s := spec(context.String("id"), imageConfig, context)
-	if rootfs != "" {
-		s.Root.Path = rootfs
-	}
-
-	return s, nil
-}
-
-func newContainerSpec(context *cli.Context, config *ocispec.ImageConfig, imageRef string) ([]byte, error) {
-	spec, err := getConfig(context, config, context.String("rootfs"))
-	if err != nil {
-		return nil, err
-	}
-	if spec.Annotations == nil {
-		spec.Annotations = make(map[string]string)
-	}
-	spec.Annotations["image"] = imageRef
-	rtSpec := windows.RuntimeSpec{
-		OCISpec: *spec,
-		Configuration: hcs.Configuration{
-			Layers:                   context.StringSlice("layers"),
-			IgnoreFlushesDuringBoot:  true,
-			AllowUnqualifiedDNSQuery: true},
-	}
-	return json.Marshal(rtSpec)
-}
 
 func newCreateTaskRequest(context *cli.Context, id, tmpDir string, checkpoint *ocispec.Descriptor, mounts []mount.Mount) (*tasks.CreateTaskRequest, error) {
 	create := &tasks.CreateTaskRequest{
 		ContainerID: id,
 		Terminal:    context.Bool("tty"),
 		Stdin:       fmt.Sprintf(`%s\ctr-%s-stdin`, pipeRoot, id),
 		Stdout:      fmt.Sprintf(`%s\ctr-%s-stdout`, pipeRoot, id),
 	}
 	if !create.Terminal {
 		create.Stderr = fmt.Sprintf(`%s\ctr-%s-stderr`, pipeRoot, id)
 	}
 	return create, nil
 }
 
 func handleConsoleResize(ctx gocontext.Context, task resizer, con console.Console) error {
@@ -175,7 +63,14 @@ func handleConsoleResize(ctx gocontext.Context, task resizer, con console.Console) error {
 	return nil
 }
 
-func withTTY() containerd.SpecOpts {
+func withTTY(terminal bool) containerd.SpecOpts {
+	if !terminal {
+		return func(s *specs.Spec) error {
+			s.Process.Terminal = false
+			return nil
+		}
+	}
+
 	con := console.Current()
 	size, err := con.Size()
 	if err != nil {
@@ -192,43 +87,37 @@ func newContainer(ctx gocontext.Context, client *containerd.Client, context *cli.Context) (containerd.Container, error) {
 	var (
 		err error
 
-		ref  = context.Args().First()
-		id   = context.Args().Get(1)
-		args = context.Args()[2:]
-		tty  = context.Bool("tty")
+		// ref = context.Args().First()
+		id           = context.Args().Get(1)
+		args         = context.Args()[2:]
+		tty          = context.Bool("tty")
+		labelStrings = context.StringSlice("label")
 	)
-	image, err := client.GetImage(ctx, ref)
-	if err != nil {
-		return nil, err
-	}
+
+	labels := labelArgs(labelStrings)
+
+	// TODO(mlaventure): get base image once we have a snapshotter
 
 	opts := []containerd.SpecOpts{
-		containerd.WithImageConfig(ctx, image),
+		// TODO(mlaventure): use containerd.WithImageConfig once we have a snapshotter
+		withLayers(context),
 		withEnv(context),
 		withMounts(context),
+		withTTY(tty),
 	}
 	if len(args) > 0 {
 		opts = append(opts, containerd.WithProcessArgs(args...))
 	}
-	if tty {
-		opts = append(opts, withTTY())
-	}
 	if context.Bool("net-host") {
 		opts = append(opts, setHostNetworking())
 	}
 
 	spec, err := containerd.GenerateSpec(opts...)
 	if err != nil {
 		return nil, err
 	}
-	var rootfs containerd.NewContainerOpts
-	if context.Bool("readonly") {
-		rootfs = containerd.WithNewReadonlyRootFS(id, image)
-	} else {
-		rootfs = containerd.WithNewRootFS(id, image)
-	}
-
 	return client.NewContainer(ctx, id,
 		containerd.WithSpec(spec),
-		containerd.WithImage(image),
-		rootfs,
+		containerd.WithContainerLabels(labels),
+		// TODO(mlaventure): containerd.WithImage(image),
 	)
 }
@@ -157,7 +157,7 @@ type NewTaskOpts func(context.Context, *Client, *TaskInfo) error
 func (c *container) NewTask(ctx context.Context, ioCreate IOCreation, opts ...NewTaskOpts) (Task, error) {
 	c.mu.Lock()
 	defer c.mu.Unlock()
-	i, err := ioCreate()
+	i, err := ioCreate(c.c.ID)
 	if err != nil {
 		return nil, err
 	}
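NewTask now hands the container id to the IO factory, which the Windows backend needs to mint per-container pipe names (see io_windows.go later in this diff). A hedged sketch of a caller-side IOCreation under the new signature; discardIO is a made-up helper, not part of the PR:

package main

import (
	"bytes"
	"io/ioutil"

	"github.com/containerd/containerd"
)

// discardIO satisfies the new IOCreation shape func(id string) (*IO, error).
// NewIO returns an IOCreation, so calling it with the id yields the *IO.
func discardIO(id string) (*containerd.IO, error) {
	return containerd.NewIO(bytes.NewBuffer(nil), ioutil.Discard, ioutil.Discard)(id)
}

// Usage (inside code that already has ctx and a container):
//   task, err := container.NewTask(ctx, discardIO)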
container_linux_test.go | 100 (new file)

@@ -0,0 +1,100 @@
+// +build linux
+
+package containerd
+
+import (
+	"syscall"
+	"testing"
+
+	"github.com/containerd/cgroups"
+	specs "github.com/opencontainers/runtime-spec/specs-go"
+)
+
+func TestContainerUpdate(t *testing.T) {
+	client, err := newClient(t, address)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer client.Close()
+
+	var (
+		ctx, cancel = testContext()
+		id          = t.Name()
+	)
+	defer cancel()
+
+	image, err := client.GetImage(ctx, testImage)
+	if err != nil {
+		t.Error(err)
+		return
+	}
+	spec, err := generateSpec(WithImageConfig(ctx, image), withProcessArgs("sleep", "30"))
+	if err != nil {
+		t.Error(err)
+		return
+	}
+	limit := int64(32 * 1024 * 1024)
+	spec.Linux.Resources.Memory = &specs.LinuxMemory{
+		Limit: &limit,
+	}
+	container, err := client.NewContainer(ctx, id, WithSpec(spec), WithNewRootFS(id, image))
+	if err != nil {
+		t.Error(err)
+		return
+	}
+	defer container.Delete(ctx, WithRootFSDeletion)
+
+	task, err := container.NewTask(ctx, empty())
+	if err != nil {
+		t.Error(err)
+		return
+	}
+	defer task.Delete(ctx)
+
+	statusC := make(chan uint32, 1)
+	go func() {
+		status, err := task.Wait(ctx)
+		if err != nil {
+			t.Error(err)
+		}
+		statusC <- status
+	}()
+
+	// check that the task has a limit of 32mb
+	cgroup, err := cgroups.Load(cgroups.V1, cgroups.PidPath(int(task.Pid())))
+	if err != nil {
+		t.Error(err)
+		return
+	}
+	stat, err := cgroup.Stat(cgroups.IgnoreNotExist)
+	if err != nil {
+		t.Error(err)
+		return
+	}
+	if int64(stat.Memory.Usage.Limit) != limit {
+		t.Errorf("expected memory limit to be set to %d but received %d", limit, stat.Memory.Usage.Limit)
+		return
+	}
+	limit = 64 * 1024 * 1024
+	if err := task.Update(ctx, WithResources(&specs.LinuxResources{
+		Memory: &specs.LinuxMemory{
+			Limit: &limit,
+		},
+	})); err != nil {
+		t.Error(err)
+	}
+	// check that the task has a limit of 64mb
+	if stat, err = cgroup.Stat(cgroups.IgnoreNotExist); err != nil {
+		t.Error(err)
+		return
+	}
+	if int64(stat.Memory.Usage.Limit) != limit {
+		t.Errorf("expected memory limit to be set to %d but received %d", limit, stat.Memory.Usage.Limit)
+	}
+	if err := task.Kill(ctx, syscall.SIGKILL); err != nil {
+		t.Error(err)
+		return
+	}
+
+	<-statusC
+}
@@ -6,13 +6,16 @@ import (
 	"io"
 	"io/ioutil"
 	"os"
+	"runtime"
+	"strings"
 	"sync"
 	"syscall"
 	"testing"
 
-	"github.com/containerd/cgroups"
+	// Register the typeurl
+	_ "github.com/containerd/containerd/runtime"
+
 	"github.com/containerd/containerd/errdefs"
-	specs "github.com/opencontainers/runtime-spec/specs-go"
 )
 
 func empty() IOCreation {
@@ -48,7 +51,7 @@ func TestNewContainer(t *testing.T) {
 	}
 	defer client.Close()
 
-	spec, err := GenerateSpec()
+	spec, err := generateSpec()
 	if err != nil {
 		t.Error(err)
 		return
@@ -84,22 +87,26 @@ func TestContainerStart(t *testing.T) {
 	defer client.Close()
 
 	var (
+		image       Image
 		ctx, cancel = testContext()
 		id          = t.Name()
 	)
 	defer cancel()
 
-	image, err := client.GetImage(ctx, testImage)
-	if err != nil {
-		t.Error(err)
-		return
-	}
-	spec, err := GenerateSpec(WithImageConfig(ctx, image), WithProcessArgs("sh", "-c", "exit 7"))
+	if runtime.GOOS != "windows" {
+		image, err = client.GetImage(ctx, testImage)
+		if err != nil {
+			t.Error(err)
+			return
+		}
+	}
+
+	spec, err := generateSpec(withImageConfig(ctx, image), withExitStatus(7))
 	if err != nil {
 		t.Error(err)
 		return
 	}
-	container, err := client.NewContainer(ctx, id, WithSpec(spec), WithNewRootFS(id, image))
+	container, err := client.NewContainer(ctx, id, WithSpec(spec), withNewRootFS(id, image))
 	if err != nil {
 		t.Error(err)
 		return
@@ -151,23 +158,27 @@ func TestContainerOutput(t *testing.T) {
 	defer client.Close()
 
 	var (
+		image       Image
 		ctx, cancel = testContext()
 		id          = t.Name()
 		expected    = "kingkoye"
 	)
 	defer cancel()
 
-	image, err := client.GetImage(ctx, testImage)
-	if err != nil {
-		t.Error(err)
-		return
-	}
-	spec, err := GenerateSpec(WithImageConfig(ctx, image), WithProcessArgs("echo", expected))
+	if runtime.GOOS != "windows" {
+		image, err = client.GetImage(ctx, testImage)
+		if err != nil {
+			t.Error(err)
+			return
+		}
+	}
+
+	spec, err := generateSpec(withImageConfig(ctx, image), withProcessArgs("echo", expected))
 	if err != nil {
 		t.Error(err)
 		return
 	}
-	container, err := client.NewContainer(ctx, id, WithSpec(spec), WithNewRootFS(id, image))
+	container, err := client.NewContainer(ctx, id, WithSpec(spec), withNewRootFS(id, image))
 	if err != nil {
 		t.Error(err)
 		return
@@ -207,7 +218,7 @@ func TestContainerOutput(t *testing.T) {
 
 	actual := stdout.String()
 	// echo adds a new line
-	expected = expected + "\n"
+	expected = expected + newLine
 	if actual != expected {
 		t.Errorf("expected output %q but received %q", expected, actual)
 	}
@@ -221,22 +232,26 @@ func TestContainerExec(t *testing.T) {
 	defer client.Close()
 
 	var (
+		image       Image
 		ctx, cancel = testContext()
 		id          = t.Name()
 	)
 	defer cancel()
 
-	image, err := client.GetImage(ctx, testImage)
-	if err != nil {
-		t.Error(err)
-		return
-	}
-	spec, err := GenerateSpec(WithImageConfig(ctx, image), WithProcessArgs("sleep", "100"))
+	if runtime.GOOS != "windows" {
+		image, err = client.GetImage(ctx, testImage)
+		if err != nil {
+			t.Error(err)
+			return
+		}
+	}
+
+	spec, err := generateSpec(withImageConfig(ctx, image), withProcessArgs("sleep", "100"))
 	if err != nil {
 		t.Error(err)
 		return
 	}
-	container, err := client.NewContainer(ctx, id, WithSpec(spec), WithNewRootFS(id, image))
+	container, err := client.NewContainer(ctx, id, WithSpec(spec), withNewRootFS(id, image))
 	if err != nil {
 		t.Error(err)
 		return
@@ -258,12 +273,14 @@ func TestContainerExec(t *testing.T) {
 		close(finished)
 	}()
 
+	if err := task.Start(ctx); err != nil {
+		t.Error(err)
+		return
+	}
+
 	// start an exec process without running the original container process info
 	processSpec := spec.Process
-	processSpec.Args = []string{
-		"sh", "-c",
-		"exit 6",
-	}
+	withExecExitStatus(processSpec, 6)
 	execID := t.Name() + "_exec"
 	process, err := task.Exec(ctx, execID, processSpec, empty())
 	if err != nil {
@@ -275,7 +292,6 @@ func TestContainerExec(t *testing.T) {
 		status, err := process.Wait(ctx)
 		if err != nil {
 			t.Error(err)
-			return
 		}
 		processStatusC <- status
 	}()
@@ -305,7 +321,7 @@ func TestContainerExec(t *testing.T) {
 	<-finished
 }
 
-func TestContainerProcesses(t *testing.T) {
+func TestContainerPids(t *testing.T) {
 	client, err := newClient(t, address)
 	if err != nil {
 		t.Fatal(err)
@@ -313,22 +329,26 @@ func TestContainerProcesses(t *testing.T) {
 	defer client.Close()
 
 	var (
+		image       Image
 		ctx, cancel = testContext()
 		id          = t.Name()
 	)
 	defer cancel()
 
-	image, err := client.GetImage(ctx, testImage)
-	if err != nil {
-		t.Error(err)
-		return
-	}
-	spec, err := GenerateSpec(WithImageConfig(ctx, image), WithProcessArgs("sleep", "100"))
+	if runtime.GOOS != "windows" {
+		image, err = client.GetImage(ctx, testImage)
+		if err != nil {
+			t.Error(err)
+			return
+		}
+	}
+
+	spec, err := generateSpec(withImageConfig(ctx, image), withProcessArgs("sleep", "100"))
 	if err != nil {
 		t.Error(err)
 		return
 	}
-	container, err := client.NewContainer(ctx, id, WithSpec(spec), WithNewRootFS(id, image))
+	container, err := client.NewContainer(ctx, id, WithSpec(spec), withNewRootFS(id, image))
 	if err != nil {
 		t.Error(err)
 		return
@@ -351,6 +371,11 @@ func TestContainerProcesses(t *testing.T) {
 		statusC <- status
 	}()
 
+	if err := task.Start(ctx); err != nil {
+		t.Error(err)
+		return
+	}
+
 	pid := task.Pid()
 	if pid <= 0 {
 		t.Errorf("invalid task pid %d", pid)
@@ -383,29 +408,33 @@ func TestContainerCloseIO(t *testing.T) {
 	defer client.Close()
 
 	var (
+		image       Image
 		ctx, cancel = testContext()
 		id          = t.Name()
 	)
 	defer cancel()
 
-	image, err := client.GetImage(ctx, testImage)
-	if err != nil {
-		t.Error(err)
-		return
-	}
-	spec, err := GenerateSpec(WithImageConfig(ctx, image), WithProcessArgs("cat"))
+	if runtime.GOOS != "windows" {
+		image, err = client.GetImage(ctx, testImage)
+		if err != nil {
+			t.Error(err)
+			return
+		}
+	}
+
+	spec, err := generateSpec(withImageConfig(ctx, image), withCat())
 	if err != nil {
 		t.Error(err)
 		return
 	}
-	container, err := client.NewContainer(ctx, id, WithSpec(spec), WithNewRootFS(id, image))
+	container, err := client.NewContainer(ctx, id, WithSpec(spec), withNewRootFS(id, image))
 	if err != nil {
 		t.Error(err)
 		return
 	}
 	defer container.Delete(ctx, WithRootFSDeletion)
 
-	const expected = "hello\n"
+	const expected = "hello" + newLine
 	stdout := bytes.NewBuffer(nil)
 
 	r, w, err := os.Pipe()
@@ -451,12 +480,25 @@ func TestContainerCloseIO(t *testing.T) {
 
 	output := stdout.String()
 
+	if runtime.GOOS == "windows" {
+		// On windows we use more and it always adds an extra newline
+		// remove it here
+		output = strings.TrimSuffix(output, newLine)
+	}
+
 	if output != expected {
 		t.Errorf("expected output %q but received %q", expected, output)
 	}
 }
 
 func TestContainerAttach(t *testing.T) {
+	if runtime.GOOS == "windows" {
+		// On windows, closing the write side of the pipe closes the read
+		// side, sending an EOF to it and preventing reopening it.
+		// Hence this test will always fail on windows
+		t.Skip("invalid logic on windows")
+	}
+
 	client, err := newClient(t, address)
 	if err != nil {
 		t.Fatal(err)
@@ -464,29 +506,33 @@ func TestContainerAttach(t *testing.T) {
 	defer client.Close()
 
 	var (
+		image       Image
 		ctx, cancel = testContext()
 		id          = t.Name()
 	)
 	defer cancel()
 
-	image, err := client.GetImage(ctx, testImage)
-	if err != nil {
-		t.Error(err)
-		return
-	}
-	spec, err := GenerateSpec(WithImageConfig(ctx, image), WithProcessArgs("cat"))
+	if runtime.GOOS != "windows" {
+		image, err = client.GetImage(ctx, testImage)
+		if err != nil {
+			t.Error(err)
+			return
+		}
+	}
+
+	spec, err := generateSpec(withImageConfig(ctx, image), withCat())
 	if err != nil {
 		t.Error(err)
 		return
 	}
-	container, err := client.NewContainer(ctx, id, WithSpec(spec), WithNewRootFS(id, image))
+	container, err := client.NewContainer(ctx, id, WithSpec(spec), withNewRootFS(id, image))
 	if err != nil {
 		t.Error(err)
 		return
 	}
 	defer container.Delete(ctx, WithRootFSDeletion)
 
-	expected := "hello\n"
+	expected := "hello" + newLine
 	stdout := bytes.NewBuffer(nil)
 
 	r, w, err := os.Pipe()
@@ -586,22 +632,26 @@ func TestDeleteRunningContainer(t *testing.T) {
 	defer client.Close()
 
 	var (
+		image       Image
 		ctx, cancel = testContext()
 		id          = t.Name()
 	)
 	defer cancel()
 
-	image, err := client.GetImage(ctx, testImage)
-	if err != nil {
-		t.Error(err)
-		return
-	}
-	spec, err := GenerateSpec(WithImageConfig(ctx, image), WithProcessArgs("sleep", "100"))
+	if runtime.GOOS != "windows" {
+		image, err = client.GetImage(ctx, testImage)
+		if err != nil {
+			t.Error(err)
+			return
+		}
+	}
+
+	spec, err := generateSpec(withImageConfig(ctx, image), withProcessArgs("sleep", "100"))
 	if err != nil {
 		t.Error(err)
 		return
 	}
-	container, err := client.NewContainer(ctx, id, WithSpec(spec), WithImage(image), WithNewRootFS(id, image))
+	container, err := client.NewContainer(ctx, id, WithSpec(spec), withNewRootFS(id, image))
 	if err != nil {
 		t.Error(err)
 		return
@@ -651,22 +701,26 @@ func TestContainerKill(t *testing.T) {
 	defer client.Close()
 
 	var (
+		image       Image
 		ctx, cancel = testContext()
 		id          = t.Name()
 	)
 	defer cancel()
 
-	image, err := client.GetImage(ctx, testImage)
-	if err != nil {
-		t.Error(err)
-		return
-	}
-	spec, err := GenerateSpec(WithImageConfig(ctx, image), WithProcessArgs("sh", "-c", "cat"))
+	if runtime.GOOS != "windows" {
+		image, err = client.GetImage(ctx, testImage)
+		if err != nil {
+			t.Error(err)
+			return
+		}
+	}
+
+	spec, err := generateSpec(withImageConfig(ctx, image), withCat())
 	if err != nil {
 		t.Error(err)
 		return
 	}
-	container, err := client.NewContainer(ctx, id, WithSpec(spec), WithImage(image), WithNewRootFS(id, image))
+	container, err := client.NewContainer(ctx, id, WithSpec(spec), withNewRootFS(id, image))
 	if err != nil {
 		t.Error(err)
 		return
@@ -709,95 +763,6 @@ func TestContainerKill(t *testing.T) {
 	}
 }
 
-func TestContainerUpdate(t *testing.T) {
-	client, err := newClient(t, address)
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer client.Close()
-
-	var (
-		ctx, cancel = testContext()
-		id          = t.Name()
-	)
-	defer cancel()
-
-	image, err := client.GetImage(ctx, testImage)
-	if err != nil {
-		t.Error(err)
-		return
-	}
-	spec, err := GenerateSpec(WithImageConfig(ctx, image), WithProcessArgs("sleep", "30"))
-	if err != nil {
-		t.Error(err)
-		return
-	}
-	limit := int64(32 * 1024 * 1024)
-	spec.Linux.Resources.Memory = &specs.LinuxMemory{
-		Limit: &limit,
-	}
-	container, err := client.NewContainer(ctx, id, WithSpec(spec), WithNewRootFS(id, image))
-	if err != nil {
-		t.Error(err)
-		return
-	}
-	defer container.Delete(ctx, WithRootFSDeletion)
-
-	task, err := container.NewTask(ctx, empty())
-	if err != nil {
-		t.Error(err)
-		return
-	}
-	defer task.Delete(ctx)
-
-	statusC := make(chan uint32, 1)
-	go func() {
-		status, err := task.Wait(ctx)
-		if err != nil {
-			t.Error(err)
-		}
-		statusC <- status
-	}()
-
-	// check that the task has a limit of 32mb
-	cgroup, err := cgroups.Load(cgroups.V1, cgroups.PidPath(int(task.Pid())))
-	if err != nil {
-		t.Error(err)
-		return
-	}
-	stat, err := cgroup.Stat(cgroups.IgnoreNotExist)
-	if err != nil {
-		t.Error(err)
-		return
-	}
-	if int64(stat.Memory.Usage.Limit) != limit {
-		t.Errorf("expected memory limit to be set to %d but received %d", limit, stat.Memory.Usage.Limit)
-		return
-	}
-	limit = 64 * 1024 * 1024
-	if err := task.Update(ctx, WithResources(&specs.LinuxResources{
-		Memory: &specs.LinuxMemory{
-			Limit: &limit,
-		},
-	})); err != nil {
-		t.Error(err)
-	}
-	// check that the task has a limit of 64mb
-	if stat, err = cgroup.Stat(cgroups.IgnoreNotExist); err != nil {
-		t.Error(err)
-		return
-	}
-	if int64(stat.Memory.Usage.Limit) != limit {
-		t.Errorf("expected memory limit to be set to %d but received %d", limit, stat.Memory.Usage.Limit)
-	}
-	if err := task.Kill(ctx, syscall.SIGKILL); err != nil {
-		t.Error(err)
-		return
-	}
-
-	<-statusC
-}
-
 func TestContainerNoBinaryExists(t *testing.T) {
 	client, err := newClient(t, address)
 	if err != nil {
@@ -806,30 +771,47 @@ func TestContainerNoBinaryExists(t *testing.T) {
 	defer client.Close()
 
 	var (
+		image       Image
 		ctx, cancel = testContext()
 		id          = t.Name()
 	)
 	defer cancel()
 
-	image, err := client.GetImage(ctx, testImage)
-	if err != nil {
-		t.Error(err)
-		return
-	}
-	spec, err := GenerateSpec(WithImageConfig(ctx, image), WithProcessArgs("nothing"))
+	if runtime.GOOS != "windows" {
+		image, err = client.GetImage(ctx, testImage)
+		if err != nil {
+			t.Error(err)
+			return
+		}
+	}
+
+	spec, err := generateSpec(withImageConfig(ctx, image), withProcessArgs("nothing"))
 	if err != nil {
 		t.Error(err)
 		return
 	}
-	container, err := client.NewContainer(ctx, id, WithSpec(spec), WithNewRootFS(id, image))
+	container, err := client.NewContainer(ctx, id, WithSpec(spec), withNewRootFS(id, image))
 	if err != nil {
 		t.Error(err)
 		return
 	}
 	defer container.Delete(ctx, WithRootFSDeletion)
 
-	if _, err := container.NewTask(ctx, Stdio); err == nil {
-		t.Error("NewTask should return an error when binary does not exist")
+	task, err := container.NewTask(ctx, Stdio)
+	switch runtime.GOOS {
+	case "windows":
+		if err != nil {
+			t.Errorf("failed to create task %v", err)
+		}
+		if err := task.Start(ctx); err == nil {
+			t.Error("task.Start() should return an error when binary does not exist")
+			task.Delete(ctx)
+		}
+	default:
+		if err == nil {
+			t.Error("NewTask should return an error when binary does not exist")
+			task.Delete(ctx)
+		}
 	}
 }
 
@@ -841,22 +823,26 @@ func TestContainerExecNoBinaryExists(t *testing.T) {
 	defer client.Close()
 
 	var (
+		image       Image
 		ctx, cancel = testContext()
 		id          = t.Name()
 	)
 	defer cancel()
 
-	image, err := client.GetImage(ctx, testImage)
-	if err != nil {
-		t.Error(err)
-		return
-	}
-	spec, err := GenerateSpec(WithImageConfig(ctx, image), WithProcessArgs("sleep", "100"))
+	if runtime.GOOS != "windows" {
+		image, err = client.GetImage(ctx, testImage)
+		if err != nil {
+			t.Error(err)
+			return
+		}
+	}
+
+	spec, err := generateSpec(withImageConfig(ctx, image), withProcessArgs("sleep", "100"))
 	if err != nil {
 		t.Error(err)
 		return
 	}
-	container, err := client.NewContainer(ctx, id, WithSpec(spec), WithNewRootFS(id, image))
+	container, err := client.NewContainer(ctx, id, WithSpec(spec), withNewRootFS(id, image))
 	if err != nil {
 		t.Error(err)
 		return
@@ -870,6 +856,11 @@ func TestContainerExecNoBinaryExists(t *testing.T) {
 	}
 	defer task.Delete(ctx)
 
+	if err := task.Start(ctx); err != nil {
+		t.Error(err)
+		return
+	}
+
 	finished := make(chan struct{}, 1)
 	go func() {
 		if _, err := task.Wait(ctx); err != nil {
@@ -258,9 +258,11 @@ func checkBlobPath(t *testing.T, cs Store, dgst digest.Digest) string {
 		t.Fatalf("error stating blob path: %v", err)
 	}
 
-	// ensure that only read bits are set.
-	if ((fi.Mode() & os.ModePerm) & 0333) != 0 {
-		t.Fatalf("incorrect permissions: %v", fi.Mode())
+	if runtime.GOOS != "windows" {
+		// ensure that only read bits are set.
+		if ((fi.Mode() & os.ModePerm) & 0333) != 0 {
+			t.Fatalf("incorrect permissions: %v", fi.Mode())
+		}
 	}
 
 	return path
@@ -3,6 +3,7 @@ package content
 import (
 	"os"
 	"path/filepath"
+	"runtime"
 	"time"
 
 	"github.com/containerd/containerd/errdefs"
@@ -67,8 +68,12 @@ func (w *writer) Commit(size int64, expected digest.Digest) error {
 	// only allowing reads honoring the umask on creation.
 	//
 	// This removes write and exec, only allowing read per the creation umask.
-	if err := w.fp.Chmod((fi.Mode() & os.ModePerm) &^ 0333); err != nil {
-		return errors.Wrap(err, "failed to change ingest file permissions")
+	//
+	// NOTE: Windows does not support this operation
+	if runtime.GOOS != "windows" {
+		if err := w.fp.Chmod((fi.Mode() & os.ModePerm) &^ 0333); err != nil {
+			return errors.Wrap(err, "failed to change ingest file permissions")
+		}
 	}
 
 	if size > 0 && size != fi.Size() {
@@ -21,7 +21,7 @@ import "github.com/pkg/errors"
 // map very well to those defined by grpc.
 var (
 	ErrUnknown            = errors.New("unknown") // used internally to represent a missed mapping.
-	ErrInvalidArgument    = errors.New("invalid")
+	ErrInvalidArgument    = errors.New("invalid argument")
 	ErrNotFound           = errors.New("not found")
 	ErrAlreadyExists      = errors.New("already exists")
 	ErrFailedPrecondition = errors.New("failed precondition")
@@ -47,7 +47,9 @@ func (e *Emitter) Events(ctx context.Context, clientID string) chan *events.Envelope {
 			ns: ns,
 		}
 		e.sinks[clientID] = s
+		e.m.Unlock()
 		e.broadcaster.Add(s)
+		return s.ch
 	}
 	ch := e.sinks[clientID].ch
 	e.m.Unlock()
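The two added lines close a hole in the first-subscriber path: the method previously fell through to a second map lookup while still juggling when to drop the lock. A self-contained sketch of the corrected create-or-get shape (generic names, not the emitter's actual types):

package main

import "sync"

type registry struct {
	mu    sync.Mutex
	sinks map[string]chan int
}

// get creates the channel on first use and returns early, releasing the
// lock before anything that might block and skipping the re-lookup.
func (r *registry) get(id string) chan int {
	r.mu.Lock()
	if ch, ok := r.sinks[id]; ok {
		r.mu.Unlock()
		return ch
	}
	ch := make(chan int, 1)
	r.sinks[id] = ch
	r.mu.Unlock()
	return ch
}

func main() {
	r := &registry{sinks: map[string]chan int{}}
	_ = r.get("client-1")
}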
@@ -13,7 +13,7 @@ type resourceUpdate struct {
 }
 
 func (u resourceUpdate) String() string {
-	return fmt.Sprintf("%s(mode: %o, uid: %s, gid: %s) -> %s(mode: %o, uid: %s, gid: %s)",
+	return fmt.Sprintf("%s(mode: %o, uid: %d, gid: %d) -> %s(mode: %o, uid: %d, gid: %d)",
 		u.Original.Path(), u.Original.Mode(), u.Original.UID(), u.Original.GID(),
 		u.Updated.Path(), u.Updated.Mode(), u.Updated.UID(), u.Updated.GID(),
 	)
helpers_unix_test.go | 51 (new file)

@@ -0,0 +1,51 @@
+// +build !windows
+
+package containerd
+
+import (
+	"context"
+	"fmt"
+
+	specs "github.com/opencontainers/runtime-spec/specs-go"
+)
+
+const newLine = "\n"
+
+func generateSpec(opts ...SpecOpts) (*specs.Spec, error) {
+	return GenerateSpec(opts...)
+}
+
+func withExitStatus(es int) SpecOpts {
+	return func(s *specs.Spec) error {
+		s.Process.Args = []string{"sh", "-c", fmt.Sprintf("exit %d", es)}
+		return nil
+	}
+}
+
+func withProcessArgs(args ...string) SpecOpts {
+	return WithProcessArgs(args...)
+}
+
+func withCat() SpecOpts {
+	return WithProcessArgs("cat")
+}
+
+func withTrue() SpecOpts {
+	return WithProcessArgs("true")
+}
+
+func withExecExitStatus(s *specs.Process, es int) {
+	s.Args = []string{"sh", "-c", fmt.Sprintf("exit %d", es)}
+}
+
+func withExecArgs(s *specs.Process, args ...string) {
+	s.Args = args
+}
+
+func withImageConfig(ctx context.Context, i Image) SpecOpts {
+	return WithImageConfig(ctx, i)
+}
+
+func withNewRootFS(id string, i Image) NewContainerOpts {
+	return WithNewRootFS(id, i)
+}
helpers_windows_test.go | 65 (new file)

@@ -0,0 +1,65 @@
+// +build windows
+
+package containerd
+
+import (
+	"context"
+	"strconv"
+
+	"github.com/containerd/containerd/containers"
+	specs "github.com/opencontainers/runtime-spec/specs-go"
+)
+
+const newLine = "\r\n"
+
+func generateSpec(opts ...SpecOpts) (*specs.Spec, error) {
+	spec, err := GenerateSpec(opts...)
+	if err != nil {
+		return nil, err
+	}
+
+	spec.Windows.LayerFolders = dockerLayerFolders
+
+	return spec, nil
+}
+
+func withExitStatus(es int) SpecOpts {
+	return func(s *specs.Spec) error {
+		s.Process.Args = []string{"powershell", "-noprofile", "exit", strconv.Itoa(es)}
+		return nil
+	}
+}
+
+func withProcessArgs(args ...string) SpecOpts {
+	return WithProcessArgs(append([]string{"powershell", "-noprofile"}, args...)...)
+}
+
+func withCat() SpecOpts {
+	return WithProcessArgs("cmd", "/c", "more")
+}
+
+func withTrue() SpecOpts {
+	return WithProcessArgs("cmd", "/c")
+}
+
+func withExecExitStatus(s *specs.Process, es int) {
+	s.Args = []string{"powershell", "-noprofile", "exit", strconv.Itoa(es)}
+}
+
+func withExecArgs(s *specs.Process, args ...string) {
+	s.Args = append([]string{"powershell", "-noprofile"}, args...)
+}
+
+func withImageConfig(ctx context.Context, i Image) SpecOpts {
+	// TODO: when windows has a snapshotter remove the withImageConfig helper
+	return func(s *specs.Spec) error {
+		return nil
+	}
+}
+
+func withNewRootFS(id string, i Image) NewContainerOpts {
+	// TODO: when windows has a snapshotter remove the withNewRootFS helper
+	return func(ctx context.Context, client *Client, c *containers.Container) error {
+		return nil
+	}
+}
io.go | 35

@@ -4,9 +4,7 @@ import (
 	"context"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"os"
-	"path/filepath"
 	"sync"
 )
 
@@ -40,7 +38,7 @@ func (i *IO) Close() error {
 	return i.closer.Close()
 }
 
-type IOCreation func() (*IO, error)
+type IOCreation func(id string) (*IO, error)
 
 type IOAttach func(*FIFOSet) (*IO, error)
 
@@ -49,8 +47,8 @@ func NewIO(stdin io.Reader, stdout, stderr io.Writer) IOCreation {
 }
 
 func NewIOWithTerminal(stdin io.Reader, stdout, stderr io.Writer, terminal bool) IOCreation {
-	return func() (*IO, error) {
-		paths, err := NewFifos()
+	return func(id string) (*IO, error) {
+		paths, err := NewFifos(id)
 		if err != nil {
 			return nil, err
 		}
@@ -72,7 +70,6 @@ func NewIOWithTerminal(stdin io.Reader, stdout, stderr io.Writer, terminal bool)
 		i.closer = closer
 		return i, nil
 	}
-
 }
 
 func WithAttach(stdin io.Reader, stdout, stderr io.Writer) IOAttach {
@@ -102,31 +99,13 @@ func WithAttach(stdin io.Reader, stdout, stderr io.Writer) IOAttach {
 
 // Stdio returns an IO implementation to be used for a task
 // that outputs the container's IO as the current processes Stdio
-func Stdio() (*IO, error) {
-	return NewIO(os.Stdin, os.Stdout, os.Stderr)()
+func Stdio(id string) (*IO, error) {
+	return NewIO(os.Stdin, os.Stdout, os.Stderr)(id)
 }
 
 // StdioTerminal will setup the IO for the task to use a terminal
-func StdioTerminal() (*IO, error) {
-	return NewIOWithTerminal(os.Stdin, os.Stdout, os.Stderr, true)()
-}
-
-// NewFifos returns a new set of fifos for the task
-func NewFifos() (*FIFOSet, error) {
-	root := filepath.Join(os.TempDir(), "containerd")
-	if err := os.MkdirAll(root, 0700); err != nil {
-		return nil, err
-	}
-	dir, err := ioutil.TempDir(root, "")
-	if err != nil {
-		return nil, err
-	}
-	return &FIFOSet{
-		Dir: dir,
-		In:  filepath.Join(dir, "stdin"),
-		Out: filepath.Join(dir, "stdout"),
-		Err: filepath.Join(dir, "stderr"),
-	}, nil
+func StdioTerminal(id string) (*IO, error) {
+	return NewIOWithTerminal(os.Stdin, os.Stdout, os.Stderr, true)(id)
 }
 
 type FIFOSet struct {
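Threading the id into NewFifos lets both platforms name the streams after the task instead of using generic stdin/stdout files. A small illustration of the resulting names (the directory component is randomized on unix; the values here are assumptions):

package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	id := "example"
	dir := filepath.Join("/tmp", "containerd", "123456") // stand-in for the ioutil.TempDir result on unix
	fmt.Println(filepath.Join(dir, id+"-stdin"))         // unix fifo path
	fmt.Printf(`\\.\pipe\ctr-%s-stdout`+"\n", id)        // windows named pipe
}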
21
io_unix.go
21
io_unix.go
@@ -5,12 +5,33 @@ package containerd
import (
	"context"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
	"sync"
	"syscall"

	"github.com/containerd/fifo"
)

// NewFifos returns a new set of fifos for the task
func NewFifos(id string) (*FIFOSet, error) {
	root := filepath.Join(os.TempDir(), "containerd")
	if err := os.MkdirAll(root, 0700); err != nil {
		return nil, err
	}
	dir, err := ioutil.TempDir(root, "")
	if err != nil {
		return nil, err
	}
	return &FIFOSet{
		Dir: dir,
		In:  filepath.Join(dir, id+"-stdin"),
		Out: filepath.Join(dir, id+"-stdout"),
		Err: filepath.Join(dir, id+"-stderr"),
	}, nil
}

func copyIO(fifos *FIFOSet, ioset *ioSet, tty bool) (_ *wgCloser, err error) {
	var (
		f io.ReadWriteCloser
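
On unix the id is folded into the fifo file names, so two tasks sharing the same temp directory can no longer collide. For an id of "my-task" (illustrative), the resulting layout would look like:

	/tmp/containerd/<random>/my-task-stdin
	/tmp/containerd/<random>/my-task-stdout
	/tmp/containerd/<random>/my-task-stderr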
@@ -1,6 +1,7 @@
package containerd

import (
	"fmt"
	"io"
	"net"
	"sync"
@@ -10,8 +11,22 @@ import (
	"github.com/pkg/errors"
)

const pipeRoot = `\\.\pipe`

// NewFifos returns a new set of fifos for the task
func NewFifos(id string) (*FIFOSet, error) {
	return &FIFOSet{
		In:  fmt.Sprintf(`%s\ctr-%s-stdin`, pipeRoot, id),
		Out: fmt.Sprintf(`%s\ctr-%s-stdout`, pipeRoot, id),
		Err: fmt.Sprintf(`%s\ctr-%s-stderr`, pipeRoot, id),
	}, nil
}

func copyIO(fifos *FIFOSet, ioset *ioSet, tty bool) (_ *wgCloser, err error) {
	var wg sync.WaitGroup
	var (
		wg  sync.WaitGroup
		set []io.Closer
	)

	if fifos.In != "" {
		l, err := winio.ListenPipe(fifos.In, nil)
@@ -23,6 +38,7 @@ func copyIO(fifos *FIFOSet, ioset *ioSet, tty bool) (_ *wgCloser, err error) {
			l.Close()
		}
	}(l)
	set = append(set, l)

	go func() {
		c, err := l.Accept()
@@ -46,6 +62,7 @@ func copyIO(fifos *FIFOSet, ioset *ioSet, tty bool) (_ *wgCloser, err error) {
			l.Close()
		}
	}(l)
	set = append(set, l)

	wg.Add(1)
	go func() {
@@ -71,6 +88,7 @@ func copyIO(fifos *FIFOSet, ioset *ioSet, tty bool) (_ *wgCloser, err error) {
			l.Close()
		}
	}(l)
	set = append(set, l)

	wg.Add(1)
	go func() {
@@ -89,5 +107,11 @@ func copyIO(fifos *FIFOSet, ioset *ioSet, tty bool) (_ *wgCloser, err error) {
	return &wgCloser{
		wg:  &wg,
		dir: fifos.Dir,
		set: set,
		cancel: func() {
			for _, l := range set {
				l.Close()
			}
		},
	}, nil
}
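
On Windows the same id is encoded into named pipes rather than fifos — for an id of "my-task" (illustrative): \\.\pipe\ctr-my-task-stdin, \\.\pipe\ctr-my-task-stdout and \\.\pipe\ctr-my-task-stderr. The new set slice and cancel func also let wgCloser close every pipe listener if IO copying has to be aborted, instead of leaking the accept goroutines.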
@@ -42,7 +42,7 @@ type Task interface {
	Pids(context.Context) ([]uint32, error)
	// Checkpoint checkpoints a container to an image with live system data
	Checkpoint(context.Context, string, *types.Any) error
	// DeleteProcess deletes a specific exec process via the pid
	// DeleteProcess deletes a specific exec process via its id
	DeleteProcess(context.Context, string) (*Exit, error)
	// Update sets the provided resources to a running task
	Update(context.Context, *types.Any) error
17
runtime/typeurl.go
Normal file
@@ -0,0 +1,17 @@
package runtime

import (
	"strconv"

	"github.com/containerd/containerd/typeurl"
	specs "github.com/opencontainers/runtime-spec/specs-go"
)

func init() {
	// register TypeUrls for commonly marshaled external types
	major := strconv.Itoa(specs.VersionMajor)
	typeurl.Register(&specs.Spec{}, "opencontainers/runtime-spec", major, "Spec")
	typeurl.Register(&specs.Process{}, "opencontainers/runtime-spec", major, "Process")
	typeurl.Register(&specs.LinuxResources{}, "opencontainers/runtime-spec", major, "LinuxResources")
	typeurl.Register(&specs.WindowsResources{}, "opencontainers/runtime-spec", major, "WindowsResources")
}
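
Registering these URLs once at init time lets runtime-spec values round-trip through types.Any without per-call registration. A minimal sketch of what this enables, assuming this typeurl package's MarshalAny/UnmarshalAny helpers; the example value is illustrative:

	// Sketch: pack a registered type into an Any and recover it by URL.
	func roundTrip() error {
		any, err := typeurl.MarshalAny(&specs.Process{Args: []string{"cmd", "/c"}})
		if err != nil {
			return err
		}
		v, err := typeurl.UnmarshalAny(any)
		if err != nil {
			return err
		}
		_ = v.(*specs.Process) // the registered concrete type comes back
		return nil
	}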
@@ -8,6 +8,13 @@ import (
	"github.com/containerd/containerd/sys"
)

const (
	// DefaultAddress is the default unix socket address
	DefaultAddress = "/run/containerd/containerd.sock"
	// DefaultDebugAddress is the default unix socket address for pprof data
	DefaultDebugAddress = "/run/containerd/debug.sock"
)

// apply sets config settings on the server process
func apply(ctx context.Context, config *Config) error {
	if config.Subreaper {
@@ -1,9 +1,16 @@
// +build !linux
// +build !linux,!windows

package server

import "context"

const (
	// DefaultAddress is the default unix socket address
	DefaultAddress = "/run/containerd/containerd.sock"
	// DefaultDebugAddress is the default unix socket address for pprof data
	DefaultDebugAddress = "/run/containerd/debug.sock"
)

func apply(_ context.Context, _ *Config) error {
	return nil
}
16
server/server_windows.go
Normal file
@@ -0,0 +1,16 @@
// +build windows

package server

import "context"

const (
	// DefaultAddress is the default winpipe address
	DefaultAddress = `\\.\pipe\containerd-containerd`
	// DefaultDebugAddress is the default winpipe address for pprof data
	DefaultDebugAddress = `\\.\pipe\containerd-debug`
)

func apply(_ context.Context, _ *Config) error {
	return nil
}
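
Because each platform file supplies its own DefaultAddress under a build constraint, callers never branch on GOOS; server.DefaultAddress simply compiles to the unix socket on Linux and to the named pipe above on Windows. A sketch of the caller side (the flag wiring is illustrative):

	// Same code on every platform; the constant differs at compile time.
	address := flag.String("address", server.DefaultAddress, "containerd socket/pipe address")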
@@ -12,18 +12,23 @@ import (
	specs "github.com/opencontainers/runtime-spec/specs-go"
)

const pipeRoot = `\\.\pipe`

func createDefaultSpec() (*specs.Spec, error) {
	return &specs.Spec{
		Version: specs.Version,
		Root:    &specs.Root{},
		Process: &specs.Process{
			Cwd: `C:\`,
			ConsoleSize: &specs.Box{
				Width:  80,
				Height: 20,
			},
		},
		Windows: &specs.Windows{
			IgnoreFlushesDuringBoot: true,
			Network: &specs.WindowsNetwork{
				AllowUnqualifiedDNSQuery: true,
			},
		},
	}, nil
}

3
task.go
@@ -188,6 +188,7 @@ func (t *task) Wait(ctx context.Context) (uint32, error) {
// during cleanup
func (t *task) Delete(ctx context.Context) (uint32, error) {
	if t.io != nil {
		t.io.Cancel()
		t.io.Wait()
		t.io.Close()
	}
@@ -204,7 +205,7 @@ func (t *task) Exec(ctx context.Context, id string, spec *specs.Process, ioCreat
	if id == "" {
		return nil, errors.Wrapf(errdefs.ErrInvalidArgument, "exec id must not be empty")
	}
	i, err := ioCreate()
	i, err := ioCreate(id)
	if err != nil {
		return nil, err
	}
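
The exec id supplied to Exec is now forwarded to ioCreate, so an exec'd process gets pipes/fifos named after its own id rather than sharing anonymous ones with the init process. A sketch of the resulting caller shape (the id and spec are illustrative):

	// "shell" becomes part of the fifo/pipe names for this exec;
	// containerd.Stdio satisfies the new IOCreation signature.
	process, err := task.Exec(ctx, "shell", &procSpec, containerd.Stdio)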
@@ -2,15 +2,11 @@ package testutil

import (
	"flag"
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
	"strconv"
	"testing"

	"github.com/containerd/containerd/mount"
	"github.com/stretchr/testify/assert"
)

var rootEnabled bool
@@ -19,36 +15,6 @@ func init() {
	flag.BoolVar(&rootEnabled, "test.root", false, "enable tests that require root")
}

// Unmount unmounts a given mountPoint and sets t.Error if it fails
func Unmount(t *testing.T, mountPoint string) {
	t.Log("unmount", mountPoint)
	if err := mount.Unmount(mountPoint, 0); err != nil {
		t.Error("Could not umount", mountPoint, err)
	}
}

// RequiresRoot skips tests that require root, unless the test.root flag has
// been set
func RequiresRoot(t testing.TB) {
	if !rootEnabled {
		t.Skip("skipping test that requires root")
		return
	}
	assert.Equal(t, 0, os.Getuid(), "This test must be run as root.")
}

// RequiresRootM is similar to RequiresRoot but intended to be called from *testing.M.
func RequiresRootM() {
	if !rootEnabled {
		fmt.Fprintln(os.Stderr, "skipping test that requires root")
		os.Exit(0)
	}
	if 0 != os.Getuid() {
		fmt.Fprintln(os.Stderr, "This test must be run as root.")
		os.Exit(1)
	}
}

// DumpDir will log out all of the contents of the provided directory to
// testing logger.
//
42
testutil/helpers_unix.go
Normal file
@@ -0,0 +1,42 @@
// +build !windows

package testutil

import (
	"fmt"
	"os"
	"testing"

	"github.com/containerd/containerd/mount"
	"github.com/stretchr/testify/assert"
)

// Unmount unmounts a given mountPoint and sets t.Error if it fails
func Unmount(t *testing.T, mountPoint string) {
	t.Log("unmount", mountPoint)
	if err := mount.Unmount(mountPoint, 0); err != nil {
		t.Error("Could not umount", mountPoint, err)
	}
}

// RequiresRoot skips tests that require root, unless the test.root flag has
// been set
func RequiresRoot(t testing.TB) {
	if !rootEnabled {
		t.Skip("skipping test that requires root")
		return
	}
	assert.Equal(t, 0, os.Getuid(), "This test must be run as root.")
}

// RequiresRootM is similar to RequiresRoot but intended to be called from *testing.M.
func RequiresRootM() {
	if !rootEnabled {
		fmt.Fprintln(os.Stderr, "skipping test that requires root")
		os.Exit(0)
	}
	if 0 != os.Getuid() {
		fmt.Fprintln(os.Stderr, "This test must be run as root.")
		os.Exit(1)
	}
}
16
testutil/helpers_windows.go
Normal file
@@ -0,0 +1,16 @@
package testutil

import "testing"

// RequiresRoot does nothing on Windows
func RequiresRoot(t testing.TB) {
}

// RequiresRootM is similar to RequiresRoot but intended to be called from *testing.M.
func RequiresRootM() {
}

// Unmount unmounts a given mountPoint and sets t.Error if it fails
// Does nothing on Windows
func Unmount(t *testing.T, mountPoint string) {
}
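
Splitting testutil this way leaves only the shared test.root flag wiring in helpers.go: helpers_unix.go carries the real root and unmount checks behind a `// +build !windows` tag, while helpers_windows.go needs no tag at all since the _windows file suffix already restricts it to Windows builds, and its helpers intentionally do nothing.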
@@ -10,6 +10,7 @@ import (
	"github.com/containerd/containerd/errdefs"
	"github.com/gogo/protobuf/proto"
	"github.com/gogo/protobuf/types"
	"github.com/pkg/errors"
)

const Prefix = "types.containerd.io"
@@ -39,7 +40,7 @@ func TypeURL(v interface{}) (string, error) {
	// fallback to the proto registry if it is a proto message
	pb, ok := v.(proto.Message)
	if !ok {
		return "", errdefs.ErrNotFound
		return "", errors.Wrapf(errdefs.ErrNotFound, "type %s", reflect.TypeOf(v))
	}
	return path.Join(Prefix, proto.MessageName(pb)), nil
}
@@ -116,7 +117,7 @@ func getTypeByUrl(url string) (urlType, error) {
			isProto: true,
		}, nil
	}
	return urlType{}, errdefs.ErrNotFound
	return urlType{}, errors.Wrapf(errdefs.ErrNotFound, "type with url %s", url)
}

func tryDereference(v interface{}) reflect.Type {
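
Wrapping via pkg/errors keeps errdefs.ErrNotFound matchable while adding the offending type or URL to the message. A sketch of how a caller still detects the sentinel after this change (the helper name is illustrative):

	func isUnregistered(v interface{}) bool {
		_, err := typeurl.TypeURL(v)
		// errors.Cause unwraps the Wrapf applied above, so the
		// sentinel comparison still works.
		return err != nil && errors.Cause(err) == errdefs.ErrNotFound
	}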
@@ -33,7 +33,7 @@ github.com/containerd/continuity 86cec1535a968310e7532819f699ff2830ed7463
golang.org/x/sync 450f422ab23cf9881c94e2db30cac0eb1b7cf80c
github.com/BurntSushi/toml v0.2.0-21-g9906417
github.com/grpc-ecosystem/go-grpc-prometheus 6b7015e65d366bf3f19b2b2a000a831940f0f7e0
github.com/Microsoft/go-winio v0.4.1
github.com/Microsoft/go-winio v0.4.3
github.com/boltdb/bolt e9cf4fae01b5a8ff89d0ec6b32f0d9c9f79aefdd
github.com/Microsoft/hcsshim v0.5.15
github.com/Azure/go-ansiterm fa152c58bc15761d0200cb75fe958b89a9d4888e
20
vendor/github.com/Microsoft/go-winio/file.go
generated
vendored
@@ -69,6 +69,7 @@ func initIo() {
// win32File implements Reader, Writer, and Closer on a Win32 handle without blocking in a syscall.
// It takes ownership of this handle and will close it if it is garbage collected.
type win32File struct {
	sync.Mutex
	handle  syscall.Handle
	wg      sync.WaitGroup
	closing bool
@@ -105,17 +106,28 @@ func MakeOpenFile(h syscall.Handle) (io.ReadWriteCloser, error) {
	return makeWin32File(h)
}

func (f *win32File) isClosing() bool {
	f.Lock()
	closing := f.closing
	f.Unlock()
	return closing
}

// closeHandle closes the resources associated with a Win32 handle
func (f *win32File) closeHandle() {
	f.Lock()
	if !f.closing {
		// cancel all IO and wait for it to complete
		f.closing = true
		f.Unlock()
		cancelIoEx(f.handle, nil)
		f.wg.Wait()
		// at this point, no new IO can start
		syscall.Close(f.handle)
		f.handle = 0
		return
	}
	f.Unlock()
}

// Close closes a win32File.
@@ -127,10 +139,10 @@ func (f *win32File) Close() error {
// prepareIo prepares for a new IO operation.
// The caller must call f.wg.Done() when the IO is finished, prior to Close() returning.
func (f *win32File) prepareIo() (*ioOperation, error) {
	f.wg.Add(1)
	if f.closing {
	if f.isClosing() {
		return nil, ErrFileClosed
	}
	f.wg.Add(1)
	c := &ioOperation{}
	c.ch = make(chan ioResult)
	return c, nil
@@ -159,7 +171,7 @@ func (f *win32File) asyncIo(c *ioOperation, d *deadlineHandler, bytes uint32, er
		return int(bytes), err
	}

	if f.closing {
	if f.isClosing() {
		cancelIoEx(f.handle, &c.o)
	}

@@ -175,7 +187,7 @@ func (f *win32File) asyncIo(c *ioOperation, d *deadlineHandler, bytes uint32, er
	case r = <-c.ch:
		err = r.err
		if err == syscall.ERROR_OPERATION_ABORTED {
			if f.closing {
			if f.isClosing() {
				err = ErrFileClosed
			}
		}
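
The go-winio v0.4.3 bump addresses a race on the closing flag: reads now go through the mutex-guarded isClosing() accessor, and prepareIo checks it before registering with the WaitGroup, so closeHandle's cancelIoEx plus wg.Wait() sequence no longer races against unsynchronized reads of the flag.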
15
vendor/github.com/Microsoft/go-winio/pipe.go
generated
vendored
@@ -13,19 +13,12 @@ import (
)

//sys connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) = ConnectNamedPipe
//sys createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *securityAttributes) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] = CreateNamedPipeW
//sys createFile(name string, access uint32, mode uint32, sa *securityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] = CreateFileW
//sys createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] = CreateNamedPipeW
//sys createFile(name string, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] = CreateFileW
//sys waitNamedPipe(name string, timeout uint32) (err error) = WaitNamedPipeW
//sys getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) = GetNamedPipeInfo
//sys getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW
//sys localAlloc(uFlags uint32, length uint32) (ptr uintptr) = LocalAlloc
//sys copyMemory(dst uintptr, src uintptr, length uint32) = RtlCopyMemory

type securityAttributes struct {
	Length             uint32
	SecurityDescriptor uintptr
	InheritHandle      uint32
}

const (
	cERROR_PIPE_BUSY = syscall.Errno(231)
@@ -233,13 +226,13 @@ func makeServerPipeHandle(path string, securityDescriptor []byte, c *PipeConfig,
		mode |= cPIPE_TYPE_MESSAGE
	}

	sa := &securityAttributes{}
	sa := &syscall.SecurityAttributes{}
	sa.Length = uint32(unsafe.Sizeof(*sa))
	if securityDescriptor != nil {
		len := uint32(len(securityDescriptor))
		sa.SecurityDescriptor = localAlloc(0, len)
		defer localFree(sa.SecurityDescriptor)
		copyMemory(sa.SecurityDescriptor, uintptr(unsafe.Pointer(&securityDescriptor[0])), len)
		copy((*[0xffff]byte)(unsafe.Pointer(sa.SecurityDescriptor))[:], securityDescriptor)
	}
	h, err := createNamedPipe(path, flags, mode, cPIPE_UNLIMITED_INSTANCES, uint32(c.OutputBufferSize), uint32(c.InputBufferSize), 0, sa)
	if err != nil {
14
vendor/github.com/Microsoft/go-winio/zsyscall_windows.go
generated
vendored
@@ -53,7 +53,6 @@ var (
	procGetNamedPipeInfo         = modkernel32.NewProc("GetNamedPipeInfo")
	procGetNamedPipeHandleStateW = modkernel32.NewProc("GetNamedPipeHandleStateW")
	procLocalAlloc               = modkernel32.NewProc("LocalAlloc")
	procRtlCopyMemory            = modkernel32.NewProc("RtlCopyMemory")
	procLookupAccountNameW       = modadvapi32.NewProc("LookupAccountNameW")
	procConvertSidToStringSidW   = modadvapi32.NewProc("ConvertSidToStringSidW")
	procConvertStringSecurityDescriptorToSecurityDescriptorW = modadvapi32.NewProc("ConvertStringSecurityDescriptorToSecurityDescriptorW")
@@ -141,7 +140,7 @@ func connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) {
	return
}

func createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *securityAttributes) (handle syscall.Handle, err error) {
func createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) {
	var _p0 *uint16
	_p0, err = syscall.UTF16PtrFromString(name)
	if err != nil {
@@ -150,7 +149,7 @@ func createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances ui
	return _createNamedPipe(_p0, flags, pipeMode, maxInstances, outSize, inSize, defaultTimeout, sa)
}

func _createNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *securityAttributes) (handle syscall.Handle, err error) {
func _createNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) {
	r0, _, e1 := syscall.Syscall9(procCreateNamedPipeW.Addr(), 8, uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(pipeMode), uintptr(maxInstances), uintptr(outSize), uintptr(inSize), uintptr(defaultTimeout), uintptr(unsafe.Pointer(sa)), 0)
	handle = syscall.Handle(r0)
	if handle == syscall.InvalidHandle {
@@ -163,7 +162,7 @@ func _createNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances
	return
}

func createFile(name string, access uint32, mode uint32, sa *securityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) {
func createFile(name string, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) {
	var _p0 *uint16
	_p0, err = syscall.UTF16PtrFromString(name)
	if err != nil {
@@ -172,7 +171,7 @@ func createFile(name string, access uint32, mode uint32, sa *securityAttributes,
	return _createFile(_p0, access, mode, sa, createmode, attrs, templatefile)
}

func _createFile(name *uint16, access uint32, mode uint32, sa *securityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) {
func _createFile(name *uint16, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) {
	r0, _, e1 := syscall.Syscall9(procCreateFileW.Addr(), 7, uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile), 0, 0)
	handle = syscall.Handle(r0)
	if handle == syscall.InvalidHandle {
@@ -236,11 +235,6 @@ func localAlloc(uFlags uint32, length uint32) (ptr uintptr) {
	return
}

func copyMemory(dst uintptr, src uintptr, length uint32) {
	syscall.Syscall(procRtlCopyMemory.Addr(), 3, uintptr(dst), uintptr(src), uintptr(length))
	return
}

func lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) {
	var _p0 *uint16
	_p0, err = syscall.UTF16PtrFromString(accountName)
146
vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go
generated
vendored
Normal file
@@ -0,0 +1,146 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: github.com/golang/protobuf/ptypes/duration/duration.proto

/*
Package duration is a generated protocol buffer package.

It is generated from these files:
	github.com/golang/protobuf/ptypes/duration/duration.proto

It has these top-level messages:
	Duration
*/
package duration

import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"

// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf

// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package

// A Duration represents a signed, fixed-length span of time represented
// as a count of seconds and fractions of seconds at nanosecond
// resolution. It is independent of any calendar and concepts like "day"
// or "month". It is related to Timestamp in that the difference between
// two Timestamp values is a Duration and it can be added or subtracted
// from a Timestamp. Range is approximately +-10,000 years.
//
// # Examples
//
// Example 1: Compute Duration from two Timestamps in pseudo code.
//
//     Timestamp start = ...;
//     Timestamp end = ...;
//     Duration duration = ...;
//
//     duration.seconds = end.seconds - start.seconds;
//     duration.nanos = end.nanos - start.nanos;
//
//     if (duration.seconds < 0 && duration.nanos > 0) {
//       duration.seconds += 1;
//       duration.nanos -= 1000000000;
//     } else if (durations.seconds > 0 && duration.nanos < 0) {
//       duration.seconds -= 1;
//       duration.nanos += 1000000000;
//     }
//
// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code.
//
//     Timestamp start = ...;
//     Duration duration = ...;
//     Timestamp end = ...;
//
//     end.seconds = start.seconds + duration.seconds;
//     end.nanos = start.nanos + duration.nanos;
//
//     if (end.nanos < 0) {
//       end.seconds -= 1;
//       end.nanos += 1000000000;
//     } else if (end.nanos >= 1000000000) {
//       end.seconds += 1;
//       end.nanos -= 1000000000;
//     }
//
// Example 3: Compute Duration from datetime.timedelta in Python.
//
//     td = datetime.timedelta(days=3, minutes=10)
//     duration = Duration()
//     duration.FromTimedelta(td)
//
// # JSON Mapping
//
// In JSON format, the Duration type is encoded as a string rather than an
// object, where the string ends in the suffix "s" (indicating seconds) and
// is preceded by the number of seconds, with nanoseconds expressed as
// fractional seconds. For example, 3 seconds with 0 nanoseconds should be
// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should
// be expressed in JSON format as "3.000000001s", and 3 seconds and 1
// microsecond should be expressed in JSON format as "3.000001s".
//
//
type Duration struct {
	// Signed seconds of the span of time. Must be from -315,576,000,000
	// to +315,576,000,000 inclusive. Note: these bounds are computed from:
	// 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years
	Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"`
	// Signed fractions of a second at nanosecond resolution of the span
	// of time. Durations less than one second are represented with a 0
	// `seconds` field and a positive or negative `nanos` field. For durations
	// of one second or more, a non-zero value for the `nanos` field must be
	// of the same sign as the `seconds` field. Must be from -999,999,999
	// to +999,999,999 inclusive.
	Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"`
}

func (m *Duration) Reset()                    { *m = Duration{} }
func (m *Duration) String() string            { return proto.CompactTextString(m) }
func (*Duration) ProtoMessage()               {}
func (*Duration) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
func (*Duration) XXX_WellKnownType() string   { return "Duration" }

func (m *Duration) GetSeconds() int64 {
	if m != nil {
		return m.Seconds
	}
	return 0
}

func (m *Duration) GetNanos() int32 {
	if m != nil {
		return m.Nanos
	}
	return 0
}

func init() {
	proto.RegisterType((*Duration)(nil), "google.protobuf.Duration")
}

func init() {
	proto.RegisterFile("github.com/golang/protobuf/ptypes/duration/duration.proto", fileDescriptor0)
}

var fileDescriptor0 = []byte{
	// 189 bytes of a gzipped FileDescriptorProto
	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xb2, 0x4c, 0xcf, 0x2c, 0xc9,
	0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0xd7, 0x2f, 0x28,
	0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x28, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x4f, 0x29,
	0x2d, 0x4a, 0x2c, 0xc9, 0xcc, 0xcf, 0x83, 0x33, 0xf4, 0xc0, 0x2a, 0x84, 0xf8, 0xd3, 0xf3, 0xf3,
	0xd3, 0x73, 0x52, 0xf5, 0x60, 0xea, 0x95, 0xac, 0xb8, 0x38, 0x5c, 0xa0, 0x4a, 0x84, 0x24, 0xb8,
	0xd8, 0x8b, 0x53, 0x93, 0xf3, 0xf3, 0x52, 0x8a, 0x25, 0x18, 0x15, 0x18, 0x35, 0x98, 0x83, 0x60,
	0x5c, 0x21, 0x11, 0x2e, 0xd6, 0xbc, 0xc4, 0xbc, 0xfc, 0x62, 0x09, 0x26, 0x05, 0x46, 0x0d, 0xd6,
	0x20, 0x08, 0xc7, 0xa9, 0x86, 0x4b, 0x38, 0x39, 0x3f, 0x57, 0x0f, 0xcd, 0x48, 0x27, 0x5e, 0x98,
	0x81, 0x01, 0x20, 0x91, 0x00, 0xc6, 0x28, 0x2d, 0xe2, 0xdd, 0xfb, 0x83, 0x91, 0x71, 0x11, 0x13,
	0xb3, 0x7b, 0x80, 0xd3, 0x2a, 0x26, 0x39, 0x77, 0x88, 0xb9, 0x01, 0x50, 0xa5, 0x7a, 0xe1, 0xa9,
	0x39, 0x39, 0xde, 0x79, 0xf9, 0xe5, 0x79, 0x21, 0x20, 0x2d, 0x49, 0x6c, 0x60, 0x33, 0x8c, 0x01,
	0x01, 0x00, 0x00, 0xff, 0xff, 0x45, 0x5a, 0x81, 0x3d, 0x0e, 0x01, 0x00, 0x00,
}
117
vendor/github.com/golang/protobuf/ptypes/duration/duration.proto
generated
vendored
Normal file
@@ -0,0 +1,117 @@
// Protocol Buffers - Google's data interchange format
// Copyright 2008 Google Inc. All rights reserved.
// https://developers.google.com/protocol-buffers/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

syntax = "proto3";

package google.protobuf;

option csharp_namespace = "Google.Protobuf.WellKnownTypes";
option cc_enable_arenas = true;
option go_package = "github.com/golang/protobuf/ptypes/duration";
option java_package = "com.google.protobuf";
option java_outer_classname = "DurationProto";
option java_multiple_files = true;
option objc_class_prefix = "GPB";

// A Duration represents a signed, fixed-length span of time represented
// as a count of seconds and fractions of seconds at nanosecond
// resolution. It is independent of any calendar and concepts like "day"
// or "month". It is related to Timestamp in that the difference between
// two Timestamp values is a Duration and it can be added or subtracted
// from a Timestamp. Range is approximately +-10,000 years.
//
// # Examples
//
// Example 1: Compute Duration from two Timestamps in pseudo code.
//
//     Timestamp start = ...;
//     Timestamp end = ...;
//     Duration duration = ...;
//
//     duration.seconds = end.seconds - start.seconds;
//     duration.nanos = end.nanos - start.nanos;
//
//     if (duration.seconds < 0 && duration.nanos > 0) {
//       duration.seconds += 1;
//       duration.nanos -= 1000000000;
//     } else if (durations.seconds > 0 && duration.nanos < 0) {
//       duration.seconds -= 1;
//       duration.nanos += 1000000000;
//     }
//
// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code.
//
//     Timestamp start = ...;
//     Duration duration = ...;
//     Timestamp end = ...;
//
//     end.seconds = start.seconds + duration.seconds;
//     end.nanos = start.nanos + duration.nanos;
//
//     if (end.nanos < 0) {
//       end.seconds -= 1;
//       end.nanos += 1000000000;
//     } else if (end.nanos >= 1000000000) {
//       end.seconds += 1;
//       end.nanos -= 1000000000;
//     }
//
// Example 3: Compute Duration from datetime.timedelta in Python.
//
//     td = datetime.timedelta(days=3, minutes=10)
//     duration = Duration()
//     duration.FromTimedelta(td)
//
// # JSON Mapping
//
// In JSON format, the Duration type is encoded as a string rather than an
// object, where the string ends in the suffix "s" (indicating seconds) and
// is preceded by the number of seconds, with nanoseconds expressed as
// fractional seconds. For example, 3 seconds with 0 nanoseconds should be
// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should
// be expressed in JSON format as "3.000000001s", and 3 seconds and 1
// microsecond should be expressed in JSON format as "3.000001s".
//
//
message Duration {

  // Signed seconds of the span of time. Must be from -315,576,000,000
  // to +315,576,000,000 inclusive. Note: these bounds are computed from:
  // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years
  int64 seconds = 1;

  // Signed fractions of a second at nanosecond resolution of the span
  // of time. Durations less than one second are represented with a 0
  // `seconds` field and a positive or negative `nanos` field. For durations
  // of one second or more, a non-zero value for the `nanos` field must be
  // of the same sign as the `seconds` field. Must be from -999,999,999
  // to +999,999,999 inclusive.
  int32 nanos = 2;
}
@@ -1,237 +0,0 @@
// +build windows

package windows

import (
	"context"
	"encoding/json"
	"fmt"
	"sync"

	"github.com/containerd/containerd/log"
	"github.com/containerd/containerd/runtime"
	"github.com/containerd/containerd/windows/hcs"
	"github.com/gogo/protobuf/types"
	specs "github.com/opencontainers/runtime-spec/specs-go"
	"github.com/pkg/errors"
	winsys "golang.org/x/sys/windows"
)

var ErrLoadedContainer = errors.New("loaded container can only be terminated")

func loadContainers(ctx context.Context, h *hcs.HCS) ([]*container, error) {
	hCtr, err := h.LoadContainers(ctx)
	if err != nil {
		return nil, err
	}

	containers := make([]*container, 0)
	for _, c := range hCtr {
		containers = append(containers, &container{
			ctr:    c,
			status: runtime.RunningStatus,
		})
	}

	return containers, nil
}

func newContainer(ctx context.Context, h *hcs.HCS, id string, spec *RuntimeSpec, io runtime.IO) (*container, error) {
	cio, err := hcs.NewIO(io.Stdin, io.Stdout, io.Stderr, io.Terminal)
	if err != nil {
		return nil, err
	}

	hcsCtr, err := h.CreateContainer(ctx, id, spec.OCISpec, spec.Configuration, cio)
	if err != nil {
		return nil, err
	}
	//sendEvent(id, events.RuntimeEvent_CREATE, hcsCtr.Pid(), 0, time.Time{})

	return &container{
		ctr:    hcsCtr,
		status: runtime.CreatedStatus,
	}, nil
}

type container struct {
	sync.Mutex

	ctr    *hcs.Container
	status runtime.Status
}

func (c *container) ID() string {
	return c.ctr.ID()
}

func (c *container) Info() runtime.TaskInfo {
	return runtime.TaskInfo{
		ID:      c.ctr.ID(),
		Runtime: runtimeName,
	}
}

func (c *container) Start(ctx context.Context) error {
	if c.ctr.Pid() == 0 {
		return ErrLoadedContainer
	}

	err := c.ctr.Start(ctx)
	if err != nil {
		return err
	}

	c.setStatus(runtime.RunningStatus)
	// c.sendEvent(c.ctr.ID(), events.RuntimeEvent_START, c.ctr.Pid(), 0, time.Time{})

	// Wait for our process to terminate
	go func() {
		_, err := c.ctr.ExitCode()
		if err != nil {
			log.G(ctx).Debug(err)
		}
		c.setStatus(runtime.StoppedStatus)
		// c.sendEvent(c.ctr.ID(), events.RuntimeEvent_EXIT, c.ctr.Pid(), ec, c.ctr.Processes()[0].ExitedAt())
	}()

	return nil
}

func (c *container) Pause(ctx context.Context) error {
	if c.ctr.GetConfiguration().UseHyperV == false {
		return fmt.Errorf("Windows non-HyperV containers do not support pause")
	}
	return c.ctr.Pause()
}

func (c *container) Resume(ctx context.Context) error {
	if c.ctr.GetConfiguration().UseHyperV == false {
		return fmt.Errorf("Windows non-HyperV containers do not support resume")
	}
	return c.ctr.Resume()
}

func (c *container) State(ctx context.Context) (runtime.State, error) {
	return runtime.State{
		Pid:    c.Pid(),
		Status: c.Status(),
	}, nil
}

func (c *container) Kill(ctx context.Context, signal uint32, all bool) error {
	if winsys.Signal(signal) == winsys.SIGKILL {
		return c.ctr.Kill(ctx)
	}
	return c.ctr.Stop(ctx)
}

func (c *container) Process(ctx context.Context, id string) (runtime.Process, error) {
	for _, p := range c.ctr.Processes() {
		if p.ID() == id {
			return &process{p}, nil
		}
	}
	return nil, errors.Errorf("process %s not found", id)
}

func (c *container) Exec(ctx context.Context, id string, opts runtime.ExecOpts) (runtime.Process, error) {
	if c.ctr.Pid() == 0 {
		return nil, ErrLoadedContainer
	}

	pio, err := hcs.NewIO(opts.IO.Stdin, opts.IO.Stdout, opts.IO.Stderr, opts.IO.Terminal)
	if err != nil {
		return nil, err
	}

	var procSpec specs.Process
	if err := json.Unmarshal(opts.Spec.Value, &procSpec); err != nil {
		return nil, errors.Wrap(err, "failed to unmarshal oci spec")
	}

	p, err := c.ctr.AddProcess(ctx, id, &procSpec, pio)
	if err != nil {
		return nil, err
	}

	go func() {
		_, err := p.ExitCode()
		if err != nil {
			log.G(ctx).Debug(err)
		}
		//c.sendEvent(c.ctr.ID(), events.RuntimeEvent_EXEC_ADDED, p.Pid(), ec, p.ExitedAt())
	}()

	return &process{p}, nil
}

func (c *container) CloseIO(ctx context.Context) error {
	return c.ctr.CloseIO(ctx)
}

func (c *container) ResizePty(ctx context.Context, size runtime.ConsoleSize) error {
	return c.ctr.ResizePty(ctx, size)
}

func (c *container) Status() runtime.Status {
	return c.getStatus()
}

func (c *container) Pid() uint32 {
	return c.ctr.Pid()
}

func (c *container) Pids(ctx context.Context) ([]uint32, error) {
	pl, err := c.ctr.ProcessList()
	if err != nil {
		return nil, err
	}
	pids := make([]uint32, 0, len(pl))
	for _, p := range pl {
		pids = append(pids, p.ProcessId)
	}
	return pids, nil
}

func (c *container) Checkpoint(ctx context.Context, _ string, _ *types.Any) error {
	return fmt.Errorf("Windows containers do not support checkpoint")
}

func (c *container) DeleteProcess(ctx context.Context, id string) (*runtime.Exit, error) {
	var process *hcs.Process
	for _, p := range c.ctr.Processes() {
		if p.ID() == id {
			process = p
			break
		}
	}
	if process == nil {
		return nil, fmt.Errorf("process %s not found", id)
	}
	ec, err := process.ExitCode()
	if err != nil {
		return nil, err
	}
	process.Delete()
	return &runtime.Exit{
		Status:    ec,
		Timestamp: process.ExitedAt(),
	}, nil
}

func (c *container) Update(ctx context.Context, spec *types.Any) error {
	return fmt.Errorf("Windows containers do not support update")
}

func (c *container) setStatus(status runtime.Status) {
	c.Lock()
	c.status = status
	c.Unlock()
}

func (c *container) getStatus() runtime.Status {
	c.Lock()
	defer c.Unlock()
	return c.status
}
@ -1,572 +0,0 @@
|
||||
// +build windows
|
||||
|
||||
package hcs
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/Microsoft/hcsshim"
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/containerd/containerd/log"
|
||||
"github.com/containerd/containerd/runtime"
|
||||
"github.com/containerd/containerd/windows/pid"
|
||||
"github.com/opencontainers/runtime-spec/specs-go"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
const (
|
||||
layerFile = "layer"
|
||||
defaultTerminateTimeout = 5 * time.Minute
|
||||
)
|
||||
|
||||
func (s *HCS) LoadContainers(ctx context.Context) ([]*Container, error) {
|
||||
ctrProps, err := hcsshim.GetContainers(hcsshim.ComputeSystemQuery{})
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to retrieve running containers")
|
||||
}
|
||||
|
||||
containers := make([]*Container, 0)
|
||||
for _, p := range ctrProps {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return nil, ctx.Err()
|
||||
default:
|
||||
}
|
||||
|
||||
if p.Owner != s.owner || p.SystemType != "Container" {
|
||||
continue
|
||||
}
|
||||
|
||||
container, err := hcsshim.OpenContainer(p.ID)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed open container %s", p.ID)
|
||||
}
|
||||
stateDir := filepath.Join(s.stateDir, p.ID)
|
||||
b, err := ioutil.ReadFile(filepath.Join(stateDir, layerFile))
|
||||
containers = append(containers, &Container{
|
||||
id: p.ID,
|
||||
Container: container,
|
||||
stateDir: stateDir,
|
||||
hcs: s,
|
||||
io: &IO{},
|
||||
layerFolderPath: string(b),
|
||||
conf: Configuration{
|
||||
TerminateDuration: defaultTerminateTimeout,
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
return containers, nil
|
||||
}
|
||||
|
||||
func New(owner, rootDir string) *HCS {
|
||||
return &HCS{
|
||||
stateDir: rootDir,
|
||||
owner: owner,
|
||||
pidPool: pid.NewPool(),
|
||||
}
|
||||
}
|
||||
|
||||
type HCS struct {
|
||||
stateDir string
|
||||
owner string
|
||||
pidPool *pid.Pool
|
||||
}
|
||||
|
||||
func (s *HCS) CreateContainer(ctx context.Context, id string, spec specs.Spec, conf Configuration, io *IO) (c *Container, err error) {
|
||||
pid, err := s.pidPool.Get()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer func() {
|
||||
if err != nil {
|
||||
s.pidPool.Put(pid)
|
||||
}
|
||||
}()
|
||||
|
||||
stateDir := filepath.Join(s.stateDir, id)
|
||||
if err := os.MkdirAll(stateDir, 0755); err != nil {
|
||||
return nil, errors.Wrapf(err, "unable to create container state dir %s", stateDir)
|
||||
}
|
||||
defer func() {
|
||||
if err != nil {
|
||||
os.RemoveAll(stateDir)
|
||||
}
|
||||
}()
|
||||
|
||||
if conf.TerminateDuration == 0 {
|
||||
conf.TerminateDuration = defaultTerminateTimeout
|
||||
}
|
||||
|
||||
ctrConf, err := newContainerConfig(s.owner, id, spec, conf)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
layerPathFile := filepath.Join(stateDir, layerFile)
|
||||
if err := ioutil.WriteFile(layerPathFile, []byte(ctrConf.LayerFolderPath), 0644); err != nil {
|
||||
log.G(ctx).WithError(err).Warnf("failed to save active layer %s", ctrConf.LayerFolderPath)
|
||||
}
|
||||
|
||||
ctr, err := hcsshim.CreateContainer(id, ctrConf)
|
||||
if err != nil {
|
||||
removeLayer(ctx, ctrConf.LayerFolderPath)
|
||||
return nil, errors.Wrapf(err, "failed to create container %s", id)
|
||||
}
|
||||
|
||||
err = ctr.Start()
|
||||
if err != nil {
|
||||
ctr.Terminate()
|
||||
removeLayer(ctx, ctrConf.LayerFolderPath)
|
||||
return nil, errors.Wrapf(err, "failed to start container %s", id)
|
||||
}
|
||||
|
||||
return &Container{
|
||||
Container: ctr,
|
||||
id: id,
|
||||
pid: pid,
|
||||
spec: spec,
|
||||
conf: conf,
|
||||
stateDir: stateDir,
|
||||
io: io,
|
||||
hcs: s,
|
||||
layerFolderPath: ctrConf.LayerFolderPath,
|
||||
processes: make([]*Process, 0),
|
||||
}, nil
|
||||
}
|
||||
|
||||
type Container struct {
|
||||
sync.Mutex
|
||||
hcsshim.Container
|
||||
|
||||
id string
|
||||
stateDir string
|
||||
pid uint32
|
||||
spec specs.Spec
|
||||
conf Configuration
|
||||
io *IO
|
||||
hcs *HCS
|
||||
layerFolderPath string
|
||||
|
||||
processes []*Process
|
||||
}
|
||||
|
||||
func (c *Container) ID() string {
|
||||
return c.id
|
||||
}
|
||||
|
||||
func (c *Container) Pid() uint32 {
|
||||
return c.pid
|
||||
}
|
||||
|
||||
func (c *Container) Processes() []*Process {
|
||||
return c.processes
|
||||
}
|
||||
|
||||
func (c *Container) Start(ctx context.Context) error {
|
||||
_, err := c.addProcess(ctx, c.id, c.spec.Process, c.io)
|
||||
return err
|
||||
}
|
||||
|
||||
func (c *Container) getDeathErr(err error) error {
|
||||
switch {
|
||||
case hcsshim.IsPending(err):
|
||||
err = c.WaitTimeout(c.conf.TerminateDuration)
|
||||
case hcsshim.IsAlreadyStopped(err):
|
||||
err = nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (c *Container) Kill(ctx context.Context) error {
|
||||
return c.getDeathErr(c.Terminate())
|
||||
}
|
||||
|
||||
func (c *Container) Stop(ctx context.Context) error {
|
||||
err := c.getDeathErr(c.Shutdown())
|
||||
if err != nil {
|
||||
log.G(ctx).WithError(err).Debugf("failed to shutdown container %s, calling terminate", c.id)
|
||||
return c.getDeathErr(c.Terminate())
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Container) CloseIO(ctx context.Context) error {
|
||||
var proc *Process
|
||||
c.Lock()
|
||||
for _, p := range c.processes {
|
||||
if p.id == c.id {
|
||||
proc = p
|
||||
break
|
||||
}
|
||||
}
|
||||
c.Unlock()
|
||||
if proc == nil {
|
||||
return errors.Errorf("no such process %s", c.id)
|
||||
}
|
||||
|
||||
return proc.CloseStdin()
|
||||
}
|
||||
|
||||
func (c *Container) ResizePty(ctx context.Context, size runtime.ConsoleSize) error {
|
||||
var proc *Process
|
||||
c.Lock()
|
||||
for _, p := range c.processes {
|
||||
if p.id == c.id {
|
||||
proc = p
|
||||
break
|
||||
}
|
||||
}
|
||||
c.Unlock()
|
||||
if proc == nil {
|
||||
return errors.Errorf("no such process %s", c.id)
|
||||
}
|
||||
|
||||
return proc.ResizeConsole(uint16(size.Width), uint16(size.Height))
|
||||
}
|
||||
|
||||
func (c *Container) Delete(ctx context.Context) {
|
||||
defer func() {
|
||||
if err := c.Stop(ctx); err != nil {
|
||||
log.G(ctx).WithError(err).WithField("id", c.id).
|
||||
Errorf("failed to shutdown/terminate container")
|
||||
}
|
||||
|
||||
c.Lock()
|
||||
for _, p := range c.processes {
|
||||
if err := p.Delete(); err != nil {
|
||||
log.G(ctx).WithError(err).WithFields(logrus.Fields{"pid": p.Pid(), "id": c.id}).
|
||||
Errorf("failed to clean process resources")
|
||||
}
|
||||
}
|
||||
c.Unlock()
|
||||
|
||||
if err := c.Close(); err != nil {
|
||||
log.G(ctx).WithError(err).WithField("id", c.id).Errorf("failed to clean container resources")
|
||||
}
|
||||
|
||||
c.io.Close()
|
||||
|
||||
// Cleanup folder layer
|
||||
if err := removeLayer(ctx, c.layerFolderPath); err == nil {
|
||||
os.RemoveAll(c.stateDir)
|
||||
}
|
||||
}()
|
||||
|
||||
if update, err := c.HasPendingUpdates(); err != nil || !update {
|
||||
return
|
||||
}
|
||||
|
||||
serviceCtr, err := c.hcs.CreateContainer(ctx, c.id+"_servicing", c.spec, c.conf, &IO{})
|
||||
if err != nil {
|
||||
log.G(ctx).WithError(err).WithField("id", c.id).Warn("could not create servicing container")
|
||||
return
|
||||
}
|
||||
defer serviceCtr.Close()
|
||||
|
||||
err = serviceCtr.Start(ctx)
|
||||
if err != nil {
|
||||
log.G(ctx).WithError(err).WithField("id", c.id).Warn("failed to start servicing container")
|
||||
serviceCtr.Terminate()
|
||||
return
|
||||
}
|
||||
|
||||
err = serviceCtr.processes[0].Wait()
|
||||
if err == nil {
|
||||
_, err = serviceCtr.processes[0].ExitCode()
|
||||
log.G(ctx).WithError(err).WithField("id", c.id).Errorf("failed to retrieve servicing container exit code")
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
if err := serviceCtr.Terminate(); err != nil {
|
||||
log.G(ctx).WithError(err).WithField("id", c.id).Errorf("failed to terminate servicing container")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Container) ExitCode() (uint32, error) {
|
||||
if len(c.processes) == 0 {
|
||||
return 255, errors.New("container not started")
|
||||
}
|
||||
return c.processes[0].ExitCode()
|
||||
}
|
||||
|
||||
func (c *Container) GetConfiguration() Configuration {
|
||||
return c.conf
|
||||
}
|
||||
|
||||
func (c *Container) AddProcess(ctx context.Context, id string, spec *specs.Process, io *IO) (*Process, error) {
|
||||
if len(c.processes) == 0 {
|
||||
return nil, errors.New("container not started")
|
||||
}
|
||||
return c.addProcess(ctx, id, spec, io)
|
||||
}
|
||||
|
||||
func (c *Container) addProcess(ctx context.Context, id string, spec *specs.Process, pio *IO) (*Process, error) {
|
||||
// If we don't have a process yet, reused the container pid
|
||||
var pid uint32
|
||||
if len(c.processes) == 0 {
|
||||
pid = c.pid
|
||||
} else {
|
||||
pid, err := c.hcs.pidPool.Get()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer func() {
|
||||
if err != nil {
|
||||
c.hcs.pidPool.Put(pid)
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
conf := hcsshim.ProcessConfig{
|
||||
EmulateConsole: pio.terminal,
|
||||
CreateStdInPipe: pio.stdin != nil,
|
||||
CreateStdOutPipe: pio.stdout != nil,
|
||||
CreateStdErrPipe: pio.stderr != nil,
|
||||
User: spec.User.Username,
|
||||
CommandLine: strings.Join(spec.Args, " "),
|
||||
Environment: ociSpecEnvToHCSEnv(spec.Env),
|
||||
WorkingDirectory: spec.Cwd,
|
||||
ConsoleSize: [2]uint{spec.ConsoleSize.Height, spec.ConsoleSize.Width},
|
||||
}
|
||||
|
||||
if conf.WorkingDirectory == "" {
|
||||
conf.WorkingDirectory = c.spec.Process.Cwd
|
||||
}
|
||||
|
||||
proc, err := c.CreateProcess(&conf)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to create process")
|
||||
}
|
||||
|
||||
stdin, stdout, stderr, err := proc.Stdio()
|
||||
if err != nil {
|
||||
proc.Kill()
|
||||
return nil, errors.Wrapf(err, "failed to retrieve process stdio")
|
||||
}
|
||||
|
||||
if pio.stdin != nil {
|
||||
go func() {
|
||||
log.G(ctx).WithFields(logrus.Fields{"id": c.id, "pid": pid}).Debug("stdin: copy started")
|
||||
io.Copy(stdin, pio.stdin)
|
||||
log.G(ctx).WithFields(logrus.Fields{"id": c.id, "pid": pid}).Debug("stdin: copy done")
|
||||
stdin.Close()
|
||||
pio.stdin.Close()
|
||||
}()
|
||||
} else {
|
||||
proc.CloseStdin()
|
||||
}
|
||||
|
||||
if pio.stdout != nil {
|
||||
go func() {
|
||||
log.G(ctx).WithFields(logrus.Fields{"id": c.id, "pid": pid}).Debug("stdout: copy started")
|
||||
io.Copy(pio.stdout, stdout)
|
||||
log.G(ctx).WithFields(logrus.Fields{"id": c.id, "pid": pid}).Debug("stdout: copy done")
|
||||
stdout.Close()
|
||||
pio.stdout.Close()
|
||||
}()
|
||||
}
|
||||
|
||||
if pio.stderr != nil {
|
||||
go func() {
|
||||
log.G(ctx).WithFields(logrus.Fields{"id": c.id, "pid": pid}).Debug("stderr: copy started")
|
||||
io.Copy(pio.stderr, stderr)
|
||||
log.G(ctx).WithFields(logrus.Fields{"id": c.id, "pid": pid}).Debug("stderr: copy done")
|
||||
stderr.Close()
|
||||
pio.stderr.Close()
|
||||
}()
|
||||
}
|
||||
|
||||
p := &Process{
|
||||
id: id,
|
||||
Process: proc,
|
||||
pid: pid,
|
||||
io: pio,
|
||||
ecSync: make(chan struct{}),
|
||||
}
|
||||
|
||||
c.Lock()
|
||||
c.processes = append(c.processes, p)
|
||||
idx := len(c.processes) - 1
|
||||
c.Unlock()
|
||||
|
||||
go func() {
|
||||
p.ec, p.ecErr = processExitCode(c.ID(), p)
|
||||
close(p.ecSync)
|
||||
c.Lock()
|
||||
p.Delete()
|
||||
// Remove process from slice (but keep the init one around)
|
||||
if idx > 0 {
|
||||
c.processes[idx] = c.processes[len(c.processes)-1]
|
||||
c.processes[len(c.processes)-1] = nil
|
||||
c.processes = c.processes[:len(c.processes)-1]
|
||||
}
|
||||
c.Unlock()
|
||||
}()
|
||||
|
||||
return p, nil
|
||||
}
|
||||
|
||||
// newHCSConfiguration generates a hcsshim configuration from the instance
|
||||
// OCI Spec and hcs.Configuration.
|
||||
func newContainerConfig(owner, id string, spec specs.Spec, conf Configuration) (*hcsshim.ContainerConfig, error) {
	configuration := &hcsshim.ContainerConfig{
		SystemType:                 "Container",
		Name:                       id,
		Owner:                      owner,
		HostName:                   spec.Hostname,
		IgnoreFlushesDuringBoot:    conf.IgnoreFlushesDuringBoot,
		HvPartition:                conf.UseHyperV,
		AllowUnqualifiedDNSQuery:   conf.AllowUnqualifiedDNSQuery,
		EndpointList:               conf.NetworkEndpoints,
		NetworkSharedContainerName: conf.NetworkSharedContainerID,
		Credentials:                conf.Credentials,
	}

	// TODO: use the create request Mount for those
	for _, layerPath := range conf.Layers {
		_, filename := filepath.Split(layerPath)
		guid, err := hcsshim.NameToGuid(filename)
		if err != nil {
			return nil, err
		}
		configuration.Layers = append(configuration.Layers, hcsshim.Layer{
			ID:   guid.ToString(),
			Path: layerPath,
		})
	}

	if len(spec.Mounts) > 0 {
		mds := make([]hcsshim.MappedDir, len(spec.Mounts))
		for i, mount := range spec.Mounts {
			mds[i] = hcsshim.MappedDir{
				HostPath:      mount.Source,
				ContainerPath: mount.Destination,
				ReadOnly:      false,
			}
			for _, o := range mount.Options {
				if strings.ToLower(o) == "ro" {
					mds[i].ReadOnly = true
				}
			}
		}
		configuration.MappedDirectories = mds
	}

	if conf.DNSSearchList != nil {
		configuration.DNSSearchList = strings.Join(conf.DNSSearchList, ",")
	}

	if configuration.HvPartition {
		for _, layerPath := range conf.Layers {
			utilityVMPath := filepath.Join(layerPath, "UtilityVM")
			_, err := os.Stat(utilityVMPath)
			if err == nil {
				configuration.HvRuntime = &hcsshim.HvRuntime{ImagePath: utilityVMPath}
				break
			} else if !os.IsNotExist(err) {
				return nil, errors.Wrapf(err, "failed to access layer %s", layerPath)
			}
		}
	}

	if len(configuration.Layers) == 0 {
		// TODO: support starting with 0 layers, this means we need the "filter" directory as parameter
		return nil, errors.New("at least one layer must be provided")
	}

	di := hcsshim.DriverInfo{
		Flavour: 1, // filter driver
	}

	if len(configuration.Layers) > 0 {
		di.HomeDir = filepath.Dir(conf.Layers[0])
	}

	// Windows doesn't support creating a container with a readonly
	// filesystem, so always create a RW one
	if err := hcsshim.CreateSandboxLayer(di, id, conf.Layers[0], conf.Layers); err != nil {
		return nil, errors.Wrapf(err, "failed to create sandbox layer for %s: layers: %#v, driverInfo: %#v",
			id, configuration.Layers, di)
	}
	configuration.LayerFolderPath = filepath.Join(di.HomeDir, id)

	err := hcsshim.ActivateLayer(di, id)
	if err != nil {
		removeLayer(context.TODO(), configuration.LayerFolderPath)
		return nil, errors.Wrapf(err, "failed to activate layer %s", configuration.LayerFolderPath)
	}

	err = hcsshim.PrepareLayer(di, id, conf.Layers)
	if err != nil {
		removeLayer(context.TODO(), configuration.LayerFolderPath)
		return nil, errors.Wrapf(err, "failed to prepare layer %s", configuration.LayerFolderPath)
	}

	volumePath, err := hcsshim.GetLayerMountPath(di, id)
	if err != nil {
		if err := hcsshim.DestroyLayer(di, id); err != nil {
			log.L.Warnf("failed to DestroyLayer %s: %s", id, err)
		}
		return nil, errors.Wrapf(err, "failed to get mount path for layer %s: driverInfo: %#v", id, di)
	}
	configuration.VolumePath = volumePath

	return configuration, nil
}

// removeLayer deletes the given layer; all associated containers must have
// been shut down for this to succeed.
func removeLayer(ctx context.Context, path string) error {
	layerID := filepath.Base(path)
	parentPath := filepath.Dir(path)
	di := hcsshim.DriverInfo{
		Flavour: 1, // filter driver
		HomeDir: parentPath,
	}

	err := hcsshim.UnprepareLayer(di, layerID)
	if err != nil {
		log.G(ctx).WithError(err).Warnf("failed to unprepare layer %s for removal", path)
	}

	err = hcsshim.DeactivateLayer(di, layerID)
	if err != nil {
		log.G(ctx).WithError(err).Warnf("failed to deactivate layer %s for removal", path)
	}

	removePath := filepath.Join(parentPath, fmt.Sprintf("%s-removing", layerID))
	err = os.Rename(path, removePath)
	if err != nil {
		log.G(ctx).WithError(err).Warnf("failed to rename container layer %s for removal", path)
		removePath = path
	}
	if err := hcsshim.DestroyLayer(di, removePath); err != nil {
		log.G(ctx).WithError(err).Errorf("failed to remove container layer %s", removePath)
		return err
	}

	return nil
}

// ociSpecEnvToHCSEnv converts from the OCI Spec ENV format to the one
// expected by HCS.
func ociSpecEnvToHCSEnv(a []string) map[string]string {
	env := make(map[string]string)
	for _, s := range a {
		arr := strings.SplitN(s, "=", 2)
		if len(arr) == 2 {
			env[arr[0]] = arr[1]
		}
	}
	return env
}
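The env conversion above is self-contained and easy to exercise on its own. Below is a minimal, runnable sketch of the same split-on-first-`=` rule; the function and variable names here are illustrative only, not part of the package.

package main

import (
	"fmt"
	"strings"
)

// ociEnvToMap mirrors ociSpecEnvToHCSEnv above: each "KEY=VALUE" entry is
// split on the first '=' only, so values may themselves contain '='.
// Entries without '=' are silently dropped.
func ociEnvToMap(env []string) map[string]string {
	m := make(map[string]string)
	for _, s := range env {
		if kv := strings.SplitN(s, "=", 2); len(kv) == 2 {
			m[kv[0]] = kv[1]
		}
	}
	return m
}

func main() {
	env := []string{`PATH=C:\Windows;C:\Windows\System32`, "OPTS=a=b=c", "MALFORMED"}
	fmt.Println(ociEnvToMap(env))
	// map[OPTS:a=b=c PATH:C:\Windows;C:\Windows\System32]
}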
@ -1,76 +0,0 @@
// +build windows

package hcs

import (
	"syscall"
	"time"

	"github.com/Microsoft/hcsshim"
	"github.com/containerd/containerd/runtime"
	"github.com/pkg/errors"
)

type Process struct {
	hcsshim.Process

	id       string
	pid      uint32
	io       *IO
	ec       uint32
	exitedAt time.Time
	ecErr    error
	ecSync   chan struct{}
}

func (p *Process) ID() string {
	return p.id
}

func (p *Process) Pid() uint32 {
	return p.pid
}

func (p *Process) ExitCode() (uint32, error) {
	<-p.ecSync
	return p.ec, p.ecErr
}

func (p *Process) ExitedAt() time.Time {
	return p.exitedAt
}

func (p *Process) Status() runtime.Status {
	select {
	case <-p.ecSync:
		return runtime.StoppedStatus
	default:
	}

	return runtime.RunningStatus
}

func (p *Process) Delete() error {
	p.io.Close()
	return p.Close()
}

func processExitCode(containerID string, p *Process) (uint32, error) {
	if err := p.Wait(); err != nil {
		if herr, ok := err.(*hcsshim.ProcessError); ok && herr.Err != syscall.ERROR_BROKEN_PIPE {
			return 255, errors.Wrapf(err, "failed to wait for container '%s' process %d", containerID, p.pid)
		}
		// process is probably dead, let's try to get its exit code
	}

	ec, err := p.Process.ExitCode()
	if err != nil {
		if herr, ok := err.(*hcsshim.ProcessError); ok && herr.Err != syscall.ERROR_BROKEN_PIPE {
			return 255, errors.Wrapf(err, "failed to get container '%s' process %d exit code", containerID, p.pid)
		}
		// Fall back to the unknown exit code
		ec = 255
	}
	p.exitedAt = time.Now()
	return uint32(ec), err
}
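The Status method above relies on the "closed channel as completion flag" idiom: a receive from a closed channel never blocks, so a non-blocking select distinguishes stopped from running. A minimal sketch of just that idiom, with illustrative names:

package main

import "fmt"

// status reports "stopped" once done has been closed, without ever
// blocking; this is the same non-blocking select Process.Status uses.
func status(done <-chan struct{}) string {
	select {
	case <-done:
		return "stopped"
	default:
		return "running"
	}
}

func main() {
	done := make(chan struct{})
	fmt.Println(status(done)) // running
	close(done)
	fmt.Println(status(done)) // stopped
}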
@ -1,76 +0,0 @@
// +build windows

package hcs

import (
	"net"
	"time"

	"github.com/Microsoft/go-winio"
	"github.com/pkg/errors"
)

type IO struct {
	stdin    net.Conn
	stdout   net.Conn
	stderr   net.Conn
	terminal bool
}

// NewIO connects to the provided pipe addresses
func NewIO(stdin, stdout, stderr string, terminal bool) (*IO, error) {
	var (
		c   net.Conn
		err error
		io  IO
	)

	defer func() {
		if err != nil {
			io.Close()
		}
	}()

	for _, p := range []struct {
		name string
		open bool
		conn *net.Conn
	}{
		{
			name: stdin,
			open: stdin != "",
			conn: &io.stdin,
		},
		{
			name: stdout,
			open: stdout != "",
			conn: &io.stdout,
		},
		{
			name: stderr,
			open: !terminal && stderr != "",
			conn: &io.stderr,
		},
	} {
		if p.open {
			dialTimeout := 3 * time.Second
			c, err = winio.DialPipe(p.name, &dialTimeout)
			if err != nil {
				return nil, errors.Wrapf(err, "failed to connect to %s", p.name)
			}
			*p.conn = c
		}
	}

	return &io, nil
}

// Close terminates all successfully dialed IO connections
func (i *IO) Close() {
	for _, cn := range []net.Conn{i.stdin, i.stdout, i.stderr} {
		if cn != nil {
			cn.Close()
		}
	}
}
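For reference, winio.DialPipe (used above and again in the new windows/io.go later in this diff) takes a pipe path and an optional timeout. A minimal, hedged usage sketch — the pipe name is hypothetical, and a server must already be listening on it (for example via winio.ListenPipe) for the dial to succeed:

// +build windows

package main

import (
	"fmt"
	"time"

	winio "github.com/Microsoft/go-winio"
)

func main() {
	timeout := 3 * time.Second
	// `\\.\pipe\demo` is an illustrative name, not one this runtime uses.
	c, err := winio.DialPipe(`\\.\pipe\demo`, &timeout)
	if err != nil {
		fmt.Println("dial failed:", err)
		return
	}
	defer c.Close()
	fmt.Println("connected to", c.RemoteAddr())
}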
@ -1,22 +0,0 @@
// +build windows

package hcs

import "time"

type Configuration struct {
	UseHyperV bool `json:"useHyperV,omitempty"`

	Layers []string `json:"layers"`

	TerminateDuration time.Duration `json:"terminateDuration,omitempty"`

	IgnoreFlushesDuringBoot bool `json:"ignoreFlushesDuringBoot,omitempty"`

	AllowUnqualifiedDNSQuery bool     `json:"allowUnqualifiedDNSQuery,omitempty"`
	DNSSearchList            []string `json:"dnsSearchList,omitempty"`
	NetworkEndpoints         []string `json:"networkEndpoints,omitempty"`
	NetworkSharedContainerID string

	Credentials string `json:"credentials,omitempty"`
}
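The struct tags above control the JSON wire format of this (now removed) configuration. A runnable sketch with a trimmed copy of the struct — the `config` type here is illustrative, not the real one — showing how omitempty and time.Duration (serialized as integer nanoseconds) behave:

package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// config is a trimmed, illustrative copy of the Configuration struct above.
type config struct {
	UseHyperV         bool          `json:"useHyperV,omitempty"`
	Layers            []string      `json:"layers"`
	TerminateDuration time.Duration `json:"terminateDuration,omitempty"`
}

func main() {
	in := config{Layers: []string{`C:\layers\base`}, TerminateDuration: 5 * time.Minute}
	b, _ := json.Marshal(in)
	// UseHyperV is false, so omitempty drops it; the duration is in ns.
	fmt.Println(string(b)) // {"layers":["C:\\layers\\base"],"terminateDuration":300000000000}

	var out config
	_ = json.Unmarshal(b, &out)
	fmt.Println(out.TerminateDuration) // 5m0s
}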
190
windows/hcsshim.go
Normal file
@ -0,0 +1,190 @@
// +build windows

package windows

import (
	"context"
	"fmt"
	"os"
	"path/filepath"
	"strings"

	"github.com/Microsoft/hcsshim"
	"github.com/containerd/containerd/errdefs"
	"github.com/containerd/containerd/log"
	specs "github.com/opencontainers/runtime-spec/specs-go"
	"github.com/pkg/errors"
)

// newContainerConfig generates a hcsshim container configuration from the
// provided OCI Spec
func newContainerConfig(ctx context.Context, owner, id string, spec *specs.Spec) (*hcsshim.ContainerConfig, error) {
	if len(spec.Windows.LayerFolders) == 0 {
		return nil, errors.Wrap(errdefs.ErrInvalidArgument,
			"spec.Windows.LayerFolders cannot be empty")
	}

	var (
		layerFolders = spec.Windows.LayerFolders
		conf         = &hcsshim.ContainerConfig{
			SystemType:                 "Container",
			Name:                       id,
			Owner:                      owner,
			HostName:                   spec.Hostname,
			IgnoreFlushesDuringBoot:    spec.Windows.IgnoreFlushesDuringBoot,
			AllowUnqualifiedDNSQuery:   spec.Windows.Network.AllowUnqualifiedDNSQuery,
			EndpointList:               spec.Windows.Network.EndpointList,
			NetworkSharedContainerName: spec.Windows.Network.NetworkSharedContainerName,
		}
	)

	if spec.Windows.CredentialSpec != nil {
		conf.Credentials = spec.Windows.CredentialSpec.(string)
	}

	// TODO: use the create request Mount for those
	for _, layerPath := range layerFolders {
		_, filename := filepath.Split(layerPath)
		guid, err := hcsshim.NameToGuid(filename)
		if err != nil {
			return nil, errors.Wrapf(err, "unable to get GUID for %s", filename)
		}
		conf.Layers = append(conf.Layers, hcsshim.Layer{
			ID:   guid.ToString(),
			Path: layerPath,
		})
	}

	if len(spec.Mounts) > 0 {
		mds := make([]hcsshim.MappedDir, len(spec.Mounts))
		for i, mount := range spec.Mounts {
			mds[i] = hcsshim.MappedDir{
				HostPath:      mount.Source,
				ContainerPath: mount.Destination,
				ReadOnly:      false,
			}
			for _, o := range mount.Options {
				if strings.ToLower(o) == "ro" {
					mds[i].ReadOnly = true
				}
			}
		}
		conf.MappedDirectories = mds
	}

	if spec.Windows.Network.DNSSearchList != nil {
		conf.DNSSearchList = strings.Join(spec.Windows.Network.DNSSearchList, ",")
	}

	if spec.Windows.HyperV != nil {
		conf.HvPartition = true
		for _, layerPath := range layerFolders {
			utilityVMPath := spec.Windows.HyperV.UtilityVMPath
			_, err := os.Stat(utilityVMPath)
			if err == nil {
				conf.HvRuntime = &hcsshim.HvRuntime{ImagePath: utilityVMPath}
				break
			} else if !os.IsNotExist(err) {
				return nil, errors.Wrapf(err, "failed to access layer %s", layerPath)
			}
		}
	}

	var (
		err error
		di  = hcsshim.DriverInfo{
			Flavour: 1, // filter driver
			HomeDir: filepath.Dir(layerFolders[0]),
		}
	)

	// TODO: Once there is a snapshotter for windows, this can be deleted.
	// The R/W Layer should come from the Rootfs Mounts provided
	//
	// Windows doesn't support creating a container with a readonly
	// filesystem, so always create a RW one
	if err = hcsshim.CreateSandboxLayer(di, id, layerFolders[0], layerFolders); err != nil {
		return nil, errors.Wrapf(err, "failed to create sandbox layer for %s: layers: %#v, driverInfo: %#v",
			id, layerFolders, di)
	}
	conf.LayerFolderPath = filepath.Join(di.HomeDir, id)
	defer func() {
		if err != nil {
			removeLayer(ctx, conf.LayerFolderPath)
		}
	}()

	if err = hcsshim.ActivateLayer(di, id); err != nil {
		return nil, errors.Wrapf(err, "failed to activate layer %s", conf.LayerFolderPath)
	}

	if err = hcsshim.PrepareLayer(di, id, layerFolders); err != nil {
		return nil, errors.Wrapf(err, "failed to prepare layer %s", conf.LayerFolderPath)
	}

	conf.VolumePath, err = hcsshim.GetLayerMountPath(di, id)
	if err != nil {
		return nil, errors.Wrapf(err, "failed to get mount path for layer %s: driverInfo: %#v", id, di)
	}

	return conf, nil
}

// removeLayer deletes the given layer; all associated containers must have
// been shut down for this to succeed.
func removeLayer(ctx context.Context, path string) error {
	var (
		err        error
		layerID    = filepath.Base(path)
		parentPath = filepath.Dir(path)
		di         = hcsshim.DriverInfo{
			Flavour: 1, // filter driver
			HomeDir: parentPath,
		}
	)

	if err = hcsshim.UnprepareLayer(di, layerID); err != nil {
		log.G(ctx).WithError(err).Warnf("failed to unprepare layer %s for removal", path)
	}

	if err = hcsshim.DeactivateLayer(di, layerID); err != nil {
		log.G(ctx).WithError(err).Warnf("failed to deactivate layer %s for removal", path)
	}

	removePath := filepath.Join(parentPath, fmt.Sprintf("%s-removing", layerID))
	if err = os.Rename(path, removePath); err != nil {
		log.G(ctx).WithError(err).Warnf("failed to rename container layer %s for removal", path)
		removePath = path
	}

	if err = hcsshim.DestroyLayer(di, removePath); err != nil {
		log.G(ctx).WithError(err).Errorf("failed to remove container layer %s", removePath)
		return err
	}

	return nil
}

func newProcessConfig(spec *specs.Process, pset *pipeSet) *hcsshim.ProcessConfig {
	conf := &hcsshim.ProcessConfig{
		EmulateConsole:   pset.src.Terminal,
		CreateStdInPipe:  pset.stdin != nil,
		CreateStdOutPipe: pset.stdout != nil,
		CreateStdErrPipe: pset.stderr != nil,
		User:             spec.User.Username,
		CommandLine:      strings.Join(spec.Args, " "),
		Environment:      make(map[string]string),
		WorkingDirectory: spec.Cwd,
		ConsoleSize:      [2]uint{spec.ConsoleSize.Height, spec.ConsoleSize.Width},
	}

	// Convert OCI Env format to HCS's
	for _, s := range spec.Env {
		arr := strings.SplitN(s, "=", 2)
		if len(arr) == 2 {
			conf.Environment[arr[0]] = arr[1]
		}
	}

	return conf
}
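The mount handling in newContainerConfig above scans each mount's option list for a case-insensitive "ro" flag. A tiny standalone sketch of just that rule (names illustrative):

package main

import (
	"fmt"
	"strings"
)

// readOnly reports whether an OCI mount option list requests a read-only
// mapping, matching the "ro" scan in newContainerConfig above.
func readOnly(options []string) bool {
	for _, o := range options {
		if strings.ToLower(o) == "ro" {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(readOnly([]string{"rbind", "RO"})) // true
	fmt.Println(readOnly([]string{"rw"}))          // false
}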
2
windows/hcsshimopts/doc.go
Normal file
@ -0,0 +1,2 @@
// Package hcsshimopts holds the windows runtime specific options
package hcsshimopts
352
windows/hcsshimopts/hcsshim.pb.go
Normal file
@ -0,0 +1,352 @@
// Code generated by protoc-gen-gogo.
// source: github.com/containerd/containerd/windows/hcsshimopts/hcsshim.proto
// DO NOT EDIT!

/*
	Package hcsshimopts is a generated protocol buffer package.

	It is generated from these files:
		github.com/containerd/containerd/windows/hcsshimopts/hcsshim.proto

	It has these top-level messages:
		CreateOptions
*/
package hcsshimopts

import proto "github.com/gogo/protobuf/proto"
import fmt "fmt"
import math "math"
import _ "github.com/gogo/protobuf/gogoproto"
import _ "github.com/golang/protobuf/ptypes/duration"

import time "time"

import github_com_gogo_protobuf_types "github.com/gogo/protobuf/types"

import strings "strings"
import reflect "reflect"

import io "io"

// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
var _ = time.Kitchen

// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package

type CreateOptions struct {
	TerminateDuration time.Duration `protobuf:"bytes,1,opt,name=terminate_duration,json=terminateDuration,stdduration" json:"terminate_duration"`
}

func (m *CreateOptions) Reset()                    { *m = CreateOptions{} }
func (*CreateOptions) ProtoMessage()               {}
func (*CreateOptions) Descriptor() ([]byte, []int) { return fileDescriptorHcsshim, []int{0} }

func init() {
	proto.RegisterType((*CreateOptions)(nil), "containerd.windows.hcsshim.CreateOptions")
}
func (m *CreateOptions) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

func (m *CreateOptions) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	dAtA[i] = 0xa
	i++
	i = encodeVarintHcsshim(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdDuration(m.TerminateDuration)))
	n1, err := github_com_gogo_protobuf_types.StdDurationMarshalTo(m.TerminateDuration, dAtA[i:])
	if err != nil {
		return 0, err
	}
	i += n1
	return i, nil
}

func encodeFixed64Hcsshim(dAtA []byte, offset int, v uint64) int {
	dAtA[offset] = uint8(v)
	dAtA[offset+1] = uint8(v >> 8)
	dAtA[offset+2] = uint8(v >> 16)
	dAtA[offset+3] = uint8(v >> 24)
	dAtA[offset+4] = uint8(v >> 32)
	dAtA[offset+5] = uint8(v >> 40)
	dAtA[offset+6] = uint8(v >> 48)
	dAtA[offset+7] = uint8(v >> 56)
	return offset + 8
}
func encodeFixed32Hcsshim(dAtA []byte, offset int, v uint32) int {
	dAtA[offset] = uint8(v)
	dAtA[offset+1] = uint8(v >> 8)
	dAtA[offset+2] = uint8(v >> 16)
	dAtA[offset+3] = uint8(v >> 24)
	return offset + 4
}
func encodeVarintHcsshim(dAtA []byte, offset int, v uint64) int {
	for v >= 1<<7 {
		dAtA[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	dAtA[offset] = uint8(v)
	return offset + 1
}
func (m *CreateOptions) Size() (n int) {
	var l int
	_ = l
	l = github_com_gogo_protobuf_types.SizeOfStdDuration(m.TerminateDuration)
	n += 1 + l + sovHcsshim(uint64(l))
	return n
}

func sovHcsshim(x uint64) (n int) {
	for {
		n++
		x >>= 7
		if x == 0 {
			break
		}
	}
	return n
}
func sozHcsshim(x uint64) (n int) {
	return sovHcsshim(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func (this *CreateOptions) String() string {
	if this == nil {
		return "nil"
	}
	s := strings.Join([]string{`&CreateOptions{`,
		`TerminateDuration:` + strings.Replace(strings.Replace(this.TerminateDuration.String(), "Duration", "google_protobuf1.Duration", 1), `&`, ``, 1) + `,`,
		`}`,
	}, "")
	return s
}
func valueToStringHcsshim(v interface{}) string {
	rv := reflect.ValueOf(v)
	if rv.IsNil() {
		return "nil"
	}
	pv := reflect.Indirect(rv).Interface()
	return fmt.Sprintf("*%v", pv)
}
func (m *CreateOptions) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowHcsshim
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: CreateOptions: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: CreateOptions: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field TerminateDuration", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowHcsshim
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthHcsshim
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if err := github_com_gogo_protobuf_types.StdDurationUnmarshal(&m.TerminateDuration, dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			iNdEx = preIndex
			skippy, err := skipHcsshim(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthHcsshim
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
func skipHcsshim(dAtA []byte) (n int, err error) {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return 0, ErrIntOverflowHcsshim
			}
			if iNdEx >= l {
				return 0, io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		wireType := int(wire & 0x7)
		switch wireType {
		case 0:
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return 0, ErrIntOverflowHcsshim
				}
				if iNdEx >= l {
					return 0, io.ErrUnexpectedEOF
				}
				iNdEx++
				if dAtA[iNdEx-1] < 0x80 {
					break
				}
			}
			return iNdEx, nil
		case 1:
			iNdEx += 8
			return iNdEx, nil
		case 2:
			var length int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return 0, ErrIntOverflowHcsshim
				}
				if iNdEx >= l {
					return 0, io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				length |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			iNdEx += length
			if length < 0 {
				return 0, ErrInvalidLengthHcsshim
			}
			return iNdEx, nil
		case 3:
			for {
				var innerWire uint64
				var start int = iNdEx
				for shift := uint(0); ; shift += 7 {
					if shift >= 64 {
						return 0, ErrIntOverflowHcsshim
					}
					if iNdEx >= l {
						return 0, io.ErrUnexpectedEOF
					}
					b := dAtA[iNdEx]
					iNdEx++
					innerWire |= (uint64(b) & 0x7F) << shift
					if b < 0x80 {
						break
					}
				}
				innerWireType := int(innerWire & 0x7)
				if innerWireType == 4 {
					break
				}
				next, err := skipHcsshim(dAtA[start:])
				if err != nil {
					return 0, err
				}
				iNdEx = start + next
			}
			return iNdEx, nil
		case 4:
			return iNdEx, nil
		case 5:
			iNdEx += 4
			return iNdEx, nil
		default:
			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
		}
	}
	panic("unreachable")
}

var (
	ErrInvalidLengthHcsshim = fmt.Errorf("proto: negative length found during unmarshaling")
	ErrIntOverflowHcsshim   = fmt.Errorf("proto: integer overflow")
)

func init() {
	proto.RegisterFile("github.com/containerd/containerd/windows/hcsshimopts/hcsshim.proto", fileDescriptorHcsshim)
}

var fileDescriptorHcsshim = []byte{
	// 227 bytes of a gzipped FileDescriptorProto
	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x72, 0x4a, 0xcf, 0x2c, 0xc9,
	0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d,
	0x4a, 0x41, 0x66, 0x96, 0x67, 0xe6, 0xa5, 0xe4, 0x97, 0x17, 0xeb, 0x67, 0x24, 0x17, 0x17, 0x67,
	0x64, 0xe6, 0xe6, 0x17, 0x94, 0xc0, 0xd9, 0x7a, 0x05, 0x45, 0xf9, 0x25, 0xf9, 0x42, 0x52, 0x08,
	0xd5, 0x7a, 0x50, 0xd5, 0x7a, 0x50, 0x15, 0x52, 0x22, 0xe9, 0xf9, 0xe9, 0xf9, 0x60, 0x65, 0xfa,
	0x20, 0x16, 0x44, 0x87, 0x94, 0x5c, 0x7a, 0x7e, 0x7e, 0x7a, 0x4e, 0xaa, 0x3e, 0x98, 0x97, 0x54,
	0x9a, 0xa6, 0x9f, 0x52, 0x5a, 0x94, 0x58, 0x92, 0x99, 0x9f, 0x07, 0x91, 0x57, 0x4a, 0xe6, 0xe2,
	0x75, 0x2e, 0x4a, 0x4d, 0x2c, 0x49, 0xf5, 0x2f, 0x00, 0x89, 0x16, 0x0b, 0x05, 0x71, 0x09, 0x95,
	0xa4, 0x16, 0xe5, 0x66, 0xe6, 0x25, 0x96, 0xa4, 0xc6, 0xc3, 0x14, 0x4b, 0x30, 0x2a, 0x30, 0x6a,
	0x70, 0x1b, 0x49, 0xea, 0x41, 0x4c, 0xd3, 0x83, 0x99, 0xa6, 0xe7, 0x02, 0x55, 0xe0, 0xc4, 0x71,
	0xe2, 0x9e, 0x3c, 0xc3, 0x8c, 0xfb, 0xf2, 0x8c, 0x41, 0x82, 0x70, 0xed, 0x70, 0xc9, 0xa8, 0x13,
	0x0f, 0xe5, 0x18, 0x6e, 0x3c, 0x94, 0x63, 0x68, 0x78, 0x24, 0xc7, 0x78, 0xe2, 0x91, 0x1c, 0xe3,
	0x85, 0x47, 0x72, 0x8c, 0x0f, 0x1e, 0xc9, 0x31, 0x46, 0x39, 0x90, 0x13, 0x28, 0xd6, 0x48, 0xec,
	0x24, 0x36, 0xb0, 0x5b, 0x8c, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0x75, 0x31, 0x65, 0xd0, 0x5f,
	0x01, 0x00, 0x00,
}
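The generated encode/decode helpers above (encodeVarintHcsshim, sovHcsshim, the shift/accumulate loops in Unmarshal and skipHcsshim) all implement protobuf's base-128 varint scheme. A self-contained sketch of that scheme, independent of the generated code:

package main

import "fmt"

// putUvarint appends v to buf using protobuf's base-128 varint encoding:
// seven payload bits per byte, high bit set on all but the last byte.
func putUvarint(buf []byte, v uint64) []byte {
	for v >= 1<<7 {
		buf = append(buf, byte(v&0x7f|0x80))
		v >>= 7
	}
	return append(buf, byte(v))
}

// uvarint decodes one varint and returns the value and bytes consumed,
// mirroring the shift/accumulate loops in the generated code above.
func uvarint(buf []byte) (uint64, int) {
	var v uint64
	for i, b := range buf {
		v |= uint64(b&0x7f) << (7 * uint(i))
		if b < 0x80 {
			return v, i + 1
		}
	}
	return 0, 0 // truncated input
}

func main() {
	enc := putUvarint(nil, 300)
	fmt.Printf("% x\n", enc) // ac 02
	v, n := uvarint(enc)
	fmt.Println(v, n) // 300 2
}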
12
windows/hcsshimopts/hcsshim.proto
Normal file
@ -0,0 +1,12 @@
syntax = "proto3";

package containerd.windows.hcsshim;

import "gogoproto/gogo.proto";
import "google/protobuf/duration.proto";

option go_package = "github.com/containerd/containerd/windows/hcsshimopts;hcsshimopts";

message CreateOptions {
	google.protobuf.Duration terminate_duration = 1 [(gogoproto.stdduration) = true, (gogoproto.nullable) = false];
}
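Because terminate_duration is non-nullable, a zero value simply means "unset" and the runtime substitutes a default (as the new Create method does later in this diff). A small sketch of that defaulting convention — the struct and constant here mirror, but are not, the generated types:

package main

import (
	"fmt"
	"time"
)

// CreateOptions mirrors the generated message above for illustration only.
type CreateOptions struct {
	TerminateDuration time.Duration
}

const defaultTerminateDuration = 5 * time.Minute

// withDefaults applies the same rule the runtime uses: a nil or zero
// option set gets the default terminate duration.
func withDefaults(o *CreateOptions) *CreateOptions {
	if o == nil {
		o = &CreateOptions{}
	}
	if o.TerminateDuration == 0 {
		o.TerminateDuration = defaultTerminateDuration
	}
	return o
}

func main() {
	fmt.Println(withDefaults(nil).TerminateDuration)                          // 5m0s
	fmt.Println(withDefaults(&CreateOptions{time.Second}).TerminateDuration) // 1s
}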
110
windows/io.go
Normal file
@ -0,0 +1,110 @@
// +build windows

package windows

import (
	"context"
	"net"
	"sync"
	"time"

	"github.com/Microsoft/go-winio"
	"github.com/containerd/containerd/runtime"
	"github.com/pkg/errors"
)

type pipeSet struct {
	src    runtime.IO
	stdin  net.Conn
	stdout net.Conn
	stderr net.Conn
}

// newPipeSet connects to the provided pipe addresses
func newPipeSet(ctx context.Context, io runtime.IO) (*pipeSet, error) {
	var (
		err    error
		wg     sync.WaitGroup
		set    = &pipeSet{src: io}
		ch     = make(chan error, 3) // buffered so dial goroutines never block on send
		opened = 0
	)

	defer func() {
		if err != nil {
			// Wait for any outstanding dials, then close whatever was
			// successfully opened, without blocking the caller.
			go func() {
				wg.Wait()
				set.Close()
			}()
		}
	}()

	for _, p := range [3]struct {
		name string
		open bool
		conn *net.Conn
	}{
		{
			name: io.Stdin,
			open: io.Stdin != "",
			conn: &set.stdin,
		},
		{
			name: io.Stdout,
			open: io.Stdout != "",
			conn: &set.stdout,
		},
		{
			name: io.Stderr,
			open: !io.Terminal && io.Stderr != "",
			conn: &set.stderr,
		},
	} {
		if p.open {
			wg.Add(1)
			opened++
			go func(name string, conn *net.Conn) {
				// Each pipe is dialed in its own goroutine so a slow pipe
				// cannot block the others; each dial reports exactly once.
				defer wg.Done()
				dialTimeout := 3 * time.Second
				c, err := winio.DialPipe(name, &dialTimeout)
				if err != nil {
					ch <- errors.Wrapf(err, "failed to connect to %s", name)
					return
				}
				*conn = c
				ch <- nil
			}(p.name, p.conn)
		}
	}

	for i := 0; i < opened; i++ {
		select {
		case <-ctx.Done():
			err = ctx.Err()
			return nil, err
		case e := <-ch:
			if e != nil {
				if err == nil {
					err = e
				} else {
					err = errors.Wrap(err, e.Error())
				}
			}
		}
	}

	return set, err
}

// Close terminates all successfully dialed IO connections
func (p *pipeSet) Close() {
	for _, cn := range []net.Conn{p.stdin, p.stdout, p.stderr} {
		if cn != nil {
			cn.Close()
		}
	}
}
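newPipeSet above is a fan-in: one goroutine per pipe, one result per dial, with the context able to abandon the wait. The same shape can be sketched without any Windows dependency; the fake dialer below stands in for winio.DialPipe so the sketch runs anywhere, and all names are illustrative:

package main

import (
	"context"
	"fmt"
	"time"
)

// dialAll starts one goroutine per name and collects exactly one result
// per dial, keeping the first error it sees.
func dialAll(ctx context.Context, names []string, dial func(string) error) error {
	ch := make(chan error, len(names)) // buffered: senders never block
	for _, name := range names {
		go func(name string) { ch <- dial(name) }(name)
	}
	var firstErr error
	for range names {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case err := <-ch:
			if err != nil && firstErr == nil {
				firstErr = err
			}
		}
	}
	return firstErr
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	err := dialAll(ctx, []string{"stdin", "stdout", "stderr"}, func(name string) error {
		if name == "stderr" {
			return fmt.Errorf("failed to connect to %s", name)
		}
		return nil
	})
	fmt.Println(err) // failed to connect to stderr
}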
54
windows/meta.go
Normal file
@ -0,0 +1,54 @@
// +build windows

package windows

// TODO: remove this file (i.e. meta.go) once we have a snapshotter

import (
	"github.com/boltdb/bolt"
	"github.com/containerd/containerd/errdefs"
	"github.com/pkg/errors"
)

func newLayerFolderStore(tx *bolt.Tx) *layerFolderStore {
	return &layerFolderStore{tx}
}

type layerFolderStore struct {
	tx *bolt.Tx
}

func (s *layerFolderStore) Create(id, layer string) error {
	bkt, err := s.tx.CreateBucketIfNotExists([]byte(pluginID))
	if err != nil {
		return errors.Wrapf(err, "failed to create bucket %s", pluginID)
	}
	err = bkt.Put([]byte(id), []byte(layer))
	if err != nil {
		return errors.Wrapf(err, "failed to store entry %s:%s", id, layer)
	}

	return nil
}

func (s *layerFolderStore) Get(id string) (string, error) {
	bkt := s.tx.Bucket([]byte(pluginID))
	if bkt == nil {
		return "", errors.Wrapf(errdefs.ErrNotFound, "bucket %s", pluginID)
	}

	return string(bkt.Get([]byte(id))), nil
}

func (s *layerFolderStore) Delete(id string) error {
	bkt := s.tx.Bucket([]byte(pluginID))
	if bkt == nil {
		return errors.Wrapf(errdefs.ErrNotFound, "bucket %s", pluginID)
	}

	if err := bkt.Delete([]byte(id)); err != nil {
		return errors.Wrapf(err, "failed to delete entry %s", id)
	}

	return nil
}
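layerFolderStore is a thin wrapper over one bolt bucket keyed by the plugin ID. A minimal, runnable round-trip through the same Put/Get calls; the bucket name and key are illustrative only:

package main

import (
	"fmt"
	"os"
	"path/filepath"

	"github.com/boltdb/bolt"
)

func main() {
	dir, _ := os.MkdirTemp("", "bolt-demo")
	defer os.RemoveAll(dir)

	db, err := bolt.Open(filepath.Join(dir, "meta.db"), 0600, nil)
	if err != nil {
		panic(err)
	}
	defer db.Close()

	bucket := []byte("io.containerd.runtime.windows") // illustrative name
	_ = db.Update(func(tx *bolt.Tx) error {
		bkt, err := tx.CreateBucketIfNotExists(bucket)
		if err != nil {
			return err
		}
		return bkt.Put([]byte("task-1"), []byte(`C:\layers\task-1`))
	})

	_ = db.View(func(tx *bolt.Tx) error {
		fmt.Println(string(tx.Bucket(bucket).Get([]byte("task-1"))))
		return nil
	})
}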
@ -1,25 +1,25 @@
// +build windows

package pid
package windows

import (
	"errors"
	"sync"
)

type Pool struct {
type pidPool struct {
	sync.Mutex
	pool map[uint32]struct{}
	cur  uint32
}

func NewPool() *Pool {
	return &Pool{
func newPidPool() *pidPool {
	return &pidPool{
		pool: make(map[uint32]struct{}),
	}
}

func (p *Pool) Get() (uint32, error) {
func (p *pidPool) Get() (uint32, error) {
	p.Lock()
	defer p.Unlock()
@ -31,6 +31,7 @@ func (p *Pool) Get() (uint32, error) {
	}
	if _, ok := p.pool[pid]; !ok {
		p.cur = pid
		p.pool[pid] = struct{}{}
		return pid, nil
	}
	pid++
@ -39,7 +40,7 @@ func (p *Pool) Get() (uint32, error) {
	return 0, errors.New("pid pool exhausted")
}

func (p *Pool) Put(pid uint32) {
func (p *pidPool) Put(pid uint32) {
	p.Lock()
	delete(p.pool, pid)
	p.Unlock()
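The diff above renames the exported pid.Pool into an unexported pidPool inside the windows package; the mechanics are unchanged. A stand-alone sketch of such a pool, written under the assumption (suggested by the added `p.cur = pid` line) that Get hands out the lowest unused id after the last one issued; the type is illustrative, not the package's:

package main

import (
	"errors"
	"fmt"
	"sync"
)

type pool struct {
	sync.Mutex
	used map[uint32]struct{}
	cur  uint32
}

// Get returns the first free id after the cursor and marks it used.
func (p *pool) Get() (uint32, error) {
	p.Lock()
	defer p.Unlock()
	pid := p.cur + 1
	for i := uint32(0); i < ^uint32(0); i++ {
		if _, taken := p.used[pid]; !taken {
			p.cur = pid
			p.used[pid] = struct{}{}
			return pid, nil
		}
		pid++
	}
	return 0, errors.New("pid pool exhausted")
}

// Put releases an id so it can eventually be reused.
func (p *pool) Put(pid uint32) {
	p.Lock()
	delete(p.used, pid)
	p.Unlock()
}

func main() {
	p := &pool{used: make(map[uint32]struct{})}
	a, _ := p.Get()
	b, _ := p.Get()
	fmt.Println(a, b) // 1 2
	p.Put(a)
	c, _ := p.Get()
	fmt.Println(c) // 3 (the cursor has moved past 1)
}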
@ -4,39 +4,80 @@ package windows
// +build windows

package windows

import (
	"context"
	"time"

	"github.com/Microsoft/hcsshim"
	"github.com/containerd/containerd/errdefs"
	"github.com/containerd/containerd/runtime"
	"github.com/containerd/containerd/windows/hcs"
	"github.com/pkg/errors"
)

// process implements containerd.Process and containerd.State
type process struct {
	*hcs.Process
	hcs hcsshim.Process

	id     string
	pid    uint32
	io     *pipeSet
	status runtime.Status
	task   *task

	exitCh   chan struct{}
	exitCode uint32
	exitTime time.Time
}

func (p *process) ID() string {
	return p.id
}

func (p *process) State(ctx context.Context) (runtime.State, error) {
	return runtime.State{
		Pid:    p.Pid(),
		Status: p.Status(),
		Status:   p.Status(),
		Pid:      p.pid,
		Stdin:    p.io.src.Stdin,
		Stdout:   p.io.src.Stdout,
		Stderr:   p.io.src.Stderr,
		Terminal: p.io.src.Terminal,
	}, nil
}

func (p *process) Kill(ctx context.Context, sig uint32, all bool) error {
	return p.Process.Kill()
}

func (p *process) Status() runtime.Status {
	return p.Process.Status()
	if p.task.getStatus() == runtime.PausedStatus {
		return runtime.PausedStatus
	}

	var status runtime.Status
	select {
	case <-p.exitCh:
		status = runtime.StoppedStatus
	default:
		status = runtime.RunningStatus
	}
	return status
}

func (p *process) Pid() uint32 {
	return p.Process.Pid()
}

func (p *process) CloseIO(ctx context.Context) error {
	return p.Process.CloseStdin()
func (p *process) Kill(ctx context.Context, sig uint32, all bool) error {
	// On windows all signals kill the process
	return errors.Wrap(p.hcs.Kill(), "failed to kill process")
}

func (p *process) ResizePty(ctx context.Context, size runtime.ConsoleSize) error {
	return p.Process.ResizeConsole(uint16(size.Width), uint16(size.Height))
	err := p.hcs.ResizeConsole(uint16(size.Width), uint16(size.Height))
	return errors.Wrap(err, "failed to resize process console")
}

func (p *process) CloseIO(ctx context.Context) error {
	return errors.Wrap(p.hcs.CloseStdin(), "failed to close stdin")
}

func (p *process) Pid() uint32 {
	return p.pid
}

func (p *process) ExitCode() (uint32, time.Time, error) {
	if p.Status() != runtime.StoppedStatus {
		return 255, time.Time{}, errors.Wrap(errdefs.ErrFailedPrecondition, "process is not stopped")
	}
	return p.exitCode, p.exitTime, nil
}
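The new ExitCode above gates its result on Status(): callers get 255 and an error until the exit channel reports the process stopped. A minimal sketch of that gate, with illustrative names:

package main

import (
	"errors"
	"fmt"
	"time"
)

// proc mimics the gating in process.ExitCode above.
type proc struct {
	exitCh   chan struct{}
	exitCode uint32
	exitTime time.Time
}

func (p *proc) ExitCode() (uint32, time.Time, error) {
	select {
	case <-p.exitCh:
		return p.exitCode, p.exitTime, nil
	default:
		return 255, time.Time{}, errors.New("process is not stopped")
	}
}

func main() {
	p := &proc{exitCh: make(chan struct{})}
	_, _, err := p.ExitCode()
	fmt.Println(err) // process is not stopped

	p.exitCode, p.exitTime = 0, time.Now()
	close(p.exitCh) // publish the exit, as the wait goroutine does
	ec, _, _ := p.ExitCode()
	fmt.Println(ec) // 0
}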
@ -6,166 +6,407 @@ import (
	"context"
	"fmt"
	"os"
	"path/filepath"
	"sync"
	"time"

	"github.com/Microsoft/hcsshim"
	"github.com/Sirupsen/logrus"
	"github.com/boltdb/bolt"
	eventsapi "github.com/containerd/containerd/api/services/events/v1"
	containerdtypes "github.com/containerd/containerd/api/types"
	"github.com/containerd/containerd/errdefs"
	"github.com/containerd/containerd/events"
	"github.com/containerd/containerd/log"
	"github.com/containerd/containerd/namespaces"
	"github.com/containerd/containerd/plugin"
	"github.com/containerd/containerd/runtime"
	"github.com/containerd/containerd/typeurl"
	"github.com/containerd/containerd/windows/hcs"
	"github.com/containerd/containerd/windows/pid"
	"github.com/containerd/containerd/windows/hcsshimopts"
	specs "github.com/opencontainers/runtime-spec/specs-go"
	"github.com/pkg/errors"
)

const (
	runtimeName = "windows"
	owner       = "containerd"
	runtimeName              = "windows"
	hcsshimOwner             = "containerd"
	defaultTerminateDuration = 5 * time.Minute
)

var _ = (runtime.Runtime)(&Runtime{})
var (
	pluginID = fmt.Sprintf("%s.%s", plugin.RuntimePlugin, runtimeName)
)

var _ = (runtime.Runtime)(&windowsRuntime{})

func init() {
	plugin.Register(&plugin.Registration{
		ID:   "windows",
		ID:   runtimeName,
		Type: plugin.RuntimePlugin,
		Init: New,
		Requires: []plugin.PluginType{
			plugin.MetadataPlugin,
		},
	})
	typeurl.Register(&RuntimeSpec{}, "windows/Spec")
}

func New(ic *plugin.InitContext) (interface{}, error) {
	rootDir := filepath.Join(ic.Root)
	if err := os.MkdirAll(rootDir, 0755); err != nil {
		return nil, errors.Wrapf(err, "could not create state directory at %s", rootDir)
	if err := os.MkdirAll(ic.Root, 0700); err != nil {
		return nil, errors.Wrapf(err, "could not create state directory at %s", ic.Root)
	}

	c, cancel := context.WithCancel(ic.Context)
	r := &Runtime{
		pidPool:       pid.NewPool(),
		containers:    make(map[string]*container),
		events:        make(chan interface{}, 2048),
		eventsContext: c,
		eventsCancel:  cancel,
		rootDir:       rootDir,
		hcs:           hcs.New(owner, rootDir),
	}

	// Terminate all previous containers that we may have started. We don't
	// support restoring containers
	ctrs, err := loadContainers(ic.Context, r.hcs)
	m, err := ic.Get(plugin.MetadataPlugin)
	if err != nil {
		return nil, err
	}

	for _, c := range ctrs {
		c.ctr.Delete(ic.Context)
		//r.sendEvent(c.ctr.ID(), events.RuntimeEvent_EXIT, c.ctr.Pid(), 255, time.Time{})
	r := &windowsRuntime{
		root:    ic.Root,
		pidPool: newPidPool(),

		events:  make(chan interface{}, 4096),
		emitter: ic.Emitter,
		// TODO(mlaventure): windows needs a stat monitor
		monitor: nil,
		tasks:   runtime.NewTaskList(),
		db:      m.(*bolt.DB),
	}

	// Try to delete the old state dir and recreate it
	stateDir := filepath.Join(ic.Root, "state")
	if err := os.RemoveAll(stateDir); err != nil {
		log.G(c).WithError(err).Warnf("failed to cleanup old state directory at %s", stateDir)
	}
	if err := os.MkdirAll(stateDir, 0755); err != nil {
		return nil, errors.Wrapf(err, "could not create state directory at %s", stateDir)
	}
	r.stateDir = stateDir
	// Load our existing containers and kill/delete them. We don't support
	// reattaching to them
	r.cleanup(ic.Context)

	return r, nil
}

type Runtime struct {
type windowsRuntime struct {
	sync.Mutex

	rootDir  string
	stateDir string
	pidPool  *pid.Pool
	root    string
	pidPool *pidPool

	hcs *hcs.HCS
	emitter *events.Emitter
	events  chan interface{}

	containers map[string]*container

	events        chan interface{}
	eventsContext context.Context
	eventsCancel  func()
	monitor runtime.TaskMonitor
	tasks   *runtime.TaskList
	db      *bolt.DB
}

type RuntimeSpec struct {
	// Spec is the OCI spec
	OCISpec specs.Spec

	// HCS specific options
	hcs.Configuration
func (r *windowsRuntime) ID() string {
	return pluginID
}

func (r *Runtime) ID() string {
	return fmt.Sprintf("%s.%s", plugin.RuntimePlugin, runtimeName)
}

func (r *Runtime) Create(ctx context.Context, id string, opts runtime.CreateOpts) (runtime.Task, error) {
	v, err := typeurl.UnmarshalAny(opts.Spec)
	if err != nil {
		return nil, err
	}
	rtSpec := v.(*RuntimeSpec)
	ctr, err := newContainer(ctx, r.hcs, id, rtSpec, opts.IO)
func (r *windowsRuntime) Create(ctx context.Context, id string, opts runtime.CreateOpts) (runtime.Task, error) {
	namespace, err := namespaces.NamespaceRequired(ctx)
	if err != nil {
		return nil, err
	}

	r.Lock()
	r.containers[id] = ctr
	r.Unlock()
	s, err := typeurl.UnmarshalAny(opts.Spec)
	if err != nil {
		return nil, err
	}
	spec := s.(*specs.Spec)

	return ctr, nil
	var createOpts *hcsshimopts.CreateOptions
	if opts.Options != nil {
		o, err := typeurl.UnmarshalAny(opts.Options)
		if err != nil {
			return nil, err
		}
		createOpts = o.(*hcsshimopts.CreateOptions)
	} else {
		createOpts = &hcsshimopts.CreateOptions{}
	}

	if createOpts.TerminateDuration == 0 {
		createOpts.TerminateDuration = defaultTerminateDuration
	}

	return r.newTask(ctx, namespace, id, spec, opts.IO, createOpts)
}

func (r *Runtime) Delete(ctx context.Context, c runtime.Task) (*runtime.Exit, error) {
	wc, ok := c.(*container)
func (r *windowsRuntime) Get(ctx context.Context, id string) (runtime.Task, error) {
	return r.tasks.Get(ctx, id)
}

func (r *windowsRuntime) Tasks(ctx context.Context) ([]runtime.Task, error) {
	return r.tasks.GetAll(ctx)
}

func (r *windowsRuntime) Delete(ctx context.Context, t runtime.Task) (*runtime.Exit, error) {
	wt, ok := t.(*task)
	if !ok {
		return nil, fmt.Errorf("container cannot be cast as *windows.container")
	}
	ec, err := wc.ctr.ExitCode()
	if err != nil {
		log.G(ctx).WithError(err).Errorf("failed to retrieve exit code for container %s", wc.ctr.ID())
		return nil, errors.Wrap(errdefs.ErrInvalidArgument, "not a windows task")
	}

	wc.ctr.Delete(ctx)
	// TODO(mlaventure): stop monitor on this task

	r.Lock()
	delete(r.containers, wc.ctr.ID())
	r.Unlock()
	var (
		err           error
		needServicing bool
		state, _      = wt.State(ctx)
	)
	switch state.Status {
	case runtime.StoppedStatus:
		// Only try to service a container if it was started and it's not a
		// servicing task itself
		if !wt.servicing {
			needServicing, err = wt.hcsContainer.HasPendingUpdates()
			if err != nil {
				needServicing = false
				log.G(ctx).WithError(err).
					WithFields(logrus.Fields{"id": wt.id, "pid": wt.pid}).
					Error("failed to check if container needs servicing")
			}
		}
		fallthrough
	case runtime.CreatedStatus:
		// if it's stopped or in created state, we need to shut down the
		// container before removing it
		if err = wt.stop(ctx); err != nil {
			return nil, err
		}
	default:
		return nil, errors.Wrap(errdefs.ErrFailedPrecondition,
			"cannot delete a non-stopped task")
	}

	return &runtime.Exit{
		Status:    ec,
		Timestamp: wc.ctr.Processes()[0].ExitedAt(),
	}, nil
}

func (r *Runtime) Tasks(ctx context.Context) ([]runtime.Task, error) {
	r.Lock()
	defer r.Unlock()
	list := make([]runtime.Task, len(r.containers))
	for _, c := range r.containers {
		select {
		case <-ctx.Done():
			return nil, ctx.Err()
		default:
			list = append(list, c)
	var rtExit *runtime.Exit
	if p := wt.getProcess(t.ID()); p != nil {
		ec, ea, err := p.ExitCode()
		if err != nil {
			return nil, err
		}
		rtExit = &runtime.Exit{
			Pid:       wt.pid,
			Status:    ec,
			Timestamp: ea,
		}
	} else {
		rtExit = &runtime.Exit{
			Pid:       wt.pid,
			Status:    255,
			Timestamp: time.Now(),
		}
	}
	return list, nil

	wt.cleanup()
	r.tasks.Delete(ctx, t)

	r.emitter.Post(events.WithTopic(ctx, runtime.TaskDeleteEventTopic),
		&eventsapi.TaskDelete{
			ContainerID: wt.id,
			Pid:         wt.pid,
			ExitStatus:  rtExit.Status,
			ExitedAt:    rtExit.Timestamp,
		})

	if needServicing {
		ns, _ := namespaces.Namespace(ctx)
		serviceCtx := log.WithLogger(context.Background(), log.GetLogger(ctx))
		serviceCtx = namespaces.WithNamespace(serviceCtx, ns)
		r.serviceTask(serviceCtx, ns, wt.id+"_servicing", wt.spec)
	}

	// We were never started, return failure
	return rtExit, nil
}

func (r *Runtime) Get(ctx context.Context, id string) (runtime.Task, error) {
	r.Lock()
	defer r.Unlock()
	c, ok := r.containers[id]
	if !ok {
		return nil, fmt.Errorf("container %s does not exist", id)
func (r *windowsRuntime) newTask(ctx context.Context, namespace, id string, spec *specs.Spec, io runtime.IO, createOpts *hcsshimopts.CreateOptions) (*task, error) {
	var (
		err  error
		pset *pipeSet
	)

	if pset, err = newPipeSet(ctx, io); err != nil {
		return nil, err
	}
	defer func() {
		if err != nil {
			pset.Close()
		}
	}()

	var pid uint32
	if pid, err = r.pidPool.Get(); err != nil {
		return nil, err
	}
	defer func() {
		if err != nil {
			r.pidPool.Put(pid)
		}
	}()

	var (
		conf *hcsshim.ContainerConfig
		nsid = namespace + "-" + id
	)
	if conf, err = newContainerConfig(ctx, hcsshimOwner, nsid, spec); err != nil {
		return nil, err
	}
	defer func() {
		if err != nil {
			removeLayer(ctx, conf.LayerFolderPath)
		}
	}()

	// TODO: remove this once we have a windows snapshotter
	// Store the LayerFolder in the db so we can clean it if we die
	if err = r.db.Update(func(tx *bolt.Tx) error {
		s := newLayerFolderStore(tx)
		return s.Create(nsid, conf.LayerFolderPath)
	}); err != nil {
		return nil, err
	}
	defer func() {
		if err != nil {
			if dbErr := r.db.Update(func(tx *bolt.Tx) error {
				s := newLayerFolderStore(tx)
				return s.Delete(nsid)
			}); dbErr != nil {
				log.G(ctx).WithField("id", id).
					Error("failed to remove key from metadata")
			}
		}
	}()

	ctr, err := hcsshim.CreateContainer(nsid, conf)
	if err != nil {
		return nil, errors.Wrap(err, "hcsshim failed to create task")
	}
	defer func() {
		if err != nil {
			ctr.Terminate()
			ctr.Wait()
			ctr.Close()
		}
	}()

	if err = ctr.Start(); err != nil {
		return nil, errors.Wrap(err, "hcsshim failed to spawn task")
	}

	t := &task{
		id:                id,
		namespace:         namespace,
		pid:               pid,
		io:                pset,
		status:            runtime.CreatedStatus,
		spec:              spec,
		processes:         make(map[string]*process),
		hyperV:            spec.Windows.HyperV != nil,
		emitter:           r.emitter,
		rwLayer:           conf.LayerFolderPath,
		pidPool:           r.pidPool,
		hcsContainer:      ctr,
		terminateDuration: createOpts.TerminateDuration,
	}
	r.tasks.Add(ctx, t)

	var rootfs []*containerdtypes.Mount
	for _, l := range append([]string{t.rwLayer}, spec.Windows.LayerFolders...) {
		rootfs = append(rootfs, &containerdtypes.Mount{
			Type:   "windows-layer",
			Source: l,
		})
	}

	r.emitter.Post(events.WithTopic(ctx, runtime.TaskCreateEventTopic),
		&eventsapi.TaskCreate{
			ContainerID: id,
			IO: &eventsapi.TaskIO{
				Stdin:    io.Stdin,
				Stdout:   io.Stdout,
				Stderr:   io.Stderr,
				Terminal: io.Terminal,
			},
			Pid:    t.pid,
			Rootfs: rootfs,
			// TODO: what should be in Bundle for windows?
		})

	return t, nil
}

func (r *windowsRuntime) cleanup(ctx context.Context) {
	cp, err := hcsshim.GetContainers(hcsshim.ComputeSystemQuery{
		Types:  []string{"Container"},
		Owners: []string{hcsshimOwner},
	})
	if err != nil {
		log.G(ctx).Warn("failed to retrieve running containers")
		return
	}

	for _, p := range cp {
		container, err := hcsshim.OpenContainer(p.ID)
		if err != nil {
			log.G(ctx).Warnf("failed to open container %s", p.ID)
			continue
		}

		err = container.Terminate()
		if err == nil || hcsshim.IsPending(err) || hcsshim.IsAlreadyStopped(err) {
			container.Wait()
		}
		container.Close()

		// TODO: remove this once we have a windows snapshotter
		var layerFolderPath string
		if err := r.db.View(func(tx *bolt.Tx) error {
			s := newLayerFolderStore(tx)
			l, e := s.Get(p.ID)
			if e == nil {
				layerFolderPath = l
			}
			return e
		}); err == nil && layerFolderPath != "" {
			removeLayer(ctx, layerFolderPath)
			if dbErr := r.db.Update(func(tx *bolt.Tx) error {
				s := newLayerFolderStore(tx)
				return s.Delete(p.ID)
			}); dbErr != nil {
				log.G(ctx).WithField("id", p.ID).
					Error("failed to remove key from metadata")
			}
		} else {
			log.G(ctx).WithField("id", p.ID).
				Debug("key not found in metadata, R/W layer may be leaked")
		}
	}
}

func (r *windowsRuntime) serviceTask(ctx context.Context, namespace, id string, spec *specs.Spec) {
	var (
		err        error
		t          *task
		io         runtime.IO
		createOpts = &hcsshimopts.CreateOptions{
			TerminateDuration: defaultTerminateDuration,
		}
	)

	t, err = r.newTask(ctx, namespace, id, spec, io, createOpts)
	if err != nil {
		log.G(ctx).WithError(err).WithField("id", id).
			Warn("failed to create servicing task")
		return
	}
	t.servicing = true

	err = t.Start(ctx)
	switch err {
	case nil:
		<-t.getProcess(id).exitCh
	default:
		log.G(ctx).WithError(err).WithField("id", id).
			Warn("failed to start servicing task")
	}

	if _, err = r.Delete(ctx, t); err != nil {
		log.G(ctx).WithError(err).WithField("id", id).
			Warn("failed to stop servicing task")
	}
	return c, nil
}
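newTask above leans on a chain of deferred rollbacks: every acquired resource (pipe set, pid, layer, db entry, container) registers a deferred cleanup that only fires when the named return error ends up non-nil, so a failure at any step unwinds everything acquired before it, in reverse order. A tiny sketch of just that pattern, with illustrative step names:

package main

import (
	"errors"
	"fmt"
)

// build acquires a few "resources" in order; failAt picks the step that
// fails. Each success registers a rollback that runs only on error.
func build(failAt int) (out string, err error) {
	steps := []string{"pipes", "pid", "layer"}
	for i, s := range steps {
		if i == failAt {
			return "", errors.New(s + ": acquisition failed")
		}
		s := s
		defer func() {
			if err != nil {
				fmt.Println("rolled back", s)
			}
		}()
	}
	return "task ready", nil
}

func main() {
	fmt.Println(build(2))  // rolls back pid, then pipes (LIFO)
	fmt.Println(build(-1)) // task ready <nil>
}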
441
windows/task.go
Normal file
@ -0,0 +1,441 @@
// +build windows

package windows

import (
	"context"
	"io"
	"sync"
	"syscall"
	"time"

	"github.com/Microsoft/hcsshim"
	"github.com/Sirupsen/logrus"
	eventsapi "github.com/containerd/containerd/api/services/events/v1"
	"github.com/containerd/containerd/errdefs"
	"github.com/containerd/containerd/events"
	"github.com/containerd/containerd/log"
	"github.com/containerd/containerd/runtime"
	"github.com/containerd/containerd/typeurl"
	"github.com/gogo/protobuf/types"
	specs "github.com/opencontainers/runtime-spec/specs-go"
	"github.com/pkg/errors"
)

type task struct {
	sync.Mutex

	id        string
	namespace string
	pid       uint32
	io        *pipeSet
	status    runtime.Status
	spec      *specs.Spec
	processes map[string]*process
	hyperV    bool

	emitter *events.Emitter
	rwLayer string

	pidPool           *pidPool
	hcsContainer      hcsshim.Container
	terminateDuration time.Duration
	servicing         bool
}

func (t *task) ID() string {
	return t.id
}

func (t *task) State(ctx context.Context) (runtime.State, error) {
	var status runtime.Status

	if p := t.getProcess(t.id); p != nil {
		status = p.Status()
	} else {
		status = t.getStatus()
	}

	return runtime.State{
		Status:   status,
		Pid:      t.pid,
		Stdin:    t.io.src.Stdin,
		Stdout:   t.io.src.Stdout,
		Stderr:   t.io.src.Stderr,
		Terminal: t.io.src.Terminal,
	}, nil
}

func (t *task) Kill(ctx context.Context, signal uint32, all bool) error {
	p := t.getProcess(t.id)
	if p == nil {
		return errors.Wrapf(errdefs.ErrFailedPrecondition, "task is not running")
	}

	if p.Status() == runtime.StoppedStatus {
		return errors.Wrapf(errdefs.ErrNotFound, "process is stopped")
	}

	return p.Kill(ctx, signal, all)
}

func (t *task) ResizePty(ctx context.Context, size runtime.ConsoleSize) error {
	p := t.getProcess(t.id)
	if p == nil {
		return errors.Wrap(errdefs.ErrFailedPrecondition, "task not started")
	}

	return p.ResizePty(ctx, size)
}

func (t *task) CloseIO(ctx context.Context) error {
	p := t.getProcess(t.id)
	if p == nil {
		return errors.Wrap(errdefs.ErrFailedPrecondition, "task not started")
	}

	return p.hcs.CloseStdin()
}

func (t *task) Info() runtime.TaskInfo {
	return runtime.TaskInfo{
		ID:        t.id,
		Runtime:   pluginID,
		Namespace: t.namespace,
		// TODO(mlaventure): what about Spec? I think this could be removed from the info, the id is enough since it matches the one from the container
	}
}

func (t *task) Start(ctx context.Context) error {
	conf := newProcessConfig(t.spec.Process, t.io)
	if _, err := t.newProcess(ctx, t.id, conf, t.io); err != nil {
		return err
	}

	t.emitter.Post(events.WithTopic(ctx, runtime.TaskStartEventTopic),
		&eventsapi.TaskStart{
			ContainerID: t.id,
			Pid:         t.pid,
		})

	return nil
}

func (t *task) Pause(ctx context.Context) error {
	if t.hyperV {
		err := t.hcsContainer.Pause()
		if err == nil {
			t.Lock()
			t.status = runtime.PausedStatus
			t.Unlock()
		}
		if err == nil {
			t.emitter.Post(events.WithTopic(ctx, runtime.TaskPausedEventTopic),
				&eventsapi.TaskPaused{
					ContainerID: t.id,
				})
		}
		return errors.Wrap(err, "hcsshim failed to pause task")
	}

	return errors.Wrap(errdefs.ErrFailedPrecondition, "not a hyperV task")
}

func (t *task) Resume(ctx context.Context) error {
	if t.hyperV {
		err := t.hcsContainer.Resume()
		if err == nil {
			t.Lock()
			t.status = runtime.RunningStatus
			t.Unlock()
		}
		if err == nil {
			t.emitter.Post(events.WithTopic(ctx, runtime.TaskResumedEventTopic),
				&eventsapi.TaskResumed{
					ContainerID: t.id,
				})
		}
		return errors.Wrap(err, "hcsshim failed to resume task")
	}

	return errors.Wrap(errdefs.ErrFailedPrecondition, "not a hyperV task")
}

func (t *task) Exec(ctx context.Context, id string, opts runtime.ExecOpts) (runtime.Process, error) {
	if p := t.getProcess(t.id); p == nil {
		return nil, errors.Wrap(errdefs.ErrFailedPrecondition, "task not started")
	}

	if p := t.getProcess(id); p != nil {
		return nil, errors.Wrap(errdefs.ErrAlreadyExists, "id already in use")
	}

	s, err := typeurl.UnmarshalAny(opts.Spec)
	if err != nil {
		return nil, err
	}
	spec := s.(*specs.Process)
	if spec.Cwd == "" {
		spec.Cwd = t.spec.Process.Cwd
	}

	var pset *pipeSet
	if pset, err = newPipeSet(ctx, opts.IO); err != nil {
		return nil, err
	}
	defer func() {
		if err != nil {
			pset.Close()
		}
	}()

	conf := newProcessConfig(spec, pset)
	p, err := t.newProcess(ctx, id, conf, pset)
	if err != nil {
		return nil, err
	}

	t.emitter.Post(events.WithTopic(ctx, runtime.TaskExecAddedEventTopic),
		&eventsapi.TaskExecAdded{
			ContainerID: t.id,
			ExecID:      id,
			Pid:         p.Pid(),
		})

	return p, nil
}

func (t *task) Pids(ctx context.Context) ([]uint32, error) {
	t.Lock()
	defer t.Unlock()

	var (
		pids = make([]uint32, len(t.processes))
		idx  = 0
	)
	for _, p := range t.processes {
		pids[idx] = p.Pid()
		idx++
	}

	return pids, nil
}

func (t *task) Checkpoint(_ context.Context, _ string, _ *types.Any) error {
	return errors.Wrap(errdefs.ErrUnavailable, "not supported")
}

func (t *task) DeleteProcess(ctx context.Context, id string) (*runtime.Exit, error) {
	if id == t.id {
		return nil, errors.Wrapf(errdefs.ErrInvalidArgument,
			"cannot delete init process")
	}
	if p := t.getProcess(id); p != nil {
		ec, ea, err := p.ExitCode()
		if err != nil {
			return nil, err
		}
		t.removeProcess(id)
		return &runtime.Exit{
			Pid:       p.pid,
			Status:    ec,
			Timestamp: ea,
		}, nil
	}
	return nil, errors.Wrapf(errdefs.ErrNotFound, "no such process %s", id)
}

func (t *task) Update(ctx context.Context, resources *types.Any) error {
	return errors.Wrap(errdefs.ErrUnavailable, "not supported")
}

func (t *task) Process(ctx context.Context, id string) (runtime.Process, error) {
	// Return the concrete process through the interface only when it is
	// non-nil, to avoid handing back a typed-nil interface value.
	if p := t.getProcess(id); p != nil {
		return p, nil
	}
	return nil, errors.Wrapf(errdefs.ErrNotFound, "no such process %s", id)
}

func (t *task) newProcess(ctx context.Context, id string, conf *hcsshim.ProcessConfig, pset *pipeSet) (*process, error) {
	var (
		err error
		pid uint32
	)

	// If we fail, close the io right now
	defer func() {
		if err != nil {
			pset.Close()
		}
	}()

	t.Lock()
	if len(t.processes) == 0 {
		pid = t.pid
	} else {
		if pid, err = t.pidPool.Get(); err != nil {
			t.Unlock()
			return nil, err
		}
		defer func() {
			if err != nil {
				t.pidPool.Put(pid)
			}
		}()
	}
	t.Unlock()

	var p hcsshim.Process
	if p, err = t.hcsContainer.CreateProcess(conf); err != nil {
		return nil, errors.Wrapf(err, "failed to create process")
	}

	stdin, stdout, stderr, err := p.Stdio()
	if err != nil {
		p.Kill()
		return nil, errors.Wrapf(err, "failed to retrieve init process stdio")
	}

	ioCopy := func(name string, dst io.WriteCloser, src io.ReadCloser) {
		log.G(ctx).WithFields(logrus.Fields{"id": id, "pid": pid}).
			Debugf("%s: copy started", name)
		io.Copy(dst, src)
		log.G(ctx).WithFields(logrus.Fields{"id": id, "pid": pid}).
			Debugf("%s: copy done", name)
		dst.Close()
		src.Close()
	}

	if pset.stdin != nil {
		go ioCopy("stdin", stdin, pset.stdin)
	}

	if pset.stdout != nil {
		go ioCopy("stdout", pset.stdout, stdout)
	}

	if pset.stderr != nil {
		go ioCopy("stderr", pset.stderr, stderr)
	}

	t.Lock()
	wp := &process{
		id:     id,
		pid:    pid,
		io:     pset,
		status: runtime.RunningStatus,
		task:   t,
		hcs:    p,
		exitCh: make(chan struct{}),
	}
	t.processes[id] = wp
	t.Unlock()

	// Wait for the process to exit to get the exit status
	go func() {
		if err := p.Wait(); err != nil {
			herr, ok := err.(*hcsshim.ProcessError)
			if ok && herr.Err != syscall.ERROR_BROKEN_PIPE {
				log.G(ctx).
					WithError(err).
					WithFields(logrus.Fields{"id": id, "pid": pid}).
					Warnf("hcsshim wait failed (process may have been killed)")
			}
			// Try to get the exit code nonetheless
		}
		wp.exitTime = time.Now()

		ec, err := p.ExitCode()
		if err != nil {
			log.G(ctx).
				WithError(err).
				WithFields(logrus.Fields{"id": id, "pid": pid}).
				Warnf("hcsshim could not retrieve exit code")
			// Use the unknown exit code
			ec = 255
		}
		wp.exitCode = uint32(ec)

		t.emitter.Post(events.WithTopic(ctx, runtime.TaskExitEventTopic),
			&eventsapi.TaskExit{
				ContainerID: t.id,
				ID:          id,
				Pid:         pid,
				ExitStatus:  wp.exitCode,
				ExitedAt:    wp.exitTime,
			})

		close(wp.exitCh)
		// Ensure io's are closed
		pset.Close()
		// Cleanup HCS resources
		p.Close()
	}()

	return wp, nil
}

func (t *task) getProcess(id string) *process {
	t.Lock()
	p := t.processes[id]
	t.Unlock()

	return p
}

func (t *task) removeProcessNL(id string) {
	if p, ok := t.processes[id]; ok {
		if p.io != nil {
			p.io.Close()
		}
		t.pidPool.Put(p.pid)
		delete(t.processes, id)
	}
}

func (t *task) removeProcess(id string) {
	t.Lock()
	t.removeProcessNL(id)
	t.Unlock()
}

func (t *task) getStatus() runtime.Status {
	t.Lock()
	status := t.status
	t.Unlock()

	return status
}

// stop tries to shutdown the task.
// It will do so by first calling Shutdown on the hcsshim.Container and if
// that fails, by resorting to calling Terminate
func (t *task) stop(ctx context.Context) error {
	if err := t.hcsStop(ctx, t.hcsContainer.Shutdown); err != nil {
		return t.hcsStop(ctx, t.hcsContainer.Terminate)
	}
	t.hcsContainer.Close()
	return nil
}

func (t *task) hcsStop(ctx context.Context, stop func() error) error {
	err := stop()
	switch {
	case hcsshim.IsPending(err):
		err = t.hcsContainer.WaitTimeout(t.terminateDuration)
	case hcsshim.IsAlreadyStopped(err):
		err = nil
	}
	return err
}

func (t *task) cleanup() {
	t.Lock()
	for _, p := range t.processes {
		t.removeProcessNL(p.id)
	}
	removeLayer(context.Background(), t.rwLayer)
	t.Unlock()
}
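The stop/hcsStop pair above encodes a graceful-then-forceful shutdown: try Shutdown, treat a "pending" result as success-after-wait, and only fall back to Terminate if the graceful path fails outright. A dependency-free sketch of that control flow; the function values stand in for the hcsshim calls and all names are illustrative:

package main

import (
	"errors"
	"fmt"
	"time"
)

var errPending = errors.New("operation pending")

// stop mirrors task.stop/hcsStop: graceful first, pending means wait,
// hard terminate only as a fallback.
func stop(shutdown, terminate func() error, wait func(time.Duration) error, d time.Duration) error {
	try := func(f func() error) error {
		err := f()
		if errors.Is(err, errPending) {
			return wait(d)
		}
		return err
	}
	if err := try(shutdown); err != nil {
		return try(terminate)
	}
	return nil
}

func main() {
	err := stop(
		func() error { return errors.New("shutdown refused") },
		func() error { return errPending },
		func(time.Duration) error { return nil },
		5*time.Minute,
	)
	fmt.Println(err) // <nil>: terminate went pending, the wait succeeded
}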