Merge pull request #1037 from Random-Liu/support-unknown-state

Support unknown state

commit 7c2498d2e6
@@ -33,7 +33,7 @@ mkdir -p ${REPORT_DIR}
 test_setup ${REPORT_DIR}
 
 # Run integration test.
-sudo ${ROOT}/_output/integration.test --test.run="${FOCUS}" --test.v \
+sudo PATH=${PATH} ${ROOT}/_output/integration.test --test.run="${FOCUS}" --test.v \
     --cri-endpoint=${CONTAINERD_SOCK} \
     --cri-root=${CRI_ROOT} \
     --runtime-handler=${RUNTIME}
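The only functional change here is the PATH=${PATH} prefix: sudo typically resets the environment, while the new integration test below locates runc via exec.LookPath, which searches the PATH of the test process. A self-contained sketch of that dependency (standalone illustration, not part of the commit):

package main

import (
    "fmt"
    "os/exec"
)

func main() {
    // LookPath searches the directories listed in this process's PATH
    // variable; under a PATH-stripping sudo it would fail to find runc.
    p, err := exec.LookPath("runc")
    if err != nil {
        fmt.Println("runc not in PATH:", err)
        return
    }
    fmt.Println("runc found at", p)
}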
@@ -17,8 +17,11 @@ limitations under the License.
 
 package integration
 
 import (
+	"os"
+	"os/exec"
 	"sort"
 	"testing"
+	"time"
 
 	"github.com/containerd/containerd"
 	"github.com/stretchr/testify/assert"
@@ -192,3 +195,149 @@ func TestContainerdRestart(t *testing.T) {
 		assert.True(t, found, "should find image %+v", i1)
 	}
 }
+
+// Note: The test moves runc binary.
+// The test requires:
+// 1) The runtime is runc;
+// 2) runc is in PATH;
+func TestUnknownStateAfterContainerdRestart(t *testing.T) {
+	if *runtimeHandler != "" {
+		t.Skip("unsupported config: runtime handler is set")
+	}
+	runcPath, err := exec.LookPath("runc")
+	if err != nil {
+		t.Skip("unsupported config: runc not in PATH")
+	}
+
+	const testImage = "busybox"
+	t.Logf("Pull test image %q", testImage)
+	img, err := imageService.PullImage(&runtime.ImageSpec{Image: testImage}, nil)
+	require.NoError(t, err)
+	defer func() {
+		assert.NoError(t, imageService.RemoveImage(&runtime.ImageSpec{Image: img}))
+	}()
+
+	sbConfig := PodSandboxConfig("sandbox", "sandbox-unknown-state")
+	t.Log("Should not be able to create sandbox without runc")
+	tmpRuncPath := Randomize(runcPath)
+	require.NoError(t, os.Rename(runcPath, tmpRuncPath))
+	defer func() {
+		os.Rename(tmpRuncPath, runcPath)
+	}()
+	sb, err := runtimeService.RunPodSandbox(sbConfig, "")
+	if err == nil {
+		assert.NoError(t, runtimeService.StopPodSandbox(sb))
+		assert.NoError(t, runtimeService.RemovePodSandbox(sb))
+		t.Skip("unsupported config: runc is not being used")
+	}
+	require.NoError(t, os.Rename(tmpRuncPath, runcPath))
+
+	t.Log("Create a sandbox")
+	sb, err = runtimeService.RunPodSandbox(sbConfig, "")
+	require.NoError(t, err)
+	defer func() {
+		// Make sure the sandbox is cleaned up in any case.
+		runtimeService.StopPodSandbox(sb)
+		runtimeService.RemovePodSandbox(sb)
+	}()
+	ps, err := runtimeService.PodSandboxStatus(sb)
+	require.NoError(t, err)
+	assert.Equal(t, runtime.PodSandboxState_SANDBOX_READY, ps.GetState())
+
+	t.Log("Create a container")
+	cnConfig := ContainerConfig(
+		"container-unknown-state",
+		testImage,
+		WithCommand("sleep", "1000"),
+	)
+	cn, err := runtimeService.CreateContainer(sb, cnConfig, sbConfig)
+	require.NoError(t, err)
+
+	t.Log("Start the container")
+	require.NoError(t, runtimeService.StartContainer(cn))
+	cs, err := runtimeService.ContainerStatus(cn)
+	require.NoError(t, err)
+	assert.Equal(t, runtime.ContainerState_CONTAINER_RUNNING, cs.GetState())
+
+	t.Log("Move runc binary, so that container/sandbox can't be loaded after restart")
+	tmpRuncPath = Randomize(runcPath)
+	require.NoError(t, os.Rename(runcPath, tmpRuncPath))
+	defer func() {
+		os.Rename(tmpRuncPath, runcPath)
+	}()
+
+	t.Log("Restart containerd")
+	RestartContainerd(t)
+
+	t.Log("Sandbox should be in NOTREADY state after containerd restart")
+	ps, err = runtimeService.PodSandboxStatus(sb)
+	require.NoError(t, err)
+	assert.Equal(t, runtime.PodSandboxState_SANDBOX_NOTREADY, ps.GetState())
+
+	t.Log("Container should be in UNKNOWN state after containerd restart")
+	cs, err = runtimeService.ContainerStatus(cn)
+	require.NoError(t, err)
+	assert.Equal(t, runtime.ContainerState_CONTAINER_UNKNOWN, cs.GetState())
+
+	t.Log("Stop/remove the sandbox should fail for the lack of runc")
+	assert.Error(t, runtimeService.StopPodSandbox(sb))
+	assert.Error(t, runtimeService.RemovePodSandbox(sb))
+
+	t.Log("Stop/remove the container should fail for the lack of runc")
+	assert.Error(t, runtimeService.StopContainer(cn, 10))
+	assert.Error(t, runtimeService.RemoveContainer(cn))
+
+	t.Log("Move runc back")
+	require.NoError(t, os.Rename(tmpRuncPath, runcPath))
+
+	t.Log("Sandbox should still be in NOTREADY state")
+	ps, err = runtimeService.PodSandboxStatus(sb)
+	require.NoError(t, err)
+	assert.Equal(t, runtime.PodSandboxState_SANDBOX_NOTREADY, ps.GetState())
+
+	t.Log("Container should still be in UNKNOWN state")
+	cs, err = runtimeService.ContainerStatus(cn)
+	require.NoError(t, err)
+	assert.Equal(t, runtime.ContainerState_CONTAINER_UNKNOWN, cs.GetState())
+
+	t.Log("Sandbox operations which require running state should fail")
+	_, err = runtimeService.PortForward(&runtime.PortForwardRequest{
+		PodSandboxId: sb,
+		Port:         []int32{8080},
+	})
+	assert.Error(t, err)
+
+	t.Log("Container operations which require running state should fail")
+	assert.Error(t, runtimeService.ReopenContainerLog(cn))
+	_, _, err = runtimeService.ExecSync(cn, []string{"ls"}, 10*time.Second)
+	assert.Error(t, err)
+	_, err = runtimeService.Attach(&runtime.AttachRequest{
+		ContainerId: cn,
+		Stdin:       true,
+		Stdout:      true,
+		Stderr:      true,
+	})
+	assert.Error(t, err)
+
+	t.Log("Containerd should still be running now")
+	_, err = runtimeService.Status()
+	require.NoError(t, err)
+
+	t.Log("Remove the container should fail in this state")
+	assert.Error(t, runtimeService.RemoveContainer(cn))
+
+	t.Log("Remove the sandbox should fail in this state")
+	assert.Error(t, runtimeService.RemovePodSandbox(sb))
+
+	t.Log("Should be able to stop container in this state")
+	assert.NoError(t, runtimeService.StopContainer(cn, 10))
+
+	t.Log("Should be able to stop sandbox in this state")
+	assert.NoError(t, runtimeService.StopPodSandbox(sb))
+
+	t.Log("Should be able to remove container after stop")
+	assert.NoError(t, runtimeService.RemoveContainer(cn))
+
+	t.Log("Should be able to remove sandbox after stop")
+	assert.NoError(t, runtimeService.RemovePodSandbox(sb))
+}
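The test simulates an unrecoverable runtime by hiding the runc binary before restarting containerd, so task state cannot be reloaded and the container lands in UNKNOWN state. A minimal, self-contained sketch of the hide-and-restore pattern it relies on (the helper name and the ".hidden" suffix are mine; the test itself uses its Randomize helper):

package main

import (
    "fmt"
    "os"
    "os/exec"
)

// hideBinary renames a binary aside so it can no longer be found,
// and returns a restore function that moves it back.
func hideBinary(name string) (restore func() error, err error) {
    path, err := exec.LookPath(name)
    if err != nil {
        return nil, err
    }
    hidden := path + ".hidden" // the commit uses a Randomize helper instead
    if err := os.Rename(path, hidden); err != nil {
        return nil, err
    }
    return func() error { return os.Rename(hidden, path) }, nil
}

func main() {
    restore, err := hideBinary("runc")
    if err != nil {
        fmt.Println("skip:", err)
        return
    }
    defer restore()
    // ... exercise the failure path while the binary is missing ...
}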
@@ -98,9 +98,12 @@ func (c *criService) RemoveContainer(ctx context.Context, r *runtime.RemoveConta
 // container will not be started or removed again.
 func setContainerRemoving(container containerstore.Container) error {
 	return container.Status.Update(func(status containerstore.Status) (containerstore.Status, error) {
-		// Do not remove container if it's still running.
+		// Do not remove container if it's still running or unknown.
 		if status.State() == runtime.ContainerState_CONTAINER_RUNNING {
-			return status, errors.New("container is still running")
+			return status, errors.New("container is still running, to stop first")
 		}
+		if status.State() == runtime.ContainerState_CONTAINER_UNKNOWN {
+			return status, errors.New("container state is unknown, to stop first")
+		}
 		if status.Removing {
 			return status, errors.New("container is already in removing state")
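setContainerRemoving performs its state checks inside a single Status.Update transaction, so "verify state, then mark Removing" cannot race with a concurrent start or a second remove. A distilled, runnable sketch of that guard pattern, with simplified types of my own:

package main

import (
    "errors"
    "fmt"
    "sync"
)

type Status struct {
    Running  bool
    Removing bool
}

type StatusStorage struct {
    mu sync.Mutex
    s  Status
}

// Update applies fn under the lock; fn may reject the transition.
func (st *StatusStorage) Update(fn func(Status) (Status, error)) error {
    st.mu.Lock()
    defer st.mu.Unlock()
    next, err := fn(st.s)
    if err != nil {
        return err
    }
    st.s = next
    return nil
}

func main() {
    st := &StatusStorage{}
    err := st.Update(func(s Status) (Status, error) {
        if s.Running {
            return s, errors.New("still running, stop first")
        }
        if s.Removing {
            return s, errors.New("already being removed")
        }
        s.Removing = true // check and mark happen atomically
        return s, nil
    })
    fmt.Println("marked removing:", err == nil)
}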
@@ -60,6 +60,15 @@ func (c *criService) ContainerStatus(ctx context.Context, r *runtime.ContainerSt
 		}
 	}
 	status := toCRIContainerStatus(container, spec, imageRef)
+	if status.GetCreatedAt() == 0 {
+		// CRI doesn't allow CreatedAt == 0.
+		info, err := container.Container.Info(ctx)
+		if err != nil {
+			return nil, errors.Wrapf(err, "failed to get CreatedAt in %q state", status.State)
+		}
+		status.CreatedAt = info.CreatedAt.UnixNano()
+	}
 
 	info, err := toCRIContainerInfo(ctx, container, r.GetVerbose())
 	if err != nil {
 		return nil, errors.Wrap(err, "failed to get verbose container info")
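A zero CreatedAt means the checkpointed status was never written, which is exactly the unknown-state case, so the timestamp is recovered from containerd's own container record. The same fallback appears in the pod sandbox status path further below. A minimal sketch of the rule (simplified types, mine):

package main

import (
    "fmt"
    "time"
)

// createdAt prefers the checkpointed timestamp, falling back to the
// container record when the checkpoint was never written (== 0).
func createdAt(checkpointed int64, record time.Time) int64 {
    if checkpointed != 0 {
        return checkpointed
    }
    return record.UnixNano()
}

func main() {
    fmt.Println(createdAt(0, time.Now()) != 0) // true: CRI never sees 0
}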
@@ -19,6 +19,8 @@ package server
 import (
 	"time"
 
+	"github.com/containerd/containerd"
+	eventtypes "github.com/containerd/containerd/api/events"
 	"github.com/containerd/containerd/errdefs"
 	"github.com/docker/docker/pkg/signal"
 	"github.com/pkg/errors"
@@ -60,8 +62,9 @@ func (c *criService) stopContainer(ctx context.Context, container containerstore
 	// Return without error if container is not running. This makes sure that
 	// stop only takes real action after the container is started.
 	state := container.Status.Get().State()
-	if state != runtime.ContainerState_CONTAINER_RUNNING {
-		logrus.Infof("Container to stop %q is not running, current state %q",
+	if state != runtime.ContainerState_CONTAINER_RUNNING &&
+		state != runtime.ContainerState_CONTAINER_UNKNOWN {
+		logrus.Infof("Container to stop %q must be in running or unknown state, current state %q",
 			id, criContainerStateToString(state))
 		return nil
 	}
@@ -69,9 +72,39 @@ func (c *criService) stopContainer(ctx context.Context, container containerstore
 	task, err := container.Container.Task(ctx, nil)
 	if err != nil {
 		if !errdefs.IsNotFound(err) {
-			return errors.Wrapf(err, "failed to stop container, task not found for container %q", id)
+			return errors.Wrapf(err, "failed to get task for container %q", id)
 		}
-		return nil
+		// Don't return for unknown state, some cleanup needs to be done.
+		if state != runtime.ContainerState_CONTAINER_UNKNOWN {
+			return nil
+		}
+		// Task is an interface, explicitly set it to nil just in case.
+		task = nil
 	}
 
+	// Handle unknown state.
+	if state == runtime.ContainerState_CONTAINER_UNKNOWN {
+		status, err := getTaskStatus(ctx, task)
+		if err != nil {
+			return errors.Wrapf(err, "failed to get task status for %q", id)
+		}
+		switch status.Status {
+		case containerd.Running, containerd.Created:
+			// The task is still running, continue stopping the task.
+		case containerd.Stopped:
+			// The task has exited. If the task exited after containerd
+			// started, the event monitor will receive its exit event; if it
+			// exited before containerd started, the event monitor will never
+			// receive its exit event.
+			// However, we can't tell that because the task state was not
+			// successfully loaded during containerd start (container is
+			// in UNKNOWN state).
+			// So always do cleanup here, just in case that we've missed the
+			// exit event.
+			return cleanupUnknownContainer(ctx, id, status, container)
+		default:
+			return errors.Wrapf(err, "unsupported task status %q", status.Status)
+		}
+	}
 
 	// We only need to kill the task. The event handler will Delete the
@@ -141,3 +174,21 @@ func (c *criService) waitContainerStop(ctx context.Context, container containers
 		return nil
 	}
 }
+
+// cleanupUnknownContainer cleanup stopped container in unknown state.
+func cleanupUnknownContainer(ctx context.Context, id string, status containerd.Status,
+	cntr containerstore.Container) error {
+	// Reuse handleContainerExit to do the cleanup.
+	// NOTE(random-liu): If the task did exit after containerd started, both
+	// the event monitor and the cleanup function would update the container
+	// state. The final container state will be whatever being updated first.
+	// There is no way to completely avoid this race condition, and for best
+	// effort unknown state container cleanup, this seems acceptable.
+	return handleContainerExit(ctx, &eventtypes.TaskExit{
+		ContainerID: id,
+		ID:          id,
+		Pid:         0,
+		ExitStatus:  status.ExitStatus,
+		ExitedAt:    status.ExitTime,
+	}, cntr)
+}
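cleanupUnknownContainer shows the design choice at work: rather than maintaining a second cleanup path, it fabricates the TaskExit event that was possibly missed and feeds it to the existing exit handler. A simplified, self-contained sketch of that synthesize-an-event pattern (types are mine):

package main

import (
    "fmt"
    "time"
)

type TaskExit struct {
    ContainerID string
    ExitStatus  uint32
    ExitedAt    time.Time
}

// handleExit is the single cleanup path for both live and synthetic events.
func handleExit(e TaskExit) { fmt.Println("cleaned up", e.ContainerID) }

func main() {
    // Missed the real event; synthesize one with the observed status.
    handleExit(TaskExit{ContainerID: "c1", ExitStatus: 255, ExitedAt: time.Now()})
}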
@@ -213,7 +213,7 @@ func (em *eventMonitor) handleEvent(any interface{}) error {
 		} else if err != store.ErrNotExist {
 			return errors.Wrap(err, "can't find container for TaskExit event")
 		}
-		// Use GetAll to include sandbox in unknown state.
+		// Use GetAll to include sandbox in init state.
 		sb, err := em.c.sandboxStore.GetAll(e.ID)
 		if err == nil {
 			if err := handleSandboxExit(ctx, e, sb); err != nil {
@@ -260,7 +260,16 @@ func handleContainerExit(ctx context.Context, e *eventtypes.TaskExit, cntr conta
 	// Attach container IO so that `Delete` could cleanup the stream properly.
 	task, err := cntr.Container.Task(ctx,
 		func(*containerdio.FIFOSet) (containerdio.IO, error) {
-			return cntr.IO, nil
+			// We can't directly return cntr.IO here, because
+			// even if cntr.IO is nil, the cio.IO interface
+			// is not.
+			// See https://tour.golang.org/methods/12:
+			//   Note that an interface value that holds a nil
+			//   concrete value is itself non-nil.
+			if cntr.IO != nil {
+				return cntr.IO, nil
+			}
+			return nil, nil
 		},
 	)
 	if err != nil {
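The comment above references a classic Go pitfall that is easy to verify in isolation: an interface value holding a nil concrete pointer is itself non-nil. A runnable demonstration (types are mine):

package main

import "fmt"

type IO interface{ Close() error }

type ContainerIO struct{}

func (*ContainerIO) Close() error { return nil }

func main() {
    var concrete *ContainerIO // nil pointer
    var iface IO = concrete   // interface holds (type=*ContainerIO, value=nil)
    fmt.Println(concrete == nil) // true
    fmt.Println(iface == nil)    // false! the interface is non-nil
}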
@@ -313,13 +322,13 @@ func handleSandboxExit(ctx context.Context, e *eventtypes.TaskExit, sb sandboxst
 		}
 	}
 	err = sb.Status.Update(func(status sandboxstore.Status) (sandboxstore.Status, error) {
-		// NOTE(random-liu): We SHOULD NOT change UNKNOWN state here.
-		// If sandbox state is UNKNOWN when event monitor receives an TaskExit event,
+		// NOTE(random-liu): We SHOULD NOT change INIT state here.
+		// If sandbox state is INIT when event monitor receives an TaskExit event,
 		// it means that sandbox start has failed. In that case, `RunPodSandbox` will
 		// cleanup everything immediately.
-		// Once sandbox state goes out of UNKNOWN, it becomes visable to the user, which
+		// Once sandbox state goes out of INIT, it becomes visable to the user, which
 		// is not what we want.
-		if status.State != sandboxstore.StateUnknown {
+		if status.State != sandboxstore.StateInit {
 			status.State = sandboxstore.StateNotReady
 		}
 		status.Pid = 0
@@ -25,9 +25,12 @@ import (
 	"regexp"
 	"strconv"
 	"strings"
+	"time"
 
 	"github.com/BurntSushi/toml"
+	"github.com/containerd/containerd"
 	"github.com/containerd/containerd/containers"
+	"github.com/containerd/containerd/errdefs"
 	"github.com/containerd/containerd/runtime/linux/runctypes"
 	runcoptions "github.com/containerd/containerd/runtime/v2/runc/options"
 	"github.com/containerd/typeurl"
@@ -44,7 +47,9 @@ import (
 	runtimeoptions "github.com/containerd/cri/pkg/api/runtimeoptions/v1"
 	criconfig "github.com/containerd/cri/pkg/config"
 	"github.com/containerd/cri/pkg/store"
+	containerstore "github.com/containerd/cri/pkg/store/container"
 	imagestore "github.com/containerd/cri/pkg/store/image"
 	sandboxstore "github.com/containerd/cri/pkg/store/sandbox"
 	"github.com/containerd/cri/pkg/util"
 )
@@ -524,3 +529,53 @@ func restrictOOMScoreAdj(preferredOOMScoreAdj int) (int, error) {
 	}
 	return preferredOOMScoreAdj, nil
 }
+
+const (
+	// unknownExitCode is the exit code when exit reason is unknown.
+	unknownExitCode = 255
+	// unknownExitReason is the exit reason when exit reason is unknown.
+	unknownExitReason = "Unknown"
+)
+
+// unknownContainerStatus returns the default container status when its status is unknown.
+func unknownContainerStatus() containerstore.Status {
+	return containerstore.Status{
+		CreatedAt:  0,
+		StartedAt:  0,
+		FinishedAt: 0,
+		ExitCode:   unknownExitCode,
+		Reason:     unknownExitReason,
+	}
+}
+
+// unknownSandboxStatus returns the default sandbox status when its status is unknown.
+func unknownSandboxStatus() sandboxstore.Status {
+	return sandboxstore.Status{
+		State: sandboxstore.StateUnknown,
+	}
+}
+
+// unknownExitStatus generates containerd.Status for container exited with unknown exit code.
+func unknownExitStatus() containerd.Status {
+	return containerd.Status{
+		Status:     containerd.Stopped,
+		ExitStatus: unknownExitCode,
+		ExitTime:   time.Now(),
+	}
+}
+
+// getTaskStatus returns status for a given task. It returns unknown exit status if
+// the task is nil or not found.
+func getTaskStatus(ctx context.Context, task containerd.Task) (containerd.Status, error) {
+	if task == nil {
+		return unknownExitStatus(), nil
+	}
+	status, err := task.Status(ctx)
+	if err != nil {
+		if !errdefs.IsNotFound(err) {
+			return containerd.Status{}, err
+		}
+		return unknownExitStatus(), nil
+	}
+	return status, nil
+}
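getTaskStatus collapses three situations — no task handle at all, task deleted meanwhile, task present — into one status value, so callers such as stopContainer handle a single shape. A sketch of the same normalization with simplified types of my own:

package main

import (
    "errors"
    "fmt"
    "time"
)

type Status struct {
    Stopped    bool
    ExitStatus uint32
    ExitTime   time.Time
}

var errNotFound = errors.New("not found")

// taskStatus normalizes "no task" and "task not found" into a synthetic
// stopped status with the conventional unknown exit code 255.
func taskStatus(lookup func() (Status, error)) (Status, error) {
    if lookup == nil { // no task handle at all
        return Status{Stopped: true, ExitStatus: 255, ExitTime: time.Now()}, nil
    }
    s, err := lookup()
    if errors.Is(err, errNotFound) { // task disappeared meanwhile
        return Status{Stopped: true, ExitStatus: 255, ExitTime: time.Now()}, nil
    }
    return s, err
}

func main() {
    s, _ := taskStatus(nil)
    fmt.Println(s.Stopped, s.ExitStatus) // true 255
}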
@@ -179,140 +179,130 @@ func (c *criService) loadContainer(ctx context.Context, cntr containerd.Containe
 		status = unknownContainerStatus()
 	}
 
-	// Load up-to-date status from containerd.
 	var containerIO *cio.ContainerIO
-	t, err := cntr.Task(ctx, func(fifos *containerdio.FIFOSet) (_ containerdio.IO, err error) {
-		stdoutWC, stderrWC, err := c.createContainerLoggers(meta.LogPath, meta.Config.GetTty())
-		if err != nil {
-			return nil, err
-		}
-		defer func() {
-			if err != nil {
-				if stdoutWC != nil {
-					stdoutWC.Close()
-				}
-				if stderrWC != nil {
-					stderrWC.Close()
-				}
-			}
-		}()
-		containerIO, err = cio.NewContainerIO(id,
-			cio.WithFIFOs(fifos),
-		)
-		if err != nil {
-			return nil, err
-		}
-		containerIO.AddOutput("log", stdoutWC, stderrWC)
-		containerIO.Pipe()
-		return containerIO, nil
-	})
-	if err != nil && !errdefs.IsNotFound(err) {
-		return container, errors.Wrap(err, "failed to load task")
-	}
-	var s containerd.Status
-	var notFound bool
-	if errdefs.IsNotFound(err) {
-		// Task is not found.
-		notFound = true
-	} else {
-		// Task is found. Get task status.
-		s, err = t.Status(ctx)
-		if err != nil {
-			// It's still possible that task is deleted during this window.
-			if !errdefs.IsNotFound(err) {
-				return container, errors.Wrap(err, "failed to get task status")
-			}
-			notFound = true
-		}
-	}
-	if notFound {
-		// Task is not created or has been deleted, use the checkpointed status
-		// to generate container status.
-		switch status.State() {
-		case runtime.ContainerState_CONTAINER_CREATED:
-			// NOTE: Another possibility is that we've tried to start the container, but
-			// containerd got restarted during that. In that case, we still
-			// treat the container as `CREATED`.
-			containerIO, err = cio.NewContainerIO(id,
-				cio.WithNewFIFOs(volatileContainerDir, meta.Config.GetTty(), meta.Config.GetStdin()),
-			)
-			if err != nil {
-				return container, errors.Wrap(err, "failed to create container io")
-			}
-		case runtime.ContainerState_CONTAINER_RUNNING:
-			// Container was in running state, but its task has been deleted,
-			// set unknown exited state. Container io is not needed in this case.
-			status.FinishedAt = time.Now().UnixNano()
-			status.ExitCode = unknownExitCode
-			status.Reason = unknownExitReason
-		default:
-			// Container is in exited/unknown state, return the status as it is.
-		}
-	} else {
-		// Task status is found. Update container status based on the up-to-date task status.
-		switch s.Status {
-		case containerd.Created:
-			// Task has been created, but not started yet. This could only happen if containerd
-			// gets restarted during container start.
-			// Container must be in `CREATED` state.
-			if _, err := t.Delete(ctx, containerd.WithProcessKill); err != nil && !errdefs.IsNotFound(err) {
-				return container, errors.Wrap(err, "failed to delete task")
-			}
-			if status.State() != runtime.ContainerState_CONTAINER_CREATED {
-				return container, errors.Errorf("unexpected container state for created task: %q", status.State())
-			}
-		case containerd.Running:
-			// Task is running. Container must be in `RUNNING` state, based on our assuption that
-			// "task should not be started when containerd is down".
-			switch status.State() {
-			case runtime.ContainerState_CONTAINER_EXITED:
-				return container, errors.Errorf("unexpected container state for running task: %q", status.State())
-			case runtime.ContainerState_CONTAINER_RUNNING:
-			default:
-				// This may happen if containerd gets restarted after task is started, but
-				// before status is checkpointed.
-				status.StartedAt = time.Now().UnixNano()
-				status.Pid = t.Pid()
-			}
-		case containerd.Stopped:
-			// Task is stopped. Updata status and delete the task.
-			if _, err := t.Delete(ctx, containerd.WithProcessKill); err != nil && !errdefs.IsNotFound(err) {
-				return container, errors.Wrap(err, "failed to delete task")
-			}
-			status.FinishedAt = s.ExitTime.UnixNano()
-			status.ExitCode = int32(s.ExitStatus)
-		default:
-			return container, errors.Errorf("unexpected task status %q", s.Status)
-		}
-	}
+	err = func() error {
+		// Load up-to-date status from containerd.
+		t, err := cntr.Task(ctx, func(fifos *containerdio.FIFOSet) (_ containerdio.IO, err error) {
+			stdoutWC, stderrWC, err := c.createContainerLoggers(meta.LogPath, meta.Config.GetTty())
+			if err != nil {
+				return nil, err
+			}
+			defer func() {
+				if err != nil {
+					if stdoutWC != nil {
+						stdoutWC.Close()
+					}
+					if stderrWC != nil {
+						stderrWC.Close()
+					}
+				}
+			}()
+			containerIO, err = cio.NewContainerIO(id,
+				cio.WithFIFOs(fifos),
+			)
+			if err != nil {
+				return nil, err
+			}
+			containerIO.AddOutput("log", stdoutWC, stderrWC)
+			containerIO.Pipe()
+			return containerIO, nil
+		})
+		if err != nil && !errdefs.IsNotFound(err) {
+			return errors.Wrap(err, "failed to load task")
+		}
+		var s containerd.Status
+		var notFound bool
+		if errdefs.IsNotFound(err) {
+			// Task is not found.
+			notFound = true
+		} else {
+			// Task is found. Get task status.
+			s, err = t.Status(ctx)
+			if err != nil {
+				// It's still possible that task is deleted during this window.
+				if !errdefs.IsNotFound(err) {
+					return errors.Wrap(err, "failed to get task status")
+				}
+				notFound = true
+			}
+		}
+		if notFound {
+			// Task is not created or has been deleted, use the checkpointed status
+			// to generate container status.
+			switch status.State() {
+			case runtime.ContainerState_CONTAINER_CREATED:
+				// NOTE: Another possibility is that we've tried to start the container, but
+				// containerd got restarted during that. In that case, we still
+				// treat the container as `CREATED`.
+				containerIO, err = cio.NewContainerIO(id,
+					cio.WithNewFIFOs(volatileContainerDir, meta.Config.GetTty(), meta.Config.GetStdin()),
+				)
+				if err != nil {
+					return errors.Wrap(err, "failed to create container io")
+				}
+			case runtime.ContainerState_CONTAINER_RUNNING:
+				// Container was in running state, but its task has been deleted,
+				// set unknown exited state. Container io is not needed in this case.
+				status.FinishedAt = time.Now().UnixNano()
+				status.ExitCode = unknownExitCode
+				status.Reason = unknownExitReason
+			default:
+				// Container is in exited/unknown state, return the status as it is.
+			}
+		} else {
+			// Task status is found. Update container status based on the up-to-date task status.
+			switch s.Status {
+			case containerd.Created:
+				// Task has been created, but not started yet. This could only happen if containerd
+				// gets restarted during container start.
+				// Container must be in `CREATED` state.
+				if _, err := t.Delete(ctx, containerd.WithProcessKill); err != nil && !errdefs.IsNotFound(err) {
+					return errors.Wrap(err, "failed to delete task")
+				}
+				if status.State() != runtime.ContainerState_CONTAINER_CREATED {
+					return errors.Errorf("unexpected container state for created task: %q", status.State())
+				}
+			case containerd.Running:
+				// Task is running. Container must be in `RUNNING` state, based on our assuption that
+				// "task should not be started when containerd is down".
+				switch status.State() {
+				case runtime.ContainerState_CONTAINER_EXITED:
+					return errors.Errorf("unexpected container state for running task: %q", status.State())
+				case runtime.ContainerState_CONTAINER_RUNNING:
+				default:
+					// This may happen if containerd gets restarted after task is started, but
+					// before status is checkpointed.
+					status.StartedAt = time.Now().UnixNano()
+					status.Pid = t.Pid()
+				}
+			case containerd.Stopped:
+				// Task is stopped. Updata status and delete the task.
+				if _, err := t.Delete(ctx, containerd.WithProcessKill); err != nil && !errdefs.IsNotFound(err) {
+					return errors.Wrap(err, "failed to delete task")
+				}
+				status.FinishedAt = s.ExitTime.UnixNano()
+				status.ExitCode = int32(s.ExitStatus)
+			default:
+				return errors.Errorf("unexpected task status %q", s.Status)
+			}
+		}
+		return nil
+	}()
+	if err != nil {
+		logrus.WithError(err).Errorf("Failed to load container status for %q", id)
+		status = unknownContainerStatus()
+	}
+	opts := []containerstore.Opts{
+		containerstore.WithStatus(status, containerDir),
+		containerstore.WithContainer(cntr),
+	}
+	// containerIO could be nil for container in unknown state.
+	if containerIO != nil {
+		opts = append(opts, containerstore.WithContainerIO(containerIO))
+	}
+	return containerstore.NewContainer(*meta, opts...)
 }
 
-const (
-	// unknownExitCode is the exit code when exit reason is unknown.
-	unknownExitCode = 255
-	// unknownExitReason is the exit reason when exit reason is unknown.
-	unknownExitReason = "Unknown"
-)
-
-// unknownContainerStatus returns the default container status when its status is unknown.
-func unknownContainerStatus() containerstore.Status {
-	return containerstore.Status{
-		CreatedAt:  0,
-		StartedAt:  0,
-		FinishedAt: 0,
-		ExitCode:   unknownExitCode,
-		Reason:     unknownExitReason,
-	}
-}
-
 // loadSandbox loads sandbox from containerd.
 func loadSandbox(ctx context.Context, cntr containerd.Container) (sandboxstore.Sandbox, error) {
 	ctx, cancel := context.WithTimeout(ctx, loadContainerTimeout)
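The structural change in loadContainer above is the heart of the PR: the fallible recovery logic is wrapped in an immediately invoked closure, and any error downgrades the container to unknown state instead of aborting containerd's startup. A distilled, runnable sketch of that wrap-and-fall-back pattern (types and states simplified, mine):

package main

import (
    "errors"
    "fmt"
)

type Status struct{ State string }

func load(id string) (Status, error) {
    status := Status{State: "UNKNOWN"}
    // Wrap fallible recovery in a closure: one error path, one fallback.
    err := func() error {
        if id == "broken" {
            return errors.New("runtime state not loadable")
        }
        status.State = "RUNNING"
        return nil
    }()
    if err != nil {
        // Log and fall back to UNKNOWN rather than failing startup.
        fmt.Printf("failed to load %q: %v\n", id, err)
        status = Status{State: "UNKNOWN"}
    }
    return status, nil
}

func main() {
    s, _ := load("broken")
    fmt.Println(s.State) // UNKNOWN, but the daemon still comes up
}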
@@ -333,61 +323,59 @@ func loadSandbox(ctx context.Context, cntr containerd.Container) (sandboxstore.S
 	}
 	meta := data.(*sandboxstore.Metadata)
 
-	// Load sandbox created timestamp.
-	info, err := cntr.Info(ctx)
-	if err != nil {
-		return sandbox, errors.Wrap(err, "failed to get sandbox container info")
-	}
-	createdAt := info.CreatedAt
-
-	// Load sandbox status.
-	t, err := cntr.Task(ctx, nil)
-	if err != nil && !errdefs.IsNotFound(err) {
-		return sandbox, errors.Wrap(err, "failed to load task")
-	}
-	var s containerd.Status
-	var notFound bool
-	if errdefs.IsNotFound(err) {
-		// Task is not found.
-		notFound = true
-	} else {
-		// Task is found. Get task status.
-		s, err = t.Status(ctx)
-		if err != nil {
-			// It's still possible that task is deleted during this window.
-			if !errdefs.IsNotFound(err) {
-				return sandbox, errors.Wrap(err, "failed to get task status")
-			}
-			notFound = true
-		}
-	}
-	var state sandboxstore.State
-	var pid uint32
-	if notFound {
-		// Task does not exist, set sandbox state as NOTREADY.
-		state = sandboxstore.StateNotReady
-	} else {
-		if s.Status == containerd.Running {
-			// Task is running, set sandbox state as READY.
-			state = sandboxstore.StateReady
-			pid = t.Pid()
-		} else {
-			// Task is not running. Delete the task and set sandbox state as NOTREADY.
-			if _, err := t.Delete(ctx, containerd.WithProcessKill); err != nil && !errdefs.IsNotFound(err) {
-				return sandbox, errors.Wrap(err, "failed to delete task")
-			}
-			state = sandboxstore.StateNotReady
-		}
-	}
-
-	sandbox = sandboxstore.NewSandbox(
-		*meta,
-		sandboxstore.Status{
-			Pid:       pid,
-			CreatedAt: createdAt,
-			State:     state,
-		},
-	)
+	s, err := func() (sandboxstore.Status, error) {
+		status := unknownSandboxStatus()
+		// Load sandbox created timestamp.
+		info, err := cntr.Info(ctx)
+		if err != nil {
+			return status, errors.Wrap(err, "failed to get sandbox container info")
+		}
+		status.CreatedAt = info.CreatedAt
+
+		// Load sandbox state.
+		t, err := cntr.Task(ctx, nil)
+		if err != nil && !errdefs.IsNotFound(err) {
+			return status, errors.Wrap(err, "failed to load task")
+		}
+		var taskStatus containerd.Status
+		var notFound bool
+		if errdefs.IsNotFound(err) {
+			// Task is not found.
+			notFound = true
+		} else {
+			// Task is found. Get task status.
+			taskStatus, err = t.Status(ctx)
+			if err != nil {
+				// It's still possible that task is deleted during this window.
+				if !errdefs.IsNotFound(err) {
+					return status, errors.Wrap(err, "failed to get task status")
+				}
+				notFound = true
+			}
+		}
+		if notFound {
+			// Task does not exist, set sandbox state as NOTREADY.
+			status.State = sandboxstore.StateNotReady
+		} else {
+			if taskStatus.Status == containerd.Running {
+				// Task is running, set sandbox state as READY.
+				status.State = sandboxstore.StateReady
+				status.Pid = t.Pid()
+			} else {
+				// Task is not running. Delete the task and set sandbox state as NOTREADY.
+				if _, err := t.Delete(ctx, containerd.WithProcessKill); err != nil && !errdefs.IsNotFound(err) {
+					return status, errors.Wrap(err, "failed to delete task")
+				}
+				status.State = sandboxstore.StateNotReady
+			}
+		}
+		return status, nil
+	}()
+	if err != nil {
+		logrus.WithError(err).Errorf("Failed to load sandbox status for %q", cntr.ID())
+	}
+
+	sandbox = sandboxstore.NewSandbox(*meta, s)
 	sandbox.Container = cntr
 
 	// Load network namespace.
@@ -63,6 +63,10 @@ func TestToCRISandbox(t *testing.T) {
 			state:         sandboxstore.StateNotReady,
 			expectedState: runtime.PodSandboxState_SANDBOX_NOTREADY,
 		},
+		"sandbox state unknown": {
+			state:         sandboxstore.StateUnknown,
+			expectedState: runtime.PodSandboxState_SANDBOX_NOTREADY,
+		},
 	} {
 		status := sandboxstore.Status{
 			CreatedAt: createdAt,
@@ -46,8 +46,9 @@ func (c *criService) RemovePodSandbox(ctx context.Context, r *runtime.RemovePodS
 	// Use the full sandbox id.
 	id := sandbox.ID
 
-	// Return error if sandbox container is still running.
-	if sandbox.Status.Get().State == sandboxstore.StateReady {
+	// Return error if sandbox container is still running or unknown.
+	state := sandbox.Status.Get().State
+	if state == sandboxstore.StateReady || state == sandboxstore.StateUnknown {
 		return nil, errors.Errorf("sandbox container %q is not fully stopped", id)
 	}
@@ -85,7 +85,7 @@ func (c *criService) RunPodSandbox(ctx context.Context, r *runtime.RunPodSandbox
 			RuntimeHandler: r.GetRuntimeHandler(),
 		},
 		sandboxstore.Status{
-			State: sandboxstore.StateUnknown,
+			State: sandboxstore.StateInit,
 		},
 	)
@@ -258,7 +258,7 @@ func (c *criService) RunPodSandbox(ctx context.Context, r *runtime.RunPodSandbox
 		return nil, errors.Wrap(err, "failed to update sandbox created timestamp")
 	}
 
-	// Add sandbox into sandbox store in UNKNOWN state.
+	// Add sandbox into sandbox store in INIT state.
 	sandbox.Container = container
 	if err := c.sandboxStore.Add(sandbox); err != nil {
 		return nil, errors.Wrapf(err, "failed to add sandbox %+v into store", sandbox)
@@ -269,7 +269,7 @@ func (c *criService) RunPodSandbox(ctx context.Context, r *runtime.RunPodSandbox
 			c.sandboxStore.Delete(id)
 		}
 	}()
-	// NOTE(random-liu): Sandbox state only stay in UNKNOWN state after this point
+	// NOTE(random-liu): Sandbox state only stay in INIT state after this point
 	// and before the end of this function.
 	// * If `Update` succeeds, sandbox state will become READY in one transaction.
 	// * If `Update` fails, sandbox will be removed from the store in the defer above.
@@ -279,8 +279,8 @@ func (c *criService) RunPodSandbox(ctx context.Context, r *runtime.RunPodSandbox
 	// * If the task is running, sandbox state will be READY,
 	// * Or else, sandbox state will be NOTREADY.
 	//
-	// In any case, sandbox will leave UNKNOWN state, so it's safe to ignore sandbox
-	// in UNKNOWN state in other functions.
+	// In any case, sandbox will leave INIT state, so it's safe to ignore sandbox
+	// in INIT state in other functions.
 
 	// Start sandbox container in one transaction to avoid race condition with
 	// event monitor.
@@ -293,8 +293,8 @@ func (c *criService) RunPodSandbox(ctx context.Context, r *runtime.RunPodSandbox
 	// see the sandbox disappear after the defer clean up, which may confuse
 	// them.
 	//
-	// Given so, we should keep the sandbox in UNKNOWN state if `Update` fails,
-	// and ignore sandbox in UNKNOWN state in all the inspection functions.
+	// Given so, we should keep the sandbox in INIT state if `Update` fails,
+	// and ignore sandbox in INIT state in all the inspection functions.
 
 	// Create sandbox task in containerd.
 	log.Tracef("Create sandbox container (id=%q, name=%q).",
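The comments above describe a publish discipline: the sandbox is added to the store in INIT (invisible) state, a single Update transaction flips it to a user-visible state, and a deferred delete removes it on failure, so users never observe a half-created sandbox. A condensed sketch of that pattern with simplified types of my own:

package main

import (
    "errors"
    "fmt"
)

type store struct{ visible map[string]bool } // false = INIT, hidden

func (s *store) add(id string)     { s.visible[id] = false }
func (s *store) delete(id string)  { delete(s.visible, id) }
func (s *store) publish(id string) { s.visible[id] = true }

func run(s *store, id string, start func() error) (retErr error) {
    s.add(id) // in the store, but still hidden (INIT)
    defer func() {
        if retErr != nil {
            s.delete(id) // never became visible, safe to drop
        }
    }()
    if err := start(); err != nil {
        return err
    }
    s.publish(id) // single transition out of INIT
    return nil
}

func main() {
    s := &store{visible: map[string]bool{}}
    _ = run(s, "sb1", func() error { return errors.New("start failed") })
    fmt.Println(len(s.visible)) // 0: the failed sandbox was never visible
}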
@@ -42,6 +42,14 @@ func (c *criService) PodSandboxStatus(ctx context.Context, r *runtime.PodSandbox
 		return nil, errors.Wrap(err, "failed to get sandbox ip")
 	}
 	status := toCRISandboxStatus(sandbox.Metadata, sandbox.Status.Get(), ip)
+	if status.GetCreatedAt() == 0 {
+		// CRI doesn't allow CreatedAt == 0.
+		info, err := sandbox.Container.Info(ctx)
+		if err != nil {
+			return nil, errors.Wrapf(err, "failed to get CreatedAt for sandbox container in %q state", status.State)
+		}
+		status.CreatedAt = info.CreatedAt.UnixNano()
+	}
 	if !r.GetVerbose() {
 		return &runtime.PodSandboxStatusResponse{Status: status}, nil
 	}
@@ -86,6 +86,10 @@ func TestPodSandboxStatus(t *testing.T) {
 			state:         sandboxstore.StateNotReady,
 			expectedState: runtime.PodSandboxState_SANDBOX_NOTREADY,
 		},
+		"sandbox state unknown": {
+			state:         sandboxstore.StateUnknown,
+			expectedState: runtime.PodSandboxState_SANDBOX_NOTREADY,
+		},
 	} {
 		t.Logf("TestCase: %s", desc)
 		status := sandboxstore.Status{
@@ -19,6 +19,8 @@ package server
 import (
 	"time"
 
+	"github.com/containerd/containerd"
+	eventtypes "github.com/containerd/containerd/api/events"
 	"github.com/containerd/containerd/errdefs"
 	cni "github.com/containerd/go-cni"
 	"github.com/pkg/errors"
@@ -60,10 +62,11 @@ func (c *criService) StopPodSandbox(ctx context.Context, r *runtime.StopPodSandb
 		return nil, errors.Wrap(err, "failed to unmount sandbox files")
 	}
 
-	// Only stop sandbox container when it's running.
-	if sandbox.Status.Get().State == sandboxstore.StateReady {
+	// Only stop sandbox container when it's running or unknown.
+	state := sandbox.Status.Get().State
+	if state == sandboxstore.StateReady || state == sandboxstore.StateUnknown {
 		if err := c.stopSandboxContainer(ctx, sandbox); err != nil {
-			return nil, errors.Wrapf(err, "failed to stop sandbox container %q", id)
+			return nil, errors.Wrapf(err, "failed to stop sandbox container %q in %q state", id, state)
 		}
 	}
@@ -95,12 +98,36 @@ func (c *criService) StopPodSandbox(ctx context.Context, r *runtime.StopPodSandb
 // the event monitor handles the `TaskExit` event.
 func (c *criService) stopSandboxContainer(ctx context.Context, sandbox sandboxstore.Sandbox) error {
 	container := sandbox.Container
+	state := sandbox.Status.Get().State
 	task, err := container.Task(ctx, nil)
 	if err != nil {
-		if errdefs.IsNotFound(err) {
-			return nil
+		if !errdefs.IsNotFound(err) {
+			return errors.Wrap(err, "failed to get sandbox container")
 		}
-		return errors.Wrap(err, "failed to get sandbox container")
+		// Don't return for unknown state, some cleanup needs to be done.
+		if state != sandboxstore.StateUnknown {
+			return nil
+		}
+		// Task is an interface, explicitly set it to nil just in case.
+		task = nil
 	}
+
+	// Handle unknown state.
+	// The cleanup logic is the same with container unknown state.
+	if state == sandboxstore.StateUnknown {
+		status, err := getTaskStatus(ctx, task)
+		if err != nil {
+			return errors.Wrapf(err, "failed to get task status for %q", sandbox.ID)
+		}
+		switch status.Status {
+		case containerd.Running, containerd.Created:
+			// The task is still running, continue stopping the task.
+		case containerd.Stopped:
+			// The task has exited, explicitly cleanup.
+			return cleanupUnknownSandbox(ctx, sandbox.ID, status, sandbox)
+		default:
+			return errors.Wrapf(err, "unsupported task status %q", status.Status)
+		}
+	}
 
 	// Kill the sandbox container.
@@ -137,3 +164,16 @@ func (c *criService) teardownPod(id string, path string, config *runtime.PodSand
 		cni.WithLabels(labels),
 		cni.WithCapabilityPortMap(toCNIPortMappings(config.GetPortMappings())))
 }
+
+// cleanupUnknownSandbox cleanup stopped sandbox in unknown state.
+func cleanupUnknownSandbox(ctx context.Context, id string, status containerd.Status,
+	sandbox sandboxstore.Sandbox) error {
+	// Reuse handleSandboxExit to do the cleanup.
+	return handleSandboxExit(ctx, &eventtypes.TaskExit{
+		ContainerID: id,
+		ID:          id,
+		Pid:         0,
+		ExitStatus:  status.ExitStatus,
+		ExitedAt:    status.ExitTime,
+	}, sandbox)
+}
@@ -36,7 +36,8 @@ type Container struct {
 	Status StatusStorage
 	// Container is the containerd container client.
 	Container containerd.Container
-	// Container IO
+	// Container IO.
+	// IO could only be nil when the container is in unknown state.
 	IO *cio.ContainerIO
 	// StopCh is used to propagate the stop information of the container.
 	*store.StopCh
@@ -28,6 +28,38 @@ import (
 	runtime "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
 )
 
+// The container state machine in the CRI plugin:
+//
+//                            +              +
+//                            |              |
+//                            | Create       | Load
+//                            |              |
+//                       +----v----+         |
+//                       |         |         |
+//                       | CREATED <---------+-----------+
+//                       |         |         |           |
+//                       +----+----+         |           |
+//                            |              |           |
+//                            | Start        |           |
+//                            |              |           |
+//                       +----v----+         |           |
+//       Exec +--------+ |         |         |           |
+//     Attach |        | | RUNNING <---------+           |
+//  LogReopen +--------> |         |         |           |
+//                       +----+----+         |           |
+//                            |              |           |
+//                            | Stop/Exit    |           |
+//                            |              |           |
+//                       +----v----+         |           |
+//                       |         <---------+      +----v----+
+//                       |  EXITED |                |         |
+//                       |         <----------------+ UNKNOWN |
+//                       +----+----+       Stop     |         |
+//                            |                     +---------+
+//                            | Remove
+//                            v
+//                         DELETED
+
 // statusVersion is current version of container status.
 const statusVersion = "v1" // nolint
@@ -93,7 +93,7 @@ func (s *Store) Get(id string) (Sandbox, error) {
 	if err != nil {
 		return sb, err
 	}
-	if sb.Status.Get().State == StateUnknown {
+	if sb.Status.Get().State == StateInit {
 		return Sandbox{}, store.ErrNotExist
 	}
 	return sb, nil
@@ -123,7 +123,7 @@ func (s *Store) List() []Sandbox {
 	defer s.lock.RUnlock()
 	var sandboxes []Sandbox
 	for _, sb := range s.sandboxes {
-		if sb.Status.Get().State == StateUnknown {
+		if sb.Status.Get().State == StateInit {
 			continue
 		}
 		sandboxes = append(sandboxes, sb)
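Get hides init-state sandboxes behind ErrNotExist while GetAll (used by the event monitor, as noted in an earlier hunk) still returns them. A simplified, runnable sketch of the two lookup paths (types are mine):

package main

import (
    "errors"
    "fmt"
)

type State int

const (
    StateInit State = iota
    StateReady
)

type Sandbox struct {
    ID    string
    State State
}

var errNotExist = errors.New("does not exist")

type Store struct{ m map[string]Sandbox }

// GetAll returns the sandbox regardless of its state.
func (s *Store) GetAll(id string) (Sandbox, error) {
    sb, ok := s.m[id]
    if !ok {
        return Sandbox{}, errNotExist
    }
    return sb, nil
}

// Get hides sandboxes that are still in init state.
func (s *Store) Get(id string) (Sandbox, error) {
    sb, err := s.GetAll(id)
    if err != nil {
        return Sandbox{}, err
    }
    if sb.State == StateInit { // not yet visible to users
        return Sandbox{}, errNotExist
    }
    return sb, nil
}

func main() {
    st := &Store{m: map[string]Sandbox{"a": {ID: "a", State: StateInit}}}
    _, err := st.Get("a")
    fmt.Println(errors.Is(err, errNotExist)) // true: hidden while init
    sb, _ := st.GetAll("a")
    fmt.Println(sb.ID) // "a": visible to internal paths
}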
@@ -106,7 +106,7 @@ func TestSandboxStore(t *testing.T) {
 			},
 			NetNSPath: "TestNetNS-3defg",
 		},
-		Status{State: StateUnknown},
+		Status{State: StateInit},
 	)
 	assert := assertlib.New(t)
 	s := NewStore()
@@ -21,16 +21,52 @@ import (
 	"time"
 )
 
+// The sandbox state machine in the CRI plugin:
+//
+//                        +              +
+//                        |              |
+//                        | Create(Run)  | Load
+//                        |              |
+//       Start       +----v----+         |
+//      (failed)     |         |         |
+//  +----------------+  INIT   |    +-----------+
+//  |                |         |    |           |
+//  |                +----+----+    |           |
+//  |                     |         |           |
+//  |                     | Start(Run)          |
+//  |                     |         |           |
+//  |  PortForward   +----v----+    |           |
+//  |     +------+   |         |    |           |
+//  |     |      |   |  READY  <----+           |
+//  |     +------>   |         |                |
+//  |                +----+----+                |
+//  |                     |                     |
+//  |                     | Stop/Exit           |
+//  |                     |                     |
+//  |                +----v----+                |
+//  |                |         |           +----v----+
+//  |                | NOTREADY|           |         |
+//  |                |         <-----------+ UNKNOWN |
+//  |                +----+----+    Stop   |         |
+//  |                     |                +---------+
+//  |                     | Remove
+//  |                     v
+//  +--------------> DELETED
+
 // State is the sandbox state we use in containerd/cri.
-// It has unknown state defined.
+// It includes init and unknown, which are internal states not defined in CRI.
+// The state mapping from internal states to CRI states:
+// * ready -> ready
+// * not ready -> not ready
+// * init -> not exist
+// * unknown -> not ready
 type State uint32
 
 const (
-	// StateUnknown is unknown state of sandbox. Sandbox
-	// is in unknown state before its corresponding sandbox container
-	// is created. Sandbox in unknown state should be ignored by most
+	// StateInit is init state of sandbox. Sandbox
+	// is in init state before its corresponding sandbox container
+	// is created. Sandbox in init state should be ignored by most
 	// functions, unless the caller needs to update sandbox state.
-	StateUnknown State = iota
+	StateInit State = iota
 	// StateReady is ready state, it means sandbox container
 	// is running.
 	StateReady
@@ -40,6 +76,9 @@ const (
 	// cleanup resources other than sandbox container, e.g. network namespace.
 	// This is an assumption made in CRI.
 	StateNotReady
+	// StateUnknown is unknown state. Sandbox only goes
+	// into unknown state when its status fails to be loaded.
+	StateUnknown
 )
 
 // Status is the status of a sandbox.
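The four-line mapping above is the crux of the renaming: CRI only defines ready/not-ready for sandboxes, so the two internal states must be projected onto them, with init-state sandboxes hidden entirely. A hedged sketch of that projection (function name and string values are mine, not the plugin's API):

package main

import "fmt"

type State uint32

const (
    StateInit State = iota
    StateReady
    StateNotReady
    StateUnknown
)

// toCRIState projects internal sandbox states onto the two CRI ones;
// the bool reports whether the sandbox should be visible at all.
func toCRIState(s State) (criState string, visible bool) {
    switch s {
    case StateInit:
        return "", false // init sandboxes don't exist as far as CRI is concerned
    case StateReady:
        return "SANDBOX_READY", true
    default: // StateNotReady, StateUnknown
        return "SANDBOX_NOTREADY", true
    }
}

func main() {
    for _, s := range []State{StateInit, StateReady, StateNotReady, StateUnknown} {
        cri, ok := toCRIState(s)
        fmt.Println(s, cri, ok)
    }
}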
@@ -28,7 +28,7 @@ func TestStatus(t *testing.T) {
 	testStatus := Status{
 		Pid:       123,
 		CreatedAt: time.Now(),
-		State:     StateUnknown,
+		State:     StateInit,
 	}
 	updateStatus := Status{
 		Pid: 456,
vendor.conf
@@ -1,14 +1,14 @@
 github.com/beorn7/perks 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9
 github.com/blang/semver v3.1.0
 github.com/BurntSushi/toml a368813c5e648fee92e5f6c30e3944ff9d5e8895
-github.com/containerd/cgroups 5e610833b72089b37d0e615de9a92dfc043757c2
+github.com/containerd/cgroups 1152b960fcee041f50df15cdc67c29dbccf801ef
 github.com/containerd/console c12b1e7919c14469339a5d38f2f8ed9b64a9de23
-github.com/containerd/containerd 6937c5a3ba8280edff9e9030767e3b0cb742581c
+github.com/containerd/containerd 5ba368748b0275d8f45f909413d94738992f0050
 github.com/containerd/continuity bd77b46c8352f74eb12c85bdc01f4b90f69d66b4
 github.com/containerd/fifo 3d5202aec260678c48179c56f40e6f38a095738c
 github.com/containerd/go-cni 40bcf8ec8acd7372be1d77031d585d5d8e561c90
 github.com/containerd/go-runc 5a6d9f37cfa36b15efba46dc7ea349fa9b7143c3
-github.com/containerd/ttrpc 2a805f71863501300ae1976d29f0454ae003e85a
+github.com/containerd/ttrpc f02858b1457c5ca3aaec3a0803eb0d59f96e41d6
 github.com/containerd/typeurl a93fcdb778cd272c6e9b3028b2f42d813e785d40
 github.com/containernetworking/cni v0.6.0
 github.com/containernetworking/plugins v0.7.0
@@ -32,12 +32,12 @@ github.com/hashicorp/go-multierror ed905158d87462226a13fe39ddf685ea65f1c11f
 github.com/json-iterator/go 1.1.5
 github.com/matttproud/golang_protobuf_extensions v1.0.0
 github.com/Microsoft/go-winio v0.4.11
-github.com/Microsoft/hcsshim v0.8.2
+github.com/Microsoft/hcsshim v0.8.5
 github.com/modern-go/concurrent 1.0.3
 github.com/modern-go/reflect2 1.0.1
 github.com/opencontainers/go-digest c9281466c8b2f606084ac71339773efd177436e7
 github.com/opencontainers/image-spec v1.0.1
-github.com/opencontainers/runc v1.0.0-rc6
+github.com/opencontainers/runc 12f6a991201fdb8f82579582d5e00e28fba06d0a
 github.com/opencontainers/runtime-spec eba862dc2470385a233c7507392675cbeadf7353
 github.com/opencontainers/runtime-tools fb101d5d42ab9c040f7d0a004e78336e5d5cb197
 github.com/opencontainers/selinux b6fa367ed7f534f9ba25391cc2d467085dbb445a
@@ -60,7 +60,7 @@ go.etcd.io/bbolt v1.3.1-etcd.8
 golang.org/x/crypto 49796115aa4b964c318aad4f3084fdb41e9aa067
 golang.org/x/net b3756b4b77d7b13260a0a2ec658753cf48922eac
 golang.org/x/oauth2 a6bd8cefa1811bd24b86f8902872e4e8225f74c4
-golang.org/x/sync 450f422ab23cf9881c94e2db30cac0eb1b7cf80c
+golang.org/x/sync 42b317875d0fa942474b76e1b46a6060d720ae6e
 golang.org/x/sys 1b2967e3c290b7c545b3db0deeda16e9be4f98a2 https://github.com/golang/sys
 golang.org/x/text 19e51611da83d6be54ddafce4a4af510cb3e9ea4
 golang.org/x/time f51c12702a4d776e4c1fa9b0fabab841babae631
vendor/github.com/Microsoft/hcsshim/hnspolicylist.go (generated, vendored)
@@ -37,8 +37,8 @@ func GetPolicyListByID(policyListID string) (*PolicyList, error) {
 }
 
 // AddLoadBalancer policy list for the specified endpoints
-func AddLoadBalancer(endpoints []HNSEndpoint, isILB bool, isDSR bool, sourceVIP, vip string, protocol uint16, internalPort uint16, externalPort uint16) (*PolicyList, error) {
-	return hns.AddLoadBalancer(endpoints, isILB, isDSR, sourceVIP, vip, protocol, internalPort, externalPort)
+func AddLoadBalancer(endpoints []HNSEndpoint, isILB bool, sourceVIP, vip string, protocol uint16, internalPort uint16, externalPort uint16) (*PolicyList, error) {
+	return hns.AddLoadBalancer(endpoints, isILB, sourceVIP, vip, protocol, internalPort, externalPort)
 }
 
 // AddRoute adds route policy list for the specified endpoints
vendor/github.com/Microsoft/hcsshim/internal/guestrequest/types.go (generated, vendored)
@@ -1,6 +1,8 @@
 package guestrequest
 
-import "github.com/Microsoft/hcsshim/internal/schema2"
+import (
+	"github.com/Microsoft/hcsshim/internal/schema2"
+)
 
 // Arguably, many of these (at least CombinedLayers) should have been generated
 // by swagger.
@@ -47,6 +49,19 @@ type LCOWMappedVPMemDevice struct {
 	MountPath string `json:"MountPath,omitempty"` // /tmp/pN
 }
 
+type LCOWNetworkAdapter struct {
+	NamespaceID     string `json:",omitempty"`
+	ID              string `json:",omitempty"`
+	MacAddress      string `json:",omitempty"`
+	IPAddress       string `json:",omitempty"`
+	PrefixLength    uint8  `json:",omitempty"`
+	GatewayAddress  string `json:",omitempty"`
+	DNSSuffix       string `json:",omitempty"`
+	DNSServerList   string `json:",omitempty"`
+	EnableLowMetric bool   `json:",omitempty"`
+	EncapOverhead   uint16 `json:",omitempty"`
+}
+
 type ResourceType string
 
 const (
vendor/github.com/Microsoft/hcsshim/internal/hcs/callback.go (generated, vendored)
@@ -5,6 +5,7 @@ import (
 	"syscall"
 
 	"github.com/Microsoft/hcsshim/internal/interop"
+	"github.com/sirupsen/logrus"
 )
 
 var (
@@ -15,11 +16,20 @@ var (
 	notificationWatcherCallback = syscall.NewCallback(notificationWatcher)
 
 	// Notifications for HCS_SYSTEM handles
-	hcsNotificationSystemExited          hcsNotification = 0x00000001
-	hcsNotificationSystemCreateCompleted hcsNotification = 0x00000002
-	hcsNotificationSystemStartCompleted  hcsNotification = 0x00000003
-	hcsNotificationSystemPauseCompleted  hcsNotification = 0x00000004
-	hcsNotificationSystemResumeCompleted hcsNotification = 0x00000005
+	hcsNotificationSystemExited                      hcsNotification = 0x00000001
+	hcsNotificationSystemCreateCompleted             hcsNotification = 0x00000002
+	hcsNotificationSystemStartCompleted              hcsNotification = 0x00000003
+	hcsNotificationSystemPauseCompleted              hcsNotification = 0x00000004
+	hcsNotificationSystemResumeCompleted             hcsNotification = 0x00000005
+	hcsNotificationSystemCrashReport                 hcsNotification = 0x00000006
+	hcsNotificationSystemSiloJobCreated              hcsNotification = 0x00000007
+	hcsNotificationSystemSaveCompleted               hcsNotification = 0x00000008
+	hcsNotificationSystemRdpEnhancedModeStateChanged hcsNotification = 0x00000009
+	hcsNotificationSystemShutdownFailed              hcsNotification = 0x0000000A
+	hcsNotificationSystemGetPropertiesCompleted      hcsNotification = 0x0000000B
+	hcsNotificationSystemModifyCompleted             hcsNotification = 0x0000000C
+	hcsNotificationSystemCrashInitiated              hcsNotification = 0x0000000D
+	hcsNotificationSystemGuestConnectionClosed       hcsNotification = 0x0000000E
 
 	// Notifications for HCS_PROCESS handles
 	hcsNotificationProcessExited hcsNotification = 0x00010000
@@ -49,16 +59,23 @@ func newChannels() notificationChannels {
 	channels[hcsNotificationSystemResumeCompleted] = make(notificationChannel, 1)
 	channels[hcsNotificationProcessExited] = make(notificationChannel, 1)
 	channels[hcsNotificationServiceDisconnect] = make(notificationChannel, 1)
+	channels[hcsNotificationSystemCrashReport] = make(notificationChannel, 1)
+	channels[hcsNotificationSystemSiloJobCreated] = make(notificationChannel, 1)
+	channels[hcsNotificationSystemSaveCompleted] = make(notificationChannel, 1)
+	channels[hcsNotificationSystemRdpEnhancedModeStateChanged] = make(notificationChannel, 1)
+	channels[hcsNotificationSystemShutdownFailed] = make(notificationChannel, 1)
+	channels[hcsNotificationSystemGetPropertiesCompleted] = make(notificationChannel, 1)
+	channels[hcsNotificationSystemModifyCompleted] = make(notificationChannel, 1)
+	channels[hcsNotificationSystemCrashInitiated] = make(notificationChannel, 1)
+	channels[hcsNotificationSystemGuestConnectionClosed] = make(notificationChannel, 1)
 
 	return channels
 }
 
 func closeChannels(channels notificationChannels) {
-	close(channels[hcsNotificationSystemExited])
-	close(channels[hcsNotificationSystemCreateCompleted])
-	close(channels[hcsNotificationSystemStartCompleted])
-	close(channels[hcsNotificationSystemPauseCompleted])
-	close(channels[hcsNotificationSystemResumeCompleted])
-	close(channels[hcsNotificationProcessExited])
-	close(channels[hcsNotificationServiceDisconnect])
+	for _, c := range channels {
+		close(c)
+	}
 }
 
 func notificationWatcher(notificationType hcsNotification, callbackNumber uintptr, notificationStatus uintptr, notificationData *uint16) uintptr {
@@ -75,7 +92,13 @@ func notificationWatcher(notificationType hcsNotification, callbackNumber uintpt
 		return 0
 	}
 
-	context.channels[notificationType] <- result
+	if channel, ok := context.channels[notificationType]; ok {
+		channel <- result
+	} else {
+		logrus.WithFields(logrus.Fields{
+			"notification-type": notificationType,
+		}).Warn("Received a callback of an unsupported type")
+	}
 
 	return 0
 }
vendor/github.com/Microsoft/hcsshim/internal/hcs/errors.go (generated, vendored)
@@ -7,6 +7,7 @@ import (
 	"syscall"
 
 	"github.com/Microsoft/hcsshim/internal/interop"
+	"github.com/Microsoft/hcsshim/internal/logfields"
 	"github.com/sirupsen/logrus"
 )
 
@@ -72,6 +73,9 @@ var (
 	// ErrVmcomputeUnknownMessage is an error encountered guest compute system doesn't support the message
 	ErrVmcomputeUnknownMessage = syscall.Errno(0xc037010b)
 
+	// ErrVmcomputeUnexpectedExit is an error encountered when the compute system terminates unexpectedly
+	ErrVmcomputeUnexpectedExit = syscall.Errno(0xC0370106)
+
 	// ErrNotSupported is an error encountered when hcs doesn't support the request
 	ErrPlatformNotSupported = errors.New("unsupported platform request")
 )
@@ -116,10 +120,14 @@ func (ev *ErrorEvent) String() string {
 func processHcsResult(resultp *uint16) []ErrorEvent {
 	if resultp != nil {
 		resultj := interop.ConvertAndFreeCoTaskMemString(resultp)
-		logrus.Debugf("Result: %s", resultj)
+		logrus.WithField(logfields.JSON, resultj).
+			Debug("HCS Result")
 		result := &hcsResult{}
 		if err := json.Unmarshal([]byte(resultj), result); err != nil {
-			logrus.Warnf("Could not unmarshal HCS result %s: %s", resultj, err)
+			logrus.WithFields(logrus.Fields{
+				logfields.JSON:  resultj,
+				logrus.ErrorKey: err,
+			}).Warning("Could not unmarshal HCS result")
 			return nil
 		}
 		return result.ErrorEvents
vendor/github.com/Microsoft/hcsshim/internal/hcs/log.go (generated, vendored, new file)
@@ -0,0 +1,15 @@
+package hcs
+
+import "github.com/sirupsen/logrus"
+
+func logOperationBegin(ctx logrus.Fields, msg string) {
+	logrus.WithFields(ctx).Debug(msg)
+}
+
+func logOperationEnd(ctx logrus.Fields, msg string, err error) {
+	if err == nil {
+		logrus.WithFields(ctx).Debug(msg)
+	} else {
+		logrus.WithFields(ctx).WithError(err).Error(msg)
+	}
+}
165
vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go
generated
vendored
165
vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go
generated
vendored
@ -2,7 +2,6 @@ package hcs

import (
	"encoding/json"
	"fmt"
	"io"
	"sync"
	"syscall"
@ -10,6 +9,7 @@ import (

	"github.com/Microsoft/hcsshim/internal/guestrequest"
	"github.com/Microsoft/hcsshim/internal/interop"
	"github.com/Microsoft/hcsshim/internal/logfields"
	"github.com/sirupsen/logrus"
)

@ -21,6 +21,21 @@ type Process struct {
	system         *System
	cachedPipes    *cachedPipes
	callbackNumber uintptr

	logctx logrus.Fields
}

func newProcess(process hcsProcess, processID int, computeSystem *System) *Process {
	return &Process{
		handle:    process,
		processID: processID,
		system:    computeSystem,
		logctx: logrus.Fields{
			logfields.HCSOperation: "",
			logfields.ContainerID:  computeSystem.ID(),
			logfields.ProcessID:    processID,
		},
	}
}

type cachedPipes struct {
@ -72,13 +87,36 @@ func (process *Process) SystemID() string {
	return process.system.ID()
}

func (process *Process) logOperationBegin(operation string) {
	process.logctx[logfields.HCSOperation] = operation
	logOperationBegin(
		process.logctx,
		"hcsshim::Process - Begin Operation")
}

func (process *Process) logOperationEnd(err error) {
	var result string
	if err == nil {
		result = "Success"
	} else {
		result = "Error"
	}

	logOperationEnd(
		process.logctx,
		"hcsshim::Process - End Operation - "+result,
		err)
	process.logctx[logfields.HCSOperation] = ""
}

// Signal signals the process with `options`.
func (process *Process) Signal(options guestrequest.SignalProcessOptions) error {
func (process *Process) Signal(options guestrequest.SignalProcessOptions) (err error) {
	process.handleLock.RLock()
	defer process.handleLock.RUnlock()
	operation := "Signal"
	title := "hcsshim::Process::" + operation
	logrus.Debugf(title+" processid=%d", process.processID)

	operation := "hcsshim::Process::Signal"
	process.logOperationBegin(operation)
	defer func() { process.logOperationEnd(err) }()

	if process.handle == 0 {
		return makeProcessError(process, operation, ErrAlreadyClosed, nil)
@ -92,83 +130,79 @@ func (process *Process) Signal(options guestrequest.SignalProcessOptions) error
	optionsStr := string(optionsb)

	var resultp *uint16
	completed := false
	go syscallWatcher(fmt.Sprintf("SignalProcess %s: %d", process.SystemID(), process.Pid()), &completed)
	err = hcsSignalProcess(process.handle, optionsStr, &resultp)
	completed = true
	syscallWatcher(process.logctx, func() {
		err = hcsSignalProcess(process.handle, optionsStr, &resultp)
	})
	events := processHcsResult(resultp)
	if err != nil {
		return makeProcessError(process, operation, err, events)
	}

	logrus.Debugf(title+" succeeded processid=%d", process.processID)
	return nil
}

// Kill signals the process to terminate but does not wait for it to finish terminating.
func (process *Process) Kill() error {
func (process *Process) Kill() (err error) {
	process.handleLock.RLock()
	defer process.handleLock.RUnlock()
	operation := "Kill"
	title := "hcsshim::Process::" + operation
	logrus.Debugf(title+" processid=%d", process.processID)

	operation := "hcsshim::Process::Kill"
	process.logOperationBegin(operation)
	defer func() { process.logOperationEnd(err) }()

	if process.handle == 0 {
		return makeProcessError(process, operation, ErrAlreadyClosed, nil)
	}

	var resultp *uint16
	completed := false
	go syscallWatcher(fmt.Sprintf("TerminateProcess %s: %d", process.SystemID(), process.Pid()), &completed)
	err := hcsTerminateProcess(process.handle, &resultp)
	completed = true
	syscallWatcher(process.logctx, func() {
		err = hcsTerminateProcess(process.handle, &resultp)
	})
	events := processHcsResult(resultp)
	if err != nil {
		return makeProcessError(process, operation, err, events)
	}

	logrus.Debugf(title+" succeeded processid=%d", process.processID)
	return nil
}

// Wait waits for the process to exit.
func (process *Process) Wait() error {
	operation := "Wait"
	title := "hcsshim::Process::" + operation
	logrus.Debugf(title+" processid=%d", process.processID)
func (process *Process) Wait() (err error) {
	operation := "hcsshim::Process::Wait"
	process.logOperationBegin(operation)
	defer func() { process.logOperationEnd(err) }()

	err := waitForNotification(process.callbackNumber, hcsNotificationProcessExited, nil)
	err = waitForNotification(process.callbackNumber, hcsNotificationProcessExited, nil)
	if err != nil {
		return makeProcessError(process, operation, err, nil)
	}

	logrus.Debugf(title+" succeeded processid=%d", process.processID)
	return nil
}

// WaitTimeout waits for the process to exit or the duration to elapse. It returns
// false if timeout occurs.
func (process *Process) WaitTimeout(timeout time.Duration) error {
	operation := "WaitTimeout"
	title := "hcsshim::Process::" + operation
	logrus.Debugf(title+" processid=%d", process.processID)
func (process *Process) WaitTimeout(timeout time.Duration) (err error) {
	operation := "hcsshim::Process::WaitTimeout"
	process.logOperationBegin(operation)
	defer func() { process.logOperationEnd(err) }()

	err := waitForNotification(process.callbackNumber, hcsNotificationProcessExited, &timeout)
	err = waitForNotification(process.callbackNumber, hcsNotificationProcessExited, &timeout)
	if err != nil {
		return makeProcessError(process, operation, err, nil)
	}

	logrus.Debugf(title+" succeeded processid=%d", process.processID)
	return nil
}

// ResizeConsole resizes the console of the process.
func (process *Process) ResizeConsole(width, height uint16) error {
func (process *Process) ResizeConsole(width, height uint16) (err error) {
	process.handleLock.RLock()
	defer process.handleLock.RUnlock()
	operation := "ResizeConsole"
	title := "hcsshim::Process::" + operation
	logrus.Debugf(title+" processid=%d", process.processID)

	operation := "hcsshim::Process::ResizeConsole"
	process.logOperationBegin(operation)
	defer func() { process.logOperationEnd(err) }()

	if process.handle == 0 {
		return makeProcessError(process, operation, ErrAlreadyClosed, nil)
@ -196,16 +230,16 @@ func (process *Process) ResizeConsole(width, height uint16) error {
		return makeProcessError(process, operation, err, events)
	}

	logrus.Debugf(title+" succeeded processid=%d", process.processID)
	return nil
}

func (process *Process) Properties() (*ProcessStatus, error) {
func (process *Process) Properties() (_ *ProcessStatus, err error) {
	process.handleLock.RLock()
	defer process.handleLock.RUnlock()
	operation := "Properties"
	title := "hcsshim::Process::" + operation
	logrus.Debugf(title+" processid=%d", process.processID)

	operation := "hcsshim::Process::Properties"
	process.logOperationBegin(operation)
	defer func() { process.logOperationEnd(err) }()

	if process.handle == 0 {
		return nil, makeProcessError(process, operation, ErrAlreadyClosed, nil)
@ -215,10 +249,9 @@ func (process *Process) Properties() (*ProcessStatus, error) {
		resultp     *uint16
		propertiesp *uint16
	)
	completed := false
	go syscallWatcher(fmt.Sprintf("GetProcessProperties %s: %d", process.SystemID(), process.Pid()), &completed)
	err := hcsGetProcessProperties(process.handle, &propertiesp, &resultp)
	completed = true
	syscallWatcher(process.logctx, func() {
		err = hcsGetProcessProperties(process.handle, &propertiesp, &resultp)
	})
	events := processHcsResult(resultp)
	if err != nil {
		return nil, makeProcessError(process, operation, err, events)
@ -234,14 +267,16 @@ func (process *Process) Properties() (*ProcessStatus, error) {
		return nil, makeProcessError(process, operation, err, nil)
	}

	logrus.Debugf(title+" succeeded processid=%d, properties=%s", process.processID, propertiesRaw)
	return properties, nil
}

// ExitCode returns the exit code of the process. The process must have
// already terminated.
func (process *Process) ExitCode() (int, error) {
	operation := "ExitCode"
func (process *Process) ExitCode() (_ int, err error) {
	operation := "hcsshim::Process::ExitCode"
	process.logOperationBegin(operation)
	defer func() { process.logOperationEnd(err) }()

	properties, err := process.Properties()
	if err != nil {
		return 0, makeProcessError(process, operation, err, nil)
@ -261,12 +296,13 @@ func (process *Process) ExitCode() (int, error) {
// Stdio returns the stdin, stdout, and stderr pipes, respectively. Closing
// these pipes does not close the underlying pipes; it should be possible to
// call this multiple times to get multiple interfaces.
func (process *Process) Stdio() (io.WriteCloser, io.ReadCloser, io.ReadCloser, error) {
func (process *Process) Stdio() (_ io.WriteCloser, _ io.ReadCloser, _ io.ReadCloser, err error) {
	process.handleLock.RLock()
	defer process.handleLock.RUnlock()
	operation := "Stdio"
	title := "hcsshim::Process::" + operation
	logrus.Debugf(title+" processid=%d", process.processID)

	operation := "hcsshim::Process::Stdio"
	process.logOperationBegin(operation)
	defer func() { process.logOperationEnd(err) }()

	if process.handle == 0 {
		return nil, nil, nil, makeProcessError(process, operation, ErrAlreadyClosed, nil)
@ -279,7 +315,7 @@ func (process *Process) Stdio() (io.WriteCloser, io.ReadCloser, io.ReadCloser, e
		processInfo hcsProcessInformation
		resultp     *uint16
	)
	err := hcsGetProcessInfo(process.handle, &processInfo, &resultp)
	err = hcsGetProcessInfo(process.handle, &processInfo, &resultp)
	events := processHcsResult(resultp)
	if err != nil {
		return nil, nil, nil, makeProcessError(process, operation, err, events)
@ -299,18 +335,18 @@ func (process *Process) Stdio() (io.WriteCloser, io.ReadCloser, io.ReadCloser, e
		return nil, nil, nil, makeProcessError(process, operation, err, nil)
	}

	logrus.Debugf(title+" succeeded processid=%d", process.processID)
	return pipes[0], pipes[1], pipes[2], nil
}

// CloseStdin closes the write side of the stdin pipe so that the process is
// notified on the read side that there is no more data in stdin.
func (process *Process) CloseStdin() error {
func (process *Process) CloseStdin() (err error) {
	process.handleLock.RLock()
	defer process.handleLock.RUnlock()
	operation := "CloseStdin"
	title := "hcsshim::Process::" + operation
	logrus.Debugf(title+" processid=%d", process.processID)

	operation := "hcsshim::Process::CloseStdin"
	process.logOperationBegin(operation)
	defer func() { process.logOperationEnd(err) }()

	if process.handle == 0 {
		return makeProcessError(process, operation, ErrAlreadyClosed, nil)
@ -337,35 +373,34 @@ func (process *Process) CloseStdin() error {
		return makeProcessError(process, operation, err, events)
	}

	logrus.Debugf(title+" succeeded processid=%d", process.processID)
	return nil
}

// Close cleans up any state associated with the process but does not kill
// or wait on it.
func (process *Process) Close() error {
func (process *Process) Close() (err error) {
	process.handleLock.Lock()
	defer process.handleLock.Unlock()
	operation := "Close"
	title := "hcsshim::Process::" + operation
	logrus.Debugf(title+" processid=%d", process.processID)

	operation := "hcsshim::Process::Close"
	process.logOperationBegin(operation)
	defer func() { process.logOperationEnd(err) }()

	// Don't double free this
	if process.handle == 0 {
		return nil
	}

	if err := process.unregisterCallback(); err != nil {
	if err = process.unregisterCallback(); err != nil {
		return makeProcessError(process, operation, err, nil)
	}

	if err := hcsCloseProcess(process.handle); err != nil {
	if err = hcsCloseProcess(process.handle); err != nil {
		return makeProcessError(process, operation, err, nil)
	}

	process.handle = 0

	logrus.Debugf(title+" succeeded processid=%d", process.processID)
	return nil
}
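One detail explains many of the one-character changes above: every method gains a named err return, and each inner `err :=` becomes `err =`. With `:=` in an inner scope the assignment would declare a new variable that shadows the named result, so the deferred logOperationEnd would never observe the failure; this matters especially for errors set inside the syscallWatcher closure. A standalone sketch of the mechanism, with the helper name invented:

package main

import (
	"errors"
	"fmt"
)

// With a named result, a deferred function can observe whatever error the
// function ultimately produces - including one set inside a closure, as in
// the syscallWatcher rewrite above.
func operation() (err error) {
	defer func() { fmt.Println("deferred logger sees:", err) }()

	// Assign (=) rather than declare (:=): inside this closure, := would
	// create a new, shadowing err that the defer would never see.
	run(func() {
		err = errors.New("platform call failed")
	})
	return err
}

// run stands in for syscallWatcher: it simply invokes the lambda.
func run(f func()) { f() }

func main() {
	_ = operation()
	// Prints: deferred logger sees: platform call failed
}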
391 vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go generated vendored
@ -2,7 +2,6 @@ package hcs

import (
	"encoding/json"
	"fmt"
	"os"
	"strconv"
	"sync"
@ -10,6 +9,7 @@ import (
	"time"

	"github.com/Microsoft/hcsshim/internal/interop"
	"github.com/Microsoft/hcsshim/internal/logfields"
	"github.com/Microsoft/hcsshim/internal/schema1"
	"github.com/Microsoft/hcsshim/internal/timeout"
	"github.com/sirupsen/logrus"
@ -41,16 +41,49 @@ type System struct {
	handle         hcsSystem
	id             string
	callbackNumber uintptr

	logctx logrus.Fields
}

func newSystem(id string) *System {
	return &System{
		id: id,
		logctx: logrus.Fields{
			logfields.HCSOperation: "",
			logfields.ContainerID:  id,
		},
	}
}

func (computeSystem *System) logOperationBegin(operation string) {
	computeSystem.logctx[logfields.HCSOperation] = operation
	logOperationBegin(
		computeSystem.logctx,
		"hcsshim::ComputeSystem - Begin Operation")
}

func (computeSystem *System) logOperationEnd(err error) {
	var result string
	if err == nil {
		result = "Success"
	} else {
		result = "Error"
	}

	logOperationEnd(
		computeSystem.logctx,
		"hcsshim::ComputeSystem - End Operation - "+result,
		err)
	computeSystem.logctx[logfields.HCSOperation] = ""
}

// CreateComputeSystem creates a new compute system with the given configuration but does not start it.
func CreateComputeSystem(id string, hcsDocumentInterface interface{}) (*System, error) {
	operation := "CreateComputeSystem"
	title := "hcsshim::" + operation
func CreateComputeSystem(id string, hcsDocumentInterface interface{}) (_ *System, err error) {
	operation := "hcsshim::CreateComputeSystem"

	computeSystem := &System{
		id: id,
	}
	computeSystem := newSystem(id)
	computeSystem.logOperationBegin(operation)
	defer func() { computeSystem.logOperationEnd(err) }()

	hcsDocumentB, err := json.Marshal(hcsDocumentInterface)
	if err != nil {
@ -58,19 +91,22 @@ func CreateComputeSystem(id string, hcsDocumentInterface interface{}) (*System,
	}

	hcsDocument := string(hcsDocumentB)
	logrus.Debugf(title+" ID=%s config=%s", id, hcsDocument)

	logrus.WithFields(computeSystem.logctx).
		WithField(logfields.JSON, hcsDocument).
		Debug("HCS ComputeSystem Document")

	var (
		resultp  *uint16
		identity syscall.Handle
		resultp     *uint16
		identity    syscall.Handle
		createError error
	)
	completed := false
	go syscallWatcher(fmt.Sprintf("CreateCompleteSystem %s: %s", id, hcsDocument), &completed)
	createError := hcsCreateComputeSystem(id, hcsDocument, identity, &computeSystem.handle, &resultp)
	completed = true
	syscallWatcher(computeSystem.logctx, func() {
		createError = hcsCreateComputeSystem(id, hcsDocument, identity, &computeSystem.handle, &resultp)
	})

	if createError == nil || IsPending(createError) {
		if err := computeSystem.registerCallback(); err != nil {
		if err = computeSystem.registerCallback(); err != nil {
			// Terminate the compute system if it still exists. We're okay to
			// ignore a failure here.
			computeSystem.Terminate()
@ -88,25 +124,28 @@ func CreateComputeSystem(id string, hcsDocumentInterface interface{}) (*System,
		return nil, makeSystemError(computeSystem, operation, hcsDocument, err, events)
	}

	logrus.Debugf(title+" succeeded id=%s handle=%d", id, computeSystem.handle)
	return computeSystem, nil
}

// OpenComputeSystem opens an existing compute system by ID.
func OpenComputeSystem(id string) (*System, error) {
	operation := "OpenComputeSystem"
	title := "hcsshim::" + operation
	logrus.Debugf(title+" ID=%s", id)
func OpenComputeSystem(id string) (_ *System, err error) {
	operation := "hcsshim::OpenComputeSystem"

	computeSystem := &System{
		id: id,
	}
	computeSystem := newSystem(id)
	computeSystem.logOperationBegin(operation)
	defer func() {
		if IsNotExist(err) {
			computeSystem.logOperationEnd(nil)
		} else {
			computeSystem.logOperationEnd(err)
		}
	}()

	var (
		handle  hcsSystem
		resultp *uint16
	)
	err := hcsOpenComputeSystem(id, &handle, &resultp)
	err = hcsOpenComputeSystem(id, &handle, &resultp)
	events := processHcsResult(resultp)
	if err != nil {
		return nil, makeSystemError(computeSystem, operation, "", err, events)
@ -114,18 +153,36 @@ func OpenComputeSystem(id string) (*System, error) {

	computeSystem.handle = handle

	if err := computeSystem.registerCallback(); err != nil {
	if err = computeSystem.registerCallback(); err != nil {
		return nil, makeSystemError(computeSystem, operation, "", err, nil)
	}

	logrus.Debugf(title+" succeeded id=%s handle=%d", id, handle)
	return computeSystem, nil
}

// GetComputeSystems gets a list of the compute systems on the system that match the query
func GetComputeSystems(q schema1.ComputeSystemQuery) ([]schema1.ContainerProperties, error) {
	operation := "GetComputeSystems"
	title := "hcsshim::" + operation
func GetComputeSystems(q schema1.ComputeSystemQuery) (_ []schema1.ContainerProperties, err error) {
	operation := "hcsshim::GetComputeSystems"
	fields := logrus.Fields{
		logfields.HCSOperation: operation,
	}
	logOperationBegin(
		fields,
		"hcsshim::ComputeSystem - Begin Operation")

	defer func() {
		var result string
		if err == nil {
			result = "Success"
		} else {
			result = "Error"
		}

		logOperationEnd(
			fields,
			"hcsshim::ComputeSystem - End Operation - "+result,
			err)
	}()

	queryb, err := json.Marshal(q)
	if err != nil {
@ -133,16 +190,19 @@ func GetComputeSystems(q schema1.ComputeSystemQuery) ([]schema1.ContainerPropert
	}

	query := string(queryb)
	logrus.Debugf(title+" query=%s", query)

	logrus.WithFields(fields).
		WithField(logfields.JSON, query).
		Debug("HCS ComputeSystem Query")

	var (
		resultp         *uint16
		computeSystemsp *uint16
	)
	completed := false
	go syscallWatcher(fmt.Sprintf("GetComputeSystems %s:", query), &completed)
	err = hcsEnumerateComputeSystems(query, &computeSystemsp, &resultp)
	completed = true

	syscallWatcher(fields, func() {
		err = hcsEnumerateComputeSystems(query, &computeSystemsp, &resultp)
	})
	events := processHcsResult(resultp)
	if err != nil {
		return nil, &HcsError{Op: operation, Err: err, Events: events}
@ -153,20 +213,21 @@ func GetComputeSystems(q schema1.ComputeSystemQuery) ([]schema1.ContainerPropert
	}
	computeSystemsRaw := interop.ConvertAndFreeCoTaskMemBytes(computeSystemsp)
	computeSystems := []schema1.ContainerProperties{}
	if err := json.Unmarshal(computeSystemsRaw, &computeSystems); err != nil {
	if err = json.Unmarshal(computeSystemsRaw, &computeSystems); err != nil {
		return nil, err
	}

	logrus.Debugf(title + " succeeded")
	return computeSystems, nil
}

// Start synchronously starts the computeSystem.
func (computeSystem *System) Start() error {
func (computeSystem *System) Start() (err error) {
	computeSystem.handleLock.RLock()
	defer computeSystem.handleLock.RUnlock()
	title := "hcsshim::ComputeSystem::Start ID=" + computeSystem.ID()
	logrus.Debugf(title)

	operation := "hcsshim::ComputeSystem::Start"
	computeSystem.logOperationBegin(operation)
	defer func() { computeSystem.logOperationEnd(err) }()

	if computeSystem.handle == 0 {
		return makeSystemError(computeSystem, "Start", "", ErrAlreadyClosed, nil)
@ -199,16 +260,14 @@ func (computeSystem *System) Start() error {
	}

	var resultp *uint16
	completed := false
	go syscallWatcher(fmt.Sprintf("StartComputeSystem %s:", computeSystem.ID()), &completed)
	err := hcsStartComputeSystem(computeSystem.handle, "", &resultp)
	completed = true
	syscallWatcher(computeSystem.logctx, func() {
		err = hcsStartComputeSystem(computeSystem.handle, "", &resultp)
	})
	events, err := processAsyncHcsResult(err, resultp, computeSystem.callbackNumber, hcsNotificationSystemStartCompleted, &timeout.SystemStart)
	if err != nil {
		return makeSystemError(computeSystem, "Start", "", err, events)
	}

	logrus.Debugf(title + " succeeded")
	return nil
}

@ -219,98 +278,133 @@ func (computeSystem *System) ID() string {

// Shutdown requests a compute system shutdown, if IsPending() on the error returned is true,
// it may not actually be shut down until Wait() succeeds.
func (computeSystem *System) Shutdown() error {
func (computeSystem *System) Shutdown() (err error) {
	computeSystem.handleLock.RLock()
	defer computeSystem.handleLock.RUnlock()
	title := "hcsshim::ComputeSystem::Shutdown"
	logrus.Debugf(title)

	operation := "hcsshim::ComputeSystem::Shutdown"
	computeSystem.logOperationBegin(operation)
	defer func() {
		if IsAlreadyStopped(err) {
			computeSystem.logOperationEnd(nil)
		} else {
			computeSystem.logOperationEnd(err)
		}
	}()

	if computeSystem.handle == 0 {
		return makeSystemError(computeSystem, "Shutdown", "", ErrAlreadyClosed, nil)
	}

	var resultp *uint16
	completed := false
	go syscallWatcher(fmt.Sprintf("ShutdownComputeSystem %s:", computeSystem.ID()), &completed)
	err := hcsShutdownComputeSystem(computeSystem.handle, "", &resultp)
	completed = true
	syscallWatcher(computeSystem.logctx, func() {
		err = hcsShutdownComputeSystem(computeSystem.handle, "", &resultp)
	})
	events := processHcsResult(resultp)
	if err != nil {
		return makeSystemError(computeSystem, "Shutdown", "", err, events)
	}

	logrus.Debugf(title + " succeeded")
	return nil
}

// Terminate requests a compute system terminate, if IsPending() on the error returned is true,
// it may not actually be shut down until Wait() succeeds.
func (computeSystem *System) Terminate() error {
func (computeSystem *System) Terminate() (err error) {
	computeSystem.handleLock.RLock()
	defer computeSystem.handleLock.RUnlock()
	title := "hcsshim::ComputeSystem::Terminate ID=" + computeSystem.ID()
	logrus.Debugf(title)

	operation := "hcsshim::ComputeSystem::Terminate"
	computeSystem.logOperationBegin(operation)
	defer func() {
		if IsPending(err) {
			computeSystem.logOperationEnd(nil)
		} else {
			computeSystem.logOperationEnd(err)
		}
	}()

	if computeSystem.handle == 0 {
		return makeSystemError(computeSystem, "Terminate", "", ErrAlreadyClosed, nil)
	}

	var resultp *uint16
	completed := false
	go syscallWatcher(fmt.Sprintf("TerminateComputeSystem %s:", computeSystem.ID()), &completed)
	err := hcsTerminateComputeSystem(computeSystem.handle, "", &resultp)
	completed = true
	syscallWatcher(computeSystem.logctx, func() {
		err = hcsTerminateComputeSystem(computeSystem.handle, "", &resultp)
	})
	events := processHcsResult(resultp)
	if err != nil {
	if err != nil && err != ErrVmcomputeAlreadyStopped {
		return makeSystemError(computeSystem, "Terminate", "", err, events)
	}

	logrus.Debugf(title + " succeeded")
	return nil
}
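A pattern worth noting in Shutdown and Terminate above: certain failures are deliberately tolerated. Shutdown logs an already-stopped system as success, and the rewritten Terminate no longer surfaces ErrVmcomputeAlreadyStopped as an error at all. A minimal sketch of the idea, with the sentinel and helper names invented for illustration:

package main

import (
	"errors"
	"fmt"
)

// errAlreadyStopped stands in for hcsshim's ErrVmcomputeAlreadyStopped.
var errAlreadyStopped = errors.New("compute system already stopped")

// terminate mirrors the tolerant check in Terminate above: an
// "already stopped" failure from the platform is treated as success.
func terminate(alreadyStopped bool) error {
	err := platformTerminate(alreadyStopped) // hypothetical platform call
	if err != nil && err != errAlreadyStopped {
		return fmt.Errorf("terminate failed: %v", err)
	}
	return nil
}

func platformTerminate(alreadyStopped bool) error {
	if alreadyStopped {
		return errAlreadyStopped
	}
	return nil
}

func main() {
	fmt.Println(terminate(true))  // <nil>: already stopped is not an error
	fmt.Println(terminate(false)) // <nil>
}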
// Wait synchronously waits for the compute system to shutdown or terminate.
func (computeSystem *System) Wait() error {
	title := "hcsshim::ComputeSystem::Wait ID=" + computeSystem.ID()
	logrus.Debugf(title)
func (computeSystem *System) Wait() (err error) {
	operation := "hcsshim::ComputeSystem::Wait"
	computeSystem.logOperationBegin(operation)
	defer func() { computeSystem.logOperationEnd(err) }()

	err := waitForNotification(computeSystem.callbackNumber, hcsNotificationSystemExited, nil)
	err = waitForNotification(computeSystem.callbackNumber, hcsNotificationSystemExited, nil)
	if err != nil {
		return makeSystemError(computeSystem, "Wait", "", err, nil)
	}

	logrus.Debugf(title + " succeeded")
	return nil
}

// WaitExpectedError synchronously waits for the compute system to shutdown or
// terminate, and ignores the passed error if it occurs.
func (computeSystem *System) WaitExpectedError(expected error) (err error) {
	operation := "hcsshim::ComputeSystem::WaitExpectedError"
	computeSystem.logOperationBegin(operation)
	defer func() { computeSystem.logOperationEnd(err) }()

	err = waitForNotification(computeSystem.callbackNumber, hcsNotificationSystemExited, nil)
	if err != nil && err != expected {
		return makeSystemError(computeSystem, "WaitExpectedError", "", err, nil)
	}

	return nil
}

// WaitTimeout synchronously waits for the compute system to terminate or the duration to elapse.
// If the timeout expires, IsTimeout(err) == true
func (computeSystem *System) WaitTimeout(timeout time.Duration) error {
	title := "hcsshim::ComputeSystem::WaitTimeout ID=" + computeSystem.ID()
	logrus.Debugf(title)
func (computeSystem *System) WaitTimeout(timeout time.Duration) (err error) {
	operation := "hcsshim::ComputeSystem::WaitTimeout"
	computeSystem.logOperationBegin(operation)
	defer func() { computeSystem.logOperationEnd(err) }()

	err := waitForNotification(computeSystem.callbackNumber, hcsNotificationSystemExited, &timeout)
	err = waitForNotification(computeSystem.callbackNumber, hcsNotificationSystemExited, &timeout)
	if err != nil {
		return makeSystemError(computeSystem, "WaitTimeout", "", err, nil)
	}

	logrus.Debugf(title + " succeeded")
	return nil
}

func (computeSystem *System) Properties(types ...schema1.PropertyType) (*schema1.ContainerProperties, error) {
func (computeSystem *System) Properties(types ...schema1.PropertyType) (_ *schema1.ContainerProperties, err error) {
	computeSystem.handleLock.RLock()
	defer computeSystem.handleLock.RUnlock()

	operation := "hcsshim::ComputeSystem::Properties"
	computeSystem.logOperationBegin(operation)
	defer func() { computeSystem.logOperationEnd(err) }()

	queryj, err := json.Marshal(schema1.PropertyQuery{types})
	if err != nil {
		return nil, makeSystemError(computeSystem, "Properties", "", err, nil)
	}

	logrus.WithFields(computeSystem.logctx).
		WithField(logfields.JSON, queryj).
		Debug("HCS ComputeSystem Properties Query")

	var resultp, propertiesp *uint16
	completed := false
	go syscallWatcher(fmt.Sprintf("GetComputeSystemProperties %s:", computeSystem.ID()), &completed)
	err = hcsGetComputeSystemProperties(computeSystem.handle, string(queryj), &propertiesp, &resultp)
	completed = true
	syscallWatcher(computeSystem.logctx, func() {
		err = hcsGetComputeSystemProperties(computeSystem.handle, string(queryj), &propertiesp, &resultp)
	})
	events := processHcsResult(resultp)
	if err != nil {
		return nil, makeSystemError(computeSystem, "Properties", "", err, events)
@ -324,64 +418,69 @@ func (computeSystem *System) Properties(types ...schema1.PropertyType) (*schema1
	if err := json.Unmarshal(propertiesRaw, properties); err != nil {
		return nil, makeSystemError(computeSystem, "Properties", "", err, nil)
	}

	return properties, nil
}

// Pause pauses the execution of the computeSystem. This feature is not enabled in TP5.
func (computeSystem *System) Pause() error {
func (computeSystem *System) Pause() (err error) {
	computeSystem.handleLock.RLock()
	defer computeSystem.handleLock.RUnlock()
	title := "hcsshim::ComputeSystem::Pause ID=" + computeSystem.ID()
	logrus.Debugf(title)

	operation := "hcsshim::ComputeSystem::Pause"
	computeSystem.logOperationBegin(operation)
	defer func() { computeSystem.logOperationEnd(err) }()

	if computeSystem.handle == 0 {
		return makeSystemError(computeSystem, "Pause", "", ErrAlreadyClosed, nil)
	}

	var resultp *uint16
	completed := false
	go syscallWatcher(fmt.Sprintf("PauseComputeSystem %s:", computeSystem.ID()), &completed)
	err := hcsPauseComputeSystem(computeSystem.handle, "", &resultp)
	completed = true
	syscallWatcher(computeSystem.logctx, func() {
		err = hcsPauseComputeSystem(computeSystem.handle, "", &resultp)
	})
	events, err := processAsyncHcsResult(err, resultp, computeSystem.callbackNumber, hcsNotificationSystemPauseCompleted, &timeout.SystemPause)
	if err != nil {
		return makeSystemError(computeSystem, "Pause", "", err, events)
	}

	logrus.Debugf(title + " succeeded")
	return nil
}

// Resume resumes the execution of the computeSystem. This feature is not enabled in TP5.
func (computeSystem *System) Resume() error {
func (computeSystem *System) Resume() (err error) {
	computeSystem.handleLock.RLock()
	defer computeSystem.handleLock.RUnlock()
	title := "hcsshim::ComputeSystem::Resume ID=" + computeSystem.ID()
	logrus.Debugf(title)

	operation := "hcsshim::ComputeSystem::Resume"
	computeSystem.logOperationBegin(operation)
	defer func() { computeSystem.logOperationEnd(err) }()

	if computeSystem.handle == 0 {
		return makeSystemError(computeSystem, "Resume", "", ErrAlreadyClosed, nil)
	}

	var resultp *uint16
	completed := false
	go syscallWatcher(fmt.Sprintf("ResumeComputeSystem %s:", computeSystem.ID()), &completed)
	err := hcsResumeComputeSystem(computeSystem.handle, "", &resultp)
	completed = true
	syscallWatcher(computeSystem.logctx, func() {
		err = hcsResumeComputeSystem(computeSystem.handle, "", &resultp)
	})
	events, err := processAsyncHcsResult(err, resultp, computeSystem.callbackNumber, hcsNotificationSystemResumeCompleted, &timeout.SystemResume)
	if err != nil {
		return makeSystemError(computeSystem, "Resume", "", err, events)
	}

	logrus.Debugf(title + " succeeded")
	return nil
}

// CreateProcess launches a new process within the computeSystem.
func (computeSystem *System) CreateProcess(c interface{}) (*Process, error) {
func (computeSystem *System) CreateProcess(c interface{}) (_ *Process, err error) {
	computeSystem.handleLock.RLock()
	defer computeSystem.handleLock.RUnlock()
	title := "hcsshim::ComputeSystem::CreateProcess ID=" + computeSystem.ID()

	operation := "hcsshim::ComputeSystem::CreateProcess"
	computeSystem.logOperationBegin(operation)
	defer func() { computeSystem.logOperationEnd(err) }()

	var (
		processInfo   hcsProcessInformation
		processHandle hcsProcess
@ -398,42 +497,50 @@ func (computeSystem *System) CreateProcess(c interface{}) (*Process, error) {
	}

	configuration := string(configurationb)
	logrus.Debugf(title+" config=%s", configuration)

	completed := false
	go syscallWatcher(fmt.Sprintf("CreateProcess %s: %s", computeSystem.ID(), configuration), &completed)
	err = hcsCreateProcess(computeSystem.handle, configuration, &processInfo, &processHandle, &resultp)
	completed = true
	logrus.WithFields(computeSystem.logctx).
		WithField(logfields.JSON, configuration).
		Debug("HCS ComputeSystem Process Document")

	syscallWatcher(computeSystem.logctx, func() {
		err = hcsCreateProcess(computeSystem.handle, configuration, &processInfo, &processHandle, &resultp)
	})
	events := processHcsResult(resultp)
	if err != nil {
		return nil, makeSystemError(computeSystem, "CreateProcess", configuration, err, events)
	}

	process := &Process{
		handle:    processHandle,
		processID: int(processInfo.ProcessId),
		system:    computeSystem,
		cachedPipes: &cachedPipes{
			stdIn:  processInfo.StdInput,
			stdOut: processInfo.StdOutput,
			stdErr: processInfo.StdError,
		},
	logrus.WithFields(computeSystem.logctx).
		WithField(logfields.ProcessID, processInfo.ProcessId).
		Debug("HCS ComputeSystem CreateProcess PID")

	process := newProcess(processHandle, int(processInfo.ProcessId), computeSystem)
	process.cachedPipes = &cachedPipes{
		stdIn:  processInfo.StdInput,
		stdOut: processInfo.StdOutput,
		stdErr: processInfo.StdError,
	}

	if err := process.registerCallback(); err != nil {
	if err = process.registerCallback(); err != nil {
		return nil, makeSystemError(computeSystem, "CreateProcess", "", err, nil)
	}

	logrus.Debugf(title+" succeeded processid=%d", process.processID)
	return process, nil
}

// OpenProcess gets an interface to an existing process within the computeSystem.
func (computeSystem *System) OpenProcess(pid int) (*Process, error) {
func (computeSystem *System) OpenProcess(pid int) (_ *Process, err error) {
	computeSystem.handleLock.RLock()
	defer computeSystem.handleLock.RUnlock()
	title := "hcsshim::ComputeSystem::OpenProcess ID=" + computeSystem.ID()
	logrus.Debugf(title+" processid=%d", pid)

	// Add PID for the context of this operation
	computeSystem.logctx[logfields.ProcessID] = pid
	defer delete(computeSystem.logctx, logfields.ProcessID)

	operation := "hcsshim::ComputeSystem::OpenProcess"
	computeSystem.logOperationBegin(operation)
	defer func() { computeSystem.logOperationEnd(err) }()

	var (
		processHandle hcsProcess
		resultp       *uint16
@ -443,56 +550,49 @@ func (computeSystem *System) OpenProcess(pid int) (*Process, error) {
		return nil, makeSystemError(computeSystem, "OpenProcess", "", ErrAlreadyClosed, nil)
	}

	completed := false
	go syscallWatcher(fmt.Sprintf("OpenProcess %s: %d", computeSystem.ID(), pid), &completed)
	err := hcsOpenProcess(computeSystem.handle, uint32(pid), &processHandle, &resultp)
	completed = true
	syscallWatcher(computeSystem.logctx, func() {
		err = hcsOpenProcess(computeSystem.handle, uint32(pid), &processHandle, &resultp)
	})
	events := processHcsResult(resultp)
	if err != nil {
		return nil, makeSystemError(computeSystem, "OpenProcess", "", err, events)
	}

	process := &Process{
		handle:    processHandle,
		processID: pid,
		system:    computeSystem,
	}

	if err := process.registerCallback(); err != nil {
	process := newProcess(processHandle, pid, computeSystem)
	if err = process.registerCallback(); err != nil {
		return nil, makeSystemError(computeSystem, "OpenProcess", "", err, nil)
	}

	logrus.Debugf(title+" succeeded processid=%s", process.processID)
	return process, nil
}

// Close cleans up any state associated with the compute system but does not terminate or wait for it.
func (computeSystem *System) Close() error {
func (computeSystem *System) Close() (err error) {
	computeSystem.handleLock.Lock()
	defer computeSystem.handleLock.Unlock()
	title := "hcsshim::ComputeSystem::Close ID=" + computeSystem.ID()
	logrus.Debugf(title)

	operation := "hcsshim::ComputeSystem::Close"
	computeSystem.logOperationBegin(operation)
	defer func() { computeSystem.logOperationEnd(err) }()

	// Don't double free this
	if computeSystem.handle == 0 {
		return nil
	}

	if err := computeSystem.unregisterCallback(); err != nil {
	if err = computeSystem.unregisterCallback(); err != nil {
		return makeSystemError(computeSystem, "Close", "", err, nil)
	}

	completed := false
	go syscallWatcher(fmt.Sprintf("CloseComputeSystem %s:", computeSystem.ID()), &completed)
	err := hcsCloseComputeSystem(computeSystem.handle)
	completed = true
	syscallWatcher(computeSystem.logctx, func() {
		err = hcsCloseComputeSystem(computeSystem.handle)
	})
	if err != nil {
		return makeSystemError(computeSystem, "Close", "", err, nil)
	}

	computeSystem.handle = 0

	logrus.Debugf(title + " succeeded")
	return nil
}

@ -553,11 +653,14 @@ func (computeSystem *System) unregisterCallback() error {
	return nil
}

// Modifies the System by sending a request to HCS
func (computeSystem *System) Modify(config interface{}) error {
// Modify the System by sending a request to HCS
func (computeSystem *System) Modify(config interface{}) (err error) {
	computeSystem.handleLock.RLock()
	defer computeSystem.handleLock.RUnlock()
	title := "hcsshim::Modify ID=" + computeSystem.id

	operation := "hcsshim::ComputeSystem::Modify"
	computeSystem.logOperationBegin(operation)
	defer func() { computeSystem.logOperationEnd(err) }()

	if computeSystem.handle == 0 {
		return makeSystemError(computeSystem, "Modify", "", ErrAlreadyClosed, nil)
@ -569,17 +672,19 @@ func (computeSystem *System) Modify(config interface{}) error {
	}

	requestString := string(requestJSON)
	logrus.Debugf(title + " " + requestString)

	logrus.WithFields(computeSystem.logctx).
		WithField(logfields.JSON, requestString).
		Debug("HCS ComputeSystem Modify Document")

	var resultp *uint16
	completed := false
	go syscallWatcher(fmt.Sprintf("ModifyComputeSystem %s: %s", computeSystem.ID(), requestString), &completed)
	err = hcsModifyComputeSystem(computeSystem.handle, requestString, &resultp)
	completed = true
	syscallWatcher(computeSystem.logctx, func() {
		err = hcsModifyComputeSystem(computeSystem.handle, requestString, &resultp)
	})
	events := processHcsResult(resultp)
	if err != nil {
		return makeSystemError(computeSystem, "Modify", requestString, err, events)
	}
	logrus.Debugf(title + " succeeded ")

	return nil
}
33 vendor/github.com/Microsoft/hcsshim/internal/hcs/watcher.go generated vendored
@ -1,8 +1,9 @@
package hcs

import (
	"time"
	"context"

	"github.com/Microsoft/hcsshim/internal/logfields"
	"github.com/Microsoft/hcsshim/internal/timeout"
	"github.com/sirupsen/logrus"
)
@ -16,15 +17,25 @@ import (
//
// Usage is:
//
// completed := false
// go syscallWatcher("some description", &completed)
// <syscall>
// completed = true
// syscallWatcher(logContext, func() {
//     err = <syscall>(args...)
// })
//
func syscallWatcher(description string, syscallCompleted *bool) {
	time.Sleep(timeout.SyscallWatcher)
	if *syscallCompleted {
		return
	}
	logrus.Warnf("%s: Did not complete within %s. This may indicate a platform issue. If it appears to be making no forward progress, obtain the stacks and see is there is a syscall stuck in the platform API for a significant length of time.", description, timeout.SyscallWatcher)

func syscallWatcher(logContext logrus.Fields, syscallLambda func()) {
	ctx, cancel := context.WithTimeout(context.Background(), timeout.SyscallWatcher)
	defer cancel()
	go watchFunc(ctx, logContext)
	syscallLambda()
}

func watchFunc(ctx context.Context, logContext logrus.Fields) {
	select {
	case <-ctx.Done():
		if ctx.Err() != context.Canceled {
			logrus.WithFields(logContext).
				WithField(logfields.Timeout, timeout.SyscallWatcher).
				Warning("Syscall did not complete within operation timeout. This may indicate a platform issue. If it appears to be making no forward progress, obtain the stacks and see if there is a syscall stuck in the platform API for a significant length of time.")
		}
	}
}
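The rewrite above inverts the old watcher: instead of polling a completed flag after a fixed sleep, the blocking call now runs on the current goroutine while a context with a deadline drives the warning. A self-contained sketch of the same shape; the one-second timeout and fmt output stand in for timeout.SyscallWatcher and the logrus warning.

package main

import (
	"context"
	"fmt"
	"time"
)

// syscallWatcher runs the blocking call on the current goroutine while a
// helper goroutine warns if the context's deadline passes first.
func syscallWatcher(opTimeout time.Duration, syscallLambda func()) {
	ctx, cancel := context.WithTimeout(context.Background(), opTimeout)
	defer cancel() // a timely return cancels the watcher silently

	go func() {
		<-ctx.Done()
		if ctx.Err() != context.Canceled {
			fmt.Println("warning: syscall did not complete within", opTimeout)
		}
	}()
	syscallLambda()
}

func main() {
	syscallWatcher(time.Second, func() {
		time.Sleep(2 * time.Second) // simulate a stuck platform call
	})
}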
123 vendor/github.com/Microsoft/hcsshim/internal/hcs/zsyscall_windows.go generated vendored
@ -1,4 +1,4 @@
// MACHINE GENERATED BY 'go generate' COMMAND; DO NOT EDIT
// Code generated mksyscall_windows.exe DO NOT EDIT

package hcs

@ -6,7 +6,6 @@ import (
	"syscall"
	"unsafe"

	"github.com/Microsoft/hcsshim/internal/interop"
	"golang.org/x/sys/windows"
)

@ -81,7 +80,10 @@ func _hcsEnumerateComputeSystems(query *uint16, computeSystems **uint16, result
	}
	r0, _, _ := syscall.Syscall(procHcsEnumerateComputeSystems.Addr(), 3, uintptr(unsafe.Pointer(query)), uintptr(unsafe.Pointer(computeSystems)), uintptr(unsafe.Pointer(result)))
	if int32(r0) < 0 {
		hr = interop.Win32FromHresult(r0)
		if r0&0x1fff0000 == 0x00070000 {
			r0 &= 0xffff
		}
		hr = syscall.Errno(r0)
	}
	return
}
@ -106,7 +108,10 @@ func _hcsCreateComputeSystem(id *uint16, configuration *uint16, identity syscall
	}
	r0, _, _ := syscall.Syscall6(procHcsCreateComputeSystem.Addr(), 5, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(configuration)), uintptr(identity), uintptr(unsafe.Pointer(computeSystem)), uintptr(unsafe.Pointer(result)), 0)
	if int32(r0) < 0 {
		hr = interop.Win32FromHresult(r0)
		if r0&0x1fff0000 == 0x00070000 {
			r0 &= 0xffff
		}
		hr = syscall.Errno(r0)
	}
	return
}
@ -126,7 +131,10 @@ func _hcsOpenComputeSystem(id *uint16, computeSystem *hcsSystem, result **uint16
	}
	r0, _, _ := syscall.Syscall(procHcsOpenComputeSystem.Addr(), 3, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(computeSystem)), uintptr(unsafe.Pointer(result)))
	if int32(r0) < 0 {
		hr = interop.Win32FromHresult(r0)
		if r0&0x1fff0000 == 0x00070000 {
			r0 &= 0xffff
		}
		hr = syscall.Errno(r0)
	}
	return
}
@ -137,7 +145,10 @@ func hcsCloseComputeSystem(computeSystem hcsSystem) (hr error) {
	}
	r0, _, _ := syscall.Syscall(procHcsCloseComputeSystem.Addr(), 1, uintptr(computeSystem), 0, 0)
	if int32(r0) < 0 {
		hr = interop.Win32FromHresult(r0)
		if r0&0x1fff0000 == 0x00070000 {
			r0 &= 0xffff
		}
		hr = syscall.Errno(r0)
	}
	return
}
@ -157,7 +168,10 @@ func _hcsStartComputeSystem(computeSystem hcsSystem, options *uint16, result **u
	}
	r0, _, _ := syscall.Syscall(procHcsStartComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result)))
	if int32(r0) < 0 {
		hr = interop.Win32FromHresult(r0)
		if r0&0x1fff0000 == 0x00070000 {
			r0 &= 0xffff
		}
		hr = syscall.Errno(r0)
	}
	return
}
@ -177,7 +191,10 @@ func _hcsShutdownComputeSystem(computeSystem hcsSystem, options *uint16, result
	}
	r0, _, _ := syscall.Syscall(procHcsShutdownComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result)))
	if int32(r0) < 0 {
		hr = interop.Win32FromHresult(r0)
		if r0&0x1fff0000 == 0x00070000 {
			r0 &= 0xffff
		}
		hr = syscall.Errno(r0)
	}
	return
}
@ -197,7 +214,10 @@ func _hcsTerminateComputeSystem(computeSystem hcsSystem, options *uint16, result
	}
	r0, _, _ := syscall.Syscall(procHcsTerminateComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result)))
	if int32(r0) < 0 {
		hr = interop.Win32FromHresult(r0)
		if r0&0x1fff0000 == 0x00070000 {
			r0 &= 0xffff
		}
		hr = syscall.Errno(r0)
	}
	return
}
@ -217,7 +237,10 @@ func _hcsPauseComputeSystem(computeSystem hcsSystem, options *uint16, result **u
	}
	r0, _, _ := syscall.Syscall(procHcsPauseComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result)))
	if int32(r0) < 0 {
		hr = interop.Win32FromHresult(r0)
		if r0&0x1fff0000 == 0x00070000 {
			r0 &= 0xffff
		}
		hr = syscall.Errno(r0)
	}
	return
}
@ -237,7 +260,10 @@ func _hcsResumeComputeSystem(computeSystem hcsSystem, options *uint16, result **
	}
	r0, _, _ := syscall.Syscall(procHcsResumeComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result)))
	if int32(r0) < 0 {
		hr = interop.Win32FromHresult(r0)
		if r0&0x1fff0000 == 0x00070000 {
			r0 &= 0xffff
		}
		hr = syscall.Errno(r0)
	}
	return
}
@ -257,7 +283,10 @@ func _hcsGetComputeSystemProperties(computeSystem hcsSystem, propertyQuery *uint
	}
	r0, _, _ := syscall.Syscall6(procHcsGetComputeSystemProperties.Addr(), 4, uintptr(computeSystem), uintptr(unsafe.Pointer(propertyQuery)), uintptr(unsafe.Pointer(properties)), uintptr(unsafe.Pointer(result)), 0, 0)
	if int32(r0) < 0 {
		hr = interop.Win32FromHresult(r0)
		if r0&0x1fff0000 == 0x00070000 {
			r0 &= 0xffff
		}
		hr = syscall.Errno(r0)
	}
	return
}
@ -277,7 +306,10 @@ func _hcsModifyComputeSystem(computeSystem hcsSystem, configuration *uint16, res
	}
	r0, _, _ := syscall.Syscall(procHcsModifyComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(configuration)), uintptr(unsafe.Pointer(result)))
	if int32(r0) < 0 {
		hr = interop.Win32FromHresult(r0)
		if r0&0x1fff0000 == 0x00070000 {
			r0 &= 0xffff
		}
		hr = syscall.Errno(r0)
	}
	return
}
@ -288,7 +320,10 @@ func hcsRegisterComputeSystemCallback(computeSystem hcsSystem, callback uintptr,
	}
	r0, _, _ := syscall.Syscall6(procHcsRegisterComputeSystemCallback.Addr(), 4, uintptr(computeSystem), uintptr(callback), uintptr(context), uintptr(unsafe.Pointer(callbackHandle)), 0, 0)
	if int32(r0) < 0 {
		hr = interop.Win32FromHresult(r0)
		if r0&0x1fff0000 == 0x00070000 {
			r0 &= 0xffff
		}
		hr = syscall.Errno(r0)
	}
	return
}
@ -299,7 +334,10 @@ func hcsUnregisterComputeSystemCallback(callbackHandle hcsCallback) (hr error) {
	}
	r0, _, _ := syscall.Syscall(procHcsUnregisterComputeSystemCallback.Addr(), 1, uintptr(callbackHandle), 0, 0)
	if int32(r0) < 0 {
		hr = interop.Win32FromHresult(r0)
		if r0&0x1fff0000 == 0x00070000 {
			r0 &= 0xffff
		}
		hr = syscall.Errno(r0)
	}
	return
}
@ -319,7 +357,10 @@ func _hcsCreateProcess(computeSystem hcsSystem, processParameters *uint16, proce
	}
	r0, _, _ := syscall.Syscall6(procHcsCreateProcess.Addr(), 5, uintptr(computeSystem), uintptr(unsafe.Pointer(processParameters)), uintptr(unsafe.Pointer(processInformation)), uintptr(unsafe.Pointer(process)), uintptr(unsafe.Pointer(result)), 0)
	if int32(r0) < 0 {
		hr = interop.Win32FromHresult(r0)
		if r0&0x1fff0000 == 0x00070000 {
			r0 &= 0xffff
		}
		hr = syscall.Errno(r0)
	}
	return
}
@ -330,7 +371,10 @@ func hcsOpenProcess(computeSystem hcsSystem, pid uint32, process *hcsProcess, re
	}
	r0, _, _ := syscall.Syscall6(procHcsOpenProcess.Addr(), 4, uintptr(computeSystem), uintptr(pid), uintptr(unsafe.Pointer(process)), uintptr(unsafe.Pointer(result)), 0, 0)
	if int32(r0) < 0 {
		hr = interop.Win32FromHresult(r0)
		if r0&0x1fff0000 == 0x00070000 {
			r0 &= 0xffff
		}
		hr = syscall.Errno(r0)
	}
	return
}
@ -341,7 +385,10 @@ func hcsCloseProcess(process hcsProcess) (hr error) {
	}
	r0, _, _ := syscall.Syscall(procHcsCloseProcess.Addr(), 1, uintptr(process), 0, 0)
	if int32(r0) < 0 {
		hr = interop.Win32FromHresult(r0)
		if r0&0x1fff0000 == 0x00070000 {
			r0 &= 0xffff
		}
		hr = syscall.Errno(r0)
	}
	return
}
@ -352,7 +399,10 @@ func hcsTerminateProcess(process hcsProcess, result **uint16) (hr error) {
	}
	r0, _, _ := syscall.Syscall(procHcsTerminateProcess.Addr(), 2, uintptr(process), uintptr(unsafe.Pointer(result)), 0)
	if int32(r0) < 0 {
		hr = interop.Win32FromHresult(r0)
		if r0&0x1fff0000 == 0x00070000 {
			r0 &= 0xffff
		}
		hr = syscall.Errno(r0)
	}
	return
}
@ -372,7 +422,10 @@ func _hcsSignalProcess(process hcsProcess, options *uint16, result **uint16) (hr
	}
	r0, _, _ := syscall.Syscall(procHcsTerminateProcess.Addr(), 3, uintptr(process), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result)))
	if int32(r0) < 0 {
		hr = interop.Win32FromHresult(r0)
		if r0&0x1fff0000 == 0x00070000 {
			r0 &= 0xffff
		}
		hr = syscall.Errno(r0)
	}
	return
}
@ -383,7 +436,10 @@ func hcsGetProcessInfo(process hcsProcess, processInformation *hcsProcessInforma
	}
	r0, _, _ := syscall.Syscall(procHcsGetProcessInfo.Addr(), 3, uintptr(process), uintptr(unsafe.Pointer(processInformation)), uintptr(unsafe.Pointer(result)))
	if int32(r0) < 0 {
		hr = interop.Win32FromHresult(r0)
		if r0&0x1fff0000 == 0x00070000 {
			r0 &= 0xffff
		}
		hr = syscall.Errno(r0)
	}
	return
}
@ -394,7 +450,10 @@ func hcsGetProcessProperties(process hcsProcess, processProperties **uint16, res
	}
	r0, _, _ := syscall.Syscall(procHcsGetProcessProperties.Addr(), 3, uintptr(process), uintptr(unsafe.Pointer(processProperties)), uintptr(unsafe.Pointer(result)))
	if int32(r0) < 0 {
		hr = interop.Win32FromHresult(r0)
		if r0&0x1fff0000 == 0x00070000 {
			r0 &= 0xffff
		}
		hr = syscall.Errno(r0)
	}
	return
}
@ -414,7 +473,10 @@ func _hcsModifyProcess(process hcsProcess, settings *uint16, result **uint16) (h
	}
	r0, _, _ := syscall.Syscall(procHcsModifyProcess.Addr(), 3, uintptr(process), uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(result)))
	if int32(r0) < 0 {
		hr = interop.Win32FromHresult(r0)
		if r0&0x1fff0000 == 0x00070000 {
			r0 &= 0xffff
		}
		hr = syscall.Errno(r0)
	}
	return
}
@ -434,7 +496,10 @@ func _hcsGetServiceProperties(propertyQuery *uint16, properties **uint16, result
	}
	r0, _, _ := syscall.Syscall(procHcsGetServiceProperties.Addr(), 3, uintptr(unsafe.Pointer(propertyQuery)), uintptr(unsafe.Pointer(properties)), uintptr(unsafe.Pointer(result)))
	if int32(r0) < 0 {
		hr = interop.Win32FromHresult(r0)
		if r0&0x1fff0000 == 0x00070000 {
			r0 &= 0xffff
		}
		hr = syscall.Errno(r0)
	}
	return
}
@ -445,7 +510,10 @@ func hcsRegisterProcessCallback(process hcsProcess, callback uintptr, context ui
	}
	r0, _, _ := syscall.Syscall6(procHcsRegisterProcessCallback.Addr(), 4, uintptr(process), uintptr(callback), uintptr(context), uintptr(unsafe.Pointer(callbackHandle)), 0, 0)
	if int32(r0) < 0 {
		hr = interop.Win32FromHresult(r0)
		if r0&0x1fff0000 == 0x00070000 {
			r0 &= 0xffff
		}
		hr = syscall.Errno(r0)
	}
	return
}
@ -456,7 +524,10 @@ func hcsUnregisterProcessCallback(callbackHandle hcsCallback) (hr error) {
	}
	r0, _, _ := syscall.Syscall(procHcsUnregisterProcessCallback.Addr(), 1, uintptr(callbackHandle), 0, 0)
	if int32(r0) < 0 {
		hr = interop.Win32FromHresult(r0)
		if r0&0x1fff0000 == 0x00070000 {
			r0 &= 0xffff
		}
		hr = syscall.Errno(r0)
	}
	return
}
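The edit repeated throughout this generated file inlines what interop.Win32FromHresult previously did. The idea: an HRESULT whose facility field is FACILITY_WIN32 (7) embeds a Win32 error code in its low 16 bits, and that code maps directly onto syscall.Errno. A portable sketch of the mapping; the constants match the masks in the diff, while the helper name is invented and uint32 is used so the sketch also runs off Windows:

package main

import "fmt"

// errnoFromHresult extracts the Win32 error code when the HRESULT's
// facility field is FACILITY_WIN32 (0x0007); otherwise it returns the
// value unchanged, as the generated wrappers above do.
func errnoFromHresult(r0 uint32) uint32 {
	if r0&0x1fff0000 == 0x00070000 {
		r0 &= 0xffff
	}
	return r0
}

func main() {
	// 0x80070005 is E_ACCESSDENIED: FACILITY_WIN32 wrapping Win32
	// error 5 (ERROR_ACCESS_DENIED).
	fmt.Printf("0x%08X -> errno %d\n", uint32(0x80070005), errnoFromHresult(0x80070005))
}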
4 vendor/github.com/Microsoft/hcsshim/internal/hcserror/hcserror.go generated vendored
@ -36,10 +36,6 @@ func New(err error, title, rest string) error {
	return &HcsError{title, rest, err}
}

func Errorf(err error, title, format string, a ...interface{}) error {
	return New(err, title, fmt.Sprintf(format, a...))
}

func Win32FromError(err error) uint32 {
	if herr, ok := err.(*HcsError); ok {
		return Win32FromError(herr.Err)
2 vendor/github.com/Microsoft/hcsshim/internal/hns/hnsendpoint.go generated vendored
@ -23,7 +23,9 @@ type HNSEndpoint struct {
	DisableICC       bool       `json:",omitempty"`
	PrefixLength     uint8      `json:",omitempty"`
	IsRemoteEndpoint bool       `json:",omitempty"`
	EnableLowMetric  bool       `json:",omitempty"`
	Namespace        *Namespace `json:",omitempty"`
	EncapOverhead    uint16     `json:",omitempty"`
}

//SystemType represents the type of the system on which actions are done
3 vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicylist.go generated vendored
@ -140,7 +140,7 @@ func (policylist *PolicyList) RemoveEndpoint(endpoint *HNSEndpoint) (*PolicyList
}

// AddLoadBalancer policy list for the specified endpoints
func AddLoadBalancer(endpoints []HNSEndpoint, isILB bool, isDSR bool, sourceVIP, vip string, protocol uint16, internalPort uint16, externalPort uint16) (*PolicyList, error) {
func AddLoadBalancer(endpoints []HNSEndpoint, isILB bool, sourceVIP, vip string, protocol uint16, internalPort uint16, externalPort uint16) (*PolicyList, error) {
	operation := "AddLoadBalancer"
	title := "hcsshim::PolicyList::" + operation
	logrus.Debugf(title+" endpointId=%v, isILB=%v, sourceVIP=%s, vip=%s, protocol=%v, internalPort=%v, externalPort=%v", endpoints, isILB, sourceVIP, vip, protocol, internalPort, externalPort)
@ -150,7 +150,6 @@ func AddLoadBalancer(endpoints []HNSEndpoint, isILB bool, isDSR bool, sourceVIP,
	elbPolicy := &ELBPolicy{
		SourceVIP: sourceVIP,
		ILB:       isILB,
		DSR:       isDSR,
	}

	if len(vip) > 0 {

8 vendor/github.com/Microsoft/hcsshim/internal/hns/zsyscall_windows.go generated vendored
@ -1,4 +1,4 @@
// MACHINE GENERATED BY 'go generate' COMMAND; DO NOT EDIT
// Code generated mksyscall_windows.exe DO NOT EDIT

package hns

@ -6,7 +6,6 @@ import (
"syscall"
"unsafe"

"github.com/Microsoft/hcsshim/internal/interop"
"golang.org/x/sys/windows"
)

@ -68,7 +67,10 @@ func __hnsCall(method *uint16, path *uint16, object *uint16, response **uint16)
}
r0, _, _ := syscall.Syscall6(procHNSCall.Addr(), 4, uintptr(unsafe.Pointer(method)), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(object)), uintptr(unsafe.Pointer(response)), 0, 0)
if int32(r0) < 0 {
hr = interop.Win32FromHresult(r0)
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
}
hr = syscall.Errno(r0)
}
return
}

4 vendor/github.com/Microsoft/hcsshim/internal/interop/interop.go generated vendored
@ -5,9 +5,9 @@ import (
"unsafe"
)

//go:generate go run $GOROOT/src/syscall/mksyscall_windows.go -output zsyscall_windows.go interop.go
//go:generate go run ../../mksyscall_windows.go -output zsyscall_windows.go interop.go

//sys coTaskMemFree(buffer unsafe.Pointer) = ole32.CoTaskMemFree
//sys coTaskMemFree(buffer unsafe.Pointer) = api_ms_win_core_com_l1_1_0.CoTaskMemFree

func ConvertAndFreeCoTaskMemString(buffer *uint16) string {
str := syscall.UTF16ToString((*[1 << 29]uint16)(unsafe.Pointer(buffer))[:])

6 vendor/github.com/Microsoft/hcsshim/internal/interop/zsyscall_windows.go generated vendored
@ -1,4 +1,4 @@
// Code generated by 'go generate'; DO NOT EDIT.
// Code generated mksyscall_windows.exe DO NOT EDIT

package interop

@ -37,9 +37,9 @@ func errnoErr(e syscall.Errno) error {
}

var (
modole32 = windows.NewLazySystemDLL("ole32.dll")
modapi_ms_win_core_com_l1_1_0 = windows.NewLazySystemDLL("api-ms-win-core-com-l1-1-0.dll")

procCoTaskMemFree = modole32.NewProc("CoTaskMemFree")
procCoTaskMemFree = modapi_ms_win_core_com_l1_1_0.NewProc("CoTaskMemFree")
)

func coTaskMemFree(buffer unsafe.Pointer) {

37 vendor/github.com/Microsoft/hcsshim/internal/logfields/fields.go generated vendored Normal file
@ -0,0 +1,37 @@
package logfields

const (
// Identifiers

ContainerID = "cid"
UVMID = "uvm-id"
ProcessID = "pid"

// Common Misc

// Timeout represents an operation timeout.
Timeout = "timeout"
JSON = "json"

// Keys/values

Field = "field"
OCIAnnotation = "oci-annotation"
Value = "value"

// Golang type's

ExpectedType = "expected-type"
Bool = "bool"
Uint32 = "uint32"
Uint64 = "uint64"

// HCS

HCSOperation = "hcs-op"
HCSOperationResult = "hcs-op-result"

// runhcs

VMShimOperation = "vmshim-op"
)
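
Not part of the diff: a hypothetical sketch of how the logfields constants above pair with logrus (the logger already used throughout this PR). The constant values are copied from the new file; the message and field values are invented for illustration:

```go
package main

import "github.com/sirupsen/logrus"

// Inlined from logfields above so the snippet stands alone.
const (
	containerID = "cid"
	timeout     = "timeout"
)

func main() {
	// Shared field names keep log lines greppable across components.
	logrus.WithFields(logrus.Fields{
		containerID: "example-container", // illustrative value
		timeout:     "30s",
	}).Debug("hcs operation started")
}
```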

4 vendor/github.com/Microsoft/hcsshim/internal/schema2/chipset.go generated vendored
@ -10,7 +10,6 @@
package hcsschema

type Chipset struct {

Uefi *Uefi `json:"Uefi,omitempty"`

IsNumLockDisabled bool `json:"IsNumLockDisabled,omitempty"`
@ -22,4 +21,7 @@ type Chipset struct {
ChassisAssetTag string `json:"ChassisAssetTag,omitempty"`

UseUtc bool `json:"UseUtc,omitempty"`

// LinuxKernelDirect - Added in v2.2 Builds >=181117
LinuxKernelDirect *LinuxKernelDirect `json:"LinuxKernelDirect,omitempty"`
}

18 vendor/github.com/Microsoft/hcsshim/internal/schema2/linux_kernel_direct.go generated vendored Normal file
@ -0,0 +1,18 @@
/*
* HCS API
*
* No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
*
* API version: 2.2
* Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
*/

package hcsschema

type LinuxKernelDirect struct {
KernelFilePath string `json:"KernelFilePath,omitempty"`

InitRdPath string `json:"InitRdPath,omitempty"`

KernelCmdLine string `json:"KernelCmdLine,omitempty"`
}

7 vendor/github.com/Microsoft/hcsshim/internal/schema2/plan9_share.go generated vendored
@ -20,6 +20,13 @@ type Plan9Share struct {

Port int32 `json:"Port,omitempty"`

// Flags are marked private. Until they are exported correctly
//
// ReadOnly 0x00000001
// LinuxMetadata 0x00000004
// CaseSensitive 0x00000008
Flags int32 `json:"Flags,omitempty"`

ReadOnly bool `json:"ReadOnly,omitempty"`

UseShareRootIdentity bool `json:"UseShareRootIdentity,omitempty"`

3 vendor/github.com/Microsoft/hcsshim/internal/schema2/virtual_machine.go generated vendored
@ -11,6 +11,9 @@ package hcsschema

type VirtualMachine struct {

// StopOnReset is private in the schema. If regenerated need to put back.
StopOnReset bool `json:"StopOnReset,omitempty"`

Chipset *Chipset `json:"Chipset,omitempty"`

ComputeTopology *Topology `json:"ComputeTopology,omitempty"`

27 vendor/github.com/Microsoft/hcsshim/internal/wclayer/activatelayer.go generated vendored
@ -9,17 +9,24 @@ import (
// For a read/write layer, the mounted filesystem will appear as a volume on the
// host, while a read-only layer is generally expected to be a no-op.
// An activated layer must later be deactivated via DeactivateLayer.
func ActivateLayer(path string) error {
title := "hcsshim::ActivateLayer "
logrus.Debugf(title+"path %s", path)

err := activateLayer(&stdDriverInfo, path)
if err != nil {
err = hcserror.Errorf(err, title, "path=%s", path)
logrus.Error(err)
return err
func ActivateLayer(path string) (err error) {
title := "hcsshim::ActivateLayer"
fields := logrus.Fields{
"path": path,
}
logrus.WithFields(fields).Debug(title)
defer func() {
if err != nil {
fields[logrus.ErrorKey] = err
logrus.WithFields(fields).Error(err)
} else {
logrus.WithFields(fields).Debug(title + " - succeeded")
}
}()

logrus.Debugf(title+" - succeeded path=%s", path)
err = activateLayer(&stdDriverInfo, path)
if err != nil {
return hcserror.New(err, title+" - failed", "")
}
return nil
}
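
The activatelayer.go hunk above is the template for every wclayer refactor that follows: a named error return plus a deferred closure logs either the failure (with the error attached to the fields) or a success line, from a single place. A compact, self-contained restatement of the pattern, not part of the diff:

```go
package main

import (
	"errors"

	"github.com/sirupsen/logrus"
)

func doOperation(path string) (err error) {
	title := "example::DoOperation" // illustrative operation name
	fields := logrus.Fields{"path": path}
	logrus.WithFields(fields).Debug(title)
	defer func() {
		// err is the named return, so the closure sees the final value.
		if err != nil {
			fields[logrus.ErrorKey] = err
			logrus.WithFields(fields).Error(err)
		} else {
			logrus.WithFields(fields).Debug(title + " - succeeded")
		}
	}()

	if path == "" {
		return errors.New("empty path") // picked up by the deferred logger
	}
	return nil
}

func main() {
	_ = doOperation("")
}
```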

28 vendor/github.com/Microsoft/hcsshim/internal/wclayer/createlayer.go generated vendored
@ -7,17 +7,25 @@ import (

// CreateLayer creates a new, empty, read-only layer on the filesystem based on
// the parent layer provided.
func CreateLayer(path, parent string) error {
title := "hcsshim::CreateLayer "
logrus.Debugf(title+"ID %s parent %s", path, parent)

err := createLayer(&stdDriverInfo, path, parent)
if err != nil {
err = hcserror.Errorf(err, title, "path=%s parent=%s", path, parent)
logrus.Error(err)
return err
func CreateLayer(path, parent string) (err error) {
title := "hcsshim::CreateLayer"
fields := logrus.Fields{
"parent": parent,
"path": path,
}
logrus.WithFields(fields).Debug(title)
defer func() {
if err != nil {
fields[logrus.ErrorKey] = err
logrus.WithFields(fields).Error(err)
} else {
logrus.WithFields(fields).Debug(title + " - succeeded")
}
}()

logrus.Debugf(title+"- succeeded path=%s parent=%s", path, parent)
err = createLayer(&stdDriverInfo, path, parent)
if err != nil {
return hcserror.New(err, title+" - failed", "")
}
return nil
}

23 vendor/github.com/Microsoft/hcsshim/internal/wclayer/createscratchlayer.go generated vendored
@ -9,9 +9,20 @@ import (
// This requires both the id of the direct parent layer, as well as the full list
// of paths to all parent layers up to the base (and including the direct parent
// whose id was provided).
func CreateScratchLayer(path string, parentLayerPaths []string) error {
title := "hcsshim::CreateScratchLayer "
logrus.Debugf(title+"path %s", path)
func CreateScratchLayer(path string, parentLayerPaths []string) (err error) {
title := "hcsshim::CreateScratchLayer"
fields := logrus.Fields{
"path": path,
}
logrus.WithFields(fields).Debug(title)
defer func() {
if err != nil {
fields[logrus.ErrorKey] = err
logrus.WithFields(fields).Error(err)
} else {
logrus.WithFields(fields).Debug(title + " - succeeded")
}
}()

// Generate layer descriptors
layers, err := layerPathsToDescriptors(parentLayerPaths)
@ -21,11 +32,7 @@ func CreateScratchLayer(path string, parentLayerPaths []string) error {

err = createSandboxLayer(&stdDriverInfo, path, 0, layers)
if err != nil {
err = hcserror.Errorf(err, title, "path=%s", path)
logrus.Error(err)
return err
return hcserror.New(err, title+" - failed", "")
}

logrus.Debugf(title+"- succeeded path=%s", path)
return nil
}

27 vendor/github.com/Microsoft/hcsshim/internal/wclayer/deactivatelayer.go generated vendored
@ -6,17 +6,24 @@ import (
)

// DeactivateLayer will dismount a layer that was mounted via ActivateLayer.
func DeactivateLayer(path string) error {
title := "hcsshim::DeactivateLayer "
logrus.Debugf(title+"path %s", path)

err := deactivateLayer(&stdDriverInfo, path)
if err != nil {
err = hcserror.Errorf(err, title, "path=%s", path)
logrus.Error(err)
return err
func DeactivateLayer(path string) (err error) {
title := "hcsshim::DeactivateLayer"
fields := logrus.Fields{
"path": path,
}
logrus.WithFields(fields).Debug(title)
defer func() {
if err != nil {
fields[logrus.ErrorKey] = err
logrus.WithFields(fields).Error(err)
} else {
logrus.WithFields(fields).Debug(title + " - succeeded")
}
}()

logrus.Debugf(title+"succeeded path=%s", path)
err = deactivateLayer(&stdDriverInfo, path)
if err != nil {
return hcserror.New(err, title+"- failed", "")
}
return nil
}

27 vendor/github.com/Microsoft/hcsshim/internal/wclayer/destroylayer.go generated vendored
@ -7,17 +7,24 @@ import (

// DestroyLayer will remove the on-disk files representing the layer with the given
// path, including that layer's containing folder, if any.
func DestroyLayer(path string) error {
title := "hcsshim::DestroyLayer "
logrus.Debugf(title+"path %s", path)

err := destroyLayer(&stdDriverInfo, path)
if err != nil {
err = hcserror.Errorf(err, title, "path=%s", path)
logrus.Error(err)
return err
func DestroyLayer(path string) (err error) {
title := "hcsshim::DestroyLayer"
fields := logrus.Fields{
"path": path,
}
logrus.WithFields(fields).Debug(title)
defer func() {
if err != nil {
fields[logrus.ErrorKey] = err
logrus.WithFields(fields).Error(err)
} else {
logrus.WithFields(fields).Debug(title + " - succeeded")
}
}()

logrus.Debugf(title+"succeeded path=%s", path)
err = destroyLayer(&stdDriverInfo, path)
if err != nil {
return hcserror.New(err, title+" - failed", "")
}
return nil
}

28 vendor/github.com/Microsoft/hcsshim/internal/wclayer/expandscratchsize.go generated vendored
@ -6,17 +6,25 @@ import (
)

// ExpandScratchSize expands the size of a layer to at least size bytes.
func ExpandScratchSize(path string, size uint64) error {
title := "hcsshim::ExpandScratchSize "
logrus.Debugf(title+"path=%s size=%d", path, size)

err := expandSandboxSize(&stdDriverInfo, path, size)
if err != nil {
err = hcserror.Errorf(err, title, "path=%s size=%d", path, size)
logrus.Error(err)
return err
func ExpandScratchSize(path string, size uint64) (err error) {
title := "hcsshim::ExpandScratchSize"
fields := logrus.Fields{
"path": path,
"size": size,
}
logrus.WithFields(fields).Debug(title)
defer func() {
if err != nil {
fields[logrus.ErrorKey] = err
logrus.WithFields(fields).Error(err)
} else {
logrus.WithFields(fields).Debug(title + " - succeeded")
}
}()

logrus.Debugf(title+"- succeeded path=%s size=%d", path, size)
err = expandSandboxSize(&stdDriverInfo, path, size)
if err != nil {
return hcserror.New(err, title+" - failed", "")
}
return nil
}

113 vendor/github.com/Microsoft/hcsshim/internal/wclayer/exportlayer.go generated vendored
@ -1,14 +1,11 @@
package wclayer

import (
"io"
"io/ioutil"
"os"
"syscall"

"github.com/Microsoft/go-winio"
"github.com/Microsoft/hcsshim/internal/hcserror"
"github.com/Microsoft/hcsshim/internal/interop"
"github.com/sirupsen/logrus"
)

@ -17,9 +14,21 @@ import (
// format includes any metadata required for later importing the layer (using
// ImportLayer), and requires the full list of parent layer paths in order to
// perform the export.
func ExportLayer(path string, exportFolderPath string, parentLayerPaths []string) error {
title := "hcsshim::ExportLayer "
logrus.Debugf(title+"path %s folder %s", path, exportFolderPath)
func ExportLayer(path string, exportFolderPath string, parentLayerPaths []string) (err error) {
title := "hcsshim::ExportLayer"
fields := logrus.Fields{
"path": path,
"exportFolderPath": exportFolderPath,
}
logrus.WithFields(fields).Debug(title)
defer func() {
if err != nil {
fields[logrus.ErrorKey] = err
logrus.WithFields(fields).Error(err)
} else {
logrus.WithFields(fields).Debug(title + " - succeeded")
}
}()

// Generate layer descriptors
layers, err := layerPathsToDescriptors(parentLayerPaths)
@ -29,12 +38,8 @@ func ExportLayer(path string, exportFolderPath string, parentLayerPaths []string

err = exportLayer(&stdDriverInfo, path, exportFolderPath, layers)
if err != nil {
err = hcserror.Errorf(err, title, "path=%s folder=%s", path, exportFolderPath)
logrus.Error(err)
return err
return hcserror.New(err, title+" - failed", "")
}

logrus.Debugf(title+"succeeded path=%s folder=%s", path, exportFolderPath)
return nil
}

@ -44,96 +49,20 @@ type LayerReader interface {
Close() error
}

// FilterLayerReader provides an interface for extracting the contents of an on-disk layer.
type FilterLayerReader struct {
context uintptr
}

// Next reads the next available file from a layer, ensuring that parent directories are always read
// before child files and directories.
//
// Next returns the file's relative path, size, and basic file metadata. Read() should be used to
// extract a Win32 backup stream with the remainder of the metadata and the data.
func (r *FilterLayerReader) Next() (string, int64, *winio.FileBasicInfo, error) {
var fileNamep *uint16
fileInfo := &winio.FileBasicInfo{}
var deleted uint32
var fileSize int64
err := exportLayerNext(r.context, &fileNamep, fileInfo, &fileSize, &deleted)
if err != nil {
if err == syscall.ERROR_NO_MORE_FILES {
err = io.EOF
} else {
err = hcserror.New(err, "ExportLayerNext", "")
}
return "", 0, nil, err
}
fileName := interop.ConvertAndFreeCoTaskMemString(fileNamep)
if deleted != 0 {
fileInfo = nil
}
if fileName[0] == '\\' {
fileName = fileName[1:]
}
return fileName, fileSize, fileInfo, nil
}

// Read reads from the current file's Win32 backup stream.
func (r *FilterLayerReader) Read(b []byte) (int, error) {
var bytesRead uint32
err := exportLayerRead(r.context, b, &bytesRead)
if err != nil {
return 0, hcserror.New(err, "ExportLayerRead", "")
}
if bytesRead == 0 {
return 0, io.EOF
}
return int(bytesRead), nil
}

// Close frees resources associated with the layer reader. It will return an
// error if there was an error while reading the layer or of the layer was not
// completely read.
func (r *FilterLayerReader) Close() (err error) {
if r.context != 0 {
err = exportLayerEnd(r.context)
if err != nil {
err = hcserror.New(err, "ExportLayerEnd", "")
}
r.context = 0
}
return
}

// NewLayerReader returns a new layer reader for reading the contents of an on-disk layer.
// The caller must have taken the SeBackupPrivilege privilege
// to call this and any methods on the resulting LayerReader.
func NewLayerReader(path string, parentLayerPaths []string) (LayerReader, error) {
if procExportLayerBegin.Find() != nil {
// The new layer reader is not available on this Windows build. Fall back to the
// legacy export code path.
exportPath, err := ioutil.TempDir("", "hcs")
if err != nil {
return nil, err
}
err = ExportLayer(path, exportPath, parentLayerPaths)
if err != nil {
os.RemoveAll(exportPath)
return nil, err
}
return &legacyLayerReaderWrapper{newLegacyLayerReader(exportPath)}, nil
}

layers, err := layerPathsToDescriptors(parentLayerPaths)
exportPath, err := ioutil.TempDir("", "hcs")
if err != nil {
return nil, err
}
r := &FilterLayerReader{}
err = exportLayerBegin(&stdDriverInfo, path, layers, &r.context)
err = ExportLayer(path, exportPath, parentLayerPaths)
if err != nil {
return nil, hcserror.New(err, "ExportLayerBegin", "")
os.RemoveAll(exportPath)
return nil, err
}
return r, err
return &legacyLayerReaderWrapper{newLegacyLayerReader(exportPath)}, nil
}

type legacyLayerReaderWrapper struct {
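
Not part of the diff: a hedged sketch of how a LayerReader, per the Next/Read/Close contract documented above, is consumed — Next until io.EOF, then each entry's backup stream until its own io.EOF. The trimmed interface is a stand-in for the real one (which also returns winio.FileBasicInfo metadata):

```go
package main

import "io"

type layerReader interface {
	Next() (string, int64, error) // trimmed: metadata return omitted
	Read(b []byte) (int, error)
	Close() error
}

func drain(r layerReader) error {
	defer r.Close()
	buf := make([]byte, 64*1024)
	for {
		if _, _, err := r.Next(); err == io.EOF {
			return nil // no more files in the layer
		} else if err != nil {
			return err
		}
		// Read the current entry's Win32 backup stream to its end.
		for {
			if _, err := r.Read(buf); err == io.EOF {
				break
			} else if err != nil {
				return err
			}
		}
	}
}

func main() {}
```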

33 vendor/github.com/Microsoft/hcsshim/internal/wclayer/getlayermountpath.go generated vendored
@ -11,20 +11,29 @@ import (
// the path at which that layer can be accessed. This path may be a volume path
// if the layer is a mounted read-write layer, otherwise it is expected to be the
// folder path at which the layer is stored.
func GetLayerMountPath(path string) (string, error) {
title := "hcsshim::GetLayerMountPath "
logrus.Debugf(title+"path %s", path)
func GetLayerMountPath(path string) (_ string, err error) {
title := "hcsshim::GetLayerMountPath"
fields := logrus.Fields{
"path": path,
}
logrus.WithFields(fields).Debug(title)
defer func() {
if err != nil {
fields[logrus.ErrorKey] = err
logrus.WithFields(fields).Error(err)
} else {
logrus.WithFields(fields).Debug(title + " - succeeded")
}
}()

var mountPathLength uintptr
mountPathLength = 0

// Call the procedure itself.
logrus.Debugf("Calling proc (1)")
err := getLayerMountPath(&stdDriverInfo, path, &mountPathLength, nil)
logrus.WithFields(fields).Debug("Calling proc (1)")
err = getLayerMountPath(&stdDriverInfo, path, &mountPathLength, nil)
if err != nil {
err = hcserror.Errorf(err, title, "(first call) path=%s", path)
logrus.Error(err)
return "", err
return "", hcserror.New(err, title+" - failed", "(first call)")
}

// Allocate a mount path of the returned length.
@ -35,15 +44,13 @@ func GetLayerMountPath(path string) (string, error) {
mountPathp[0] = 0

// Call the procedure again
logrus.Debugf("Calling proc (2)")
logrus.WithFields(fields).Debug("Calling proc (2)")
err = getLayerMountPath(&stdDriverInfo, path, &mountPathLength, &mountPathp[0])
if err != nil {
err = hcserror.Errorf(err, title, "(second call) path=%s", path)
logrus.Error(err)
return "", err
return "", hcserror.New(err, title+" - failed", "(second call)")
}

mountPath := syscall.UTF16ToString(mountPathp[0:])
logrus.Debugf(title+"succeeded path=%s mountPath=%s", path, mountPath)
fields["mountPath"] = mountPath
return mountPath, nil
}
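
GetLayerMountPath above follows the classic Win32 two-call idiom: a first call with a nil buffer reports the required length, then a second call fills a freshly allocated buffer. A generic sketch of the idiom, not part of the diff (the callback type is hypothetical, and the NUL-trimming mimics syscall.UTF16ToString):

```go
package main

import "unicode/utf16"

func utf16ViaTwoCalls(call func(length *uintptr, buf *uint16) error) (string, error) {
	var n uintptr
	if err := call(&n, nil); err != nil { // query pass: only n is written
		return "", err
	}
	if n == 0 {
		return "", nil
	}
	buf := make([]uint16, n)
	if err := call(&n, &buf[0]); err != nil { // fill pass
		return "", err
	}
	// Trim at the first NUL, as syscall.UTF16ToString does.
	for i, v := range buf {
		if v == 0 {
			buf = buf[:i]
			break
		}
	}
	return string(utf16.Decode(buf)), nil
}

func main() {}
```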

19 vendor/github.com/Microsoft/hcsshim/internal/wclayer/getsharedbaseimages.go generated vendored
@ -10,17 +10,20 @@ import (
// image store and return descriptive info about those images for the purpose
// of registering them with the graphdriver, graph, and tagstore.
func GetSharedBaseImages() (imageData string, err error) {
title := "hcsshim::GetSharedBaseImages "
title := "hcsshim::GetSharedBaseImages"
logrus.Debug(title)
defer func() {
if err != nil {
logrus.WithError(err).Error(err)
} else {
logrus.WithField("imageData", imageData).Debug(title + " - succeeded")
}
}()

logrus.Debugf("Calling proc")
var buffer *uint16
err = getBaseImages(&buffer)
if err != nil {
err = hcserror.New(err, title, "")
logrus.Error(err)
return
return "", hcserror.New(err, title+" - failed", "")
}
imageData = interop.ConvertAndFreeCoTaskMemString(buffer)
logrus.Debugf(title+" - succeeded output=%s", imageData)
return
return interop.ConvertAndFreeCoTaskMemString(buffer), nil
}

30 vendor/github.com/Microsoft/hcsshim/internal/wclayer/grantvmaccess.go generated vendored
@ -1,24 +1,30 @@
package wclayer

import (
"fmt"

"github.com/Microsoft/hcsshim/internal/hcserror"
"github.com/sirupsen/logrus"
)

// GrantVmAccess adds access to a file for a given VM
func GrantVmAccess(vmid string, filepath string) error {
title := fmt.Sprintf("hcsshim::GrantVmAccess id:%s path:%s ", vmid, filepath)
logrus.Debugf(title)

err := grantVmAccess(vmid, filepath)
if err != nil {
err = hcserror.Errorf(err, title, "path=%s", filepath)
logrus.Error(err)
return err
func GrantVmAccess(vmid string, filepath string) (err error) {
title := "hcsshim::GrantVmAccess"
fields := logrus.Fields{
"vm-id": vmid,
"path": filepath,
}
logrus.WithFields(fields).Debug(title)
defer func() {
if err != nil {
fields[logrus.ErrorKey] = err
logrus.WithFields(fields).Error(err)
} else {
logrus.WithFields(fields).Debug(title + " - succeeded")
}
}()

logrus.Debugf(title + " - succeeded")
err = grantVmAccess(vmid, filepath)
if err != nil {
return hcserror.New(err, title+" - failed", "")
}
return nil
}

119 vendor/github.com/Microsoft/hcsshim/internal/wclayer/importlayer.go generated vendored
@ -1,7 +1,6 @@
package wclayer

import (
"errors"
"io/ioutil"
"os"
"path/filepath"
@ -16,9 +15,21 @@ import (
// that into a layer with the id layerId. Note that in order to correctly populate
// the layer and interperet the transport format, all parent layers must already
// be present on the system at the paths provided in parentLayerPaths.
func ImportLayer(path string, importFolderPath string, parentLayerPaths []string) error {
title := "hcsshim::ImportLayer "
logrus.Debugf(title+"path %s folder %s", path, importFolderPath)
func ImportLayer(path string, importFolderPath string, parentLayerPaths []string) (err error) {
title := "hcsshim::ImportLayer"
fields := logrus.Fields{
"path": path,
"importFolderPath": importFolderPath,
}
logrus.WithFields(fields).Debug(title)
defer func() {
if err != nil {
fields[logrus.ErrorKey] = err
logrus.WithFields(fields).Error(err)
} else {
logrus.WithFields(fields).Debug(title + " - succeeded")
}
}()

// Generate layer descriptors
layers, err := layerPathsToDescriptors(parentLayerPaths)
@ -28,12 +39,8 @@ func ImportLayer(path string, importFolderPath string, parentLayerPaths []string

err = importLayer(&stdDriverInfo, path, importFolderPath, layers)
if err != nil {
err = hcserror.Errorf(err, title, "path=%s folder=%s", path, importFolderPath)
logrus.Error(err)
return err
return hcserror.New(err, title+" - failed", "")
}

logrus.Debugf(title+"succeeded path=%s folder=%s", path, importFolderPath)
return nil
}

@ -52,69 +59,6 @@ type LayerWriter interface {
Close() error
}

// FilterLayerWriter provides an interface to write the contents of a layer to the file system.
type FilterLayerWriter struct {
context uintptr
}

// Add adds a file or directory to the layer. The file's parent directory must have already been added.
//
// name contains the file's relative path. fileInfo contains file times and file attributes; the rest
// of the file metadata and the file data must be written as a Win32 backup stream to the Write() method.
// winio.BackupStreamWriter can be used to facilitate this.
func (w *FilterLayerWriter) Add(name string, fileInfo *winio.FileBasicInfo) error {
if name[0] != '\\' {
name = `\` + name
}
err := importLayerNext(w.context, name, fileInfo)
if err != nil {
return hcserror.New(err, "ImportLayerNext", "")
}
return nil
}

// AddLink adds a hard link to the layer. The target of the link must have already been added.
func (w *FilterLayerWriter) AddLink(name string, target string) error {
return errors.New("hard links not yet supported")
}

// Remove removes a file from the layer. The file must have been present in the parent layer.
//
// name contains the file's relative path.
func (w *FilterLayerWriter) Remove(name string) error {
if name[0] != '\\' {
name = `\` + name
}
err := importLayerNext(w.context, name, nil)
if err != nil {
return hcserror.New(err, "ImportLayerNext", "")
}
return nil
}

// Write writes more backup stream data to the current file.
func (w *FilterLayerWriter) Write(b []byte) (int, error) {
err := importLayerWrite(w.context, b)
if err != nil {
err = hcserror.New(err, "ImportLayerWrite", "")
return 0, err
}
return len(b), err
}

// Close completes the layer write operation. The error must be checked to ensure that the
// operation was successful.
func (w *FilterLayerWriter) Close() (err error) {
if w.context != 0 {
err = importLayerEnd(w.context)
if err != nil {
err = hcserror.New(err, "ImportLayerEnd", "")
}
w.context = 0
}
return
}

type legacyLayerWriterWrapper struct {
*legacyLayerWriter
path string
@ -175,32 +119,17 @@ func NewLayerWriter(path string, parentLayerPaths []string) (LayerWriter, error)
}, nil
}

if procImportLayerBegin.Find() != nil {
// The new layer reader is not available on this Windows build. Fall back to the
// legacy export code path.
importPath, err := ioutil.TempDir("", "hcs")
if err != nil {
return nil, err
}
w, err := newLegacyLayerWriter(importPath, parentLayerPaths, path)
if err != nil {
return nil, err
}
return &legacyLayerWriterWrapper{
legacyLayerWriter: w,
path: importPath,
parentLayerPaths: parentLayerPaths,
}, nil
}
layers, err := layerPathsToDescriptors(parentLayerPaths)
importPath, err := ioutil.TempDir("", "hcs")
if err != nil {
return nil, err
}

w := &FilterLayerWriter{}
err = importLayerBegin(&stdDriverInfo, path, layers, &w.context)
w, err := newLegacyLayerWriter(importPath, parentLayerPaths, path)
if err != nil {
return nil, hcserror.New(err, "ImportLayerStart", "")
return nil, err
}
return w, nil
return &legacyLayerWriterWrapper{
legacyLayerWriter: w,
path: importPath,
parentLayerPaths: parentLayerPaths,
}, nil
}
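
Not part of the diff: the mirror-image sketch for the writer side. The methods documented above imply a simple driving order — Add an entry, stream its Win32 backup data through Write, and always check Close, since it can report failures from the whole import:

```go
package main

type layerWriter interface { // trimmed stand-in for wclayer.LayerWriter
	Add(name string) error // real signature also takes *winio.FileBasicInfo
	Write(b []byte) (int, error)
	Close() error
}

func writeOneFile(w layerWriter, name string, stream []byte) (err error) {
	defer func() {
		// Close's error must not be dropped; it finalizes the import.
		if cerr := w.Close(); err == nil {
			err = cerr
		}
	}()
	if err = w.Add(name); err != nil {
		return err
	}
	_, err = w.Write(stream) // backup stream bytes for the entry just added
	return err
}

func main() {}
```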

26 vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerexists.go generated vendored
@ -7,19 +7,27 @@ import (

// LayerExists will return true if a layer with the given id exists and is known
// to the system.
func LayerExists(path string) (bool, error) {
title := "hcsshim::LayerExists "
logrus.Debugf(title+"path %s", path)
func LayerExists(path string) (_ bool, err error) {
title := "hcsshim::LayerExists"
fields := logrus.Fields{
"path": path,
}
logrus.WithFields(fields).Debug(title)
defer func() {
if err != nil {
fields[logrus.ErrorKey] = err
logrus.WithFields(fields).Error(err)
} else {
logrus.WithFields(fields).Debug(title + " - succeeded")
}
}()

// Call the procedure itself.
var exists uint32
err := layerExists(&stdDriverInfo, path, &exists)
err = layerExists(&stdDriverInfo, path, &exists)
if err != nil {
err = hcserror.Errorf(err, title, "path=%s", path)
logrus.Error(err)
return false, err
return false, hcserror.New(err, title+" - failed", "")
}

logrus.Debugf(title+"succeeded path=%s exists=%d", path, exists)
fields["layer-exists"] = exists != 0
return exists != 0, nil
}

4 vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerutils.go generated vendored
@ -75,13 +75,13 @@ func layerPathsToDescriptors(parentLayerPaths []string) ([]WC_LAYER_DESCRIPTOR,
for i := 0; i < len(parentLayerPaths); i++ {
g, err := LayerID(parentLayerPaths[i])
if err != nil {
logrus.Debugf("Failed to convert name to guid %s", err)
logrus.WithError(err).Debug("Failed to convert name to guid")
return nil, err
}

p, err := syscall.UTF16PtrFromString(parentLayerPaths[i])
if err != nil {
logrus.Debugf("Failed conversion of parentLayerPath to pointer %s", err)
logrus.WithError(err).Debug("Failed conversion of parentLayerPath to pointer")
return nil, err
}

20 vendor/github.com/Microsoft/hcsshim/internal/wclayer/nametoguid.go generated vendored
@ -10,15 +10,25 @@ import (
// Host Compute Service, ensuring GUIDs generated with the same string are common
// across all clients.
func NameToGuid(name string) (id guid.GUID, err error) {
title := "hcsshim::NameToGuid "
title := "hcsshim::NameToGuid"
fields := logrus.Fields{
"name": name,
}
logrus.WithFields(fields).Debug(title)
defer func() {
if err != nil {
fields[logrus.ErrorKey] = err
logrus.WithFields(fields).Error(err)
} else {
logrus.WithFields(fields).Debug(title + " - succeeded")
}
}()

err = nameToGuid(name, &id)
if err != nil {
err = hcserror.Errorf(err, title, "name=%s", name)
logrus.Error(err)
err = hcserror.New(err, title+" - failed", "")
return
}

logrus.Debugf(title+"name:%s guid:%s", name, id.String())
fields["guid"] = id.String()
return
}

23 vendor/github.com/Microsoft/hcsshim/internal/wclayer/preparelayer.go generated vendored
@ -14,9 +14,20 @@ var prepareLayerLock sync.Mutex
// parent layers, and is necessary in order to view or interact with the layer
// as an actual filesystem (reading and writing files, creating directories, etc).
// Disabling the filter must be done via UnprepareLayer.
func PrepareLayer(path string, parentLayerPaths []string) error {
title := "hcsshim::PrepareLayer "
logrus.Debugf(title+"path %s", path)
func PrepareLayer(path string, parentLayerPaths []string) (err error) {
title := "hcsshim::PrepareLayer"
fields := logrus.Fields{
"path": path,
}
logrus.WithFields(fields).Debug(title)
defer func() {
if err != nil {
fields[logrus.ErrorKey] = err
logrus.WithFields(fields).Error(err)
} else {
logrus.WithFields(fields).Debug(title + " - succeeded")
}
}()

// Generate layer descriptors
layers, err := layerPathsToDescriptors(parentLayerPaths)
@ -30,11 +41,7 @@ func PrepareLayer(path string, parentLayerPaths []string) error {
defer prepareLayerLock.Unlock()
err = prepareLayer(&stdDriverInfo, path, layers)
if err != nil {
err = hcserror.Errorf(err, title, "path=%s", path)
logrus.Error(err)
return err
return hcserror.New(err, title+" - failed", "")
}

logrus.Debugf(title+"succeeded path=%s", path)
return nil
}

27 vendor/github.com/Microsoft/hcsshim/internal/wclayer/unpreparelayer.go generated vendored
@ -7,17 +7,24 @@ import (

// UnprepareLayer disables the filesystem filter for the read-write layer with
// the given id.
func UnprepareLayer(path string) error {
title := "hcsshim::UnprepareLayer "
logrus.Debugf(title+"path %s", path)

err := unprepareLayer(&stdDriverInfo, path)
if err != nil {
err = hcserror.Errorf(err, title, "path=%s", path)
logrus.Error(err)
return err
func UnprepareLayer(path string) (err error) {
title := "hcsshim::UnprepareLayer"
fields := logrus.Fields{
"path": path,
}
logrus.WithFields(fields).Debug(title)
defer func() {
if err != nil {
fields[logrus.ErrorKey] = err
logrus.WithFields(fields).Error(err)
} else {
logrus.WithFields(fields).Debug(title + " - succeeded")
}
}()

logrus.Debugf(title+"succeeded path=%s", path)
err = unprepareLayer(&stdDriverInfo, path)
if err != nil {
return hcserror.New(err, title+" - failed", "")
}
return nil
}

12 vendor/github.com/Microsoft/hcsshim/internal/wclayer/wclayer.go generated vendored
@ -2,7 +2,7 @@ package wclayer

import "github.com/Microsoft/hcsshim/internal/guid"

//go:generate go run ../../mksyscall_windows.go -output zsyscall_windows.go -winio wclayer.go
//go:generate go run ../../mksyscall_windows.go -output zsyscall_windows.go wclayer.go

//sys activateLayer(info *driverInfo, id string) (hr error) = vmcompute.ActivateLayer?
//sys copyLayer(info *driverInfo, srcId string, dstId string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) = vmcompute.CopyLayer?
@ -22,16 +22,6 @@ import "github.com/Microsoft/hcsshim/internal/guid"
//sys processBaseImage(path string) (hr error) = vmcompute.ProcessBaseImage?
//sys processUtilityImage(path string) (hr error) = vmcompute.ProcessUtilityImage?

//sys importLayerBegin(info *driverInfo, id string, descriptors []WC_LAYER_DESCRIPTOR, context *uintptr) (hr error) = vmcompute.ImportLayerBegin?
//sys importLayerNext(context uintptr, fileName string, fileInfo *winio.FileBasicInfo) (hr error) = vmcompute.ImportLayerNext?
//sys importLayerWrite(context uintptr, buffer []byte) (hr error) = vmcompute.ImportLayerWrite?
//sys importLayerEnd(context uintptr) (hr error) = vmcompute.ImportLayerEnd?

//sys exportLayerBegin(info *driverInfo, id string, descriptors []WC_LAYER_DESCRIPTOR, context *uintptr) (hr error) = vmcompute.ExportLayerBegin?
//sys exportLayerNext(context uintptr, fileName **uint16, fileInfo *winio.FileBasicInfo, fileSize *int64, deleted *uint32) (hr error) = vmcompute.ExportLayerNext?
//sys exportLayerRead(context uintptr, buffer []byte, bytesRead *uint32) (hr error) = vmcompute.ExportLayerRead?
//sys exportLayerEnd(context uintptr) (hr error) = vmcompute.ExportLayerEnd?

//sys grantVmAccess(vmid string, filepath string) (hr error) = vmcompute.GrantVmAccess?

type _guid = guid.GUID
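
A note on the //sys lines above: the trailing '?' tells mksyscall_windows that the export may be missing on some Windows builds, so the generated stub (visible below for importLayerBegin and friends) guards the call with proc.Find() and returns the lookup error instead of panicking at call time. A stripped-down sketch of that guard, not part of the diff (the export name is hypothetical; builds only on Windows):

```go
package main

import "golang.org/x/sys/windows"

var (
	modvmcompute   = windows.NewLazySystemDLL("vmcompute.dll")
	procSomeExport = modvmcompute.NewProc("SomeExport") // hypothetical export
)

func callIfPresent() error {
	if err := procSomeExport.Find(); err != nil {
		return err // export absent on this Windows build
	}
	// A syscall.Syscall(procSomeExport.Addr(), ...) call would follow here.
	return nil
}

func main() {}
```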

233 vendor/github.com/Microsoft/hcsshim/internal/wclayer/zsyscall_windows.go generated vendored
@ -1,4 +1,4 @@
// MACHINE GENERATED BY 'go generate' COMMAND; DO NOT EDIT
// Code generated mksyscall_windows.exe DO NOT EDIT

package wclayer

@ -6,8 +6,6 @@ import (
"syscall"
"unsafe"

"github.com/Microsoft/go-winio"
"github.com/Microsoft/hcsshim/internal/interop"
"golang.org/x/sys/windows"
)

@ -58,14 +56,6 @@ var (
procUnprepareLayer = modvmcompute.NewProc("UnprepareLayer")
procProcessBaseImage = modvmcompute.NewProc("ProcessBaseImage")
procProcessUtilityImage = modvmcompute.NewProc("ProcessUtilityImage")
procImportLayerBegin = modvmcompute.NewProc("ImportLayerBegin")
procImportLayerNext = modvmcompute.NewProc("ImportLayerNext")
procImportLayerWrite = modvmcompute.NewProc("ImportLayerWrite")
procImportLayerEnd = modvmcompute.NewProc("ImportLayerEnd")
procExportLayerBegin = modvmcompute.NewProc("ExportLayerBegin")
procExportLayerNext = modvmcompute.NewProc("ExportLayerNext")
procExportLayerRead = modvmcompute.NewProc("ExportLayerRead")
procExportLayerEnd = modvmcompute.NewProc("ExportLayerEnd")
procGrantVmAccess = modvmcompute.NewProc("GrantVmAccess")
)

@ -84,7 +74,10 @@ func _activateLayer(info *driverInfo, id *uint16) (hr error) {
}
r0, _, _ := syscall.Syscall(procActivateLayer.Addr(), 2, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), 0)
if int32(r0) < 0 {
hr = interop.Win32FromHresult(r0)
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
}
hr = syscall.Errno(r0)
}
return
}
@ -113,7 +106,10 @@ func _copyLayer(info *driverInfo, srcId *uint16, dstId *uint16, descriptors []WC
}
r0, _, _ := syscall.Syscall6(procCopyLayer.Addr(), 5, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(srcId)), uintptr(unsafe.Pointer(dstId)), uintptr(unsafe.Pointer(_p2)), uintptr(len(descriptors)), 0)
if int32(r0) < 0 {
hr = interop.Win32FromHresult(r0)
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
}
hr = syscall.Errno(r0)
}
return
}
@ -138,7 +134,10 @@ func _createLayer(info *driverInfo, id *uint16, parent *uint16) (hr error) {
}
r0, _, _ := syscall.Syscall(procCreateLayer.Addr(), 3, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(parent)))
if int32(r0) < 0 {
hr = interop.Win32FromHresult(r0)
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
}
hr = syscall.Errno(r0)
}
return
}
@ -162,7 +161,10 @@ func _createSandboxLayer(info *driverInfo, id *uint16, parent uintptr, descripto
}
r0, _, _ := syscall.Syscall6(procCreateSandboxLayer.Addr(), 5, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(parent), uintptr(unsafe.Pointer(_p1)), uintptr(len(descriptors)), 0)
if int32(r0) < 0 {
hr = interop.Win32FromHresult(r0)
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
}
hr = syscall.Errno(r0)
}
return
}
@ -182,7 +184,10 @@ func _expandSandboxSize(info *driverInfo, id *uint16, size uint64) (hr error) {
}
r0, _, _ := syscall.Syscall(procExpandSandboxSize.Addr(), 3, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(size))
if int32(r0) < 0 {
hr = interop.Win32FromHresult(r0)
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
}
hr = syscall.Errno(r0)
}
return
}
@ -202,7 +207,10 @@ func _deactivateLayer(info *driverInfo, id *uint16) (hr error) {
}
r0, _, _ := syscall.Syscall(procDeactivateLayer.Addr(), 2, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), 0)
if int32(r0) < 0 {
hr = interop.Win32FromHresult(r0)
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
}
hr = syscall.Errno(r0)
}
return
}
@ -222,7 +230,10 @@ func _destroyLayer(info *driverInfo, id *uint16) (hr error) {
}
r0, _, _ := syscall.Syscall(procDestroyLayer.Addr(), 2, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), 0)
if int32(r0) < 0 {
hr = interop.Win32FromHresult(r0)
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
}
hr = syscall.Errno(r0)
}
return
}
@ -251,7 +262,10 @@ func _exportLayer(info *driverInfo, id *uint16, path *uint16, descriptors []WC_L
}
r0, _, _ := syscall.Syscall6(procExportLayer.Addr(), 5, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(_p2)), uintptr(len(descriptors)), 0)
if int32(r0) < 0 {
hr = interop.Win32FromHresult(r0)
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
}
hr = syscall.Errno(r0)
}
return
}
@ -271,7 +285,10 @@ func _getLayerMountPath(info *driverInfo, id *uint16, length *uintptr, buffer *u
}
r0, _, _ := syscall.Syscall6(procGetLayerMountPath.Addr(), 4, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(length)), uintptr(unsafe.Pointer(buffer)), 0, 0)
if int32(r0) < 0 {
hr = interop.Win32FromHresult(r0)
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
}
hr = syscall.Errno(r0)
}
return
}
@ -282,7 +299,10 @@ func getBaseImages(buffer **uint16) (hr error) {
}
r0, _, _ := syscall.Syscall(procGetBaseImages.Addr(), 1, uintptr(unsafe.Pointer(buffer)), 0, 0)
if int32(r0) < 0 {
hr = interop.Win32FromHresult(r0)
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
}
hr = syscall.Errno(r0)
}
return
}
@ -311,7 +331,10 @@ func _importLayer(info *driverInfo, id *uint16, path *uint16, descriptors []WC_L
}
r0, _, _ := syscall.Syscall6(procImportLayer.Addr(), 5, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(_p2)), uintptr(len(descriptors)), 0)
if int32(r0) < 0 {
hr = interop.Win32FromHresult(r0)
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
}
hr = syscall.Errno(r0)
}
return
}
@ -331,7 +354,10 @@ func _layerExists(info *driverInfo, id *uint16, exists *uint32) (hr error) {
}
r0, _, _ := syscall.Syscall(procLayerExists.Addr(), 3, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(exists)))
if int32(r0) < 0 {
hr = interop.Win32FromHresult(r0)
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
}
hr = syscall.Errno(r0)
}
return
}
@ -351,7 +377,10 @@ func _nameToGuid(name *uint16, guid *_guid) (hr error) {
}
r0, _, _ := syscall.Syscall(procNameToGuid.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(guid)), 0)
if int32(r0) < 0 {
hr = interop.Win32FromHresult(r0)
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
}
hr = syscall.Errno(r0)
}
return
}
@ -375,7 +404,10 @@ func _prepareLayer(info *driverInfo, id *uint16, descriptors []WC_LAYER_DESCRIPT
}
r0, _, _ := syscall.Syscall6(procPrepareLayer.Addr(), 4, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(_p1)), uintptr(len(descriptors)), 0, 0)
if int32(r0) < 0 {
hr = interop.Win32FromHresult(r0)
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
}
hr = syscall.Errno(r0)
}
return
}
@ -395,7 +427,10 @@ func _unprepareLayer(info *driverInfo, id *uint16) (hr error) {
}
r0, _, _ := syscall.Syscall(procUnprepareLayer.Addr(), 2, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), 0)
if int32(r0) < 0 {
hr = interop.Win32FromHresult(r0)
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
}
hr = syscall.Errno(r0)
}
return
}
@ -415,7 +450,10 @@ func _processBaseImage(path *uint16) (hr error) {
}
r0, _, _ := syscall.Syscall(procProcessBaseImage.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0)
if int32(r0) < 0 {
hr = interop.Win32FromHresult(r0)
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
}
hr = syscall.Errno(r0)
}
return
}
@ -435,138 +473,10 @@ func _processUtilityImage(path *uint16) (hr error) {
}
r0, _, _ := syscall.Syscall(procProcessUtilityImage.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0)
if int32(r0) < 0 {
hr = interop.Win32FromHresult(r0)
}
return
}

func importLayerBegin(info *driverInfo, id string, descriptors []WC_LAYER_DESCRIPTOR, context *uintptr) (hr error) {
var _p0 *uint16
_p0, hr = syscall.UTF16PtrFromString(id)
if hr != nil {
return
}
return _importLayerBegin(info, _p0, descriptors, context)
}

func _importLayerBegin(info *driverInfo, id *uint16, descriptors []WC_LAYER_DESCRIPTOR, context *uintptr) (hr error) {
var _p1 *WC_LAYER_DESCRIPTOR
if len(descriptors) > 0 {
_p1 = &descriptors[0]
}
if hr = procImportLayerBegin.Find(); hr != nil {
return
}
r0, _, _ := syscall.Syscall6(procImportLayerBegin.Addr(), 5, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(_p1)), uintptr(len(descriptors)), uintptr(unsafe.Pointer(context)), 0)
if int32(r0) < 0 {
hr = interop.Win32FromHresult(r0)
}
return
}

func importLayerNext(context uintptr, fileName string, fileInfo *winio.FileBasicInfo) (hr error) {
var _p0 *uint16
_p0, hr = syscall.UTF16PtrFromString(fileName)
if hr != nil {
return
}
return _importLayerNext(context, _p0, fileInfo)
}

func _importLayerNext(context uintptr, fileName *uint16, fileInfo *winio.FileBasicInfo) (hr error) {
if hr = procImportLayerNext.Find(); hr != nil {
return
}
r0, _, _ := syscall.Syscall(procImportLayerNext.Addr(), 3, uintptr(context), uintptr(unsafe.Pointer(fileName)), uintptr(unsafe.Pointer(fileInfo)))
if int32(r0) < 0 {
hr = interop.Win32FromHresult(r0)
}
return
}

func importLayerWrite(context uintptr, buffer []byte) (hr error) {
var _p0 *byte
if len(buffer) > 0 {
_p0 = &buffer[0]
}
if hr = procImportLayerWrite.Find(); hr != nil {
return
}
r0, _, _ := syscall.Syscall(procImportLayerWrite.Addr(), 3, uintptr(context), uintptr(unsafe.Pointer(_p0)), uintptr(len(buffer)))
if int32(r0) < 0 {
hr = interop.Win32FromHresult(r0)
}
return
}

func importLayerEnd(context uintptr) (hr error) {
if hr = procImportLayerEnd.Find(); hr != nil {
return
}
r0, _, _ := syscall.Syscall(procImportLayerEnd.Addr(), 1, uintptr(context), 0, 0)
if int32(r0) < 0 {
hr = interop.Win32FromHresult(r0)
}
return
}

func exportLayerBegin(info *driverInfo, id string, descriptors []WC_LAYER_DESCRIPTOR, context *uintptr) (hr error) {
var _p0 *uint16
_p0, hr = syscall.UTF16PtrFromString(id)
if hr != nil {
return
}
return _exportLayerBegin(info, _p0, descriptors, context)
}

func _exportLayerBegin(info *driverInfo, id *uint16, descriptors []WC_LAYER_DESCRIPTOR, context *uintptr) (hr error) {
var _p1 *WC_LAYER_DESCRIPTOR
if len(descriptors) > 0 {
_p1 = &descriptors[0]
}
if hr = procExportLayerBegin.Find(); hr != nil {
return
}
r0, _, _ := syscall.Syscall6(procExportLayerBegin.Addr(), 5, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(_p1)), uintptr(len(descriptors)), uintptr(unsafe.Pointer(context)), 0)
if int32(r0) < 0 {
hr = interop.Win32FromHresult(r0)
}
return
}

func exportLayerNext(context uintptr, fileName **uint16, fileInfo *winio.FileBasicInfo, fileSize *int64, deleted *uint32) (hr error) {
if hr = procExportLayerNext.Find(); hr != nil {
return
}
r0, _, _ := syscall.Syscall6(procExportLayerNext.Addr(), 5, uintptr(context), uintptr(unsafe.Pointer(fileName)), uintptr(unsafe.Pointer(fileInfo)), uintptr(unsafe.Pointer(fileSize)), uintptr(unsafe.Pointer(deleted)), 0)
if int32(r0) < 0 {
hr = interop.Win32FromHresult(r0)
}
return
}

func exportLayerRead(context uintptr, buffer []byte, bytesRead *uint32) (hr error) {
var _p0 *byte
if len(buffer) > 0 {
_p0 = &buffer[0]
}
if hr = procExportLayerRead.Find(); hr != nil {
return
}
r0, _, _ := syscall.Syscall6(procExportLayerRead.Addr(), 4, uintptr(context), uintptr(unsafe.Pointer(_p0)), uintptr(len(buffer)), uintptr(unsafe.Pointer(bytesRead)), 0, 0)
if int32(r0) < 0 {
hr = interop.Win32FromHresult(r0)
}
return
}

func exportLayerEnd(context uintptr) (hr error) {
if hr = procExportLayerEnd.Find(); hr != nil {
return
}
r0, _, _ := syscall.Syscall(procExportLayerEnd.Addr(), 1, uintptr(context), 0, 0)
if int32(r0) < 0 {
hr = interop.Win32FromHresult(r0)
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
}
hr = syscall.Errno(r0)
}
return
}
@ -591,7 +501,10 @@ func _grantVmAccess(vmid *uint16, filepath *uint16) (hr error) {
}
r0, _, _ := syscall.Syscall(procGrantVmAccess.Addr(), 2, uintptr(unsafe.Pointer(vmid)), uintptr(unsafe.Pointer(filepath)), 0)
if int32(r0) < 0 {
hr = interop.Win32FromHresult(r0)
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
}
hr = syscall.Errno(r0)
}
return
}

4 vendor/github.com/Microsoft/hcsshim/layer.go generated vendored
@ -5,7 +5,6 @@ import (
"path/filepath"

"github.com/Microsoft/hcsshim/internal/guid"

"github.com/Microsoft/hcsshim/internal/wclayer"
)

@ -74,9 +73,6 @@ type DriverInfo struct {
HomeDir string
}

type FilterLayerReader = wclayer.FilterLayerReader
type FilterLayerWriter = wclayer.FilterLayerWriter

type GUID [16]byte

func NameToGuid(name string) (id GUID, err error) {

21 vendor/github.com/Microsoft/hcsshim/vendor.conf generated vendored Normal file
@ -0,0 +1,21 @@
github.com/blang/semver v3.1.0
github.com/containerd/console c12b1e7919c14469339a5d38f2f8ed9b64a9de23
github.com/containerd/go-runc 5a6d9f37cfa36b15efba46dc7ea349fa9b7143c3
github.com/hashicorp/errwrap 7554cd9344cec97297fa6649b055a8c98c2a1e55
github.com/hashicorp/go-multierror ed905158d87462226a13fe39ddf685ea65f1c11f
github.com/konsorten/go-windows-terminal-sequences v1.0.1
github.com/linuxkit/virtsock 8e79449dea0735c1c056d814934dd035734cc97c
github.com/Microsoft/go-winio 16cfc975803886a5e47c4257a24c8d8c52e178b2
github.com/Microsoft/opengcs v0.3.9
github.com/opencontainers/runtime-spec eba862dc2470385a233c7507392675cbeadf7353
github.com/opencontainers/runtime-tools 1d69bd0f9c39677d0630e50664fbc3154ae61b88
github.com/pkg/errors v0.8.1
github.com/sirupsen/logrus v1.3.0
github.com/syndtr/gocapability db04d3cc01c8b54962a58ec7e491717d06cfcc16
github.com/urfave/cli 7bc6a0acffa589f415f88aca16cc1de5ffd66f9c
github.com/xeipuuv/gojsonpointer 4e3ac2762d5f479393488629ee9370b50873b3a6
github.com/xeipuuv/gojsonreference bd5ef7bd5415a7ac448318e64f11a24cd21e594b
github.com/xeipuuv/gojsonschema 1d523034197ff1f222f6429836dd36a2457a1874
golang.org/x/crypto ff983b9c42bc9fbf91556e191cc8efb585c16908
golang.org/x/sync 37e7f081c4d4c64e13b10787722085407fe5d15f
golang.org/x/sys e5ecc2a6747ce8d4af18ed98b3de5ae30eb3a5bb
8 vendor/github.com/Microsoft/hcsshim/zsyscall_windows.go generated vendored
@ -1,4 +1,4 @@
// MACHINE GENERATED BY 'go generate' COMMAND; DO NOT EDIT
// Code generated mksyscall_windows.exe DO NOT EDIT

package hcsshim

@ -6,7 +6,6 @@ import (
	"syscall"
	"unsafe"

	"github.com/Microsoft/hcsshim/internal/interop"
	"golang.org/x/sys/windows"
)

@ -46,7 +45,10 @@ var (
func SetCurrentThreadCompartmentId(compartmentId uint32) (hr error) {
	r0, _, _ := syscall.Syscall(procSetCurrentThreadCompartmentId.Addr(), 1, uintptr(compartmentId), 0, 0)
	if int32(r0) < 0 {
		hr = interop.Win32FromHresult(r0)
		if r0&0x1fff0000 == 0x00070000 {
			r0 &= 0xffff
		}
		hr = syscall.Errno(r0)
	}
	return
}
14 vendor/github.com/containerd/cgroups/README.md generated vendored
@ -1,8 +1,9 @@
# cgroups

[](https://travis-ci.org/containerd/cgroups)

[](https://codecov.io/gh/containerd/cgroups)
[](https://godoc.org/github.com/containerd/cgroups)
[](https://goreportcard.com/report/github.com/containerd/cgroups)

Go package for creating, managing, inspecting, and destroying cgroups.
The resources format for settings on the cgroup uses the OCI runtime-spec found
@ -110,3 +111,14 @@ err := control.MoveTo(destination)
```go
subCgroup, err := control.New("child", resources)
```

## Project details

Cgroups is a containerd sub-project, licensed under the [Apache 2.0 license](./LICENSE).
As a containerd sub-project, you will find the:

* [Project governance](https://github.com/containerd/project/blob/master/GOVERNANCE.md),
* [Maintainers](https://github.com/containerd/project/blob/master/MAINTAINERS),
* and [Contributing guidelines](https://github.com/containerd/project/blob/master/CONTRIBUTING.md)

information in our [`containerd/project`](https://github.com/containerd/project) repository.
57 vendor/github.com/containerd/cgroups/blkio.go generated vendored
@ -191,31 +191,42 @@ func (b *blkioController) readEntry(devices map[deviceKey]string, path, name str
}

func createBlkioSettings(blkio *specs.LinuxBlockIO) []blkioSettings {
	settings := []blkioSettings{
		{
			name:   "weight",
			value:  blkio.Weight,
			format: uintf,
		},
		{
			name:   "leaf_weight",
			value:  blkio.LeafWeight,
			format: uintf,
		},
	}
	for _, wd := range blkio.WeightDevice {
	settings := []blkioSettings{}

	if blkio.Weight != nil {
		settings = append(settings,
			blkioSettings{
				name:   "weight_device",
				value:  wd,
				format: weightdev,
			},
			blkioSettings{
				name:   "leaf_weight_device",
				value:  wd,
				format: weightleafdev,
				name:   "weight",
				value:  blkio.Weight,
				format: uintf,
			})
	}
	if blkio.LeafWeight != nil {
		settings = append(settings,
			blkioSettings{
				name:   "leaf_weight",
				value:  blkio.LeafWeight,
				format: uintf,
			})
	}
	for _, wd := range blkio.WeightDevice {
		if wd.Weight != nil {
			settings = append(settings,
				blkioSettings{
					name:   "weight_device",
					value:  wd,
					format: weightdev,
				})
		}
		if wd.LeafWeight != nil {
			settings = append(settings,
				blkioSettings{
					name:   "leaf_weight_device",
					value:  wd,
					format: weightleafdev,
				})
		}
	}
	for _, t := range []struct {
		name string
		list []specs.LinuxThrottleDevice
@ -265,12 +276,12 @@ func uintf(v interface{}) []byte {

func weightdev(v interface{}) []byte {
	wd := v.(specs.LinuxWeightDevice)
	return []byte(fmt.Sprintf("%d:%d %d", wd.Major, wd.Minor, wd.Weight))
	return []byte(fmt.Sprintf("%d:%d %d", wd.Major, wd.Minor, *wd.Weight))
}

func weightleafdev(v interface{}) []byte {
	wd := v.(specs.LinuxWeightDevice)
	return []byte(fmt.Sprintf("%d:%d %d", wd.Major, wd.Minor, wd.LeafWeight))
	return []byte(fmt.Sprintf("%d:%d %d", wd.Major, wd.Minor, *wd.LeafWeight))
}

func throttleddev(v interface{}) []byte {
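The reworked createBlkioSettings emits a setting only when the corresponding OCI field is a non-nil pointer, which is what lets weightdev and weightleafdev dereference unconditionally afterwards. A minimal, self-contained sketch of that guard-then-dereference pattern (the setting type and collect function are illustrative, not from the diff):

```go
package main

import "fmt"

// setting mirrors the shape of a blkio-style key/value pair.
type setting struct {
    name  string
    value uint16
}

// collect appends a setting only when the optional pointer is set,
// so later formatting can dereference without a nil check.
func collect(weight, leafWeight *uint16) []setting {
    var out []setting
    if weight != nil {
        out = append(out, setting{name: "weight", value: *weight})
    }
    if leafWeight != nil {
        out = append(out, setting{name: "leaf_weight", value: *leafWeight})
    }
    return out
}

func main() {
    w := uint16(500)
    // Only "weight" is produced; leafWeight was never supplied.
    fmt.Println(collect(&w, nil)) // [{weight 500}]
}
```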
51 vendor/github.com/containerd/cgroups/cgroup.go generated vendored
@ -48,11 +48,12 @@ func New(hierarchy Hierarchy, path Path, resources *specs.LinuxResources) (Cgrou

// Load will load an existing cgroup and allow it to be controlled
func Load(hierarchy Hierarchy, path Path) (Cgroup, error) {
	var activeSubsystems []Subsystem
	subsystems, err := hierarchy()
	if err != nil {
		return nil, err
	}
	// check the the subsystems still exist
	// check that the subsystems still exist, and keep only those that actually exist
	for _, s := range pathers(subsystems) {
		p, err := path(s.Name())
		if err != nil {
@ -63,14 +64,15 @@ func Load(hierarchy Hierarchy, path Path) (Cgroup, error) {
		}
		if _, err := os.Lstat(s.Path(p)); err != nil {
			if os.IsNotExist(err) {
				return nil, ErrCgroupDeleted
				continue
			}
			return nil, err
		}
		activeSubsystems = append(activeSubsystems, s)
	}
	return &cgroup{
		path:       path,
		subsystems: subsystems,
		subsystems: activeSubsystems,
	}, nil
}

@ -319,6 +321,49 @@ func (c *cgroup) processes(subsystem Name, recursive bool) ([]Process, error) {
	return processes, err
}

// Tasks returns the tasks running inside the cgroup along
// with the subsystem used, pid, and path
func (c *cgroup) Tasks(subsystem Name, recursive bool) ([]Task, error) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.err != nil {
		return nil, c.err
	}
	return c.tasks(subsystem, recursive)
}

func (c *cgroup) tasks(subsystem Name, recursive bool) ([]Task, error) {
	s := c.getSubsystem(subsystem)
	sp, err := c.path(subsystem)
	if err != nil {
		return nil, err
	}
	path := s.(pather).Path(sp)
	var tasks []Task
	err = filepath.Walk(path, func(p string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		if !recursive && info.IsDir() {
			if p == path {
				return nil
			}
			return filepath.SkipDir
		}
		dir, name := filepath.Split(p)
		if name != cgroupTasks {
			return nil
		}
		procs, err := readTasksPids(dir, subsystem)
		if err != nil {
			return err
		}
		tasks = append(tasks, procs...)
		return nil
	})
	return tasks, err
}

// Freeze freezes the entire cgroup and all the processes inside it
func (c *cgroup) Freeze() error {
	c.mu.Lock()
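With this change, Load skips subsystems whose directories have vanished instead of failing with ErrCgroupDeleted, and the new Tasks method walks the hierarchy for tasks files. A hedged usage sketch against the containerd/cgroups v1 API as it appears in this diff (the static path and subsystem choice are illustrative):

```go
package main

import (
    "fmt"
    "log"

    "github.com/containerd/cgroups"
)

func main() {
    // Load an existing cgroup; subsystems whose directories no longer
    // exist are simply skipped rather than aborting the load.
    control, err := cgroups.Load(cgroups.V1, cgroups.StaticPath("/test"))
    if err != nil {
        log.Fatal(err)
    }
    // List every task in the devices subsystem, recursing into children.
    tasks, err := control.Tasks(cgroups.Devices, true)
    if err != nil {
        log.Fatal(err)
    }
    for _, t := range tasks {
        fmt.Printf("pid=%d subsystem=%s path=%s\n", t.Pid, t.Subsystem, t.Path)
    }
}
```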
11 vendor/github.com/containerd/cgroups/control.go generated vendored
@ -44,6 +44,15 @@ type Process struct {
	Path string
}

type Task struct {
	// Subsystem is the name of the subsystem that the task is in
	Subsystem Name
	// Pid is the process id of the task
	Pid int
	// Path is the full path of the subsystem and location that the task is in
	Path string
}

// Cgroup handles interactions with the individual groups to perform
// actions on them as them main interface to this cgroup package
type Cgroup interface {
@ -64,6 +73,8 @@ type Cgroup interface {
	Update(resources *specs.LinuxResources) error
	// Processes returns all the processes in a select subsystem for the cgroup
	Processes(Name, bool) ([]Process, error)
	// Tasks returns all the tasks in a select subsystem for the cgroup
	Tasks(Name, bool) ([]Task, error)
	// Freeze freezes or pauses all processes inside the cgroup
	Freeze() error
	// Thaw thaw or resumes all processes inside the cgroup
10 vendor/github.com/containerd/cgroups/cpuset.go generated vendored
@ -57,21 +57,21 @@ func (c *cpusetController) Create(path string, resources *specs.LinuxResources)
	if resources.CPU != nil {
		for _, t := range []struct {
			name  string
			value *string
			value string
		}{
			{
				name:  "cpus",
				value: &resources.CPU.Cpus,
				value: resources.CPU.Cpus,
			},
			{
				name:  "mems",
				value: &resources.CPU.Mems,
				value: resources.CPU.Mems,
			},
		} {
			if t.value != nil {
			if t.value != "" {
				if err := ioutil.WriteFile(
					filepath.Join(c.Path(path), fmt.Sprintf("cpuset.%s", t.name)),
					[]byte(*t.value),
					[]byte(t.value),
					defaultFilePerm,
				); err != nil {
					return err
3 vendor/github.com/containerd/cgroups/devices.go generated vendored
@ -58,6 +58,9 @@ func (d *devicesController) Create(path string, resources *specs.LinuxResources)
		if device.Allow {
			file = allowDeviceFile
		}
		if device.Type == "" {
			device.Type = "a"
		}
		if err := ioutil.WriteFile(
			filepath.Join(d.Path(path), file),
			[]byte(deviceString(device)),
2 vendor/github.com/containerd/cgroups/net_prio.go generated vendored
@ -50,7 +50,7 @@ func (n *netprioController) Create(path string, resources *specs.LinuxResources)
	if resources.Network != nil {
		for _, prio := range resources.Network.Priorities {
			if err := ioutil.WriteFile(
				filepath.Join(n.Path(path), "net_prio_ifpriomap"),
				filepath.Join(n.Path(path), "net_prio.ifpriomap"),
				formatPrio(prio.Name, prio.Priority),
				defaultFilePerm,
			); err != nil {
2 vendor/github.com/containerd/cgroups/subsystem.go generated vendored
@ -42,7 +42,7 @@ const (
)

// Subsystems returns a complete list of the default cgroups
// avaliable on most linux systems
// available on most linux systems
func Subsystems() []Name {
	n := []Name{
		Hugetlb,
33 vendor/github.com/containerd/cgroups/systemd.go generated vendored
@ -32,6 +32,11 @@ const (
	defaultSlice = "system.slice"
)

var (
	canDelegate bool
	once        sync.Once
)

func Systemd() ([]Subsystem, error) {
	root, err := v1MountPoint()
	if err != nil {
@ -54,7 +59,7 @@ func Slice(slice, name string) Path {
		slice = defaultSlice
	}
	return func(subsystem Name) (string, error) {
		return filepath.Join(slice, unitName(name)), nil
		return filepath.Join(slice, name), nil
	}
}

@ -80,15 +85,39 @@ func (s *SystemdController) Create(path string, resources *specs.LinuxResources)
	}
	defer conn.Close()
	slice, name := splitName(path)
	// We need to see if systemd can handle the delegate property
	// Systemd will return an error if it cannot handle delegate regardless
	// of its bool setting.
	checkDelegate := func() {
		canDelegate = true
		dlSlice := newProperty("Delegate", true)
		if _, err := conn.StartTransientUnit(slice, "testdelegate", []systemdDbus.Property{dlSlice}, nil); err != nil {
			if dbusError, ok := err.(dbus.Error); ok {
				// Starting with systemd v237, Delegate is not even a property of slices anymore,
				// so the D-Bus call fails with "InvalidArgs" error.
				if strings.Contains(dbusError.Name, "org.freedesktop.DBus.Error.PropertyReadOnly") || strings.Contains(dbusError.Name, "org.freedesktop.DBus.Error.InvalidArgs") {
					canDelegate = false
				}
			}
		}

		conn.StopUnit(slice, "testDelegate", nil)
	}
	once.Do(checkDelegate)
	properties := []systemdDbus.Property{
		systemdDbus.PropDescription(fmt.Sprintf("cgroup %s", name)),
		systemdDbus.PropWants(slice),
		newProperty("DefaultDependencies", false),
		newProperty("Delegate", true),
		newProperty("MemoryAccounting", true),
		newProperty("CPUAccounting", true),
		newProperty("BlockIOAccounting", true),
	}

	// If we can delegate, we add the property back in
	if canDelegate {
		properties = append(properties, newProperty("Delegate", true))
	}

	ch := make(chan string)
	_, err = conn.StartTransientUnit(name, "replace", properties, ch)
	if err != nil {
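The Create path now probes systemd once for Delegate support and caches the answer in a package-level flag guarded by sync.Once, so every later cgroup creation reuses the probe result. A self-contained sketch of that probe-once pattern (the probe function is a stand-in, not the D-Bus call from the diff):

```go
package main

import (
    "fmt"
    "sync"
)

var (
    canDelegate bool
    once        sync.Once
)

// probeDelegate stands in for the transient-unit D-Bus probe: try the
// feature once and record whether the backend accepted it.
func probeDelegate() bool {
    // Assume success here; the real check inspects the D-Bus error name.
    return true
}

// newUnitProperties builds the property list, adding Delegate only when
// the one-time probe said the running systemd understands it.
func newUnitProperties() []string {
    once.Do(func() { canDelegate = probeDelegate() })
    props := []string{"DefaultDependencies=false", "MemoryAccounting=true"}
    if canDelegate {
        props = append(props, "Delegate=true")
    }
    return props
}

func main() {
    fmt.Println(newUnitProperties())
}
```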
29 vendor/github.com/containerd/cgroups/utils.go generated vendored
@ -111,7 +111,7 @@ func remove(path string) error {
	return fmt.Errorf("cgroups: unable to remove path %q", path)
}

// readPids will read all the pids in a cgroup by the provided path
// readPids will read all the pids of processes in a cgroup by the provided path
func readPids(path string, subsystem Name) ([]Process, error) {
	f, err := os.Open(filepath.Join(path, cgroupProcs))
	if err != nil {
@ -138,6 +138,33 @@ func readPids(path string, subsystem Name) ([]Process, error) {
	return out, nil
}

// readTasksPids will read all the pids of tasks in a cgroup by the provided path
func readTasksPids(path string, subsystem Name) ([]Task, error) {
	f, err := os.Open(filepath.Join(path, cgroupTasks))
	if err != nil {
		return nil, err
	}
	defer f.Close()
	var (
		out []Task
		s   = bufio.NewScanner(f)
	)
	for s.Scan() {
		if t := s.Text(); t != "" {
			pid, err := strconv.Atoi(t)
			if err != nil {
				return nil, err
			}
			out = append(out, Task{
				Pid:       pid,
				Subsystem: subsystem,
				Path:      path,
			})
		}
	}
	return out, nil
}

func hugePageSizes() ([]string, error) {
	var (
		pageSizes []string
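readTasksPids is a line-by-line parse of the cgroup tasks file, one decimal pid per line. A standalone sketch of the same parse outside the package (the file path is illustrative):

```go
package main

import (
    "bufio"
    "fmt"
    "log"
    "os"
    "strconv"
)

func main() {
    // Illustrative path; any file with one pid per line works.
    f, err := os.Open("/sys/fs/cgroup/devices/tasks")
    if err != nil {
        log.Fatal(err)
    }
    defer f.Close()
    var pids []int
    s := bufio.NewScanner(f)
    for s.Scan() {
        if t := s.Text(); t != "" {
            pid, err := strconv.Atoi(t)
            if err != nil {
                log.Fatal(err)
            }
            pids = append(pids, pid)
        }
    }
    if err := s.Err(); err != nil {
        log.Fatal(err)
    }
    fmt.Println(pids)
}
```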
15 vendor/github.com/containerd/containerd/README.md generated vendored
@ -172,11 +172,9 @@ checkpoint, err := task.Checkpoint(context)
err := client.Push(context, "myregistry/checkpoints/redis:master", checkpoint)

// on a new machine pull the checkpoint and restore the redis container
image, err := client.Pull(context, "myregistry/checkpoints/redis:master")
checkpoint, err := client.Pull(context, "myregistry/checkpoints/redis:master")

checkpoint := image.Target()

redis, err = client.NewContainer(context, "redis-master", containerd.WithCheckpoint(checkpoint, "redis-rootfs"))
redis, err = client.NewContainer(context, "redis-master", containerd.WithNewSnapshot("redis-rootfs", checkpoint))
defer container.Delete(context)

task, err = redis.NewTask(context, cio.Stdio, containerd.WithTaskCheckpoint(checkpoint))
@ -212,11 +210,6 @@ See [PLUGINS.md](PLUGINS.md) for how to create plugins
Please see [RELEASES.md](RELEASES.md) for details on versioning and stability
of containerd components.

### Development reports.

Weekly summary on the progress and what is being worked on.
https://github.com/containerd/containerd/tree/master/reports

### Communication

For async communication and long running discussions please use issues and pull requests on the github repo.
@ -227,6 +220,10 @@ For sync communication we have a community slack with a #containerd channel that
**Slack:** Catch us in the #containerd and #containerd-dev channels on dockercommunity.slack.com.
[Click here for an invite to docker community slack.](https://join.slack.com/t/dockercommunity/shared_invite/enQtNDY4MDc1Mzc0MzIwLTgxZDBlMmM4ZGEyNDc1N2FkMzlhODJkYmE1YTVkYjM1MDE3ZjAwZjBkOGFlOTJkZjRmZGYzNjYyY2M3ZTUxYzQ)

### Security audit

A third party security audit was performed by Cure53 in 4Q2018; the [full report](docs/SECURITY_AUDIT.pdf) is available in our docs/ directory.

### Reporting security issues

__If you are reporting a security issue, please reach out discreetly at security@containerd.io__.
2 vendor/github.com/containerd/containerd/archive/tar.go generated vendored
@ -100,7 +100,7 @@ const (
	// readdir calls to this directory do not follow to lower layers.
	whiteoutOpaqueDir = whiteoutMetaPrefix + ".opq"

	paxSchilyXattr = "SCHILY.xattrs."
	paxSchilyXattr = "SCHILY.xattr."
)

// Apply applies a tar stream of an OCI style diff tar.
8 vendor/github.com/containerd/containerd/archive/tar_windows.go generated vendored
@ -74,7 +74,7 @@ func tarName(p string) (string, error) {
	// in file names, it is mostly safe to replace however we must
	// check just in case
	if strings.Contains(p, "/") {
		return "", fmt.Errorf("Windows path contains forward slash: %s", p)
		return "", fmt.Errorf("windows path contains forward slash: %s", p)
	}

	return strings.Replace(p, string(os.PathSeparator), "/", -1), nil
@ -130,11 +130,7 @@ func skipFile(hdr *tar.Header) bool {
	// specific or Linux-specific, this warning should be changed to an error
	// to cater for the situation where someone does manage to upload a Linux
	// image but have it tagged as Windows inadvertently.
	if strings.Contains(hdr.Name, ":") {
		return true
	}

	return false
	return strings.Contains(hdr.Name, ":")
}

// handleTarTypeBlockCharFifo is an OS-specific helper function used by
2 vendor/github.com/containerd/containerd/archive/time_unix.go generated vendored
@ -1,4 +1,4 @@
// +build freebsd linux openbsd solaris
// +build !windows

/*
   Copyright The containerd Authors.
4 vendor/github.com/containerd/containerd/cio/io.go generated vendored
@ -275,3 +275,7 @@ func Load(set *FIFOSet) (IO, error) {
		closers: []io.Closer{set},
	}, nil
}

func (p *pipes) closers() []io.Closer {
	return []io.Closer{p.Stdin, p.Stdout, p.Stderr}
}
4 vendor/github.com/containerd/containerd/cio/io_unix.go generated vendored
@ -152,7 +152,3 @@ func NewDirectIO(ctx context.Context, fifos *FIFOSet) (*DirectIO, error) {
		},
	}, err
}

func (p *pipes) closers() []io.Closer {
	return []io.Closer{p.Stdin, p.Stdout, p.Stderr}
}
20 vendor/github.com/containerd/containerd/cio/io_windows.go generated vendored
@ -17,6 +17,7 @@
package cio

import (
	"context"
	"fmt"
	"io"
	"net"
@ -144,3 +145,22 @@ func NewDirectIO(stdin io.WriteCloser, stdout, stderr io.ReadCloser, terminal bo
		},
	}
}

// NewDirectIOFromFIFOSet returns an IO implementation that exposes the IO streams as io.ReadCloser
// and io.WriteCloser.
func NewDirectIOFromFIFOSet(ctx context.Context, stdin io.WriteCloser, stdout, stderr io.ReadCloser, fifos *FIFOSet) *DirectIO {
	_, cancel := context.WithCancel(ctx)
	pipes := pipes{
		Stdin:  stdin,
		Stdout: stdout,
		Stderr: stderr,
	}
	return &DirectIO{
		pipes: pipes,
		cio: cio{
			config:  fifos.Config,
			closers: append(pipes.closers(), fifos),
			cancel:  cancel,
		},
	}
}
48 vendor/github.com/containerd/containerd/client.go generated vendored
@ -61,6 +61,7 @@ import (
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
	specs "github.com/opencontainers/runtime-spec/specs-go"
	"github.com/pkg/errors"
	"golang.org/x/sync/semaphore"
	"google.golang.org/grpc"
	"google.golang.org/grpc/health/grpc_health_v1"
)
@ -292,6 +293,9 @@ type RemoteContext struct {
	// platforms will be used to create a PlatformMatcher with no ordering
	// preference.
	Platforms []string

	// MaxConcurrentDownloads is the max concurrent content downloads for each pull.
	MaxConcurrentDownloads int
}

func defaultRemoteContext() *RemoteContext {
@ -403,12 +407,23 @@ func (c *Client) fetch(ctx context.Context, rCtx *RemoteContext, ref string, lim
	}

	var (
		schema1Converter *schema1.Converter
		handler          images.Handler
		handler images.Handler

		isConvertible bool
		converterFunc func(context.Context, ocispec.Descriptor) (ocispec.Descriptor, error)
		limiter       *semaphore.Weighted
	)

	if desc.MediaType == images.MediaTypeDockerSchema1Manifest && rCtx.ConvertSchema1 {
		schema1Converter = schema1.NewConverter(store, fetcher)
		schema1Converter := schema1.NewConverter(store, fetcher)

		handler = images.Handlers(append(rCtx.BaseHandlers, schema1Converter)...)

		isConvertible = true

		converterFunc = func(ctx context.Context, _ ocispec.Descriptor) (ocispec.Descriptor, error) {
			return schema1Converter.Convert(ctx)
		}
	} else {
		// Get all the children for a descriptor
		childrenHandler := images.ChildrenHandler(store)
@ -421,18 +436,37 @@ func (c *Client) fetch(ctx context.Context, rCtx *RemoteContext, ref string, lim
			childrenHandler = images.LimitManifests(childrenHandler, rCtx.PlatformMatcher, limit)
		}

		// set isConvertible to true if there is application/octet-stream media type
		convertibleHandler := images.HandlerFunc(
			func(_ context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
				if desc.MediaType == docker.LegacyConfigMediaType {
					isConvertible = true
				}

				return []ocispec.Descriptor{}, nil
			},
		)

		handler = images.Handlers(append(rCtx.BaseHandlers,
			remotes.FetchHandler(store, fetcher),
			convertibleHandler,
			childrenHandler,
		)...)

		converterFunc = func(ctx context.Context, desc ocispec.Descriptor) (ocispec.Descriptor, error) {
			return docker.ConvertManifest(ctx, store, desc)
		}
	}

	if err := images.Dispatch(ctx, handler, desc); err != nil {
	if rCtx.MaxConcurrentDownloads > 0 {
		limiter = semaphore.NewWeighted(int64(rCtx.MaxConcurrentDownloads))
	}
	if err := images.Dispatch(ctx, handler, limiter, desc); err != nil {
		return images.Image{}, err
	}
	if schema1Converter != nil {
		desc, err = schema1Converter.Convert(ctx)
		if err != nil {

	if isConvertible {
		if desc, err = converterFunc(ctx, desc); err != nil {
			return images.Image{}, err
		}
	}
8 vendor/github.com/containerd/containerd/client_opts.go generated vendored
@ -178,3 +178,11 @@ func WithImageHandler(h images.Handler) RemoteOpt {
		return nil
	}
}

// WithMaxConcurrentDownloads sets max concurrent download limit.
func WithMaxConcurrentDownloads(max int) RemoteOpt {
	return func(client *Client, c *RemoteContext) error {
		c.MaxConcurrentDownloads = max
		return nil
	}
}
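WithMaxConcurrentDownloads is a RemoteOpt, so it rides along with a normal pull. A hedged sketch of wiring it up (the image reference and limit are illustrative):

```go
package main

import (
    "context"
    "log"

    "github.com/containerd/containerd"
    "github.com/containerd/containerd/namespaces"
)

func main() {
    client, err := containerd.New("/run/containerd/containerd.sock")
    if err != nil {
        log.Fatal(err)
    }
    defer client.Close()

    ctx := namespaces.WithNamespace(context.Background(), "default")
    // Cap the pull at three concurrent layer downloads; without the opt,
    // MaxConcurrentDownloads stays zero and dispatch is unlimited.
    img, err := client.Pull(ctx, "docker.io/library/busybox:latest",
        containerd.WithPullUnpack,
        containerd.WithMaxConcurrentDownloads(3),
    )
    if err != nil {
        log.Fatal(err)
    }
    log.Println("pulled", img.Name())
}
```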
20 vendor/github.com/containerd/containerd/cmd/containerd/command/service_windows.go generated vendored
@ -387,18 +387,16 @@ func (h *handler) Execute(_ []string, r <-chan svc.ChangeRequest, s chan<- svc.S
	h.fromsvc <- nil

	s <- svc.Status{State: svc.Running, Accepts: svc.AcceptStop | svc.AcceptShutdown | svc.Accepted(windows.SERVICE_ACCEPT_PARAMCHANGE)}

Loop:
	for {
		select {
		case c := <-r:
			switch c.Cmd {
			case svc.Interrogate:
				s <- c.CurrentStatus
			case svc.Stop, svc.Shutdown:
				s <- svc.Status{State: svc.StopPending, Accepts: 0}
				h.s.Stop()
				break Loop
			}
	for c := range r {
		switch c.Cmd {
		case svc.Interrogate:
			s <- c.CurrentStatus
		case svc.Stop, svc.Shutdown:
			s <- svc.Status{State: svc.StopPending, Accepts: 0}
			h.s.Stop()
			break Loop
		}
	}
4 vendor/github.com/containerd/containerd/cmd/ctr/commands/commands.go generated vendored
@ -124,6 +124,10 @@ var (
			Name:  "allow-new-privs",
			Usage: "turn off OCI spec's NoNewPrivileges feature flag",
		},
		cli.Uint64Flag{
			Name:  "memory-limit",
			Usage: "memory limit (in bytes) for the container",
		},
	}
)
@ -1,3 +1,5 @@
// +build windows

/*
   Copyright The containerd Authors.

@ -14,17 +16,15 @@
   limitations under the License.
*/

package archive
package commands

import (
	"time"

	"github.com/pkg/errors"
	"github.com/urfave/cli"
)

// as at MacOS 10.12 there is apparently no way to set timestamps
// with nanosecond precision. We could fall back to utimes/lutimes
// and lose the precision as a temporary workaround.
func chtimes(path string, atime, mtime time.Time) error {
	return errors.New("OSX missing UtimesNanoAt")
func init() {
	ContainerFlags = append(ContainerFlags, cli.Uint64Flag{
		Name:  "cpu-count",
		Usage: "number of CPUs available to the container",
	})
}
16 vendor/github.com/containerd/containerd/cmd/ctr/commands/content/content.go generated vendored
@ -290,6 +290,11 @@ var (
			Name:  "validate",
			Usage: "validate the result against a format (json, mediatype, etc.)",
		},
		cli.StringFlag{
			Name:   "editor",
			Usage:  "select editor (vim, emacs, etc.)",
			EnvVar: "EDITOR",
		},
	},
	Action: func(context *cli.Context) error {
		var (
@ -320,7 +325,7 @@ var (
		}
		defer ra.Close()

		nrc, err := edit(content.NewReader(ra))
		nrc, err := edit(context, content.NewReader(ra))
		if err != nil {
			return err
		}
@ -505,7 +510,12 @@ var (
	}
)

func edit(rd io.Reader) (io.ReadCloser, error) {
func edit(context *cli.Context, rd io.Reader) (io.ReadCloser, error) {
	editor := context.String("editor")
	if editor == "" {
		return nil, fmt.Errorf("editor is required")
	}

	tmp, err := ioutil.TempFile(os.Getenv("XDG_RUNTIME_DIR"), "edit-")
	if err != nil {
		return nil, err
@ -516,7 +526,7 @@ func edit(rd io.Reader) (io.ReadCloser, error) {
		return nil, err
	}

	cmd := exec.Command("sh", "-c", "$EDITOR "+tmp.Name())
	cmd := exec.Command("sh", "-c", fmt.Sprintf("%s %s", editor, tmp.Name()))

	cmd.Stdin = os.Stdin
	cmd.Stdout = os.Stdout
vendor/github.com/containerd/containerd/cmd/ctr/commands/images/export.go
generated
vendored
15
vendor/github.com/containerd/containerd/cmd/ctr/commands/images/export.go
generated
vendored
@ -21,7 +21,7 @@ import (
|
||||
"os"
|
||||
|
||||
"github.com/containerd/containerd/cmd/ctr/commands"
|
||||
oci "github.com/containerd/containerd/images/oci"
|
||||
"github.com/containerd/containerd/images/oci"
|
||||
"github.com/containerd/containerd/reference"
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
@ -52,6 +52,10 @@ Currently, only OCI format is supported.
|
||||
Usage: "media type of manifest digest",
|
||||
Value: ocispec.MediaTypeImageManifest,
|
||||
},
|
||||
cli.BoolFlag{
|
||||
Name: "all-platforms",
|
||||
Usage: "exports content from all platforms",
|
||||
},
|
||||
},
|
||||
Action: func(context *cli.Context) error {
|
||||
var (
|
||||
@ -101,7 +105,14 @@ Currently, only OCI format is supported.
|
||||
return nil
|
||||
}
|
||||
}
|
||||
r, err := client.Export(ctx, &oci.V1Exporter{}, desc)
|
||||
|
||||
var (
|
||||
exportOpts []oci.V1ExporterOpt
|
||||
)
|
||||
|
||||
exportOpts = append(exportOpts, oci.WithAllPlatforms(context.Bool("all-platforms")))
|
||||
|
||||
r, err := client.Export(ctx, desc, exportOpts...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
7
vendor/github.com/containerd/containerd/cmd/ctr/commands/run/run.go
generated
vendored
7
vendor/github.com/containerd/containerd/cmd/ctr/commands/run/run.go
generated
vendored
@ -86,9 +86,10 @@ func parseMountFlag(m string) (specs.Mount, error) {
|
||||
|
||||
// Command runs a container
|
||||
var Command = cli.Command{
|
||||
Name: "run",
|
||||
Usage: "run a container",
|
||||
ArgsUsage: "[flags] Image|RootFS ID [COMMAND] [ARG...]",
|
||||
Name: "run",
|
||||
Usage: "run a container",
|
||||
ArgsUsage: "[flags] Image|RootFS ID [COMMAND] [ARG...]",
|
||||
SkipArgReorder: true,
|
||||
Flags: append([]cli.Flag{
|
||||
cli.BoolFlag{
|
||||
Name: "rm",
|
||||
|
4 vendor/github.com/containerd/containerd/cmd/ctr/commands/run/run_unix.go generated vendored
@ -139,6 +139,10 @@ func NewContainer(ctx gocontext.Context, client *containerd.Client, context *cli
			// NOTE: can be set to "" explicitly for disabling cgroup.
			opts = append(opts, oci.WithCgroup(context.String("cgroup")))
		}
		limit := context.Uint64("memory-limit")
		if limit != 0 {
			opts = append(opts, oci.WithMemoryLimit(limit))
		}
	}

	cOpts = append(cOpts, containerd.WithRuntime(context.String("runtime"), nil))
8 vendor/github.com/containerd/containerd/cmd/ctr/commands/run/run_windows.go generated vendored
@ -105,6 +105,14 @@ func NewContainer(ctx gocontext.Context, client *containerd.Client, context *cli
		if context.Bool("isolated") {
			opts = append(opts, oci.WithWindowsHyperV)
		}
		limit := context.Uint64("memory-limit")
		if limit != 0 {
			opts = append(opts, oci.WithMemoryLimit(limit))
		}
		ccount := context.Uint64("cpu-count")
		if ccount != 0 {
			opts = append(opts, oci.WithWindowsCPUCount(ccount))
		}
	}

	cOpts = append(cOpts, containerd.WithContainerLabels(commands.LabelArgs(context.StringSlice("label"))))
57 vendor/github.com/containerd/containerd/cmd/ctr/commands/tasks/checkpoint.go generated vendored
@ -36,6 +36,14 @@ var checkpointCommand = cli.Command{
			Name:  "exit",
			Usage: "stop the container after the checkpoint",
		},
		cli.StringFlag{
			Name:  "image-path",
			Usage: "path to criu image files",
		},
		cli.StringFlag{
			Name:  "work-path",
			Usage: "path to criu work files and logs",
		},
	},
	Action: func(context *cli.Context) error {
		id := context.Args().First()
@ -59,40 +67,55 @@ var checkpointCommand = cli.Command{
		if err != nil {
			return err
		}
		var opts []containerd.CheckpointTaskOpts
		if context.Bool("exit") {
			opts = append(opts, withExit(info.Runtime.Name))
		}
		opts := []containerd.CheckpointTaskOpts{withCheckpointOpts(info.Runtime.Name, context)}
		checkpoint, err := task.Checkpoint(ctx, opts...)
		if err != nil {
			return err
		}
		fmt.Println(checkpoint.Name())
		if context.String("image-path") == "" {
			fmt.Println(checkpoint.Name())
		}
		return nil
	},
}

func withExit(rt string) containerd.CheckpointTaskOpts {
// withCheckpointOpts only suitable for runc runtime now
func withCheckpointOpts(rt string, context *cli.Context) containerd.CheckpointTaskOpts {
	return func(r *containerd.CheckpointTaskInfo) error {
		imagePath := context.String("image-path")
		workPath := context.String("work-path")

		switch rt {
		case "io.containerd.runc.v1":
			if r.Options == nil {
				r.Options = &options.CheckpointOptions{
					Exit: true,
				}
			} else {
				opts, _ := r.Options.(*options.CheckpointOptions)
				r.Options = &options.CheckpointOptions{}
			}
			opts, _ := r.Options.(*options.CheckpointOptions)

			if context.Bool("exit") {
				opts.Exit = true
			}
		default:
			if imagePath != "" {
				opts.ImagePath = imagePath
			}
			if workPath != "" {
				opts.WorkPath = workPath
			}
		case "io.containerd.runtime.v1.linux":
			if r.Options == nil {
				r.Options = &runctypes.CheckpointOptions{
					Exit: true,
				}
			} else {
				opts, _ := r.Options.(*runctypes.CheckpointOptions)
				r.Options = &runctypes.CheckpointOptions{}
			}
			opts, _ := r.Options.(*runctypes.CheckpointOptions)

			if context.Bool("exit") {
				opts.Exit = true
			}
			if imagePath != "" {
				opts.ImagePath = imagePath
			}
			if workPath != "" {
				opts.WorkPath = workPath
			}
		}
		return nil
	}
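The new helper funnels all flag handling through a single CheckpointTaskOpts, switching on the runtime name because runc v1 and the legacy linux runtime carry different option types. A hedged sketch of supplying such an opt yourself (the helper name and paths are illustrative, and the runctypes import path is assumed for the containerd version in this diff):

```go
package main

import (
    "github.com/containerd/containerd"
    "github.com/containerd/containerd/runtime/linux/runctypes"
)

// withCriuPaths mirrors the diff's pattern: initialize r.Options for the
// runtime in use, then set the CRIU image/work directories on it.
func withCriuPaths(imagePath, workPath string) containerd.CheckpointTaskOpts {
    return func(r *containerd.CheckpointTaskInfo) error {
        if r.Options == nil {
            r.Options = &runctypes.CheckpointOptions{}
        }
        opts, ok := r.Options.(*runctypes.CheckpointOptions)
        if !ok {
            return nil // different runtime; leave its options untouched
        }
        opts.ImagePath = imagePath
        opts.WorkPath = workPath
        return nil
    }
}

func main() {
    // Pass the opt to task.Checkpoint(ctx, withCriuPaths(...)) in real use.
    _ = withCriuPaths("/tmp/criu-images", "/tmp/criu-work")
}
```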
7 vendor/github.com/containerd/containerd/cmd/ctr/commands/tasks/exec.go generated vendored
@ -28,9 +28,10 @@ import (

//TODO:(jessvalarezo) exec-id is optional here, update to required arg
var execCommand = cli.Command{
	Name:      "exec",
	Usage:     "execute additional processes in an existing container",
	ArgsUsage: "[flags] CONTAINER CMD [ARG...]",
	Name:           "exec",
	Usage:          "execute additional processes in an existing container",
	ArgsUsage:      "[flags] CONTAINER CMD [ARG...]",
	SkipArgReorder: true,
	Flags: []cli.Flag{
		cli.StringFlag{
			Name: "cwd",
4 vendor/github.com/containerd/containerd/content/local/locks.go generated vendored
@ -47,7 +47,5 @@ func unlock(ref string) {
	locksMu.Lock()
	defer locksMu.Unlock()

	if _, ok := locks[ref]; ok {
		delete(locks, ref)
	}
	delete(locks, ref)
}
2 vendor/github.com/containerd/containerd/diff/apply/apply.go generated vendored
@ -58,7 +58,7 @@ func (s *fsApplier) Apply(ctx context.Context, desc ocispec.Descriptor, mounts [
	defer func() {
		if err == nil {
			log.G(ctx).WithFields(logrus.Fields{
				"d":     time.Now().Sub(t1),
				"d":     time.Since(t1),
				"dgst":  desc.Digest,
				"size":  desc.Size,
				"media": desc.MediaType,
25 vendor/github.com/containerd/containerd/export.go generated vendored
@ -20,36 +20,23 @@ import (
	"context"
	"io"

	"github.com/containerd/containerd/images"
	"github.com/containerd/containerd/images/oci"

	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
	"github.com/pkg/errors"
)

type exportOpts struct {
}

// ExportOpt allows the caller to specify export-specific options
type ExportOpt func(c *exportOpts) error

func resolveExportOpt(opts ...ExportOpt) (exportOpts, error) {
	var eopts exportOpts
	for _, o := range opts {
		if err := o(&eopts); err != nil {
			return eopts, err
		}
	}
	return eopts, nil
}

// Export exports an image to a Tar stream.
// OCI format is used by default.
// It is up to caller to put "org.opencontainers.image.ref.name" annotation to desc.
// TODO(AkihiroSuda): support exporting multiple descriptors at once to a single archive stream.
func (c *Client) Export(ctx context.Context, exporter images.Exporter, desc ocispec.Descriptor, opts ...ExportOpt) (io.ReadCloser, error) {
	_, err := resolveExportOpt(opts...) // unused now
func (c *Client) Export(ctx context.Context, desc ocispec.Descriptor, opts ...oci.V1ExporterOpt) (io.ReadCloser, error) {

	exporter, err := oci.ResolveV1ExportOpt(opts...)
	if err != nil {
		return nil, err
	}

	pr, pw := io.Pipe()
	go func() {
		pw.CloseWithError(errors.Wrap(exporter.Export(ctx, c.ContentStore(), desc, pw), "export failed"))
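Client.Export now takes V1ExporterOpts directly rather than a caller-constructed exporter. A hedged usage sketch (image name and output file are illustrative):

```go
package main

import (
    "context"
    "io"
    "log"
    "os"

    "github.com/containerd/containerd"
    "github.com/containerd/containerd/images/oci"
    "github.com/containerd/containerd/namespaces"
)

func main() {
    client, err := containerd.New("/run/containerd/containerd.sock")
    if err != nil {
        log.Fatal(err)
    }
    defer client.Close()

    ctx := namespaces.WithNamespace(context.Background(), "default")
    img, err := client.GetImage(ctx, "docker.io/library/busybox:latest")
    if err != nil {
        log.Fatal(err)
    }

    // Export every platform's content, not just the local default.
    r, err := client.Export(ctx, img.Target(), oci.WithAllPlatforms(true))
    if err != nil {
        log.Fatal(err)
    }
    defer r.Close()

    out, err := os.Create("busybox.tar")
    if err != nil {
        log.Fatal(err)
    }
    defer out.Close()
    if _, err := io.Copy(out, r); err != nil {
        log.Fatal(err)
    }
}
```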
1 vendor/github.com/containerd/containerd/filters/scanner.go generated vendored
@ -185,7 +185,6 @@ func (s *scanner) scanQuoted(quote rune) {
			ch = s.next()
		}
	}
	return
}

func (s *scanner) scanEscape(quote rune) rune {
2 vendor/github.com/containerd/containerd/identifiers/validate.go generated vendored
@ -45,7 +45,7 @@ var (
// Validate return nil if the string s is a valid identifier.
//
// identifiers must be valid domain names according to RFC 1035, section 2.3.1. To
// enforce case insensitvity, all characters must be lower case.
// enforce case insensitivity, all characters must be lower case.
//
// In general, identifiers that pass this validation, should be safe for use as
// a domain names or filesystem path component.
32 vendor/github.com/containerd/containerd/image.go generated vendored
@ -170,26 +170,22 @@ func (i *image) Unpack(ctx context.Context, snapshotterName string) error {
		chain = append(chain, layer.Diff.Digest)
	}

	if unpacked {
		desc, err := i.i.Config(ctx, cs, i.platform)
		if err != nil {
			return err
		}

		rootfs := identity.ChainID(chain).String()

		cinfo := content.Info{
			Digest: desc.Digest,
			Labels: map[string]string{
				fmt.Sprintf("containerd.io/gc.ref.snapshot.%s", snapshotterName): rootfs,
			},
		}
		if _, err := cs.Update(ctx, cinfo, fmt.Sprintf("labels.containerd.io/gc.ref.snapshot.%s", snapshotterName)); err != nil {
			return err
		}
	desc, err := i.i.Config(ctx, cs, i.platform)
	if err != nil {
		return err
	}

	return nil
	rootfs := identity.ChainID(chain).String()

	cinfo := content.Info{
		Digest: desc.Digest,
		Labels: map[string]string{
			fmt.Sprintf("containerd.io/gc.ref.snapshot.%s", snapshotterName): rootfs,
		},
	}

	_, err = cs.Update(ctx, cinfo, fmt.Sprintf("labels.containerd.io/gc.ref.snapshot.%s", snapshotterName))
	return err
}

func (i *image) getLayers(ctx context.Context, platform platforms.MatchComparer) ([]rootfs.Layer, error) {
16 vendor/github.com/containerd/containerd/images/handlers.go generated vendored
@ -26,6 +26,7 @@ import (
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
	"github.com/pkg/errors"
	"golang.org/x/sync/errgroup"
	"golang.org/x/sync/semaphore"
)

var (
@ -108,19 +109,30 @@ func Walk(ctx context.Context, handler Handler, descs ...ocispec.Descriptor) err
// handler may return `ErrSkipDesc` to signal to the dispatcher to not traverse
// any children.
//
// A concurrency limiter can be passed in to limit the number of concurrent
// handlers running. When limiter is nil, there is no limit.
//
// Typically, this function will be used with `FetchHandler`, often composed
// with other handlers.
//
// If any handler returns an error, the dispatch session will be canceled.
func Dispatch(ctx context.Context, handler Handler, descs ...ocispec.Descriptor) error {
func Dispatch(ctx context.Context, handler Handler, limiter *semaphore.Weighted, descs ...ocispec.Descriptor) error {
	eg, ctx := errgroup.WithContext(ctx)
	for _, desc := range descs {
		desc := desc

		if limiter != nil {
			if err := limiter.Acquire(ctx, 1); err != nil {
				return err
			}
		}
		eg.Go(func() error {
			desc := desc

			children, err := handler.Handle(ctx, desc)
			if limiter != nil {
				limiter.Release(1)
			}
			if err != nil {
				if errors.Cause(err) == ErrSkipDesc {
					return nil // don't traverse the children.
@ -129,7 +141,7 @@ func Dispatch(ctx context.Context, handler Handler, descs ...ocispec.Descriptor)
			}

			if len(children) > 0 {
				return Dispatch(ctx, handler, children...)
				return Dispatch(ctx, handler, limiter, children...)
			}

			return nil
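Dispatch now threads an optional *semaphore.Weighted through its recursion, acquiring one unit before spawning each handler goroutine and releasing it as soon as the handler returns. A self-contained sketch of that bounded-fan-out pattern with errgroup plus semaphore (the worker below is a stand-in for handler.Handle):

```go
package main

import (
    "context"
    "fmt"
    "time"

    "golang.org/x/sync/errgroup"
    "golang.org/x/sync/semaphore"
)

// dispatch runs work for every item, but never more than the limiter
// allows at once; a nil limiter means unbounded, as in the diff.
func dispatch(ctx context.Context, limiter *semaphore.Weighted, items []int) error {
    eg, ctx := errgroup.WithContext(ctx)
    for _, it := range items {
        it := it
        if limiter != nil {
            // Acquire before starting the goroutine so scheduling
            // itself is throttled, not just execution.
            if err := limiter.Acquire(ctx, 1); err != nil {
                return err
            }
        }
        eg.Go(func() error {
            defer func() {
                if limiter != nil {
                    limiter.Release(1)
                }
            }()
            time.Sleep(10 * time.Millisecond) // stand-in for real work
            fmt.Println("handled", it)
            return nil
        })
    }
    return eg.Wait()
}

func main() {
    if err := dispatch(context.Background(), semaphore.NewWeighted(3), []int{1, 2, 3, 4, 5}); err != nil {
        fmt.Println("error:", err)
    }
}
```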
40 vendor/github.com/containerd/containerd/images/oci/exporter.go generated vendored
@ -25,6 +25,7 @@ import (
	"github.com/containerd/containerd/content"
	"github.com/containerd/containerd/images"
	"github.com/containerd/containerd/platforms"
	ocispecs "github.com/opencontainers/image-spec/specs-go"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
	"github.com/pkg/errors"
@ -37,6 +38,36 @@ import (
// e.g. application/vnd.docker.image.rootfs.diff.tar.gzip
// -> application/vnd.oci.image.layer.v1.tar+gzip
type V1Exporter struct {
	AllPlatforms bool
}

// V1ExporterOpt allows the caller to set additional options to a new V1Exporter
type V1ExporterOpt func(c *V1Exporter) error

// DefaultV1Exporter return a default V1Exporter pointer
func DefaultV1Exporter() *V1Exporter {
	return &V1Exporter{
		AllPlatforms: false,
	}
}

// ResolveV1ExportOpt return a new V1Exporter with V1ExporterOpt
func ResolveV1ExportOpt(opts ...V1ExporterOpt) (*V1Exporter, error) {
	exporter := DefaultV1Exporter()
	for _, o := range opts {
		if err := o(exporter); err != nil {
			return exporter, err
		}
	}
	return exporter, nil
}

// WithAllPlatforms set V1Exporter`s AllPlatforms option
func WithAllPlatforms(allPlatforms bool) V1ExporterOpt {
	return func(c *V1Exporter) error {
		c.AllPlatforms = allPlatforms
		return nil
	}
}

// Export implements Exporter.
@ -56,8 +87,15 @@ func (oe *V1Exporter) Export(ctx context.Context, store content.Provider, desc o
		return nil, nil
	}

	childrenHandler := images.ChildrenHandler(store)

	if !oe.AllPlatforms {
		// get local default platform to fetch image manifest
		childrenHandler = images.FilterPlatforms(childrenHandler, platforms.Any(platforms.DefaultSpec()))
	}

	handlers := images.Handlers(
		images.ChildrenHandler(store),
		childrenHandler,
		images.HandlerFunc(exportHandler),
	)
3 vendor/github.com/containerd/containerd/import.go generated vendored
@ -99,8 +99,7 @@ func (c *Client) Import(ctx context.Context, reader io.Reader, opts ...ImportOpt
	})
}

var handler images.HandlerFunc
handler = func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
var handler images.HandlerFunc = func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
	// Only save images at top level
	if desc.Digest != index.Digest {
		return images.Children(ctx, cs, desc)
Some files were not shown because too many files have changed in this diff.