Merge pull request #1437 from darrenstahlmsft/LCOWConfig

LCOW: Split Windows and Linux HCS config generation
This commit is contained in:
Michael Crosby 2017-08-29 10:28:00 -04:00 committed by GitHub
commit 6ec92ddbc9
22 changed files with 1391 additions and 102 deletions

View File

@ -34,8 +34,9 @@ golang.org/x/sync 450f422ab23cf9881c94e2db30cac0eb1b7cf80c
github.com/BurntSushi/toml v0.2.0-21-g9906417 github.com/BurntSushi/toml v0.2.0-21-g9906417
github.com/grpc-ecosystem/go-grpc-prometheus 6b7015e65d366bf3f19b2b2a000a831940f0f7e0 github.com/grpc-ecosystem/go-grpc-prometheus 6b7015e65d366bf3f19b2b2a000a831940f0f7e0
github.com/Microsoft/go-winio v0.4.4 github.com/Microsoft/go-winio v0.4.4
github.com/Microsoft/hcsshim v0.6.3
github.com/Microsoft/opengcs v0.3.2
github.com/boltdb/bolt e9cf4fae01b5a8ff89d0ec6b32f0d9c9f79aefdd github.com/boltdb/bolt e9cf4fae01b5a8ff89d0ec6b32f0d9c9f79aefdd
github.com/Microsoft/hcsshim v0.6.1
github.com/Azure/go-ansiterm 19f72df4d05d31cbe1c56bfc8045c96babff6c7e github.com/Azure/go-ansiterm 19f72df4d05d31cbe1c56bfc8045c96babff6c7e
google.golang.org/genproto d80a6e20e776b0b17a324d0ba1ab50a39c8e8944 google.golang.org/genproto d80a6e20e776b0b17a324d0ba1ab50a39c8e8944
golang.org/x/text 19e51611da83d6be54ddafce4a4af510cb3e9ea4 golang.org/x/text 19e51611da83d6be54ddafce4a4af510cb3e9ea4

82
vendor/github.com/Microsoft/go-winio/vhd/vhd.go generated vendored Normal file
View File

@ -0,0 +1,82 @@
// +build windows
package vhd
import "syscall"
//go:generate go run mksyscall_windows.go -output zvhd.go vhd.go
//sys createVirtualDisk(virtualStorageType *virtualStorageType, path string, virtualDiskAccessMask uint32, securityDescriptor *uintptr, flags uint32, providerSpecificFlags uint32, parameters *createVirtualDiskParameters, o *syscall.Overlapped, handle *syscall.Handle) (err error) [failretval != 0] = VirtDisk.CreateVirtualDisk
// virtualStorageType mirrors the Win32 VIRTUAL_STORAGE_TYPE structure passed
// to CreateVirtualDisk. Field order and sizes must match the native ABI.
type virtualStorageType struct {
	DeviceID uint32
	VendorID [16]byte
}
// Access-mask values accepted by CreateVirtualDisk
// (Win32 VIRTUAL_DISK_ACCESS_MASK).
const (
	virtualDiskAccessNONE     uint32 = 0
	virtualDiskAccessATTACHRO uint32 = 65536
	virtualDiskAccessATTACHRW uint32 = 131072
	virtualDiskAccessDETACH   uint32 = 262144
	virtualDiskAccessGETINFO  uint32 = 524288
	virtualDiskAccessCREATE   uint32 = 1048576
	virtualDiskAccessMETAOPS  uint32 = 2097152
	virtualDiskAccessREAD     uint32 = 851968
	virtualDiskAccessALL      uint32 = 4128768
	virtualDiskAccessWRITABLE uint32 = 3276800
)

// Creation flags for CreateVirtualDisk (Win32 CREATE_VIRTUAL_DISK_FLAG).
const (
	createVirtualDiskFlagNone                        uint32 = 0
	createVirtualDiskFlagFullPhysicalAllocation      uint32 = 1
	createVirtualDiskFlagPreventWritesToSourceDisk   uint32 = 2
	createVirtualDiskFlagDoNotCopyMetadataFromParent uint32 = 4
)
// version2 mirrors the Version2 member of the Win32
// CREATE_VIRTUAL_DISK_PARAMETERS union. Field order, types, and sizes must
// match the native ABI exactly; do not reorder.
type version2 struct {
	UniqueID                 [16]byte // GUID
	MaximumSize              uint64
	BlockSizeInBytes         uint32
	SectorSizeInBytes        uint32
	ParentPath               *uint16 // string
	SourcePath               *uint16 // string
	OpenFlags                uint32
	ParentVirtualStorageType virtualStorageType
	SourceVirtualStorageType virtualStorageType
	ResiliencyGUID           [16]byte // GUID
}

// createVirtualDiskParameters is the top-level parameter block handed to
// CreateVirtualDisk; only the VERSION_2 layout is supported by this package.
type createVirtualDiskParameters struct {
	Version  uint32 // Must always be set to 2
	Version2 version2
}
// CreateVhdx will create a simple vhdx file at the given path using default values.
func CreateVhdx(path string, maxSizeInGb, blockSizeInMb uint32) error {
	var (
		storageType virtualStorageType // zero value: VIRTUAL_STORAGE_TYPE_DEVICE_UNKNOWN
		handle      syscall.Handle
	)

	params := createVirtualDiskParameters{
		Version: 2,
		Version2: version2{
			MaximumSize:      uint64(maxSizeInGb) * 1024 * 1024 * 1024,
			BlockSizeInBytes: blockSizeInMb * 1024 * 1024,
		},
	}

	err := createVirtualDisk(
		&storageType,
		path,
		virtualDiskAccessNONE,
		nil,
		createVirtualDiskFlagNone,
		0,
		&params,
		nil,
		&handle)
	if err != nil {
		return err
	}

	// The disk is created but not attached; release the handle immediately.
	return syscall.CloseHandle(handle)
}

64
vendor/github.com/Microsoft/go-winio/vhd/zvhd.go generated vendored Normal file
View File

@ -0,0 +1,64 @@
// MACHINE GENERATED BY 'go generate' COMMAND; DO NOT EDIT
package vhd
import (
"syscall"
"unsafe"
"golang.org/x/sys/windows"
)
var _ unsafe.Pointer
// Do the interface allocations only once for common
// Errno values.
// Boxing an Errno into the error interface allocates; pre-box the common
// values once so hot paths can return them without allocating.
const errnoERROR_IO_PENDING = 997

var errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING)

// errnoErr returns common boxed Errno values, to prevent
// allocations at runtime.
func errnoErr(e syscall.Errno) error {
	if e == 0 {
		return nil
	}
	if e == errnoERROR_IO_PENDING {
		return errERROR_IO_PENDING
	}
	// TODO: add more here, after collecting data on the common
	// error values see on Windows. (perhaps when running
	// all.bat?)
	return e
}
var (
modVirtDisk = windows.NewLazySystemDLL("VirtDisk.dll")
procCreateVirtualDisk = modVirtDisk.NewProc("CreateVirtualDisk")
)
// createVirtualDisk converts path from a Go string to UTF-16 and forwards the
// call to the raw _createVirtualDisk syscall stub. Generated by
// mksyscall_windows.go; do not hand-edit the logic.
func createVirtualDisk(virtualStorageType *virtualStorageType, path string, virtualDiskAccessMask uint32, securityDescriptor *uintptr, flags uint32, providerSpecificFlags uint32, parameters *createVirtualDiskParameters, o *syscall.Overlapped, handle *syscall.Handle) (err error) {
	var _p0 *uint16
	_p0, err = syscall.UTF16PtrFromString(path)
	if err != nil {
		// Path contained an interior NUL or similar; nothing was called.
		return
	}
	return _createVirtualDisk(virtualStorageType, _p0, virtualDiskAccessMask, securityDescriptor, flags, providerSpecificFlags, parameters, o, handle)
}
// _createVirtualDisk invokes VirtDisk.dll!CreateVirtualDisk via the lazy proc.
// A non-zero r1 indicates failure (matching the [failretval != 0] annotation
// in vhd.go); e1 then carries the Win32 error code, boxed via errnoErr.
func _createVirtualDisk(virtualStorageType *virtualStorageType, path *uint16, virtualDiskAccessMask uint32, securityDescriptor *uintptr, flags uint32, providerSpecificFlags uint32, parameters *createVirtualDiskParameters, o *syscall.Overlapped, handle *syscall.Handle) (err error) {
	r1, _, e1 := syscall.Syscall9(procCreateVirtualDisk.Addr(), 9, uintptr(unsafe.Pointer(virtualStorageType)), uintptr(unsafe.Pointer(path)), uintptr(virtualDiskAccessMask), uintptr(unsafe.Pointer(securityDescriptor)), uintptr(flags), uintptr(providerSpecificFlags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(o)), uintptr(unsafe.Pointer(handle)))
	if r1 != 0 {
		if e1 != 0 {
			err = errnoErr(e1)
		} else {
			// API failed but GetLastError reported success; report a
			// generic invalid-argument error rather than nil.
			err = syscall.EINVAL
		}
	}
	return
}

View File

@ -26,6 +26,31 @@ type HNSEndpoint struct {
IsRemoteEndpoint bool `json:",omitempty"` IsRemoteEndpoint bool `json:",omitempty"`
} }
// SystemType represents the type of the system on which actions are done.
type SystemType string

// SystemType const
const (
	ContainerType      SystemType = "Container"
	VirtualMachineType SystemType = "VirtualMachine"
	HostType           SystemType = "Host"
)

// EndpointAttachDetachRequest is the structure used to send request to the container to modify the system
// Supported resource types are Network and Request Types are Add/Remove
type EndpointAttachDetachRequest struct {
	ContainerID    string     `json:"ContainerId,omitempty"`
	SystemType     SystemType `json:"SystemType"`
	CompartmentID  uint16     `json:"CompartmentId,omitempty"`
	VirtualNICName string     `json:"VirtualNicName,omitempty"`
}

// EndpointResquestResponse is the object used to receive an endpoint request
// response. NOTE: the "Resquest" typo is part of the exported API name and
// must be preserved for compatibility with existing callers.
type EndpointResquestResponse struct {
	Success bool
	Error   string
}
// HNSEndpointRequest makes a HNS call to modify/query a network endpoint // HNSEndpointRequest makes a HNS call to modify/query a network endpoint
func HNSEndpointRequest(method, path, request string) (*HNSEndpoint, error) { func HNSEndpointRequest(method, path, request string) (*HNSEndpoint, error) {
endpoint := &HNSEndpoint{} endpoint := &HNSEndpoint{}
@ -94,12 +119,12 @@ func modifyNetworkEndpoint(containerID string, endpointID string, request Reques
return nil return nil
} }
// GetHNSEndpointByID // GetHNSEndpointByID get the Endpoint by ID
func GetHNSEndpointByID(endpointID string) (*HNSEndpoint, error) { func GetHNSEndpointByID(endpointID string) (*HNSEndpoint, error) {
return HNSEndpointRequest("GET", endpointID, "") return HNSEndpointRequest("GET", endpointID, "")
} }
// GetHNSNetworkName filtered by Name // GetHNSEndpointByName gets the endpoint filtered by Name
func GetHNSEndpointByName(endpointName string) (*HNSEndpoint, error) { func GetHNSEndpointByName(endpointName string) (*HNSEndpoint, error) {
hnsResponse, err := HNSListEndpointRequest() hnsResponse, err := HNSListEndpointRequest()
if err != nil { if err != nil {
@ -135,7 +160,7 @@ func (endpoint *HNSEndpoint) Delete() (*HNSEndpoint, error) {
return HNSEndpointRequest("DELETE", endpoint.Id, "") return HNSEndpointRequest("DELETE", endpoint.Id, "")
} }
// Delete Endpoint by sending EndpointRequest to HNS // Update Endpoint
func (endpoint *HNSEndpoint) Update() (*HNSEndpoint, error) { func (endpoint *HNSEndpoint) Update() (*HNSEndpoint, error) {
operation := "Update" operation := "Update"
title := "HCSShim::HNSEndpoint::" + operation title := "HCSShim::HNSEndpoint::" + operation
@ -144,30 +169,30 @@ func (endpoint *HNSEndpoint) Update() (*HNSEndpoint, error) {
if err != nil { if err != nil {
return nil, err return nil, err
} }
err = hnsCall("POST", "/endpoints/"+endpoint.Id+"/update", string(jsonString), &endpoint) err = hnsCall("POST", "/endpoints/"+endpoint.Id, string(jsonString), &endpoint)
return endpoint, err return endpoint, err
} }
// Hot Attach an endpoint to a container // ContainerHotAttach attaches an endpoint to a running container
func (endpoint *HNSEndpoint) HotAttach(containerID string) error { func (endpoint *HNSEndpoint) ContainerHotAttach(containerID string) error {
operation := "HotAttach" operation := "ContainerHotAttach"
title := "HCSShim::HNSEndpoint::" + operation title := "HCSShim::HNSEndpoint::" + operation
logrus.Debugf(title+" id=%s, containerId=%s", endpoint.Id, containerID) logrus.Debugf(title+" id=%s, containerId=%s", endpoint.Id, containerID)
return modifyNetworkEndpoint(containerID, endpoint.Id, Add) return modifyNetworkEndpoint(containerID, endpoint.Id, Add)
} }
// Hot Detach an endpoint from a container // ContainerHotDetach detaches an endpoint from a running container
func (endpoint *HNSEndpoint) HotDetach(containerID string) error { func (endpoint *HNSEndpoint) ContainerHotDetach(containerID string) error {
operation := "HotDetach" operation := "ContainerHotDetach"
title := "HCSShim::HNSEndpoint::" + operation title := "HCSShim::HNSEndpoint::" + operation
logrus.Debugf(title+" id=%s, containerId=%s", endpoint.Id, containerID) logrus.Debugf(title+" id=%s, containerId=%s", endpoint.Id, containerID)
return modifyNetworkEndpoint(containerID, endpoint.Id, Remove) return modifyNetworkEndpoint(containerID, endpoint.Id, Remove)
} }
// Apply Acl Policy on the Endpoint // ApplyACLPolicy applies Acl Policy on the Endpoint
func (endpoint *HNSEndpoint) ApplyACLPolicy(policy *ACLPolicy) error { func (endpoint *HNSEndpoint) ApplyACLPolicy(policy *ACLPolicy) error {
operation := "ApplyACLPolicy" operation := "ApplyACLPolicy"
title := "HCSShim::HNSEndpoint::" + operation title := "HCSShim::HNSEndpoint::" + operation
@ -181,3 +206,113 @@ func (endpoint *HNSEndpoint) ApplyACLPolicy(policy *ACLPolicy) error {
_, err = endpoint.Update() _, err = endpoint.Update()
return err return err
} }
// ContainerAttach attaches an endpoint to a container, optionally into the
// given network compartment (0 lets HNS choose).
func (endpoint *HNSEndpoint) ContainerAttach(containerID string, compartmentID uint16) error {
	operation := "ContainerAttach"
	title := "HCSShim::HNSEndpoint::" + operation
	logrus.Debugf(title+" id=%s", endpoint.Id)

	requestMessage := &EndpointAttachDetachRequest{
		ContainerID:   containerID,
		CompartmentID: compartmentID,
		SystemType:    ContainerType,
	}
	response := &EndpointResquestResponse{}
	jsonString, err := json.Marshal(requestMessage)
	if err != nil {
		return err
	}
	// Only the call error is surfaced; the decoded response body is discarded.
	return hnsCall("POST", "/endpoints/"+endpoint.Id+"/attach", string(jsonString), &response)
}
// ContainerDetach detaches an endpoint from the given container.
func (endpoint *HNSEndpoint) ContainerDetach(containerID string) error {
	operation := "ContainerDetach"
	title := "HCSShim::HNSEndpoint::" + operation
	logrus.Debugf(title+" id=%s", endpoint.Id)

	requestMessage := &EndpointAttachDetachRequest{
		ContainerID: containerID,
		SystemType:  ContainerType,
	}
	response := &EndpointResquestResponse{}
	jsonString, err := json.Marshal(requestMessage)
	if err != nil {
		return err
	}
	// Only the call error is surfaced; the decoded response body is discarded.
	return hnsCall("POST", "/endpoints/"+endpoint.Id+"/detach", string(jsonString), &response)
}
// HostAttach attaches the endpoint as a NIC on the host, in the given
// network compartment.
func (endpoint *HNSEndpoint) HostAttach(compartmentID uint16) error {
	operation := "HostAttach"
	title := "HCSShim::HNSEndpoint::" + operation
	logrus.Debugf(title+" id=%s", endpoint.Id)

	requestMessage := &EndpointAttachDetachRequest{
		CompartmentID: compartmentID,
		SystemType:    HostType,
	}
	response := &EndpointResquestResponse{}
	jsonString, err := json.Marshal(requestMessage)
	if err != nil {
		return err
	}
	return hnsCall("POST", "/endpoints/"+endpoint.Id+"/attach", string(jsonString), &response)
}
// HostDetach detaches the endpoint's NIC from the host.
func (endpoint *HNSEndpoint) HostDetach() error {
	operation := "HostDetach"
	title := "HCSShim::HNSEndpoint::" + operation
	logrus.Debugf(title+" id=%s", endpoint.Id)

	requestMessage := &EndpointAttachDetachRequest{
		SystemType: HostType,
	}
	response := &EndpointResquestResponse{}
	jsonString, err := json.Marshal(requestMessage)
	if err != nil {
		return err
	}
	return hnsCall("POST", "/endpoints/"+endpoint.Id+"/detach", string(jsonString), &response)
}
// VirtualMachineNICAttach attaches the endpoint to a virtual machine,
// identified by the name of the VM's virtual NIC.
func (endpoint *HNSEndpoint) VirtualMachineNICAttach(virtualMachineNICName string) error {
	operation := "VirtualMachineNicAttach"
	title := "HCSShim::HNSEndpoint::" + operation
	logrus.Debugf(title+" id=%s", endpoint.Id)

	requestMessage := &EndpointAttachDetachRequest{
		VirtualNICName: virtualMachineNICName,
		SystemType:     VirtualMachineType,
	}
	response := &EndpointResquestResponse{}
	jsonString, err := json.Marshal(requestMessage)
	if err != nil {
		return err
	}
	return hnsCall("POST", "/endpoints/"+endpoint.Id+"/attach", string(jsonString), &response)
}
// VirtualMachineNICDetach detaches the endpoint from the virtual machine it
// is currently attached to.
func (endpoint *HNSEndpoint) VirtualMachineNICDetach() error {
	operation := "VirtualMachineNicDetach"
	title := "HCSShim::HNSEndpoint::" + operation
	logrus.Debugf(title+" id=%s", endpoint.Id)

	requestMessage := &EndpointAttachDetachRequest{
		SystemType: VirtualMachineType,
	}
	response := &EndpointResquestResponse{}
	jsonString, err := json.Marshal(requestMessage)
	if err != nil {
		return err
	}
	return hnsCall("POST", "/endpoints/"+endpoint.Id+"/detach", string(jsonString), &response)
}

View File

@ -6,6 +6,7 @@ import (
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
) )
// RoutePolicy is a structure defining schema for Route based Policy
type RoutePolicy struct { type RoutePolicy struct {
Policy Policy
DestinationPrefix string `json:"DestinationPrefix,omitempty"` DestinationPrefix string `json:"DestinationPrefix,omitempty"`
@ -13,6 +14,7 @@ type RoutePolicy struct {
EncapEnabled bool `json:"NeedEncap,omitempty"` EncapEnabled bool `json:"NeedEncap,omitempty"`
} }
// ELBPolicy is a structure defining schema for ELB LoadBalancing based Policy
type ELBPolicy struct { type ELBPolicy struct {
LBPolicy LBPolicy
SourceVIP string `json:"SourceVIP,omitempty"` SourceVIP string `json:"SourceVIP,omitempty"`
@ -20,6 +22,7 @@ type ELBPolicy struct {
ILB bool `json:"ILB,omitempty"` ILB bool `json:"ILB,omitempty"`
} }
// LBPolicy is a structure defining schema for LoadBalancing based Policy
type LBPolicy struct { type LBPolicy struct {
Policy Policy
Protocol uint16 `json:"Protocol,omitempty"` Protocol uint16 `json:"Protocol,omitempty"`
@ -27,10 +30,11 @@ type LBPolicy struct {
ExternalPort uint16 ExternalPort uint16
} }
// PolicyList is a structure defining schema for Policy list request
type PolicyList struct { type PolicyList struct {
Id string `json:"ID,omitempty"` ID string `json:"ID,omitempty"`
EndpointReferences []string `json:"References,omitempty"` EndpointReferences []string `json:"References,omitempty"`
Policies []string `json:"Policies,omitempty"` Policies []json.RawMessage `json:"Policies,omitempty"`
} }
// HNSPolicyListRequest makes a call into HNS to update/query a single network // HNSPolicyListRequest makes a call into HNS to update/query a single network
@ -44,6 +48,7 @@ func HNSPolicyListRequest(method, path, request string) (*PolicyList, error) {
return &policy, nil return &policy, nil
} }
// HNSListPolicyListRequest gets all the policy list
func HNSListPolicyListRequest() ([]PolicyList, error) { func HNSListPolicyListRequest() ([]PolicyList, error) {
var plist []PolicyList var plist []PolicyList
err := hnsCall("GET", "/policylists/", "", &plist) err := hnsCall("GET", "/policylists/", "", &plist)
@ -54,7 +59,7 @@ func HNSListPolicyListRequest() ([]PolicyList, error) {
return plist, nil return plist, nil
} }
// PolicyListRequest makes a HNS call to modify/query a network endpoint // PolicyListRequest makes a HNS call to modify/query a network policy list
func PolicyListRequest(method, path, request string) (*PolicyList, error) { func PolicyListRequest(method, path, request string) (*PolicyList, error) {
policylist := &PolicyList{} policylist := &PolicyList{}
err := hnsCall(method, "/policylists/"+path, request, &policylist) err := hnsCall(method, "/policylists/"+path, request, &policylist)
@ -65,11 +70,16 @@ func PolicyListRequest(method, path, request string) (*PolicyList, error) {
return policylist, nil return policylist, nil
} }
// GetPolicyListByID gets the policy list with the given ID.
func GetPolicyListByID(policyListID string) (*PolicyList, error) {
	return PolicyListRequest("GET", policyListID, "")
}
// Create PolicyList by sending PolicyListRequest to HNS. // Create PolicyList by sending PolicyListRequest to HNS.
func (policylist *PolicyList) Create() (*PolicyList, error) { func (policylist *PolicyList) Create() (*PolicyList, error) {
operation := "Create" operation := "Create"
title := "HCSShim::PolicyList::" + operation title := "HCSShim::PolicyList::" + operation
logrus.Debugf(title+" id=%s", policylist.Id) logrus.Debugf(title+" id=%s", policylist.ID)
jsonString, err := json.Marshal(policylist) jsonString, err := json.Marshal(policylist)
if err != nil { if err != nil {
return nil, err return nil, err
@ -77,20 +87,20 @@ func (policylist *PolicyList) Create() (*PolicyList, error) {
return PolicyListRequest("POST", "", string(jsonString)) return PolicyListRequest("POST", "", string(jsonString))
} }
// Create PolicyList by sending PolicyListRequest to HNS // Delete deletes PolicyList
func (policylist *PolicyList) Delete() (*PolicyList, error) { func (policylist *PolicyList) Delete() (*PolicyList, error) {
operation := "Delete" operation := "Delete"
title := "HCSShim::PolicyList::" + operation title := "HCSShim::PolicyList::" + operation
logrus.Debugf(title+" id=%s", policylist.Id) logrus.Debugf(title+" id=%s", policylist.ID)
return PolicyListRequest("DELETE", policylist.Id, "") return PolicyListRequest("DELETE", policylist.ID, "")
} }
// Add an endpoint to a Policy List // AddEndpoint add an endpoint to a Policy List
func (policylist *PolicyList) AddEndpoint(endpoint *HNSEndpoint) (*PolicyList, error) { func (policylist *PolicyList) AddEndpoint(endpoint *HNSEndpoint) (*PolicyList, error) {
operation := "AddEndpoint" operation := "AddEndpoint"
title := "HCSShim::PolicyList::" + operation title := "HCSShim::PolicyList::" + operation
logrus.Debugf(title+" id=%s, endpointId:%s", policylist.Id, endpoint.Id) logrus.Debugf(title+" id=%s, endpointId:%s", policylist.ID, endpoint.Id)
_, err := policylist.Delete() _, err := policylist.Delete()
if err != nil { if err != nil {
@ -103,11 +113,11 @@ func (policylist *PolicyList) AddEndpoint(endpoint *HNSEndpoint) (*PolicyList, e
return policylist.Create() return policylist.Create()
} }
// Remove an endpoint from the Policy List // RemoveEndpoint removes an endpoint from the Policy List
func (policylist *PolicyList) RemoveEndpoint(endpoint *HNSEndpoint) (*PolicyList, error) { func (policylist *PolicyList) RemoveEndpoint(endpoint *HNSEndpoint) (*PolicyList, error) {
operation := "RemoveEndpoint" operation := "RemoveEndpoint"
title := "HCSShim::PolicyList::" + operation title := "HCSShim::PolicyList::" + operation
logrus.Debugf(title+" id=%s, endpointId:%s", policylist.Id, endpoint.Id) logrus.Debugf(title+" id=%s, endpointId:%s", policylist.ID, endpoint.Id)
_, err := policylist.Delete() _, err := policylist.Delete()
if err != nil { if err != nil {
@ -129,16 +139,20 @@ func (policylist *PolicyList) RemoveEndpoint(endpoint *HNSEndpoint) (*PolicyList
} }
// AddLoadBalancer policy list for the specified endpoints // AddLoadBalancer policy list for the specified endpoints
func AddLoadBalancer(endpoints []HNSEndpoint, isILB bool, vip string, protocol uint16, internalPort uint16, externalPort uint16) (*PolicyList, error) { func AddLoadBalancer(endpoints []HNSEndpoint, isILB bool, sourceVIP, vip string, protocol uint16, internalPort uint16, externalPort uint16) (*PolicyList, error) {
operation := "AddLoadBalancer" operation := "AddLoadBalancer"
title := "HCSShim::PolicyList::" + operation title := "HCSShim::PolicyList::" + operation
logrus.Debugf(title+" Vip:%s", vip) logrus.Debugf(title+" endpointId=%v, isILB=%v, sourceVIP=%s, vip=%s, protocol=%v, internalPort=%v, externalPort=%v", endpoints, isILB, sourceVIP, vip, protocol, internalPort, externalPort)
policylist := &PolicyList{} policylist := &PolicyList{}
elbPolicy := &ELBPolicy{ elbPolicy := &ELBPolicy{
VIPs: []string{vip}, SourceVIP: sourceVIP,
ILB: isILB, ILB: isILB,
}
if len(vip) > 0 {
elbPolicy.VIPs = []string{vip}
} }
elbPolicy.Type = ExternalLoadBalancer elbPolicy.Type = ExternalLoadBalancer
elbPolicy.Protocol = protocol elbPolicy.Protocol = protocol
@ -153,12 +167,11 @@ func AddLoadBalancer(endpoints []HNSEndpoint, isILB bool, vip string, protocol u
if err != nil { if err != nil {
return nil, err return nil, err
} }
policylist.Policies = append(policylist.Policies, jsonString)
policylist.Policies[0] = string(jsonString)
return policylist.Create() return policylist.Create()
} }
// AddLoadBalancer policy list for the specified endpoints // AddRoute adds route policy list for the specified endpoints
func AddRoute(endpoints []HNSEndpoint, destinationPrefix string, nextHop string, encapEnabled bool) (*PolicyList, error) { func AddRoute(endpoints []HNSEndpoint, destinationPrefix string, nextHop string, encapEnabled bool) (*PolicyList, error) {
operation := "AddRoute" operation := "AddRoute"
title := "HCSShim::PolicyList::" + operation title := "HCSShim::PolicyList::" + operation
@ -182,6 +195,6 @@ func AddRoute(endpoints []HNSEndpoint, destinationPrefix string, nextHop string,
return nil, err return nil, err
} }
policylist.Policies[0] = string(jsonString) policylist.Policies = append(policylist.Policies, jsonString)
return policylist.Create() return policylist.Create()
} }

View File

@ -48,6 +48,8 @@ type HvRuntime struct {
LinuxInitrdFile string `json:",omitempty"` // File under ImagePath on host containing an initrd image for starting a Linux utility VM LinuxInitrdFile string `json:",omitempty"` // File under ImagePath on host containing an initrd image for starting a Linux utility VM
LinuxKernelFile string `json:",omitempty"` // File under ImagePath on host containing a kernel for starting a Linux utility VM LinuxKernelFile string `json:",omitempty"` // File under ImagePath on host containing a kernel for starting a Linux utility VM
LinuxBootParameters string `json:",omitempty"` // Additional boot parameters for starting a Linux Utility VM in initrd mode LinuxBootParameters string `json:",omitempty"` // Additional boot parameters for starting a Linux Utility VM in initrd mode
BootSource string `json:",omitempty"` // "Vhd" for Linux Utility VM booting from VHD
WritableBootSource bool `json:",omitempty"` // Linux Utility VM booting from VHD
} }
type MappedVirtualDisk struct { type MappedVirtualDisk struct {

View File

@ -307,6 +307,16 @@ func (r *legacyLayerReader) Read(b []byte) (int, error) {
return r.backupReader.Read(b) return r.backupReader.Read(b)
} }
// Seek repositions the read offset within the current file entry. Seeking is
// only supported while reading plain file content directly (no backup stream
// active); in backup-stream mode the underlying data is not seekable.
func (r *legacyLayerReader) Seek(offset int64, whence int) (int64, error) {
	if r.backupReader == nil {
		if r.currentFile == nil {
			return 0, errors.New("no current file")
		}
		return r.currentFile.Seek(offset, whence)
	}
	return 0, errors.New("seek not supported on this stream")
}
func (r *legacyLayerReader) Close() error { func (r *legacyLayerReader) Close() error {
r.proceed <- false r.proceed <- false
<-r.result <-r.result

21
vendor/github.com/Microsoft/opengcs/LICENSE generated vendored Normal file
View File

@ -0,0 +1,21 @@
MIT License
Copyright (c) Microsoft Corporation. All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

14
vendor/github.com/Microsoft/opengcs/README.md generated vendored Normal file
View File

@ -0,0 +1,14 @@
# Open Guest Compute Service (opengcs) [![Build Status](https://travis-ci.org/Microsoft/opengcs.svg?branch=master)](https://travis-ci.org/Microsoft/opengcs)
Open Guest Compute Service is a Linux open source project to further the development of a production-quality implementation of Linux Hyper-V containers on Windows (LCOW). It's designed to run inside a custom Linux OS for supporting Linux container payload.
# Getting Started
[How to build GCS binaries](./docs/gcsbuildinstructions.md/)
[How to build custom Linux OS images](./docs/customosbuildinstructions.md/)
# Contributing
This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.

274
vendor/github.com/Microsoft/opengcs/client/config.go generated vendored Normal file
View File

@ -0,0 +1,274 @@
// +build windows
package client
import (
"encoding/json"
"fmt"
"os"
"path/filepath"
"strconv"
"strings"
"github.com/Microsoft/hcsshim"
"github.com/sirupsen/logrus"
)
// Mode is the operational mode, both requested, and actual after verification.
type Mode uint

const (
	// Constants for the actual mode after validation

	// ModeActualError means an error has occurred during validation
	ModeActualError = iota
	// ModeActualVhdx means that we are going to use VHDX boot after validation
	ModeActualVhdx
	// ModeActualKernelInitrd means that we are going to use kernel+initrd for boot after validation
	ModeActualKernelInitrd

	// Constants for the requested mode. Note that iota continues counting
	// across the whole const block, so the ModeRequest* values (3..5) do
	// not overlap the ModeActual* values (0..2) above.

	// ModeRequestAuto means auto-select the boot mode for a utility VM
	ModeRequestAuto = iota // VHDX will be priority over kernel+initrd
	// ModeRequestVhdx means request VHDX boot if possible
	ModeRequestVhdx
	// ModeRequestKernelInitrd means request Kernel+initrd boot if possible
	ModeRequestKernelInitrd

	// defaultUvmTimeoutSeconds is the default time to wait for utility VM operations
	defaultUvmTimeoutSeconds = 5 * 60

	// DefaultVhdxSizeGB is the size of the default sandbox & scratch in GB
	DefaultVhdxSizeGB = 20

	// defaultVhdxBlockSizeMB is the block-size for the sandbox/scratch VHDx's this package can create.
	defaultVhdxBlockSizeMB = 1
)
// Config is the structure used to configure a utility VM. There are two ways
// of starting. Either supply a VHD, or a Kernel+Initrd. For the latter, both
// must be supplied, and both must be in the same directory.
//
// VHD is the priority.
type Config struct {
	Options                                        // Configuration options
	Name               string                      // Name of the utility VM
	RequestedMode      Mode                        // What mode is preferred when validating
	ActualMode         Mode                        // What mode was obtained during validation
	UvmTimeoutSeconds  int                         // How long to wait for the utility VM to respond in seconds
	Uvm                hcsshim.Container           // The actual container
	MappedVirtualDisks []hcsshim.MappedVirtualDisk // Data-disks to be attached
}
// Options is the structure used by a client to define configurable options for a utility VM.
type Options struct {
	KirdPath       string // Path to where kernel/initrd are found (defaults to %PROGRAMFILES%\Linux Containers)
	KernelFile     string // Kernel for Utility VM (embedded in a UEFI bootloader) - does NOT include full path, just filename
	InitrdFile     string // Initrd image for Utility VM - does NOT include full path, just filename
	Vhdx           string // VHD for booting the utility VM - is a full path
	TimeoutSeconds int    // Requested time for the utility VM to respond in seconds (may be over-ridden by environment)
	BootParameters string // Additional boot parameters for initrd booting (not VHDx)
}

// ParseOptions parses a set of K-V pairs into options used by opengcs. Note
// for consistency with the LCOW graphdriver in docker, we keep the same
// convention of an `lcow.` prefix. Unrecognised keys and malformed (non
// "key=value") entries are silently ignored; unset fields receive defaults
// rooted at %PROGRAMFILES%\Linux Containers.
func ParseOptions(options []string) (Options, error) {
	rOpts := Options{TimeoutSeconds: 0}
	for _, v := range options {
		opt := strings.SplitN(v, "=", 2)
		if len(opt) == 2 {
			switch strings.ToLower(opt[0]) {
			case "lcow.kirdpath":
				rOpts.KirdPath = opt[1]
			case "lcow.kernel":
				rOpts.KernelFile = opt[1]
			case "lcow.initrd":
				rOpts.InitrdFile = opt[1]
			case "lcow.vhdx":
				rOpts.Vhdx = opt[1]
			case "lcow.bootparameters":
				rOpts.BootParameters = opt[1]
			case "lcow.timeout":
				var err error
				// Error messages name the real option key ("lcow.timeout")
				// so users can correct the offending setting.
				if rOpts.TimeoutSeconds, err = strconv.Atoi(opt[1]); err != nil {
					return rOpts, fmt.Errorf("lcow.timeout option could not be interpreted as an integer")
				}
				if rOpts.TimeoutSeconds < 0 {
					return rOpts, fmt.Errorf("lcow.timeout option cannot be negative")
				}
			}
		}
	}

	// Set default values if not supplied
	if rOpts.KirdPath == "" {
		rOpts.KirdPath = filepath.Join(os.Getenv("ProgramFiles"), "Linux Containers")
	}
	if rOpts.Vhdx == "" {
		rOpts.Vhdx = filepath.Join(rOpts.KirdPath, `uvm.vhdx`)
	}
	if rOpts.KernelFile == "" {
		rOpts.KernelFile = `bootx64.efi`
	}
	if rOpts.InitrdFile == "" {
		rOpts.InitrdFile = `initrd.img`
	}
	return rOpts, nil
}
// GenerateDefault generates a default config from a set of options.
// If KirdPath is not supplied in the options, it defaults to
// %PROGRAMFILES%\Linux Containers (see ParseOptions).
//
// The utility-VM timeout is resolved in priority order: the lcow.timeout
// option, then the OPENGCS_UVM_TIMEOUT_SECONDS environment variable, then
// the package default.
func (config *Config) GenerateDefault(options []string) error {
	// Parse the options that the user supplied.
	var err error
	config.Options, err = ParseOptions(options)
	if err != nil {
		return err
	}

	// Get the timeout from the environment
	envTimeoutSeconds := 0
	envTimeout := os.Getenv("OPENGCS_UVM_TIMEOUT_SECONDS")
	if len(envTimeout) > 0 {
		if envTimeoutSeconds, err = strconv.Atoi(envTimeout); err != nil {
			return fmt.Errorf("OPENGCS_UVM_TIMEOUT_SECONDS could not be interpreted as an integer")
		}
		if envTimeoutSeconds < 0 {
			return fmt.Errorf("OPENGCS_UVM_TIMEOUT_SECONDS cannot be negative")
		}
	}

	// Resolve the timeout: option, then environment, then default.
	switch {
	case config.TimeoutSeconds != 0:
		config.UvmTimeoutSeconds = config.TimeoutSeconds
	case envTimeoutSeconds != 0:
		config.UvmTimeoutSeconds = envTimeoutSeconds
	default:
		config.UvmTimeoutSeconds = defaultUvmTimeoutSeconds
	}

	// Always set the default requested mode, regardless of which timeout
	// source applied. (Previously this was only reached on the default-timeout
	// path, leaving RequestedMode at its zero value — which is ModeActualError,
	// not ModeRequestAuto — whenever a timeout was supplied.)
	config.RequestedMode = ModeRequestAuto

	return nil
}
// Validate validates a Config structure for starting a utility VM. It resolves
// ActualMode from RequestedMode (VHDX takes priority in auto mode, falling
// back to kernel+initrd when the VHDX does not exist) and verifies that the
// required files and mapped virtual disks are present on the host.
func (config *Config) Validate() error {
	config.ActualMode = ModeActualError

	if config.RequestedMode == ModeRequestVhdx && config.Vhdx == "" {
		return fmt.Errorf("VHDx mode must supply a VHDx")
	}
	if config.RequestedMode == ModeRequestKernelInitrd && (config.KernelFile == "" || config.InitrdFile == "") {
		return fmt.Errorf("kernel+initrd mode must supply both kernel and initrd")
	}

	// Validate that if VHDX requested or auto, it exists.
	if config.RequestedMode == ModeRequestAuto || config.RequestedMode == ModeRequestVhdx {
		if _, err := os.Stat(config.Vhdx); os.IsNotExist(err) {
			if config.RequestedMode == ModeRequestVhdx {
				return fmt.Errorf("VHDx '%s' not found", config.Vhdx)
			}
			// Auto mode: fall through to kernel+initrd validation below.
		} else {
			config.ActualMode = ModeActualVhdx
			// Can't specify boot parameters with VHDx
			if config.BootParameters != "" {
				return fmt.Errorf("boot parameters cannot be specified in VHDx mode")
			}
			return nil
		}
	}

	// So must be kernel+initrd, or auto where we fallback as the VHDX doesn't exist
	if config.InitrdFile == "" || config.KernelFile == "" {
		if config.RequestedMode == ModeRequestKernelInitrd {
			return fmt.Errorf("initrd and kernel options must be supplied")
		}
		return fmt.Errorf("opengcs: configuration is invalid")
	}

	if _, err := os.Stat(filepath.Join(config.KirdPath, config.KernelFile)); os.IsNotExist(err) {
		return fmt.Errorf("kernel '%s' not found", filepath.Join(config.KirdPath, config.KernelFile))
	}
	if _, err := os.Stat(filepath.Join(config.KirdPath, config.InitrdFile)); os.IsNotExist(err) {
		return fmt.Errorf("initrd '%s' not found", filepath.Join(config.KirdPath, config.InitrdFile))
	}
	config.ActualMode = ModeActualKernelInitrd

	// Ensure all the MappedVirtualDisks exist on the host
	for _, mvd := range config.MappedVirtualDisks {
		if _, err := os.Stat(mvd.HostPath); err != nil {
			return fmt.Errorf("mapped virtual disk '%s' not found", mvd.HostPath)
		}
		if mvd.ContainerPath == "" {
			return fmt.Errorf("mapped virtual disk '%s' requested without a container path", mvd.HostPath)
		}
	}
	return nil
}
// StartUtilityVM creates and starts a utility VM from a configuration.
//
// The configuration is validated first; config.ActualMode (set by Validate)
// selects between booting from a VHDX image or from a kernel+initrd pair.
// On success config.Uvm holds the running utility VM.
func (config *Config) StartUtilityVM() error {
	logrus.Debugf("opengcs: StartUtilityVM: %+v", config)
	if err := config.Validate(); err != nil {
		return err
	}
	configuration := &hcsshim.ContainerConfig{
		HvPartition:                 true,
		Name:                        config.Name,
		SystemType:                  "container",
		ContainerType:               "linux",
		TerminateOnLastHandleClosed: true,
		MappedVirtualDisks:          config.MappedVirtualDisks,
	}
	if config.ActualMode == ModeActualVhdx {
		// Boot the utility VM directly from the VHDX image.
		configuration.HvRuntime = &hcsshim.HvRuntime{
			ImagePath:          config.Vhdx,
			BootSource:         "Vhd",
			WritableBootSource: true,
		}
	} else {
		// Kernel+initrd boot from the kernel/initrd directory.
		configuration.HvRuntime = &hcsshim.HvRuntime{
			ImagePath:           config.KirdPath,
			LinuxInitrdFile:     config.InitrdFile,
			LinuxKernelFile:     config.KernelFile,
			LinuxBootParameters: config.BootParameters,
		}
	}
	// Marshal error deliberately ignored: the JSON is for debug logging only.
	configurationS, _ := json.Marshal(configuration)
	logrus.Debugf("opengcs: StartUtilityVM: calling HCS with '%s'", string(configurationS))
	uvm, err := hcsshim.CreateContainer(config.Name, configuration)
	if err != nil {
		return err
	}
	logrus.Debugf("opengcs: StartUtilityVM: uvm created, starting...")
	err = uvm.Start()
	if err != nil {
		logrus.Debugf("opengcs: StartUtilityVM: uvm failed to start: %s", err)
		// Make sure we don't leave it laying around as it's been created in HCS
		uvm.Terminate()
		return err
	}
	config.Uvm = uvm
	logrus.Debugf("opengcs StartUtilityVM: uvm %s is running", config.Name)
	return nil
}

View File

@ -0,0 +1,165 @@
// +build windows
package client
import (
"bytes"
"fmt"
"os"
"strings"
"time"
winio "github.com/Microsoft/go-winio/vhd"
// "github.com/Microsoft/hcsshim"
"github.com/sirupsen/logrus"
)
// dismount is a simple utility function wrapping a conditional HotRemove. It would
// have been easier if you could cancel a deferred function, but this works just
// as well.
func (config *Config) dismount(file string) error {
	logrus.Debugf("opengcs: CreateExt4Vhdx: hot-remove of %s", file)
	if err := config.HotRemoveVhd(file); err != nil {
		// Best effort: surface the failure in the log and to the caller.
		logrus.Warnf("failed to hot-remove: %s", err)
		return err
	}
	return nil
}
// CreateExt4Vhdx does what it says on the tin. It is the responsibility of the caller to synchronise
// simultaneous attempts to create the cache file.
//
// The VHDX is created on the host, hot-added to the utility VM, located on
// its SCSI controller, formatted ext4 from inside the VM, then hot-removed.
// If cacheFile is supplied and sizeGB is the default size, the result is
// served from (or seeds) the cache.
func (config *Config) CreateExt4Vhdx(destFile string, sizeGB uint32, cacheFile string) error {
	// Smallest we can accept is the default sandbox size as we can't size down, only expand.
	if sizeGB < DefaultVhdxSizeGB {
		sizeGB = DefaultVhdxSizeGB
	}
	logrus.Debugf("opengcs: CreateExt4Vhdx: %s size:%dGB cache:%s", destFile, sizeGB, cacheFile)
	// Retrieve from cache if the default size and already on disk
	if cacheFile != "" && sizeGB == DefaultVhdxSizeGB {
		if _, err := os.Stat(cacheFile); err == nil {
			if err := CopyFile(cacheFile, destFile, false); err != nil {
				return fmt.Errorf("failed to copy cached file '%s' to '%s': %s", cacheFile, destFile, err)
			}
			logrus.Debugf("opengcs: CreateExt4Vhdx: %s fulfilled from cache", destFile)
			return nil
		}
	}
	// Must have a utility VM to operate on
	if config.Uvm == nil {
		return fmt.Errorf("no utility VM")
	}
	// Create the VHDX
	if err := winio.CreateVhdx(destFile, sizeGB, defaultVhdxBlockSizeMB); err != nil {
		return fmt.Errorf("failed to create VHDx %s: %s", destFile, err)
	}
	// Attach it to the utility VM, but don't mount it (as there's no filesystem on it)
	// BUGFIX: the error message previously reported cacheFile instead of destFile.
	if err := config.HotAddVhd(destFile, "", false, false); err != nil {
		return fmt.Errorf("opengcs: CreateExt4Vhdx: failed to hot-add %s to utility VM: %s", destFile, err)
	}
	// Get the list of mapped virtual disks to find the controller and LUN IDs
	logrus.Debugf("opengcs: CreateExt4Vhdx: %s querying mapped virtual disks", destFile)
	mvdControllers, err := config.Uvm.MappedVirtualDisks()
	if err != nil {
		return fmt.Errorf("failed to get mapped virtual disks: %s", err)
	}
	// Find our mapped disk from the list of all currently added.
	controller := -1
	lun := -1
findDisk:
	for controllerNumber, controllerElement := range mvdControllers {
		for diskNumber, diskElement := range controllerElement.MappedVirtualDisks {
			if diskElement.HostPath == destFile {
				controller = controllerNumber
				lun = diskNumber
				break findDisk // labelled so the outer loop stops too
			}
		}
	}
	if controller == -1 || lun == -1 {
		config.dismount(destFile)
		return fmt.Errorf("failed to find %s in mapped virtual disks after hot-adding", destFile)
	}
	logrus.Debugf("opengcs: CreateExt4Vhdx: %s at C=%d L=%d", destFile, controller, lun)
	// Validate /sys/bus/scsi/devices/C:0:0:L exists as a directory
	testdCommand := fmt.Sprintf(`test -d /sys/bus/scsi/devices/%d:0:0:%d`, controller, lun)
	testdProc, err := config.RunProcess(testdCommand, nil, nil, nil)
	if err != nil {
		config.dismount(destFile)
		return fmt.Errorf("failed to `%s` following hot-add %s to utility VM: %s", testdCommand, destFile, err)
	}
	defer testdProc.Close()
	// WaitTimeout result deliberately ignored; ExitCode below surfaces failure.
	testdProc.WaitTimeout(time.Duration(int(time.Second) * config.UvmTimeoutSeconds))
	testdExitCode, err := testdProc.ExitCode()
	if err != nil {
		config.dismount(destFile)
		return fmt.Errorf("failed to get exit code from `%s` following hot-add %s to utility VM: %s", testdCommand, destFile, err)
	}
	if testdExitCode != 0 {
		config.dismount(destFile)
		return fmt.Errorf("`%s` return non-zero exit code (%d) following hot-add %s to utility VM", testdCommand, testdExitCode, destFile)
	}
	// Get the device from under the block subdirectory by doing a simple ls. This will come back as (eg) `sda`
	lsCommand := fmt.Sprintf(`ls /sys/bus/scsi/devices/%d:0:0:%d/block`, controller, lun)
	var lsOutput bytes.Buffer
	lsProc, err := config.RunProcess(lsCommand, nil, &lsOutput, nil)
	if err != nil {
		config.dismount(destFile)
		return fmt.Errorf("failed to `%s` following hot-add %s to utility VM: %s", lsCommand, destFile, err)
	}
	defer lsProc.Close()
	lsProc.WaitTimeout(time.Duration(int(time.Second) * config.UvmTimeoutSeconds))
	lsExitCode, err := lsProc.ExitCode()
	if err != nil {
		config.dismount(destFile)
		return fmt.Errorf("failed to get exit code from `%s` following hot-add %s to utility VM: %s", lsCommand, destFile, err)
	}
	if lsExitCode != 0 {
		config.dismount(destFile)
		return fmt.Errorf("`%s` return non-zero exit code (%d) following hot-add %s to utility VM", lsCommand, lsExitCode, destFile)
	}
	device := fmt.Sprintf(`/dev/%s`, strings.TrimSpace(lsOutput.String()))
	logrus.Debugf("opengcs: CreateExt4Vhdx: %s: device at %s", destFile, device)
	// Format it ext4
	mkfsCommand := fmt.Sprintf(`mkfs.ext4 -q -E lazy_itable_init=1 -O ^has_journal,sparse_super2,uninit_bg,^resize_inode %s`, device)
	var mkfsStderr bytes.Buffer
	mkfsProc, err := config.RunProcess(mkfsCommand, nil, nil, &mkfsStderr)
	if err != nil {
		config.dismount(destFile)
		// BUGFIX: the command and file arguments were previously swapped
		// relative to the format verbs.
		return fmt.Errorf("failed to RunProcess %q following hot-add %s to utility VM: %s", mkfsCommand, destFile, err)
	}
	defer mkfsProc.Close()
	mkfsProc.WaitTimeout(time.Duration(int(time.Second) * config.UvmTimeoutSeconds))
	mkfsExitCode, err := mkfsProc.ExitCode()
	if err != nil {
		config.dismount(destFile)
		return fmt.Errorf("failed to get exit code from `%s` following hot-add %s to utility VM: %s", mkfsCommand, destFile, err)
	}
	if mkfsExitCode != 0 {
		config.dismount(destFile)
		return fmt.Errorf("`%s` return non-zero exit code (%d) following hot-add %s to utility VM: %s", mkfsCommand, mkfsExitCode, destFile, strings.TrimSpace(mkfsStderr.String()))
	}
	// Dismount before we copy it
	if err := config.dismount(destFile); err != nil {
		return fmt.Errorf("failed to hot-remove: %s", err)
	}
	// Populate the cache.
	// BUGFIX: the arguments were previously reversed relative to the
	// "seed cache '<cache>' from '<dest>'" wording.
	if cacheFile != "" && (sizeGB == DefaultVhdxSizeGB) {
		if err := CopyFile(destFile, cacheFile, true); err != nil {
			return fmt.Errorf("failed to seed cache '%s' from '%s': %s", cacheFile, destFile, err)
		}
	}
	logrus.Debugf("opengcs: CreateExt4Vhdx: %s created (non-cache)", destFile)
	return nil
}

View File

@ -0,0 +1,40 @@
// +build windows
package client
import (
"fmt"
"github.com/Microsoft/hcsshim"
"github.com/sirupsen/logrus"
)
// HotAddVhd hot-adds a VHD to a utility VM. This is used in the global one-utility-VM-
// service-VM per host scenario. In order to do a graphdriver `Diff`, we hot-add the
// sandbox to /mnt/<id> so that we can run `exportSandbox` inside the utility VM to
// get a tar-stream of the sandboxes contents back to the daemon.
func (config *Config) HotAddVhd(hostPath string, containerPath string, readOnly bool, mount bool) error {
	logrus.Debugf("opengcs: HotAddVhd: %s: %s", hostPath, containerPath)
	if config.Uvm == nil {
		return fmt.Errorf("cannot hot-add VHD as no utility VM is in configuration")
	}
	// Describe the disk to attach; AttachOnly suppresses mounting inside the VM.
	disk := hcsshim.MappedVirtualDisk{
		HostPath:          hostPath,
		ContainerPath:     containerPath,
		CreateInUtilityVM: true,
		ReadOnly:          readOnly,
		AttachOnly:        !mount,
	}
	request := &hcsshim.ResourceModificationRequestResponse{
		Resource: "MappedVirtualDisk",
		Data:     disk,
		Request:  "Add",
	}
	if err := config.Uvm.Modify(request); err != nil {
		return fmt.Errorf("failed to modify utility VM configuration for hot-add: %s", err)
	}
	logrus.Debugf("opengcs: HotAddVhd: %s added successfully", hostPath)
	return nil
}

View File

@ -0,0 +1,34 @@
// +build windows
package client
import (
"fmt"
"github.com/Microsoft/hcsshim"
"github.com/sirupsen/logrus"
)
// HotRemoveVhd hot-removes a VHD from a utility VM. This is used in the global one-utility-VM-
// service-VM per host scenario.
func (config *Config) HotRemoveVhd(hostPath string) error {
	logrus.Debugf("opengcs: HotRemoveVhd: %s", hostPath)
	if config.Uvm == nil {
		// BUGFIX: this message previously said "hot-add" in the hot-remove path.
		return fmt.Errorf("cannot hot-remove VHD as no utility VM is in configuration")
	}
	// Ask HCS to detach the mapped virtual disk from the utility VM.
	modification := &hcsshim.ResourceModificationRequestResponse{
		Resource: "MappedVirtualDisk",
		Data: hcsshim.MappedVirtualDisk{
			HostPath:          hostPath,
			CreateInUtilityVM: true,
		},
		Request: "Remove",
	}
	if err := config.Uvm.Modify(modification); err != nil {
		return fmt.Errorf("failed modifying utility VM for hot-remove %s: %s", hostPath, err)
	}
	logrus.Debugf("opengcs: HotRemoveVhd: %s removed successfully", hostPath)
	return nil
}

View File

@ -0,0 +1,31 @@
// +build windows
package client
import (
"fmt"
"os"
"path/filepath"
)
// LayerVhdDetails is a utility for getting a file name, size and indication of
// sandbox for a VHD(x) in a folder. A read-only layer will be layer.vhd. A
// read-write layer will be sandbox.vhdx.
func LayerVhdDetails(folder string) (string, int64, bool, error) {
var fileInfo os.FileInfo
isSandbox := false
filename := filepath.Join(folder, "layer.vhd")
var err error
if fileInfo, err = os.Stat(filename); err != nil {
filename = filepath.Join(folder, "sandbox.vhdx")
if fileInfo, err = os.Stat(filename); err != nil {
if os.IsNotExist(err) {
return "", 0, isSandbox, fmt.Errorf("could not find layer or sandbox in %s", folder)
}
return "", 0, isSandbox, fmt.Errorf("error locating layer or sandbox in %s: %s", folder, err)
}
isSandbox = true
}
return filename, fileInfo.Size(), isSandbox, nil
}

112
vendor/github.com/Microsoft/opengcs/client/process.go generated vendored Normal file
View File

@ -0,0 +1,112 @@
// +build windows
package client
import (
"fmt"
"io"
"github.com/Microsoft/hcsshim"
"github.com/sirupsen/logrus"
)
// process is the structure pertaining to a process running in a utility VM.
type process struct {
	Process hcsshim.Process // handle to the HCS process inside the utility VM
	Stdin   io.WriteCloser  // pipe to the process's stdin
	Stdout  io.ReadCloser   // pipe from the process's stdout
	Stderr  io.ReadCloser   // pipe from the process's stderr
}
// createUtilsProcess is a convenient wrapper for hcsshim.createUtilsProcess to use when
// communicating with a utility VM.
//
// The returned process has stdin/stdout/stderr pipes attached. If obtaining
// the pipes fails, the process is killed and closed before returning.
func (config *Config) createUtilsProcess(commandLine string) (process, error) {
	logrus.Debugf("opengcs: createUtilsProcess")
	if config.Uvm == nil {
		return process{}, fmt.Errorf("cannot create utils process as no utility VM is in configuration")
	}
	var (
		err  error
		proc process
	)
	// Minimal environment for the in-VM utility binaries.
	env := make(map[string]string)
	env["PATH"] = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:"
	processConfig := &hcsshim.ProcessConfig{
		EmulateConsole:    false,
		CreateStdInPipe:   true,
		CreateStdOutPipe:  true,
		CreateStdErrPipe:  true,
		CreateInUtilityVm: true,
		WorkingDirectory:  "/bin",
		Environment:       env,
		CommandLine:       commandLine,
	}
	proc.Process, err = config.Uvm.CreateProcess(processConfig)
	if err != nil {
		return process{}, fmt.Errorf("failed to create process (%+v) in utility VM: %s", config, err)
	}
	if proc.Stdin, proc.Stdout, proc.Stderr, err = proc.Process.Stdio(); err != nil {
		proc.Process.Kill() // Should this have a timeout?
		proc.Process.Close()
		return process{}, fmt.Errorf("failed to get stdio pipes for process %+v: %s", config, err)
	}
	logrus.Debugf("opengcs: createUtilsProcess success: pid %d", proc.Process.Pid())
	return proc, nil
}
// RunProcess runs the given command line program in the utilityVM. It takes in
// an input to the reader to feed into stdin and returns stdout to output.
// IMPORTANT: It is the responsibility of the caller to call Close() on the returned process.
//
// On any failure after the process has been created, the process is closed
// before returning so it is not leaked in the utility VM (BUGFIX: previously
// the error paths returned without releasing the process).
func (config *Config) RunProcess(commandLine string, stdin io.Reader, stdout io.Writer, stderr io.Writer) (hcsshim.Process, error) {
	logrus.Debugf("opengcs: RunProcess: %s", commandLine)
	process, err := config.createUtilsProcess(commandLine)
	if err != nil {
		return nil, err
	}
	// Send the data into the process's stdin
	if stdin != nil {
		if _, err = copyWithTimeout(process.Stdin,
			stdin,
			0,
			config.UvmTimeoutSeconds,
			fmt.Sprintf("send to stdin of %s", commandLine)); err != nil {
			process.Process.Close()
			return nil, err
		}
		// Don't need stdin now we've sent everything. This signals GCS that we are finished sending data.
		if err := process.Process.CloseStdin(); err != nil {
			process.Process.Close()
			return nil, err
		}
	}
	if stdout != nil {
		// Copy the data over to the writer.
		if _, err := copyWithTimeout(stdout,
			process.Stdout,
			0,
			config.UvmTimeoutSeconds,
			fmt.Sprintf("RunProcess: copy back from %s", commandLine)); err != nil {
			process.Process.Close()
			return nil, err
		}
	}
	if stderr != nil {
		// Copy the data over to the writer.
		if _, err := copyWithTimeout(stderr,
			process.Stderr,
			0,
			config.UvmTimeoutSeconds,
			fmt.Sprintf("RunProcess: copy back from %s", commandLine)); err != nil {
			process.Process.Close()
			return nil, err
		}
	}
	logrus.Debugf("opengcs: runProcess success: %s", commandLine)
	return process.Process, nil
}

44
vendor/github.com/Microsoft/opengcs/client/tartovhd.go generated vendored Normal file
View File

@ -0,0 +1,44 @@
// +build windows
package client
import (
"fmt"
"io"
"github.com/sirupsen/logrus"
)
// TarToVhd streams a tarstream contained in an io.Reader to a fixed vhd file
func (config *Config) TarToVhd(targetVHDFile string, reader io.Reader) (int64, error) {
	logrus.Debugf("opengcs: TarToVhd: %s", targetVHDFile)
	if config.Uvm == nil {
		return 0, fmt.Errorf("cannot Tar2Vhd as no utility VM is in configuration")
	}
	// Launch the tar2vhd helper inside the utility VM.
	proc, err := config.createUtilsProcess("tar2vhd")
	if err != nil {
		return 0, fmt.Errorf("failed to start tar2vhd for %s: %s", targetVHDFile, err)
	}
	defer proc.Process.Close()
	// Send the tarstream into the `tar2vhd`s stdin
	_, err = copyWithTimeout(proc.Stdin, reader, 0, config.UvmTimeoutSeconds, fmt.Sprintf("stdin of tar2vhd for generating %s", targetVHDFile))
	if err != nil {
		return 0, fmt.Errorf("failed sending to tar2vhd for %s: %s", targetVHDFile, err)
	}
	// Don't need stdin now we've sent everything. This signals GCS that we are finished sending data.
	if err = proc.Process.CloseStdin(); err != nil {
		return 0, fmt.Errorf("failed closing stdin handle for %s: %s", targetVHDFile, err)
	}
	// Write stdout contents of `tar2vhd` to the VHD file
	payloadSize, err := writeFileFromReader(targetVHDFile, proc.Stdout, config.UvmTimeoutSeconds, fmt.Sprintf("stdout of tar2vhd to %s", targetVHDFile))
	if err != nil {
		return 0, fmt.Errorf("failed to write %s during tar2vhd: %s", targetVHDFile, err)
	}
	logrus.Debugf("opengcs: TarToVhd: %s created, %d bytes", targetVHDFile, payloadSize)
	return payloadSize, nil
}

View File

@ -0,0 +1,3 @@
// +build !windows
package client

View File

@ -0,0 +1,99 @@
// +build windows
package client
import (
"fmt"
"io"
"os"
"syscall"
"time"
"unsafe"
"github.com/sirupsen/logrus"
)
// Lazily-loaded kernel32.dll binding for the CopyFileW API used by CopyFile.
var (
	modkernel32 = syscall.NewLazyDLL("kernel32.dll")
	procCopyFileW = modkernel32.NewProc("CopyFileW")
)
// writeFileFromReader creates path and streams the contents of reader into it,
// subject to the supplied timeout. Returns the number of bytes written.
func writeFileFromReader(path string, reader io.Reader, timeoutSeconds int, context string) (int64, error) {
	f, err := os.Create(path)
	if err != nil {
		return 0, fmt.Errorf("opengcs: writeFileFromReader: failed to create %s: %s", path, err)
	}
	defer f.Close()
	return copyWithTimeout(f, reader, 0, timeoutSeconds, context)
}
// copyWithTimeout wraps io.Copy, failing with an error if the copy has not
// completed within timeoutSeconds. size and context are used for logging only.
func copyWithTimeout(dst io.Writer, src io.Reader, size int64, timeoutSeconds int, context string) (int64, error) {
	logrus.Debugf("opengcs: copywithtimeout: size %d: timeout %d: (%s)", size, timeoutSeconds, context)
	type copyResult struct {
		err   error
		bytes int64
	}
	resultCh := make(chan copyResult, 1)
	// Run the copy in the background so the select below can time it out.
	go func() {
		n, copyErr := io.Copy(dst, src)
		resultCh <- copyResult{err: copyErr, bytes: n}
	}()
	select {
	case <-time.After(time.Duration(timeoutSeconds) * time.Second):
		return 0, fmt.Errorf("opengcs: copyWithTimeout: timed out (%s)", context)
	case res := <-resultCh:
		if res.err != nil && res.err != io.EOF {
			// See https://github.com/golang/go/blob/f3f29d1dea525f48995c1693c609f5e67c046893/src/os/exec/exec_windows.go for a clue as to why we are doing this :)
			if se, ok := res.err.(syscall.Errno); ok {
				const (
					errNoData     = syscall.Errno(232)
					errBrokenPipe = syscall.Errno(109)
				)
				if se == errNoData || se == errBrokenPipe {
					logrus.Debugf("opengcs: copyWithTimeout: hit NoData or BrokenPipe: %d: %s", se, context)
					return res.bytes, nil
				}
			}
			return 0, fmt.Errorf("opengcs: copyWithTimeout: error reading: '%s' after %d bytes (%s)", res.err, res.bytes, context)
		}
		logrus.Debugf("opengcs: copyWithTimeout: success - copied %d bytes (%s)", res.bytes, context)
		return res.bytes, nil
	}
}
// CopyFile is a utility for copying a file - used for the sandbox cache.
// Uses CopyFileW win32 API for performance
func CopyFile(srcFile, destFile string, overwrite bool) error {
	// CopyFileW takes bFailIfExists: zero means an existing destination may
	// be overwritten.
	var bFailIfExists uint32
	if !overwrite {
		bFailIfExists = 1
	}
	src, err := syscall.UTF16PtrFromString(srcFile)
	if err != nil {
		return err
	}
	dst, err := syscall.UTF16PtrFromString(destFile)
	if err != nil {
		return err
	}
	r1, _, err := syscall.Syscall(
		procCopyFileW.Addr(),
		3,
		uintptr(unsafe.Pointer(src)),
		uintptr(unsafe.Pointer(dst)),
		uintptr(bFailIfExists))
	if r1 == 0 {
		return fmt.Errorf("failed CopyFileW Win32 call from '%s' to '%s': %s", srcFile, destFile, err)
	}
	return nil
}

67
vendor/github.com/Microsoft/opengcs/client/vhdtotar.go generated vendored Normal file
View File

@ -0,0 +1,67 @@
// +build windows
package client
import (
"fmt"
"io"
"os"
"github.com/sirupsen/logrus"
)
// VhdToTar does what is says - it exports a VHD in a specified
// folder (either a read-only layer.vhd, or a read-write sandbox.vhd) to a
// ReadCloser containing a tar-stream of the layers contents.
//
// For read-only layers the VHD contents are streamed into the in-VM helper's
// stdin; for sandboxes the helper reads from uvmMountPath inside the VM. The
// tar stream is returned via an io.Pipe fed by a background goroutine.
func (config *Config) VhdToTar(vhdFile string, uvmMountPath string, isSandbox bool, vhdSize int64) (io.ReadCloser, error) {
	logrus.Debugf("opengcs: VhdToTar: %s isSandbox: %t", vhdFile, isSandbox)
	if config.Uvm == nil {
		return nil, fmt.Errorf("cannot VhdToTar as no utility VM is in configuration")
	}
	vhdHandle, err := os.Open(vhdFile)
	if err != nil {
		return nil, fmt.Errorf("opengcs: VhdToTar: failed to open %s: %s", vhdFile, err)
	}
	// Closed when this function returns; the goroutine below only reads from
	// process.Stdout (it uses vhdHandle solely for Name() in log messages).
	defer vhdHandle.Close()
	logrus.Debugf("opengcs: VhdToTar: exporting %s, size %d, isSandbox %t", vhdHandle.Name(), vhdSize, isSandbox)
	// Different binary depending on whether a RO layer or a RW sandbox
	command := "vhd2tar"
	if isSandbox {
		command = fmt.Sprintf("exportSandbox -path %s", uvmMountPath)
	}
	// Start the binary in the utility VM
	process, err := config.createUtilsProcess(command)
	if err != nil {
		return nil, fmt.Errorf("opengcs: VhdToTar: %s: failed to create utils process %s: %s", vhdHandle.Name(), command, err)
	}
	if !isSandbox {
		// Send the VHD contents to the utility VM processes stdin handle if not a sandbox
		logrus.Debugf("opengcs: VhdToTar: copying the layer VHD into the utility VM")
		if _, err = copyWithTimeout(process.Stdin, vhdHandle, vhdSize, config.UvmTimeoutSeconds, fmt.Sprintf("vhdtotarstream: sending %s to %s", vhdHandle.Name(), command)); err != nil {
			process.Process.Close()
			return nil, fmt.Errorf("opengcs: VhdToTar: %s: failed to copyWithTimeout on the stdin pipe (to utility VM): %s", vhdHandle.Name(), err)
		}
	}
	// Start a goroutine which copies the stdout (ie the tar stream)
	reader, writer := io.Pipe()
	go func() {
		defer writer.Close()
		defer process.Process.Close()
		logrus.Debugf("opengcs: VhdToTar: copying tar stream back from the utility VM")
		bytes, err := copyWithTimeout(writer, process.Stdout, vhdSize, config.UvmTimeoutSeconds, fmt.Sprintf("vhdtotarstream: copy tarstream from %s", command))
		if err != nil {
			// Surface the failure in the log; the reader side sees EOF when
			// the pipe writer is closed by the deferred Close above.
			logrus.Errorf("opengcs: VhdToTar: %s: copyWithTimeout on the stdout pipe (from utility VM) failed: %s", vhdHandle.Name(), err)
		}
		logrus.Debugf("opengcs: VhdToTar: copied %d bytes of the tarstream of %s from the utility VM", bytes, vhdHandle.Name())
	}()
	// Return the read-side of the pipe connected to the goroutine which is reading from the stdout of the process in the utility VM
	return reader, nil
}

View File

@ -10,6 +10,7 @@ import (
"strings" "strings"
"github.com/Microsoft/hcsshim" "github.com/Microsoft/hcsshim"
"github.com/Microsoft/opengcs/client"
"github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/log" "github.com/containerd/containerd/log"
specs "github.com/opencontainers/runtime-spec/specs-go" specs "github.com/opencontainers/runtime-spec/specs-go"
@ -19,29 +20,46 @@ import (
// newContainerConfig generates a hcsshim container configuration from the // newContainerConfig generates a hcsshim container configuration from the
// provided OCI Spec // provided OCI Spec
func newContainerConfig(ctx context.Context, owner, id string, spec *specs.Spec) (*hcsshim.ContainerConfig, error) { func newContainerConfig(ctx context.Context, owner, id string, spec *specs.Spec) (*hcsshim.ContainerConfig, error) {
if len(spec.Windows.LayerFolders) == 0 {
return nil, errors.Wrap(errdefs.ErrInvalidArgument,
"spec.Windows.LayerFolders cannot be empty")
}
var ( var (
layerFolders = spec.Windows.LayerFolders conf = &hcsshim.ContainerConfig{
conf = &hcsshim.ContainerConfig{ SystemType: "Container",
SystemType: "Container", Name: id,
Name: id, Owner: owner,
Owner: owner, HostName: spec.Hostname,
HostName: spec.Hostname,
IgnoreFlushesDuringBoot: spec.Windows.IgnoreFlushesDuringBoot,
AllowUnqualifiedDNSQuery: spec.Windows.Network.AllowUnqualifiedDNSQuery,
EndpointList: spec.Windows.Network.EndpointList,
NetworkSharedContainerName: spec.Windows.Network.NetworkSharedContainerName,
} }
) )
if spec.Windows.CredentialSpec != nil { if spec.Windows.Network != nil {
conf.Credentials = spec.Windows.CredentialSpec.(string) conf.AllowUnqualifiedDNSQuery = spec.Windows.Network.AllowUnqualifiedDNSQuery
conf.EndpointList = spec.Windows.Network.EndpointList
conf.NetworkSharedContainerName = spec.Windows.Network.NetworkSharedContainerName
if spec.Windows.Network.DNSSearchList != nil {
conf.DNSSearchList = strings.Join(spec.Windows.Network.DNSSearchList, ",")
}
} }
return conf, nil
}
// newWindowsContainerConfig generates a hcsshim Windows container
// configuration from the provided OCI Spec
func newWindowsContainerConfig(ctx context.Context, owner, id string, spec *specs.Spec) (*hcsshim.ContainerConfig, error) {
conf, err := newContainerConfig(ctx, owner, id, spec)
if err != nil {
return nil, err
}
conf.IgnoreFlushesDuringBoot = spec.Windows.IgnoreFlushesDuringBoot
if len(spec.Windows.LayerFolders) < 1 {
return nil, errors.Wrap(errdefs.ErrInvalidArgument,
"spec.Windows.LayerFolders must have at least 1 layers")
}
var (
layerFolders = spec.Windows.LayerFolders
homeDir = filepath.Dir(layerFolders[0])
layerFolderPath = filepath.Join(homeDir, id)
)
// TODO: use the create request Mount for those // TODO: use the create request Mount for those
for _, layerPath := range layerFolders { for _, layerPath := range layerFolders {
_, filename := filepath.Split(layerPath) _, filename := filepath.Split(layerPath)
@ -55,6 +73,60 @@ func newContainerConfig(ctx context.Context, owner, id string, spec *specs.Spec)
}) })
} }
var (
di = hcsshim.DriverInfo{
Flavour: 1, // filter driver
HomeDir: homeDir,
}
)
conf.LayerFolderPath = layerFolderPath
// TODO: Once there is a snapshotter for windows, this can be deleted.
// The R/W Layer should come from the Rootfs Mounts provided
//
// Windows doesn't support creating a container with a readonly
// filesystem, so always create a RW one
if err = hcsshim.CreateSandboxLayer(di, id, layerFolders[0], layerFolders); err != nil {
return nil, errors.Wrapf(err, "failed to create sandbox layer for %s: layers: %#v, driverInfo: %#v",
id, layerFolders, di)
}
defer func() {
if err != nil {
removeLayer(ctx, conf.LayerFolderPath)
}
}()
if err = hcsshim.ActivateLayer(di, id); err != nil {
return nil, errors.Wrapf(err, "failed to activate layer %s", conf.LayerFolderPath)
}
if err = hcsshim.PrepareLayer(di, id, layerFolders); err != nil {
return nil, errors.Wrapf(err, "failed to prepare layer %s", conf.LayerFolderPath)
}
conf.VolumePath, err = hcsshim.GetLayerMountPath(di, id)
if err != nil {
return nil, errors.Wrapf(err, "failed to getmount path for layer %s: driverInfo: %#v", id, di)
}
if spec.Windows.HyperV != nil {
conf.HvPartition = true
for _, layerPath := range layerFolders {
utilityVMPath := spec.Windows.HyperV.UtilityVMPath
_, err := os.Stat(utilityVMPath)
if err == nil {
conf.HvRuntime = &hcsshim.HvRuntime{ImagePath: utilityVMPath}
break
} else if !os.IsNotExist(err) {
return nil, errors.Wrapf(err, "failed to access layer %s", layerPath)
}
}
}
if spec.Windows.CredentialSpec != nil {
conf.Credentials = spec.Windows.CredentialSpec.(string)
}
if len(spec.Mounts) > 0 { if len(spec.Mounts) > 0 {
mds := make([]hcsshim.MappedDir, len(spec.Mounts)) mds := make([]hcsshim.MappedDir, len(spec.Mounts))
for i, mount := range spec.Mounts { for i, mount := range spec.Mounts {
@ -72,59 +144,51 @@ func newContainerConfig(ctx context.Context, owner, id string, spec *specs.Spec)
conf.MappedDirectories = mds conf.MappedDirectories = mds
} }
if spec.Windows.Network.DNSSearchList != nil { return conf, nil
conf.DNSSearchList = strings.Join(spec.Windows.Network.DNSSearchList, ",") }
// newLinuxConfig generates a hcsshim Linux container configuration from the
// provided OCI Spec
func newLinuxConfig(ctx context.Context, owner, id string, spec *specs.Spec) (*hcsshim.ContainerConfig, error) {
conf, err := newContainerConfig(ctx, owner, id, spec)
if err != nil {
return nil, err
} }
if spec.Windows.HyperV != nil { conf.ContainerType = "Linux"
conf.HvPartition = true conf.HvPartition = true
for _, layerPath := range layerFolders {
utilityVMPath := spec.Windows.HyperV.UtilityVMPath
_, err := os.Stat(utilityVMPath)
if err == nil {
conf.HvRuntime = &hcsshim.HvRuntime{ImagePath: utilityVMPath}
break
} else if !os.IsNotExist(err) {
return nil, errors.Wrapf(err, "failed to access layer %s", layerPath)
}
}
}
if len(spec.Windows.LayerFolders) < 1 {
return nil, errors.Wrap(errdefs.ErrInvalidArgument,
"spec.Windows.LayerFolders must have at least 1 layer")
}
var ( var (
err error layerFolders = spec.Windows.LayerFolders
di = hcsshim.DriverInfo{
Flavour: 1, // filter driver
HomeDir: filepath.Dir(layerFolders[0]),
}
) )
// TODO: Once there is a snapshotter for windows, this can be deleted. config := &client.Config{}
// The R/W Layer should come from the Rootfs Mounts provided if err := config.GenerateDefault(nil); err != nil {
// return nil, err
// Windows doesn't support creating a container with a readonly
// filesystem, so always create a RW one
if err = hcsshim.CreateSandboxLayer(di, id, layerFolders[0], layerFolders); err != nil {
return nil, errors.Wrapf(err, "failed to create sandbox layer for %s: layers: %#v, driverInfo: %#v",
id, layerFolders, di)
} }
conf.LayerFolderPath = filepath.Join(di.HomeDir, id)
defer func() { conf.HvRuntime = &hcsshim.HvRuntime{
ImagePath: config.KirdPath,
LinuxKernelFile: config.KernelFile,
LinuxInitrdFile: config.InitrdFile,
LinuxBootParameters: config.BootParameters,
}
// TODO: use the create request Mount for those
for _, layerPath := range layerFolders {
_, filename := filepath.Split(layerPath)
guid, err := hcsshim.NameToGuid(filename)
if err != nil { if err != nil {
removeLayer(ctx, conf.LayerFolderPath) return nil, errors.Wrapf(err, "unable to get GUID for %s", filename)
} }
}() conf.Layers = append(conf.Layers, hcsshim.Layer{
ID: guid.ToString(),
if err = hcsshim.ActivateLayer(di, id); err != nil { Path: filepath.Join(layerPath, "layer.vhd"),
return nil, errors.Wrapf(err, "failed to activate layer %s", conf.LayerFolderPath) })
}
if err = hcsshim.PrepareLayer(di, id, layerFolders); err != nil {
return nil, errors.Wrapf(err, "failed to prepare layer %s", conf.LayerFolderPath)
}
conf.VolumePath, err = hcsshim.GetLayerMountPath(di, id)
if err != nil {
return nil, errors.Wrapf(err, "failed to getmount path for layer %s: driverInfo: %#v", id, di)
} }
return conf, nil return conf, nil
@ -165,21 +229,23 @@ func removeLayer(ctx context.Context, path string) error {
return nil return nil
} }
func newProcessConfig(spec *specs.Process, pset *pipeSet) *hcsshim.ProcessConfig { func newProcessConfig(processSpec *specs.Process, pset *pipeSet) *hcsshim.ProcessConfig {
conf := &hcsshim.ProcessConfig{ conf := &hcsshim.ProcessConfig{
EmulateConsole: pset.src.Terminal, EmulateConsole: pset.src.Terminal,
CreateStdInPipe: pset.stdin != nil, CreateStdInPipe: pset.stdin != nil,
CreateStdOutPipe: pset.stdout != nil, CreateStdOutPipe: pset.stdout != nil,
CreateStdErrPipe: pset.stderr != nil, CreateStdErrPipe: pset.stderr != nil,
User: spec.User.Username, User: processSpec.User.Username,
CommandLine: strings.Join(spec.Args, " "),
Environment: make(map[string]string), Environment: make(map[string]string),
WorkingDirectory: spec.Cwd, WorkingDirectory: processSpec.Cwd,
ConsoleSize: [2]uint{spec.ConsoleSize.Height, spec.ConsoleSize.Width}, }
if processSpec.ConsoleSize != nil {
conf.ConsoleSize = [2]uint{processSpec.ConsoleSize.Height, processSpec.ConsoleSize.Width}
} }
// Convert OCI Env format to HCS's // Convert OCI Env format to HCS's
for _, s := range spec.Env { for _, s := range processSpec.Env {
arr := strings.SplitN(s, "=", 2) arr := strings.SplitN(s, "=", 2)
if len(arr) == 2 { if len(arr) == 2 {
conf.Environment[arr[0]] = arr[1] conf.Environment[arr[0]] = arr[1]
@ -188,3 +254,15 @@ func newProcessConfig(spec *specs.Process, pset *pipeSet) *hcsshim.ProcessConfig
return conf return conf
} }
func newWindowsProcessConfig(processSpec *specs.Process, pset *pipeSet) *hcsshim.ProcessConfig {
conf := newProcessConfig(processSpec, pset)
conf.CommandLine = strings.Join(processSpec.Args, " ")
return conf
}
func newLinuxProcessConfig(processSpec *specs.Process, pset *pipeSet) (*hcsshim.ProcessConfig, error) {
conf := newProcessConfig(processSpec, pset)
conf.CommandArgs = processSpec.Args
return conf, nil
}

View File

@ -243,7 +243,7 @@ func (r *windowsRuntime) newTask(ctx context.Context, namespace, id string, spec
conf *hcsshim.ContainerConfig conf *hcsshim.ContainerConfig
nsid = namespace + "-" + id nsid = namespace + "-" + id
) )
if conf, err = newContainerConfig(ctx, hcsshimOwner, nsid, spec); err != nil { if conf, err = newWindowsContainerConfig(ctx, hcsshimOwner, nsid, spec); err != nil {
return nil, err return nil, err
} }
defer func() { defer func() {

View File

@ -111,7 +111,7 @@ func (t *task) Info() runtime.TaskInfo {
} }
func (t *task) Start(ctx context.Context) error { func (t *task) Start(ctx context.Context) error {
conf := newProcessConfig(t.spec.Process, t.io) conf := newWindowsProcessConfig(t.spec.Process, t.io)
p, err := t.newProcess(ctx, t.id, conf, t.io) p, err := t.newProcess(ctx, t.id, conf, t.io)
if err != nil { if err != nil {
return err return err
@ -194,7 +194,7 @@ func (t *task) Exec(ctx context.Context, id string, opts runtime.ExecOpts) (runt
return nil, err return nil, err
} }
conf := newProcessConfig(spec, pset) conf := newWindowsProcessConfig(spec, pset)
p, err := t.newProcess(ctx, id, conf, pset) p, err := t.newProcess(ctx, id, conf, pset)
if err != nil { if err != nil {
return nil, err return nil, err