Update containerd to f2a20ead83

Signed-off-by: Lantao Liu <lantaol@google.com>

Parent: 8777224600
Commit: c60dd60f80
@@ -2,7 +2,7 @@ github.com/beorn7/perks 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9
 github.com/BurntSushi/toml a368813c5e648fee92e5f6c30e3944ff9d5e8895
 github.com/containerd/cgroups 1152b960fcee041f50df15cdc67c29dbccf801ef
 github.com/containerd/console c12b1e7919c14469339a5d38f2f8ed9b64a9de23
-github.com/containerd/containerd c60a5fd19073636432c7318cee039f01f367ccd8
+github.com/containerd/containerd f2a20ead833f8caf3ffc12be058d6ce668b4ebed
 github.com/containerd/continuity bd77b46c8352f74eb12c85bdc01f4b90f69d66b4
 github.com/containerd/fifo 3d5202aec260678c48179c56f40e6f38a095738c
 github.com/containerd/go-cni 40bcf8ec8acd7372be1d77031d585d5d8e561c90
@@ -28,8 +28,8 @@ github.com/google/gofuzz 44d81051d367757e1c7c6a5a86423ece9afcf63c
 github.com/grpc-ecosystem/go-grpc-prometheus v1.1
 github.com/json-iterator/go 1.1.5
 github.com/matttproud/golang_protobuf_extensions v1.0.0
-github.com/Microsoft/go-winio v0.4.11
-github.com/Microsoft/hcsshim v0.8.5
+github.com/Microsoft/go-winio c599b533b43b1363d7d7c6cfda5ede70ed73ff13
+github.com/Microsoft/hcsshim 8abdbb8205e4192c68b5f84c31197156f31be517
 github.com/modern-go/concurrent 1.0.3
 github.com/modern-go/reflect2 1.0.1
 github.com/opencontainers/go-digest c9281466c8b2f606084ac71339773efd177436e7
vendor/github.com/Microsoft/go-winio/pkg/etw/etw.go (generated, vendored, new file, 15 lines)
@@ -0,0 +1,15 @@
// Package etw provides support for TraceLogging-based ETW (Event Tracing
// for Windows). TraceLogging is a format of ETW events that are self-describing
// (the event contains information on its own schema). This allows them to be
// decoded without needing a separate manifest with event information. The
// implementation here is based on the information found in
// TraceLoggingProvider.h in the Windows SDK, which implements TraceLogging as a
// set of C macros.
package etw

//go:generate go run $GOROOT/src/syscall/mksyscall_windows.go -output zsyscall_windows.go etw.go

//sys eventRegister(providerId *windows.GUID, callback uintptr, callbackContext uintptr, providerHandle *providerHandle) (win32err error) = advapi32.EventRegister
//sys eventUnregister(providerHandle providerHandle) (win32err error) = advapi32.EventUnregister
//sys eventWriteTransfer(providerHandle providerHandle, descriptor *eventDescriptor, activityID *windows.GUID, relatedActivityID *windows.GUID, dataDescriptorCount uint32, dataDescriptors *eventDataDescriptor) (win32err error) = advapi32.EventWriteTransfer
//sys eventSetInformation(providerHandle providerHandle, class eventInfoClass, information uintptr, length uint32) (win32err error) = advapi32.EventSetInformation
vendor/github.com/Microsoft/go-winio/pkg/etw/eventdata.go (generated, vendored, new file, 65 lines)
@@ -0,0 +1,65 @@
package etw

import (
    "bytes"
    "encoding/binary"
)

// eventData maintains a buffer which builds up the data for an ETW event. It
// needs to be paired with EventMetadata which describes the event.
type eventData struct {
    buffer bytes.Buffer
}

// bytes returns the raw binary data containing the event data. The returned
// value is not copied from the internal buffer, so it can be mutated by the
// eventData object after it is returned.
func (ed *eventData) bytes() []byte {
    return ed.buffer.Bytes()
}

// writeString appends a string, including the null terminator, to the buffer.
func (ed *eventData) writeString(data string) {
    ed.buffer.WriteString(data)
    ed.buffer.WriteByte(0)
}

// writeInt8 appends a int8 to the buffer.
func (ed *eventData) writeInt8(value int8) {
    ed.buffer.WriteByte(uint8(value))
}

// writeInt16 appends a int16 to the buffer.
func (ed *eventData) writeInt16(value int16) {
    binary.Write(&ed.buffer, binary.LittleEndian, value)
}

// writeInt32 appends a int32 to the buffer.
func (ed *eventData) writeInt32(value int32) {
    binary.Write(&ed.buffer, binary.LittleEndian, value)
}

// writeInt64 appends a int64 to the buffer.
func (ed *eventData) writeInt64(value int64) {
    binary.Write(&ed.buffer, binary.LittleEndian, value)
}

// writeUint8 appends a uint8 to the buffer.
func (ed *eventData) writeUint8(value uint8) {
    ed.buffer.WriteByte(value)
}

// writeUint16 appends a uint16 to the buffer.
func (ed *eventData) writeUint16(value uint16) {
    binary.Write(&ed.buffer, binary.LittleEndian, value)
}

// writeUint32 appends a uint32 to the buffer.
func (ed *eventData) writeUint32(value uint32) {
    binary.Write(&ed.buffer, binary.LittleEndian, value)
}

// writeUint64 appends a uint64 to the buffer.
func (ed *eventData) writeUint64(value uint64) {
    binary.Write(&ed.buffer, binary.LittleEndian, value)
}
vendor/github.com/Microsoft/go-winio/pkg/etw/eventdatadescriptor.go (generated, vendored, new file, 29 lines)
@@ -0,0 +1,29 @@
package etw

import (
    "unsafe"
)

type eventDataDescriptorType uint8

const (
    eventDataDescriptorTypeUserData eventDataDescriptorType = iota
    eventDataDescriptorTypeEventMetadata
    eventDataDescriptorTypeProviderMetadata
)

type eventDataDescriptor struct {
    ptr       ptr64
    size      uint32
    dataType  eventDataDescriptorType
    reserved1 uint8
    reserved2 uint16
}

func newEventDataDescriptor(dataType eventDataDescriptorType, buffer []byte) eventDataDescriptor {
    return eventDataDescriptor{
        ptr:      ptr64{ptr: unsafe.Pointer(&buffer[0])},
        size:     uint32(len(buffer)),
        dataType: dataType,
    }
}
vendor/github.com/Microsoft/go-winio/pkg/etw/eventdescriptor.go (generated, vendored, new file, 67 lines)
@@ -0,0 +1,67 @@
package etw

// Channel represents the ETW logging channel that is used. It can be used by
// event consumers to give an event special treatment.
type Channel uint8

const (
    // ChannelTraceLogging is the default channel for TraceLogging events. It is
    // not required to be used for TraceLogging, but will prevent decoding
    // issues for these events on older operating systems.
    ChannelTraceLogging Channel = 11
)

// Level represents the ETW logging level. There are several predefined levels
// that are commonly used, but technically anything from 0-255 is allowed.
// Lower levels indicate more important events, and 0 indicates an event that
// will always be collected.
type Level uint8

// Predefined ETW log levels.
const (
    LevelAlways Level = iota
    LevelCritical
    LevelError
    LevelWarning
    LevelInfo
    LevelVerbose
)

// EventDescriptor represents various metadata for an ETW event.
type eventDescriptor struct {
    id      uint16
    version uint8
    channel Channel
    level   Level
    opcode  uint8
    task    uint16
    keyword uint64
}

// NewEventDescriptor returns an EventDescriptor initialized for use with
// TraceLogging.
func newEventDescriptor() *eventDescriptor {
    // Standard TraceLogging events default to the TraceLogging channel, and
    // verbose level.
    return &eventDescriptor{
        channel: ChannelTraceLogging,
        level:   LevelVerbose,
    }
}

// Identity returns the identity of the event. If the identity is not 0, it
// should uniquely identify the other event metadata (contained in
// EventDescriptor, and field metadata). Only the lower 24 bits of this value
// are relevant.
func (ed *eventDescriptor) identity() uint32 {
    return (uint32(ed.version) << 16) | uint32(ed.id)
}

// SetIdentity sets the identity of the event. If the identity is not 0, it
// should uniquely identify the other event metadata (contained in
// EventDescriptor, and field metadata). Only the lower 24 bits of this value
// are relevant.
func (ed *eventDescriptor) setIdentity(identity uint32) {
    ed.id = uint16(identity)
    ed.version = uint8(identity >> 16)
}
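Illustrative note (not part of the vendored diff): identity() and setIdentity() above simply pack a 24-bit value as version<<16 | id, so they round-trip. A minimal in-package sketch, assuming it lives alongside eventdescriptor.go in package etw with "fmt" imported:

    // identityRoundTrip is a hypothetical helper showing the packing.
    func identityRoundTrip() {
        d := newEventDescriptor()
        d.setIdentity(0xABCDEF)           // id = 0xCDEF, version = 0xAB
        fmt.Printf("%#x\n", d.identity()) // prints 0xabcdef
    }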
vendor/github.com/Microsoft/go-winio/pkg/etw/eventmetadata.go (generated, vendored, new file, 177 lines)
@@ -0,0 +1,177 @@
package etw

import (
    "bytes"
    "encoding/binary"
)

// inType indicates the type of data contained in the ETW event.
type inType byte

// Various inType definitions for TraceLogging. These must match the definitions
// found in TraceLoggingProvider.h in the Windows SDK.
const (
    inTypeNull inType = iota
    inTypeUnicodeString
    inTypeANSIString
    inTypeInt8
    inTypeUint8
    inTypeInt16
    inTypeUint16
    inTypeInt32
    inTypeUint32
    inTypeInt64
    inTypeUint64
    inTypeFloat
    inTypeDouble
    inTypeBool32
    inTypeBinary
    inTypeGUID
    inTypePointerUnsupported
    inTypeFileTime
    inTypeSystemTime
    inTypeSID
    inTypeHexInt32
    inTypeHexInt64
    inTypeCountedString
    inTypeCountedANSIString
    inTypeStruct
    inTypeCountedBinary
    inTypeCountedArray inType = 32
    inTypeArray        inType = 64
)

// outType specifies a hint to the event decoder for how the value should be
// formatted.
type outType byte

// Various outType definitions for TraceLogging. These must match the
// definitions found in TraceLoggingProvider.h in the Windows SDK.
const (
    // outTypeDefault indicates that the default formatting for the inType will
    // be used by the event decoder.
    outTypeDefault outType = iota
    outTypeNoPrint
    outTypeString
    outTypeBoolean
    outTypeHex
    outTypePID
    outTypeTID
    outTypePort
    outTypeIPv4
    outTypeIPv6
    outTypeSocketAddress
    outTypeXML
    outTypeJSON
    outTypeWin32Error
    outTypeNTStatus
    outTypeHResult
    outTypeFileTime
    outTypeSigned
    outTypeUnsigned
    outTypeUTF8              outType = 35
    outTypePKCS7WithTypeInfo outType = 36
    outTypeCodePointer       outType = 37
    outTypeDateTimeUTC       outType = 38
)

// eventMetadata maintains a buffer which builds up the metadata for an ETW
// event. It needs to be paired with EventData which describes the event.
type eventMetadata struct {
    buffer bytes.Buffer
}

// bytes returns the raw binary data containing the event metadata. Before being
// returned, the current size of the buffer is written to the start of the
// buffer. The returned value is not copied from the internal buffer, so it can
// be mutated by the eventMetadata object after it is returned.
func (em *eventMetadata) bytes() []byte {
    // Finalize the event metadata buffer by filling in the buffer length at the
    // beginning.
    binary.LittleEndian.PutUint16(em.buffer.Bytes(), uint16(em.buffer.Len()))
    return em.buffer.Bytes()
}

// writeEventHeader writes the metadata for the start of an event to the buffer.
// This specifies the event name and tags.
func (em *eventMetadata) writeEventHeader(name string, tags uint32) {
    binary.Write(&em.buffer, binary.LittleEndian, uint16(0)) // Length placeholder
    em.writeTags(tags)
    em.buffer.WriteString(name)
    em.buffer.WriteByte(0) // Null terminator for name
}

func (em *eventMetadata) writeFieldInner(name string, inType inType, outType outType, tags uint32, arrSize uint16) {
    em.buffer.WriteString(name)
    em.buffer.WriteByte(0) // Null terminator for name

    if outType == outTypeDefault && tags == 0 {
        em.buffer.WriteByte(byte(inType))
    } else {
        em.buffer.WriteByte(byte(inType | 128))
        if tags == 0 {
            em.buffer.WriteByte(byte(outType))
        } else {
            em.buffer.WriteByte(byte(outType | 128))
            em.writeTags(tags)
        }
    }

    if arrSize != 0 {
        binary.Write(&em.buffer, binary.LittleEndian, arrSize)
    }
}

// writeTags writes out the tags value to the event metadata. Tags is a 28-bit
// value, interpreted as bit flags, which are only relevant to the event
// consumer. The event consumer may choose to attribute special meaning to tags
// (e.g. 0x4 could mean the field contains PII). Tags are written as a series of
// bytes, each containing 7 bits of tag value, with the high bit set if there is
// more tag data in the following byte. This allows for a more compact
// representation when not all of the tag bits are needed.
func (em *eventMetadata) writeTags(tags uint32) {
    // Only use the top 28 bits of the tags value.
    tags &= 0xfffffff

    for {
        // Tags are written with the most significant bits (e.g. 21-27) first.
        val := tags >> 21

        if tags&0x1fffff == 0 {
            // If there is no more data to write after this, write this value
            // without the high bit set, and return.
            em.buffer.WriteByte(byte(val & 0x7f))
            return
        }

        em.buffer.WriteByte(byte(val | 0x80))

        tags <<= 7
    }
}

// writeField writes the metadata for a simple field to the buffer.
func (em *eventMetadata) writeField(name string, inType inType, outType outType, tags uint32) {
    em.writeFieldInner(name, inType, outType, tags, 0)
}

// writeArray writes the metadata for an array field to the buffer. The number
// of elements in the array must be written as a uint16 in the event data,
// immediately preceeding the event data.
func (em *eventMetadata) writeArray(name string, inType inType, outType outType, tags uint32) {
    em.writeFieldInner(name, inType|inTypeArray, outType, tags, 0)
}

// writeCountedArray writes the metadata for an array field to the buffer. The
// size of a counted array is fixed, and the size is written into the metadata
// directly.
func (em *eventMetadata) writeCountedArray(name string, count uint16, inType inType, outType outType, tags uint32) {
    em.writeFieldInner(name, inType|inTypeCountedArray, outType, tags, count)
}

// writeStruct writes the metadata for a nested struct to the buffer. The struct
// contains the next N fields in the metadata, where N is specified by the
// fieldCount argument.
func (em *eventMetadata) writeStruct(name string, fieldCount uint8, tags uint32) {
    em.writeFieldInner(name, inTypeStruct, outType(fieldCount), tags, 0)
}
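The 7-bit chunked tag encoding described in the writeTags comment above can be hard to picture from the loop alone. A standalone sketch (not part of the vendored code) that mirrors the same most-significant-bits-first layout and prints the resulting bytes:

    package main

    import "fmt"

    // encodeTags mirrors eventMetadata.writeTags: a 28-bit value is emitted 7 bits
    // per byte, most significant bits first, with the high bit set on every byte
    // that has more tag data following it.
    func encodeTags(tags uint32) []byte {
        tags &= 0xfffffff
        var out []byte
        for {
            val := tags >> 21
            if tags&0x1fffff == 0 {
                return append(out, byte(val&0x7f))
            }
            out = append(out, byte(val|0x80))
            tags <<= 7
        }
    }

    func main() {
        fmt.Printf("% x\n", encodeTags(0x0000001)) // 80 80 80 01 - a low bit still needs all four bytes
        fmt.Printf("% x\n", encodeTags(0xfe00000)) // 7f - the top 7 bits fit in a single byte
    }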
vendor/github.com/Microsoft/go-winio/pkg/etw/eventopt.go (generated, vendored, new file, 63 lines)
@@ -0,0 +1,63 @@
package etw

import (
    "golang.org/x/sys/windows"
)

type eventOptions struct {
    descriptor        *eventDescriptor
    activityID        *windows.GUID
    relatedActivityID *windows.GUID
    tags              uint32
}

// EventOpt defines the option function type that can be passed to
// Provider.WriteEvent to specify general event options, such as level and
// keyword.
type EventOpt func(options *eventOptions)

// WithEventOpts returns the variadic arguments as a single slice.
func WithEventOpts(opts ...EventOpt) []EventOpt {
    return opts
}

// WithLevel specifies the level of the event to be written.
func WithLevel(level Level) EventOpt {
    return func(options *eventOptions) {
        options.descriptor.level = level
    }
}

// WithKeyword specifies the keywords of the event to be written. Multiple uses
// of this option are OR'd together.
func WithKeyword(keyword uint64) EventOpt {
    return func(options *eventOptions) {
        options.descriptor.keyword |= keyword
    }
}

func WithChannel(channel Channel) EventOpt {
    return func(options *eventOptions) {
        options.descriptor.channel = channel
    }
}

// WithTags specifies the tags of the event to be written. Tags is a 28-bit
// value (top 4 bits are ignored) which are interpreted by the event consumer.
func WithTags(newTags uint32) EventOpt {
    return func(options *eventOptions) {
        options.tags |= newTags
    }
}

func WithActivityID(activityID *windows.GUID) EventOpt {
    return func(options *eventOptions) {
        options.activityID = activityID
    }
}

func WithRelatedActivityID(activityID *windows.GUID) EventOpt {
    return func(options *eventOptions) {
        options.relatedActivityID = activityID
    }
}
vendor/github.com/Microsoft/go-winio/pkg/etw/fieldopt.go (generated, vendored, new file, 379 lines)
@@ -0,0 +1,379 @@
package etw

import (
    "math"
    "unsafe"
)

// FieldOpt defines the option function type that can be passed to
// Provider.WriteEvent to add fields to the event.
type FieldOpt func(em *eventMetadata, ed *eventData)

// WithFields returns the variadic arguments as a single slice.
func WithFields(opts ...FieldOpt) []FieldOpt {
    return opts
}

// BoolField adds a single bool field to the event.
func BoolField(name string, value bool) FieldOpt {
    return func(em *eventMetadata, ed *eventData) {
        em.writeField(name, inTypeUint8, outTypeBoolean, 0)
        bool8 := uint8(0)
        if value {
            bool8 = uint8(1)
        }
        ed.writeUint8(bool8)
    }
}

// BoolArray adds an array of bool to the event.
func BoolArray(name string, values []bool) FieldOpt {
    return func(em *eventMetadata, ed *eventData) {
        em.writeArray(name, inTypeUint8, outTypeBoolean, 0)
        ed.writeUint16(uint16(len(values)))
        for _, v := range values {
            bool8 := uint8(0)
            if v {
                bool8 = uint8(1)
            }
            ed.writeUint8(bool8)
        }
    }
}

// StringField adds a single string field to the event.
func StringField(name string, value string) FieldOpt {
    return func(em *eventMetadata, ed *eventData) {
        em.writeField(name, inTypeANSIString, outTypeUTF8, 0)
        ed.writeString(value)
    }
}

// StringArray adds an array of string to the event.
func StringArray(name string, values []string) FieldOpt {
    return func(em *eventMetadata, ed *eventData) {
        em.writeArray(name, inTypeANSIString, outTypeUTF8, 0)
        ed.writeUint16(uint16(len(values)))
        for _, v := range values {
            ed.writeString(v)
        }
    }
}

// IntField adds a single int field to the event.
func IntField(name string, value int) FieldOpt {
    switch unsafe.Sizeof(value) {
    case 4:
        return Int32Field(name, int32(value))
    case 8:
        return Int64Field(name, int64(value))
    default:
        panic("Unsupported int size")
    }
}

// IntArray adds an array of int to the event.
func IntArray(name string, values []int) FieldOpt {
    inType := inTypeNull
    var writeItem func(*eventData, int)
    switch unsafe.Sizeof(values[0]) {
    case 4:
        inType = inTypeInt32
        writeItem = func(ed *eventData, item int) { ed.writeInt32(int32(item)) }
    case 8:
        inType = inTypeInt64
        writeItem = func(ed *eventData, item int) { ed.writeInt64(int64(item)) }
    default:
        panic("Unsupported int size")
    }

    return func(em *eventMetadata, ed *eventData) {
        em.writeArray(name, inType, outTypeDefault, 0)
        ed.writeUint16(uint16(len(values)))
        for _, v := range values {
            writeItem(ed, v)
        }
    }
}

// Int8Field adds a single int8 field to the event.
func Int8Field(name string, value int8) FieldOpt {
    return func(em *eventMetadata, ed *eventData) {
        em.writeField(name, inTypeInt8, outTypeDefault, 0)
        ed.writeInt8(value)
    }
}

// Int8Array adds an array of int8 to the event.
func Int8Array(name string, values []int8) FieldOpt {
    return func(em *eventMetadata, ed *eventData) {
        em.writeArray(name, inTypeInt8, outTypeDefault, 0)
        ed.writeUint16(uint16(len(values)))
        for _, v := range values {
            ed.writeInt8(v)
        }
    }
}

// Int16Field adds a single int16 field to the event.
func Int16Field(name string, value int16) FieldOpt {
    return func(em *eventMetadata, ed *eventData) {
        em.writeField(name, inTypeInt16, outTypeDefault, 0)
        ed.writeInt16(value)
    }
}

// Int16Array adds an array of int16 to the event.
func Int16Array(name string, values []int16) FieldOpt {
    return func(em *eventMetadata, ed *eventData) {
        em.writeArray(name, inTypeInt16, outTypeDefault, 0)
        ed.writeUint16(uint16(len(values)))
        for _, v := range values {
            ed.writeInt16(v)
        }
    }
}

// Int32Field adds a single int32 field to the event.
func Int32Field(name string, value int32) FieldOpt {
    return func(em *eventMetadata, ed *eventData) {
        em.writeField(name, inTypeInt32, outTypeDefault, 0)
        ed.writeInt32(value)
    }
}

// Int32Array adds an array of int32 to the event.
func Int32Array(name string, values []int32) FieldOpt {
    return func(em *eventMetadata, ed *eventData) {
        em.writeArray(name, inTypeInt32, outTypeDefault, 0)
        ed.writeUint16(uint16(len(values)))
        for _, v := range values {
            ed.writeInt32(v)
        }
    }
}

// Int64Field adds a single int64 field to the event.
func Int64Field(name string, value int64) FieldOpt {
    return func(em *eventMetadata, ed *eventData) {
        em.writeField(name, inTypeInt64, outTypeDefault, 0)
        ed.writeInt64(value)
    }
}

// Int64Array adds an array of int64 to the event.
func Int64Array(name string, values []int64) FieldOpt {
    return func(em *eventMetadata, ed *eventData) {
        em.writeArray(name, inTypeInt64, outTypeDefault, 0)
        ed.writeUint16(uint16(len(values)))
        for _, v := range values {
            ed.writeInt64(v)
        }
    }
}

// UintField adds a single uint field to the event.
func UintField(name string, value uint) FieldOpt {
    switch unsafe.Sizeof(value) {
    case 4:
        return Uint32Field(name, uint32(value))
    case 8:
        return Uint64Field(name, uint64(value))
    default:
        panic("Unsupported uint size")
    }
}

// UintArray adds an array of uint to the event.
func UintArray(name string, values []uint) FieldOpt {
    inType := inTypeNull
    var writeItem func(*eventData, uint)
    switch unsafe.Sizeof(values[0]) {
    case 4:
        inType = inTypeUint32
        writeItem = func(ed *eventData, item uint) { ed.writeUint32(uint32(item)) }
    case 8:
        inType = inTypeUint64
        writeItem = func(ed *eventData, item uint) { ed.writeUint64(uint64(item)) }
    default:
        panic("Unsupported uint size")
    }

    return func(em *eventMetadata, ed *eventData) {
        em.writeArray(name, inType, outTypeDefault, 0)
        ed.writeUint16(uint16(len(values)))
        for _, v := range values {
            writeItem(ed, v)
        }
    }
}

// Uint8Field adds a single uint8 field to the event.
func Uint8Field(name string, value uint8) FieldOpt {
    return func(em *eventMetadata, ed *eventData) {
        em.writeField(name, inTypeUint8, outTypeDefault, 0)
        ed.writeUint8(value)
    }
}

// Uint8Array adds an array of uint8 to the event.
func Uint8Array(name string, values []uint8) FieldOpt {
    return func(em *eventMetadata, ed *eventData) {
        em.writeArray(name, inTypeUint8, outTypeDefault, 0)
        ed.writeUint16(uint16(len(values)))
        for _, v := range values {
            ed.writeUint8(v)
        }
    }
}

// Uint16Field adds a single uint16 field to the event.
func Uint16Field(name string, value uint16) FieldOpt {
    return func(em *eventMetadata, ed *eventData) {
        em.writeField(name, inTypeUint16, outTypeDefault, 0)
        ed.writeUint16(value)
    }
}

// Uint16Array adds an array of uint16 to the event.
func Uint16Array(name string, values []uint16) FieldOpt {
    return func(em *eventMetadata, ed *eventData) {
        em.writeArray(name, inTypeUint16, outTypeDefault, 0)
        ed.writeUint16(uint16(len(values)))
        for _, v := range values {
            ed.writeUint16(v)
        }
    }
}

// Uint32Field adds a single uint32 field to the event.
func Uint32Field(name string, value uint32) FieldOpt {
    return func(em *eventMetadata, ed *eventData) {
        em.writeField(name, inTypeUint32, outTypeDefault, 0)
        ed.writeUint32(value)
    }
}

// Uint32Array adds an array of uint32 to the event.
func Uint32Array(name string, values []uint32) FieldOpt {
    return func(em *eventMetadata, ed *eventData) {
        em.writeArray(name, inTypeUint32, outTypeDefault, 0)
        ed.writeUint16(uint16(len(values)))
        for _, v := range values {
            ed.writeUint32(v)
        }
    }
}

// Uint64Field adds a single uint64 field to the event.
func Uint64Field(name string, value uint64) FieldOpt {
    return func(em *eventMetadata, ed *eventData) {
        em.writeField(name, inTypeUint64, outTypeDefault, 0)
        ed.writeUint64(value)
    }
}

// Uint64Array adds an array of uint64 to the event.
func Uint64Array(name string, values []uint64) FieldOpt {
    return func(em *eventMetadata, ed *eventData) {
        em.writeArray(name, inTypeUint64, outTypeDefault, 0)
        ed.writeUint16(uint16(len(values)))
        for _, v := range values {
            ed.writeUint64(v)
        }
    }
}

// UintptrField adds a single uintptr field to the event.
func UintptrField(name string, value uintptr) FieldOpt {
    inType := inTypeNull
    var writeItem func(*eventData, uintptr)
    switch unsafe.Sizeof(value) {
    case 4:
        inType = inTypeHexInt32
        writeItem = func(ed *eventData, item uintptr) { ed.writeUint32(uint32(item)) }
    case 8:
        inType = inTypeHexInt64
        writeItem = func(ed *eventData, item uintptr) { ed.writeUint64(uint64(item)) }
    default:
        panic("Unsupported uintptr size")
    }

    return func(em *eventMetadata, ed *eventData) {
        em.writeField(name, inType, outTypeDefault, 0)
        writeItem(ed, value)
    }
}

// UintptrArray adds an array of uintptr to the event.
func UintptrArray(name string, values []uintptr) FieldOpt {
    inType := inTypeNull
    var writeItem func(*eventData, uintptr)
    switch unsafe.Sizeof(values[0]) {
    case 4:
        inType = inTypeHexInt32
        writeItem = func(ed *eventData, item uintptr) { ed.writeUint32(uint32(item)) }
    case 8:
        inType = inTypeHexInt64
        writeItem = func(ed *eventData, item uintptr) { ed.writeUint64(uint64(item)) }
    default:
        panic("Unsupported uintptr size")
    }

    return func(em *eventMetadata, ed *eventData) {
        em.writeArray(name, inType, outTypeDefault, 0)
        ed.writeUint16(uint16(len(values)))
        for _, v := range values {
            writeItem(ed, v)
        }
    }
}

// Float32Field adds a single float32 field to the event.
func Float32Field(name string, value float32) FieldOpt {
    return func(em *eventMetadata, ed *eventData) {
        em.writeField(name, inTypeFloat, outTypeDefault, 0)
        ed.writeUint32(math.Float32bits(value))
    }
}

// Float32Array adds an array of float32 to the event.
func Float32Array(name string, values []float32) FieldOpt {
    return func(em *eventMetadata, ed *eventData) {
        em.writeArray(name, inTypeFloat, outTypeDefault, 0)
        ed.writeUint16(uint16(len(values)))
        for _, v := range values {
            ed.writeUint32(math.Float32bits(v))
        }
    }
}

// Float64Field adds a single float64 field to the event.
func Float64Field(name string, value float64) FieldOpt {
    return func(em *eventMetadata, ed *eventData) {
        em.writeField(name, inTypeDouble, outTypeDefault, 0)
        ed.writeUint64(math.Float64bits(value))
    }
}

// Float64Array adds an array of float64 to the event.
func Float64Array(name string, values []float64) FieldOpt {
    return func(em *eventMetadata, ed *eventData) {
        em.writeArray(name, inTypeDouble, outTypeDefault, 0)
        ed.writeUint16(uint16(len(values)))
        for _, v := range values {
            ed.writeUint64(math.Float64bits(v))
        }
    }
}

// Struct adds a nested struct to the event, the FieldOpts in the opts argument
// are used to specify the fields of the struct.
func Struct(name string, opts ...FieldOpt) FieldOpt {
    return func(em *eventMetadata, ed *eventData) {
        em.writeStruct(name, uint8(len(opts)), 0)
        for _, opt := range opts {
            opt(em, ed)
        }
    }
}
vendor/github.com/Microsoft/go-winio/pkg/etw/provider.go (generated, vendored, new file, 279 lines)
@@ -0,0 +1,279 @@
package etw

import (
    "bytes"
    "crypto/sha1"
    "encoding/binary"
    "encoding/hex"
    "fmt"
    "strings"
    "unicode/utf16"
    "unsafe"

    "golang.org/x/sys/windows"
)

// Provider represents an ETW event provider. It is identified by a provider
// name and ID (GUID), which should always have a 1:1 mapping to each other
// (e.g. don't use multiple provider names with the same ID, or vice versa).
type Provider struct {
    ID         *windows.GUID
    handle     providerHandle
    metadata   []byte
    callback   EnableCallback
    index      uint
    enabled    bool
    level      Level
    keywordAny uint64
    keywordAll uint64
}

// String returns the `provider`.ID as a string
func (provider *Provider) String() string {
    data1 := make([]byte, 4)
    binary.BigEndian.PutUint32(data1, provider.ID.Data1)
    data2 := make([]byte, 2)
    binary.BigEndian.PutUint16(data2, provider.ID.Data2)
    data3 := make([]byte, 2)
    binary.BigEndian.PutUint16(data3, provider.ID.Data3)
    return fmt.Sprintf(
        "%s-%s-%s-%s-%s",
        hex.EncodeToString(data1),
        hex.EncodeToString(data2),
        hex.EncodeToString(data3),
        hex.EncodeToString(provider.ID.Data4[:2]),
        hex.EncodeToString(provider.ID.Data4[2:]))
}

type providerHandle windows.Handle

// ProviderState informs the provider EnableCallback what action is being
// performed.
type ProviderState uint32

const (
    // ProviderStateDisable indicates the provider is being disabled.
    ProviderStateDisable ProviderState = iota
    // ProviderStateEnable indicates the provider is being enabled.
    ProviderStateEnable
    // ProviderStateCaptureState indicates the provider is having its current
    // state snap-shotted.
    ProviderStateCaptureState
)

type eventInfoClass uint32

const (
    eventInfoClassProviderBinaryTrackInfo eventInfoClass = iota
    eventInfoClassProviderSetReserved1
    eventInfoClassProviderSetTraits
    eventInfoClassProviderUseDescriptorType
)

// EnableCallback is the form of the callback function that receives provider
// enable/disable notifications from ETW.
type EnableCallback func(*windows.GUID, ProviderState, Level, uint64, uint64, uintptr)

func providerCallback(sourceID *windows.GUID, state ProviderState, level Level, matchAnyKeyword uint64, matchAllKeyword uint64, filterData uintptr, i uintptr) {
    provider := providers.getProvider(uint(i))

    switch state {
    case ProviderStateDisable:
        provider.enabled = false
    case ProviderStateEnable:
        provider.enabled = true
        provider.level = level
        provider.keywordAny = matchAnyKeyword
        provider.keywordAll = matchAllKeyword
    }

    if provider.callback != nil {
        provider.callback(sourceID, state, level, matchAnyKeyword, matchAllKeyword, filterData)
    }
}

// providerCallbackAdapter acts as the first-level callback from the C/ETW side
// for provider notifications. Because Go has trouble with callback arguments of
// different size, it has only pointer-sized arguments, which are then cast to
// the appropriate types when calling providerCallback.
func providerCallbackAdapter(sourceID *windows.GUID, state uintptr, level uintptr, matchAnyKeyword uintptr, matchAllKeyword uintptr, filterData uintptr, i uintptr) uintptr {
    providerCallback(sourceID, ProviderState(state), Level(level), uint64(matchAnyKeyword), uint64(matchAllKeyword), filterData, i)
    return 0
}

// providerIDFromName generates a provider ID based on the provider name. It
// uses the same algorithm as used by .NET's EventSource class, which is based
// on RFC 4122. More information on the algorithm can be found here:
// https://blogs.msdn.microsoft.com/dcook/2015/09/08/etw-provider-names-and-guids/
// The algorithm is roughly:
// Hash = Sha1(namespace + arg.ToUpper().ToUtf16be())
// Guid = Hash[0..15], with Hash[7] tweaked according to RFC 4122
func providerIDFromName(name string) *windows.GUID {
    buffer := sha1.New()

    namespace := []byte{0x48, 0x2C, 0x2D, 0xB2, 0xC3, 0x90, 0x47, 0xC8, 0x87, 0xF8, 0x1A, 0x15, 0xBF, 0xC1, 0x30, 0xFB}
    buffer.Write(namespace)

    binary.Write(buffer, binary.BigEndian, utf16.Encode([]rune(strings.ToUpper(name))))

    sum := buffer.Sum(nil)
    sum[7] = (sum[7] & 0xf) | 0x50

    return &windows.GUID{
        Data1: binary.LittleEndian.Uint32(sum[0:4]),
        Data2: binary.LittleEndian.Uint16(sum[4:6]),
        Data3: binary.LittleEndian.Uint16(sum[6:8]),
        Data4: [8]byte{sum[8], sum[9], sum[10], sum[11], sum[12], sum[13], sum[14], sum[15]},
    }
}

// NewProvider creates and registers a new ETW provider. The provider ID is
// generated based on the provider name.
func NewProvider(name string, callback EnableCallback) (provider *Provider, err error) {
    return NewProviderWithID(name, providerIDFromName(name), callback)
}

// NewProviderWithID creates and registers a new ETW provider, allowing the
// provider ID to be manually specified. This is most useful when there is an
// existing provider ID that must be used to conform to existing diagnostic
// infrastructure.
func NewProviderWithID(name string, id *windows.GUID, callback EnableCallback) (provider *Provider, err error) {
    providerCallbackOnce.Do(func() {
        globalProviderCallback = windows.NewCallback(providerCallbackAdapter)
    })

    provider = providers.newProvider()
    defer func() {
        if err != nil {
            providers.removeProvider(provider)
        }
    }()
    provider.ID = id
    provider.callback = callback

    if err := eventRegister(provider.ID, globalProviderCallback, uintptr(provider.index), &provider.handle); err != nil {
        return nil, err
    }

    metadata := &bytes.Buffer{}
    binary.Write(metadata, binary.LittleEndian, uint16(0)) // Write empty size for buffer (to update later)
    metadata.WriteString(name)
    metadata.WriteByte(0) // Null terminator for name
    binary.LittleEndian.PutUint16(metadata.Bytes(), uint16(metadata.Len())) // Update the size at the beginning of the buffer
    provider.metadata = metadata.Bytes()

    if err := eventSetInformation(
        provider.handle,
        eventInfoClassProviderSetTraits,
        uintptr(unsafe.Pointer(&provider.metadata[0])),
        uint32(len(provider.metadata))); err != nil {

        return nil, err
    }

    return provider, nil
}

// Close unregisters the provider.
func (provider *Provider) Close() error {
    providers.removeProvider(provider)
    return eventUnregister(provider.handle)
}

// IsEnabled calls IsEnabledForLevelAndKeywords with LevelAlways and all
// keywords set.
func (provider *Provider) IsEnabled() bool {
    return provider.IsEnabledForLevelAndKeywords(LevelAlways, ^uint64(0))
}

// IsEnabledForLevel calls IsEnabledForLevelAndKeywords with the specified level
// and all keywords set.
func (provider *Provider) IsEnabledForLevel(level Level) bool {
    return provider.IsEnabledForLevelAndKeywords(level, ^uint64(0))
}

// IsEnabledForLevelAndKeywords allows event producer code to check if there are
// any event sessions that are interested in an event, based on the event level
// and keywords. Although this check happens automatically in the ETW
// infrastructure, it can be useful to check if an event will actually be
// consumed before doing expensive work to build the event data.
func (provider *Provider) IsEnabledForLevelAndKeywords(level Level, keywords uint64) bool {
    if !provider.enabled {
        return false
    }

    // ETW automatically sets the level to 255 if it is specified as 0, so we
    // don't need to worry about the level=0 (all events) case.
    if level > provider.level {
        return false
    }

    if keywords != 0 && (keywords&provider.keywordAny == 0 || keywords&provider.keywordAll != provider.keywordAll) {
        return false
    }

    return true
}

// WriteEvent writes a single ETW event from the provider. The event is
// constructed based on the EventOpt and FieldOpt values that are passed as
// opts.
func (provider *Provider) WriteEvent(name string, eventOpts []EventOpt, fieldOpts []FieldOpt) error {
    options := eventOptions{descriptor: newEventDescriptor()}
    em := &eventMetadata{}
    ed := &eventData{}

    // We need to evaluate the EventOpts first since they might change tags, and
    // we write out the tags before evaluating FieldOpts.
    for _, opt := range eventOpts {
        opt(&options)
    }

    if !provider.IsEnabledForLevelAndKeywords(options.descriptor.level, options.descriptor.keyword) {
        return nil
    }

    em.writeEventHeader(name, options.tags)

    for _, opt := range fieldOpts {
        opt(em, ed)
    }

    // Don't pass a data blob if there is no event data. There will always be
    // event metadata (e.g. for the name) so we don't need to do this check for
    // the metadata.
    dataBlobs := [][]byte{}
    if len(ed.bytes()) > 0 {
        dataBlobs = [][]byte{ed.bytes()}
    }

    return provider.writeEventRaw(options.descriptor, nil, nil, [][]byte{em.bytes()}, dataBlobs)
}

// writeEventRaw writes a single ETW event from the provider. This function is
// less abstracted than WriteEvent, and presents a fairly direct interface to
// the event writing functionality. It expects a series of event metadata and
// event data blobs to be passed in, which must conform to the TraceLogging
// schema. The functions on EventMetadata and EventData can help with creating
// these blobs. The blobs of each type are effectively concatenated together by
// the ETW infrastructure.
func (provider *Provider) writeEventRaw(
    descriptor *eventDescriptor,
    activityID *windows.GUID,
    relatedActivityID *windows.GUID,
    metadataBlobs [][]byte,
    dataBlobs [][]byte) error {

    dataDescriptorCount := uint32(1 + len(metadataBlobs) + len(dataBlobs))
    dataDescriptors := make([]eventDataDescriptor, 0, dataDescriptorCount)

    dataDescriptors = append(dataDescriptors, newEventDataDescriptor(eventDataDescriptorTypeProviderMetadata, provider.metadata))
    for _, blob := range metadataBlobs {
        dataDescriptors = append(dataDescriptors, newEventDataDescriptor(eventDataDescriptorTypeEventMetadata, blob))
    }
    for _, blob := range dataBlobs {
        dataDescriptors = append(dataDescriptors, newEventDataDescriptor(eventDataDescriptorTypeUserData, blob))
    }

    return eventWriteTransfer(provider.handle, descriptor, activityID, relatedActivityID, dataDescriptorCount, &dataDescriptors[0])
}
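Hypothetical usage sketch (not part of the vendored diff): registering a provider and emitting one event with the API added above. The program name and field names are made up; it builds only on Windows, and assumes the package import path "github.com/Microsoft/go-winio/pkg/etw".

    package main

    import (
        "log"

        "github.com/Microsoft/go-winio/pkg/etw"
    )

    func main() {
        // The provider GUID is derived from the name (see providerIDFromName).
        provider, err := etw.NewProvider("Example-Provider", nil)
        if err != nil {
            log.Fatal(err)
        }
        defer provider.Close()

        // Skip building the event entirely if no session is listening at this level.
        if provider.IsEnabledForLevel(etw.LevelInfo) {
            err = provider.WriteEvent(
                "ExampleEvent",
                etw.WithEventOpts(etw.WithLevel(etw.LevelInfo)),
                etw.WithFields(
                    etw.StringField("Message", "hello"),
                    etw.Uint32Field("Count", 3),
                ),
            )
            if err != nil {
                log.Fatal(err)
            }
        }
    }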
vendor/github.com/Microsoft/go-winio/pkg/etw/providerglobal.go (generated, vendored, new file, 52 lines)
@@ -0,0 +1,52 @@
package etw

import (
    "sync"
)

// Because the provider callback function needs to be able to access the
// provider data when it is invoked by ETW, we need to keep provider data stored
// in a global map based on an index. The index is passed as the callback
// context to ETW.
type providerMap struct {
    m    map[uint]*Provider
    i    uint
    lock sync.Mutex
    once sync.Once
}

var providers = providerMap{
    m: make(map[uint]*Provider),
}

func (p *providerMap) newProvider() *Provider {
    p.lock.Lock()
    defer p.lock.Unlock()

    i := p.i
    p.i++

    provider := &Provider{
        index: i,
    }

    p.m[i] = provider
    return provider
}

func (p *providerMap) removeProvider(provider *Provider) {
    p.lock.Lock()
    defer p.lock.Unlock()

    delete(p.m, provider.index)
}

func (p *providerMap) getProvider(index uint) *Provider {
    p.lock.Lock()
    defer p.lock.Unlock()

    return p.m[index]
}

var providerCallbackOnce sync.Once
var globalProviderCallback uintptr
vendor/github.com/Microsoft/go-winio/pkg/etw/ptr64_32.go (generated, vendored, new file, 16 lines)
@@ -0,0 +1,16 @@
// +build 386 arm

package etw

import (
    "unsafe"
)

// byteptr64 defines a struct containing a pointer. The struct is guaranteed to
// be 64 bits, regardless of the actual size of a pointer on the platform. This
// is intended for use with certain Windows APIs that expect a pointer as a
// ULONGLONG.
type ptr64 struct {
    ptr unsafe.Pointer
    _   uint32
}
vendor/github.com/Microsoft/go-winio/pkg/etw/ptr64_64.go (generated, vendored, new file, 15 lines)
@@ -0,0 +1,15 @@
// +build amd64 arm64

package etw

import (
    "unsafe"
)

// byteptr64 defines a struct containing a pointer. The struct is guaranteed to
// be 64 bits, regardless of the actual size of a pointer on the platform. This
// is intended for use with certain Windows APIs that expect a pointer as a
// ULONGLONG.
type ptr64 struct {
    ptr unsafe.Pointer
}
vendor/github.com/Microsoft/go-winio/pkg/etw/zsyscall_windows.go (generated, vendored, new file, 78 lines)
@@ -0,0 +1,78 @@
// Code generated by 'go generate'; DO NOT EDIT.

package etw

import (
    "syscall"
    "unsafe"

    "golang.org/x/sys/windows"
)

var _ unsafe.Pointer

// Do the interface allocations only once for common
// Errno values.
const (
    errnoERROR_IO_PENDING = 997
)

var (
    errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING)
)

// errnoErr returns common boxed Errno values, to prevent
// allocations at runtime.
func errnoErr(e syscall.Errno) error {
    switch e {
    case 0:
        return nil
    case errnoERROR_IO_PENDING:
        return errERROR_IO_PENDING
    }
    // TODO: add more here, after collecting data on the common
    // error values see on Windows. (perhaps when running
    // all.bat?)
    return e
}

var (
    modadvapi32 = windows.NewLazySystemDLL("advapi32.dll")

    procEventRegister       = modadvapi32.NewProc("EventRegister")
    procEventUnregister     = modadvapi32.NewProc("EventUnregister")
    procEventWriteTransfer  = modadvapi32.NewProc("EventWriteTransfer")
    procEventSetInformation = modadvapi32.NewProc("EventSetInformation")
)

func eventRegister(providerId *windows.GUID, callback uintptr, callbackContext uintptr, providerHandle *providerHandle) (win32err error) {
    r0, _, _ := syscall.Syscall6(procEventRegister.Addr(), 4, uintptr(unsafe.Pointer(providerId)), uintptr(callback), uintptr(callbackContext), uintptr(unsafe.Pointer(providerHandle)), 0, 0)
    if r0 != 0 {
        win32err = syscall.Errno(r0)
    }
    return
}

func eventUnregister(providerHandle providerHandle) (win32err error) {
    r0, _, _ := syscall.Syscall(procEventUnregister.Addr(), 1, uintptr(providerHandle), 0, 0)
    if r0 != 0 {
        win32err = syscall.Errno(r0)
    }
    return
}

func eventWriteTransfer(providerHandle providerHandle, descriptor *eventDescriptor, activityID *windows.GUID, relatedActivityID *windows.GUID, dataDescriptorCount uint32, dataDescriptors *eventDataDescriptor) (win32err error) {
    r0, _, _ := syscall.Syscall6(procEventWriteTransfer.Addr(), 6, uintptr(providerHandle), uintptr(unsafe.Pointer(descriptor)), uintptr(unsafe.Pointer(activityID)), uintptr(unsafe.Pointer(relatedActivityID)), uintptr(dataDescriptorCount), uintptr(unsafe.Pointer(dataDescriptors)))
    if r0 != 0 {
        win32err = syscall.Errno(r0)
    }
    return
}

func eventSetInformation(providerHandle providerHandle, class eventInfoClass, information uintptr, length uint32) (win32err error) {
    r0, _, _ := syscall.Syscall6(procEventSetInformation.Addr(), 4, uintptr(providerHandle), uintptr(class), uintptr(information), uintptr(length), 0, 0)
    if r0 != 0 {
        win32err = syscall.Errno(r0)
    }
    return
}
212
vendor/github.com/Microsoft/go-winio/pkg/etwlogrus/hook.go
generated
vendored
Normal file
212
vendor/github.com/Microsoft/go-winio/pkg/etwlogrus/hook.go
generated
vendored
Normal file
@ -0,0 +1,212 @@
package etwlogrus

import (
	"fmt"
	"reflect"

	"github.com/Microsoft/go-winio/pkg/etw"
	"github.com/sirupsen/logrus"
)

// Hook is a Logrus hook which logs received events to ETW.
type Hook struct {
	provider      *etw.Provider
	closeProvider bool
}

// NewHook registers a new ETW provider and returns a hook to log from it. The
// provider will be closed when the hook is closed.
func NewHook(providerName string) (*Hook, error) {
	provider, err := etw.NewProvider(providerName, nil)
	if err != nil {
		return nil, err
	}

	return &Hook{provider, true}, nil
}

// NewHookFromProvider creates a new hook based on an existing ETW provider. The
// provider will not be closed when the hook is closed.
func NewHookFromProvider(provider *etw.Provider) (*Hook, error) {
	return &Hook{provider, false}, nil
}

// Levels returns the set of levels that this hook wants to receive log entries
// for.
func (h *Hook) Levels() []logrus.Level {
	return []logrus.Level{
		logrus.TraceLevel,
		logrus.DebugLevel,
		logrus.InfoLevel,
		logrus.WarnLevel,
		logrus.ErrorLevel,
		logrus.FatalLevel,
		logrus.PanicLevel,
	}
}

var logrusToETWLevelMap = map[logrus.Level]etw.Level{
	logrus.PanicLevel: etw.LevelAlways,
	logrus.FatalLevel: etw.LevelCritical,
	logrus.ErrorLevel: etw.LevelError,
	logrus.WarnLevel:  etw.LevelWarning,
	logrus.InfoLevel:  etw.LevelInfo,
	logrus.DebugLevel: etw.LevelVerbose,
	logrus.TraceLevel: etw.LevelVerbose,
}

// Fire receives each Logrus entry as it is logged, and logs it to ETW.
func (h *Hook) Fire(e *logrus.Entry) error {
	// Logrus defines more levels than ETW typically uses, but analysis is
	// easiest when using a consistent set of levels across ETW providers, so we
	// map the Logrus levels to ETW levels.
	level := logrusToETWLevelMap[e.Level]
	if !h.provider.IsEnabledForLevel(level) {
		return nil
	}

	// Reserve extra space for the message field.
	fields := make([]etw.FieldOpt, 0, len(e.Data)+1)

	fields = append(fields, etw.StringField("Message", e.Message))

	for k, v := range e.Data {
		fields = append(fields, getFieldOpt(k, v))
	}

	return h.provider.WriteEvent(
		"LogrusEntry",
		etw.WithEventOpts(etw.WithLevel(level)),
		fields)
}

// Currently, we support logging basic builtin types (int, string, etc), slices
// of basic builtin types, error, types derived from the basic types (e.g. "type
// foo int"), and structs (recursively logging their fields). We do not support
// slices of derived types (e.g. "[]foo").
//
// For types that we don't support, the value is formatted via fmt.Sprint, and
// we also log a message that the type is unsupported along with the formatted
// type. The intent of this is to make it easier to see which types are not
// supported in traces, so we can evaluate adding support for more types in the
// future.
func getFieldOpt(k string, v interface{}) etw.FieldOpt {
	switch v := v.(type) {
	case bool:
		return etw.BoolField(k, v)
	case []bool:
		return etw.BoolArray(k, v)
	case string:
		return etw.StringField(k, v)
	case []string:
		return etw.StringArray(k, v)
	case int:
		return etw.IntField(k, v)
	case []int:
		return etw.IntArray(k, v)
	case int8:
		return etw.Int8Field(k, v)
	case []int8:
		return etw.Int8Array(k, v)
	case int16:
		return etw.Int16Field(k, v)
	case []int16:
		return etw.Int16Array(k, v)
	case int32:
		return etw.Int32Field(k, v)
	case []int32:
		return etw.Int32Array(k, v)
	case int64:
		return etw.Int64Field(k, v)
	case []int64:
		return etw.Int64Array(k, v)
	case uint:
		return etw.UintField(k, v)
	case []uint:
		return etw.UintArray(k, v)
	case uint8:
		return etw.Uint8Field(k, v)
	case []uint8:
		return etw.Uint8Array(k, v)
	case uint16:
		return etw.Uint16Field(k, v)
	case []uint16:
		return etw.Uint16Array(k, v)
	case uint32:
		return etw.Uint32Field(k, v)
	case []uint32:
		return etw.Uint32Array(k, v)
	case uint64:
		return etw.Uint64Field(k, v)
	case []uint64:
		return etw.Uint64Array(k, v)
	case uintptr:
		return etw.UintptrField(k, v)
	case []uintptr:
		return etw.UintptrArray(k, v)
	case float32:
		return etw.Float32Field(k, v)
	case []float32:
		return etw.Float32Array(k, v)
	case float64:
		return etw.Float64Field(k, v)
	case []float64:
		return etw.Float64Array(k, v)
	case error:
		return etw.StringField(k, v.Error())
	default:
		switch rv := reflect.ValueOf(v); rv.Kind() {
		case reflect.Bool:
			return getFieldOpt(k, rv.Bool())
		case reflect.Int:
			return getFieldOpt(k, int(rv.Int()))
		case reflect.Int8:
			return getFieldOpt(k, int8(rv.Int()))
		case reflect.Int16:
			return getFieldOpt(k, int16(rv.Int()))
		case reflect.Int32:
			return getFieldOpt(k, int32(rv.Int()))
		case reflect.Int64:
			return getFieldOpt(k, int64(rv.Int()))
		case reflect.Uint:
			return getFieldOpt(k, uint(rv.Uint()))
		case reflect.Uint8:
			return getFieldOpt(k, uint8(rv.Uint()))
		case reflect.Uint16:
			return getFieldOpt(k, uint16(rv.Uint()))
		case reflect.Uint32:
			return getFieldOpt(k, uint32(rv.Uint()))
		case reflect.Uint64:
			return getFieldOpt(k, uint64(rv.Uint()))
		case reflect.Uintptr:
			return getFieldOpt(k, uintptr(rv.Uint()))
		case reflect.Float32:
			return getFieldOpt(k, float32(rv.Float()))
		case reflect.Float64:
			return getFieldOpt(k, float64(rv.Float()))
		case reflect.String:
			return getFieldOpt(k, rv.String())
		case reflect.Struct:
			fields := make([]etw.FieldOpt, 0, rv.NumField())
			for i := 0; i < rv.NumField(); i++ {
				field := rv.Field(i)
				if field.CanInterface() {
					fields = append(fields, getFieldOpt(k, field.Interface()))
				}
			}
			return etw.Struct(k, fields...)
		}
	}

	return etw.StringField(k, fmt.Sprintf("(Unsupported: %T) %v", v, v))
}
// Close cleans up the hook and closes the ETW provider. If the provider was
// registered by etwlogrus, it will be closed as part of `Close`. If the
// provider was passed in, it will not be closed.
func (h *Hook) Close() error {
	if h.closeProvider {
		return h.provider.Close()
	}
	return nil
}
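For orientation, the sketch below (not part of this commit) shows how a caller would typically wire the new etwlogrus hook into logrus; the provider name "ExampleProvider" is a made-up placeholder.

package main

import (
	"github.com/Microsoft/go-winio/pkg/etwlogrus"
	"github.com/sirupsen/logrus"
)

func main() {
	// Register an ETW provider and forward all logrus entries to it.
	hook, err := etwlogrus.NewHook("ExampleProvider")
	if err != nil {
		logrus.WithError(err).Warn("ETW hook unavailable")
		return
	}
	defer hook.Close()

	logrus.AddHook(hook)
	logrus.WithField("answer", 42).Info("hello from logrus via ETW")
}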
9
vendor/github.com/Microsoft/hcsshim/internal/hcs/log.go
generated
vendored
@ -7,9 +7,14 @@ func logOperationBegin(ctx logrus.Fields, msg string) {
 }
 
 func logOperationEnd(ctx logrus.Fields, msg string, err error) {
+	// Copy the log and fields first.
+	log := logrus.WithFields(ctx)
 	if err == nil {
-		logrus.WithFields(ctx).Debug(msg)
+		log.Debug(msg)
 	} else {
-		logrus.WithFields(ctx).WithError(err).Error(msg)
+		// Edit only the copied field data to avoid race conditions on the
+		// write.
+		log.Data[logrus.ErrorKey] = err
+		log.Error(msg)
 	}
 }
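The change above stops mutating the shared logrus.Fields map when an error is attached. A minimal sketch of the same copy-then-mutate pattern, using only the public logrus API (illustrative only, not part of the vendored change):

package main

import "github.com/sirupsen/logrus"

// logEnd copies the shared fields into a fresh *logrus.Entry, then mutates
// only the copy's Data map, so concurrent callers never write to the same map.
func logEnd(ctx logrus.Fields, msg string, err error) {
	entry := logrus.WithFields(ctx) // entry.Data holds a private copy of ctx
	if err != nil {
		entry.Data[logrus.ErrorKey] = err
		entry.Error(msg)
		return
	}
	entry.Debug(msg)
}

func main() {
	logEnd(logrus.Fields{"op": "example"}, "done", nil)
}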
80
vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go
generated
vendored
@ -23,6 +23,9 @@ type Process struct {
 	callbackNumber uintptr
 
 	logctx logrus.Fields
+
+	waitBlock chan struct{}
+	waitError error
 }
 
 func newProcess(process hcsProcess, processID int, computeSystem *System) *Process {
@ -31,10 +34,10 @@ func newProcess(process hcsProcess, processID int, computeSystem *System) *Proce
 		processID: processID,
 		system:    computeSystem,
 		logctx: logrus.Fields{
-			logfields.HCSOperation: "",
 			logfields.ContainerID: computeSystem.ID(),
 			logfields.ProcessID:   processID,
 		},
+		waitBlock: make(chan struct{}),
 	}
 }
 
@ -88,13 +91,12 @@ func (process *Process) SystemID() string {
 }
 
 func (process *Process) logOperationBegin(operation string) {
-	process.logctx[logfields.HCSOperation] = operation
 	logOperationBegin(
 		process.logctx,
-		"hcsshim::Process - Begin Operation")
+		operation+" - Begin Operation")
 }
 
-func (process *Process) logOperationEnd(err error) {
+func (process *Process) logOperationEnd(operation string, err error) {
 	var result string
 	if err == nil {
 		result = "Success"
@ -104,9 +106,8 @@ func (process *Process) logOperationEnd(err error) {
 
 	logOperationEnd(
 		process.logctx,
-		"hcsshim::Process - End Operation - "+result,
+		operation+" - End Operation - "+result,
 		err)
-	process.logctx[logfields.HCSOperation] = ""
 }
 
 // Signal signals the process with `options`.
@ -116,7 +117,7 @@ func (process *Process) Signal(options guestrequest.SignalProcessOptions) (err e
 
 	operation := "hcsshim::Process::Signal"
 	process.logOperationBegin(operation)
-	defer func() { process.logOperationEnd(err) }()
+	defer func() { process.logOperationEnd(operation, err) }()
 
 	if process.handle == 0 {
 		return makeProcessError(process, operation, ErrAlreadyClosed, nil)
@ -148,7 +149,7 @@ func (process *Process) Kill() (err error) {
 
 	operation := "hcsshim::Process::Kill"
 	process.logOperationBegin(operation)
-	defer func() { process.logOperationEnd(err) }()
+	defer func() { process.logOperationEnd(operation, err) }()
 
 	if process.handle == 0 {
 		return makeProcessError(process, operation, ErrAlreadyClosed, nil)
@ -166,33 +167,47 @@ func (process *Process) Kill() (err error) {
 	return nil
 }
 
-// Wait waits for the process to exit.
+// waitBackground waits for the process exit notification. Once received sets
+// `process.waitError` (if any) and unblocks all `Wait` and `WaitTimeout` calls.
+//
+// This MUST be called exactly once per `process.handle` but `Wait` and
+// `WaitTimeout` are safe to call multiple times.
+func (process *Process) waitBackground() {
+	process.waitError = waitForNotification(process.callbackNumber, hcsNotificationProcessExited, nil)
+	close(process.waitBlock)
+}
+
+// Wait waits for the process to exit. If the process has already exited returns
+// the previous error (if any).
 func (process *Process) Wait() (err error) {
 	operation := "hcsshim::Process::Wait"
 	process.logOperationBegin(operation)
-	defer func() { process.logOperationEnd(err) }()
+	defer func() { process.logOperationEnd(operation, err) }()
 
-	err = waitForNotification(process.callbackNumber, hcsNotificationProcessExited, nil)
-	if err != nil {
+	<-process.waitBlock
+	if process.waitError != nil {
 		return makeProcessError(process, operation, err, nil)
 	}
 
 	return nil
 }
 
-// WaitTimeout waits for the process to exit or the duration to elapse. It returns
-// false if timeout occurs.
+// WaitTimeout waits for the process to exit or the duration to elapse. If the
+// process has already exited returns the previous error (if any). If a timeout
+// occurs returns `ErrTimeout`.
 func (process *Process) WaitTimeout(timeout time.Duration) (err error) {
 	operation := "hcssshim::Process::WaitTimeout"
 	process.logOperationBegin(operation)
-	defer func() { process.logOperationEnd(err) }()
+	defer func() { process.logOperationEnd(operation, err) }()
 
-	err = waitForNotification(process.callbackNumber, hcsNotificationProcessExited, &timeout)
-	if err != nil {
-		return makeProcessError(process, operation, err, nil)
-	}
-
-	return nil
+	select {
+	case <-process.waitBlock:
+		if process.waitError != nil {
+			return makeProcessError(process, operation, process.waitError, nil)
+		}
+
+		return nil
+	case <-time.After(timeout):
+		return makeProcessError(process, operation, ErrTimeout, nil)
+	}
 }
 
 // ResizeConsole resizes the console of the process.
@ -202,7 +217,7 @@ func (process *Process) ResizeConsole(width, height uint16) (err error) {
 
 	operation := "hcsshim::Process::ResizeConsole"
 	process.logOperationBegin(operation)
-	defer func() { process.logOperationEnd(err) }()
+	defer func() { process.logOperationEnd(operation, err) }()
 
 	if process.handle == 0 {
 		return makeProcessError(process, operation, ErrAlreadyClosed, nil)
@ -239,7 +254,7 @@ func (process *Process) Properties() (_ *ProcessStatus, err error) {
 
 	operation := "hcsshim::Process::Properties"
 	process.logOperationBegin(operation)
-	defer func() { process.logOperationEnd(err) }()
+	defer func() { process.logOperationEnd(operation, err) }()
 
 	if process.handle == 0 {
 		return nil, makeProcessError(process, operation, ErrAlreadyClosed, nil)
@ -275,19 +290,24 @@ func (process *Process) Properties() (_ *ProcessStatus, err error) {
 func (process *Process) ExitCode() (_ int, err error) {
 	operation := "hcsshim::Process::ExitCode"
 	process.logOperationBegin(operation)
-	defer func() { process.logOperationEnd(err) }()
+	defer func() { process.logOperationEnd(operation, err) }()
 
 	properties, err := process.Properties()
 	if err != nil {
-		return 0, makeProcessError(process, operation, err, nil)
+		return -1, makeProcessError(process, operation, err, nil)
 	}
 
 	if properties.Exited == false {
-		return 0, makeProcessError(process, operation, ErrInvalidProcessState, nil)
+		return -1, makeProcessError(process, operation, ErrInvalidProcessState, nil)
 	}
 
 	if properties.LastWaitResult != 0 {
-		return 0, makeProcessError(process, operation, syscall.Errno(properties.LastWaitResult), nil)
+		logrus.WithFields(logrus.Fields{
+			logfields.ContainerID: process.SystemID(),
+			logfields.ProcessID:   process.processID,
+			"wait-result":         properties.LastWaitResult,
+		}).Warn("hcsshim::Process::ExitCode - Non-zero last wait result")
+		return -1, nil
 	}
 
 	return int(properties.ExitCode), nil
@ -302,7 +322,7 @@ func (process *Process) Stdio() (_ io.WriteCloser, _ io.ReadCloser, _ io.ReadClo
 
 	operation := "hcsshim::Process::Stdio"
 	process.logOperationBegin(operation)
-	defer func() { process.logOperationEnd(err) }()
+	defer func() { process.logOperationEnd(operation, err) }()
 
 	if process.handle == 0 {
 		return nil, nil, nil, makeProcessError(process, operation, ErrAlreadyClosed, nil)
@ -346,7 +366,7 @@ func (process *Process) CloseStdin() (err error) {
 
 	operation := "hcsshim::Process::CloseStdin"
 	process.logOperationBegin(operation)
-	defer func() { process.logOperationEnd(err) }()
+	defer func() { process.logOperationEnd(operation, err) }()
 
 	if process.handle == 0 {
 		return makeProcessError(process, operation, ErrAlreadyClosed, nil)
@ -384,7 +404,7 @@ func (process *Process) Close() (err error) {
 
 	operation := "hcsshim::Process::Close"
 	process.logOperationBegin(operation)
-	defer func() { process.logOperationEnd(err) }()
+	defer func() { process.logOperationEnd(operation, err) }()
 
 	// Don't double free this
 	if process.handle == 0 {
@ -453,7 +473,7 @@ func (process *Process) unregisterCallback() error {
 	closeChannels(context.channels)
 
 	callbackMapLock.Lock()
-	callbackMap[callbackNumber] = nil
+	delete(callbackMap, callbackNumber)
 	callbackMapLock.Unlock()
 
 	handle = 0
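The waitBackground/waitBlock change above replaces per-call HCS waits with a single background wait whose result is broadcast by closing a channel. A standalone sketch of that idiom, using no hcsshim types (illustrative only, not part of this commit):

package main

import (
	"fmt"
	"sync"
)

// procWaiter mirrors the waitBlock/waitError pattern: the background goroutine
// stores the result once and closes the channel; every Wait call, first or
// repeated, then observes the same stored error.
type procWaiter struct {
	waitBlock chan struct{}
	waitError error
}

func (w *procWaiter) waitBackground(notify func() error) {
	w.waitError = notify() // stand-in for the exit notification
	close(w.waitBlock)     // must run exactly once per handle
}

func (w *procWaiter) Wait() error {
	<-w.waitBlock // safe to call any number of times
	return w.waitError
}

func main() {
	w := &procWaiter{waitBlock: make(chan struct{})}
	go w.waitBackground(func() error { return nil })

	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func(i int) { defer wg.Done(); fmt.Println("waiter", i, "->", w.Wait()) }(i)
	}
	wg.Wait()
}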
113
vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go
generated
vendored
113
vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go
generated
vendored
@ -43,26 +43,28 @@ type System struct {
|
 	callbackNumber uintptr
 
 	logctx logrus.Fields
+
+	waitBlock chan struct{}
+	waitError error
 }
 
 func newSystem(id string) *System {
 	return &System{
 		id: id,
 		logctx: logrus.Fields{
-			logfields.HCSOperation: "",
 			logfields.ContainerID: id,
 		},
+		waitBlock: make(chan struct{}),
 	}
 }
 
 func (computeSystem *System) logOperationBegin(operation string) {
-	computeSystem.logctx[logfields.HCSOperation] = operation
 	logOperationBegin(
 		computeSystem.logctx,
-		"hcsshim::ComputeSystem - Begin Operation")
+		operation+" - Begin Operation")
 }
 
-func (computeSystem *System) logOperationEnd(err error) {
+func (computeSystem *System) logOperationEnd(operation string, err error) {
 	var result string
 	if err == nil {
 		result = "Success"
@ -72,9 +74,8 @@ func (computeSystem *System) logOperationEnd(err error) {
 
 	logOperationEnd(
 		computeSystem.logctx,
-		"hcsshim::ComputeSystem - End Operation - "+result,
+		operation+" - End Operation - "+result,
 		err)
-	computeSystem.logctx[logfields.HCSOperation] = ""
 }
 
 // CreateComputeSystem creates a new compute system with the given configuration but does not start it.
@ -83,7 +84,7 @@ func CreateComputeSystem(id string, hcsDocumentInterface interface{}) (_ *System
 
 	computeSystem := newSystem(id)
 	computeSystem.logOperationBegin(operation)
-	defer func() { computeSystem.logOperationEnd(err) }()
+	defer func() { computeSystem.logOperationEnd(operation, err) }()
 
 	hcsDocumentB, err := json.Marshal(hcsDocumentInterface)
 	if err != nil {
@ -124,6 +125,8 @@ func CreateComputeSystem(id string, hcsDocumentInterface interface{}) (_ *System
 		return nil, makeSystemError(computeSystem, operation, hcsDocument, err, events)
 	}
 
+	go computeSystem.waitBackground()
+
 	return computeSystem, nil
 }
 
@ -135,9 +138,9 @@ func OpenComputeSystem(id string) (_ *System, err error) {
 	computeSystem.logOperationBegin(operation)
 	defer func() {
 		if IsNotExist(err) {
-			computeSystem.logOperationEnd(nil)
+			computeSystem.logOperationEnd(operation, nil)
 		} else {
-			computeSystem.logOperationEnd(err)
+			computeSystem.logOperationEnd(operation, err)
 		}
 	}()
 
@ -156,6 +159,7 @@ func OpenComputeSystem(id string) (_ *System, err error) {
 	if err = computeSystem.registerCallback(); err != nil {
 		return nil, makeSystemError(computeSystem, operation, "", err, nil)
 	}
+	go computeSystem.waitBackground()
 
 	return computeSystem, nil
 }
@ -163,12 +167,10 @@ func OpenComputeSystem(id string) (_ *System, err error) {
 // GetComputeSystems gets a list of the compute systems on the system that match the query
 func GetComputeSystems(q schema1.ComputeSystemQuery) (_ []schema1.ContainerProperties, err error) {
 	operation := "hcsshim::GetComputeSystems"
-	fields := logrus.Fields{
-		logfields.HCSOperation: operation,
-	}
+	fields := logrus.Fields{}
 	logOperationBegin(
 		fields,
-		"hcsshim::ComputeSystem - Begin Operation")
+		operation+" - Begin Operation")
 
 	defer func() {
 		var result string
@ -180,7 +182,7 @@ func GetComputeSystems(q schema1.ComputeSystemQuery) (_ []schema1.ContainerPrope
 
 		logOperationEnd(
 			fields,
-			"hcsshim::ComputeSystem - End Operation - "+result,
+			operation+" - End Operation - "+result,
 			err)
 	}()
 
@ -227,7 +229,7 @@ func (computeSystem *System) Start() (err error) {
 
 	operation := "hcsshim::ComputeSystem::Start"
 	computeSystem.logOperationBegin(operation)
-	defer func() { computeSystem.logOperationEnd(err) }()
+	defer func() { computeSystem.logOperationEnd(operation, err) }()
 
 	if computeSystem.handle == 0 {
 		return makeSystemError(computeSystem, "Start", "", ErrAlreadyClosed, nil)
@ -285,10 +287,10 @@ func (computeSystem *System) Shutdown() (err error) {
 	operation := "hcsshim::ComputeSystem::Shutdown"
 	computeSystem.logOperationBegin(operation)
 	defer func() {
-		if IsAlreadyStopped(err) {
-			computeSystem.logOperationEnd(nil)
+		if IsAlreadyStopped(err) || IsPending(err) {
+			computeSystem.logOperationEnd(operation, nil)
 		} else {
-			computeSystem.logOperationEnd(err)
+			computeSystem.logOperationEnd(operation, err)
 		}
 	}()
 
@ -318,9 +320,9 @@ func (computeSystem *System) Terminate() (err error) {
 	computeSystem.logOperationBegin(operation)
 	defer func() {
 		if IsPending(err) {
-			computeSystem.logOperationEnd(nil)
+			computeSystem.logOperationEnd(operation, nil)
 		} else {
-			computeSystem.logOperationEnd(err)
+			computeSystem.logOperationEnd(operation, err)
 		}
 	}()
 
@ -340,48 +342,65 @@ func (computeSystem *System) Terminate() (err error) {
 	return nil
 }
 
-// Wait synchronously waits for the compute system to shutdown or terminate.
+// waitBackground waits for the compute system exit notification. Once received
+// sets `computeSystem.waitError` (if any) and unblocks all `Wait`,
+// `WaitExpectedError`, and `WaitTimeout` calls.
+//
+// This MUST be called exactly once per `computeSystem.handle` but `Wait`,
+// `WaitExpectedError`, and `WaitTimeout` are safe to call multiple times.
+func (computeSystem *System) waitBackground() {
+	computeSystem.waitError = waitForNotification(computeSystem.callbackNumber, hcsNotificationSystemExited, nil)
+	close(computeSystem.waitBlock)
+}
+
+// Wait synchronously waits for the compute system to shutdown or terminate. If
+// the compute system has already exited returns the previous error (if any).
 func (computeSystem *System) Wait() (err error) {
 	operation := "hcsshim::ComputeSystem::Wait"
 	computeSystem.logOperationBegin(operation)
-	defer func() { computeSystem.logOperationEnd(err) }()
+	defer func() { computeSystem.logOperationEnd(operation, err) }()
 
-	err = waitForNotification(computeSystem.callbackNumber, hcsNotificationSystemExited, nil)
-	if err != nil {
-		return makeSystemError(computeSystem, "Wait", "", err, nil)
+	<-computeSystem.waitBlock
+	if computeSystem.waitError != nil {
+		return makeSystemError(computeSystem, "Wait", "", computeSystem.waitError, nil)
 	}
 
 	return nil
 }
 
 // WaitExpectedError synchronously waits for the compute system to shutdown or
-// terminate, and ignores the passed error if it occurs.
+// terminate and returns the error (if any) as long as it does not match
+// `expected`. If the compute system has already exited returns the previous
+// error (if any) as long as it does not match `expected`.
 func (computeSystem *System) WaitExpectedError(expected error) (err error) {
 	operation := "hcsshim::ComputeSystem::WaitExpectedError"
 	computeSystem.logOperationBegin(operation)
-	defer func() { computeSystem.logOperationEnd(err) }()
+	defer func() { computeSystem.logOperationEnd(operation, err) }()
 
-	err = waitForNotification(computeSystem.callbackNumber, hcsNotificationSystemExited, nil)
-	if err != nil && err != expected {
-		return makeSystemError(computeSystem, "WaitExpectedError", "", err, nil)
+	<-computeSystem.waitBlock
+	if computeSystem.waitError != nil && getInnerError(computeSystem.waitError) != expected {
+		return makeSystemError(computeSystem, "WaitExpectedError", "", computeSystem.waitError, nil)
 	}
 
 	return nil
 }
 
-// WaitTimeout synchronously waits for the compute system to terminate or the duration to elapse.
-// If the timeout expires, IsTimeout(err) == true
+// WaitTimeout synchronously waits for the compute system to terminate or the
+// duration to elapse. If the timeout expires, `IsTimeout(err) == true`. If
+// the compute system has already exited returns the previous error (if any).
 func (computeSystem *System) WaitTimeout(timeout time.Duration) (err error) {
 	operation := "hcsshim::ComputeSystem::WaitTimeout"
 	computeSystem.logOperationBegin(operation)
-	defer func() { computeSystem.logOperationEnd(err) }()
+	defer func() { computeSystem.logOperationEnd(operation, err) }()
 
-	err = waitForNotification(computeSystem.callbackNumber, hcsNotificationSystemExited, &timeout)
-	if err != nil {
-		return makeSystemError(computeSystem, "WaitTimeout", "", err, nil)
-	}
-
-	return nil
+	select {
+	case <-computeSystem.waitBlock:
+		if computeSystem.waitError != nil {
+			return makeSystemError(computeSystem, "WaitTimeout", "", computeSystem.waitError, nil)
+		}
+
+		return nil
+	case <-time.After(timeout):
+		return makeSystemError(computeSystem, "WaitTimeout", "", ErrTimeout, nil)
+	}
 }
 
 func (computeSystem *System) Properties(types ...schema1.PropertyType) (_ *schema1.ContainerProperties, err error) {
@ -390,7 +409,7 @@ func (computeSystem *System) Properties(types ...schema1.PropertyType) (_ *schem
 
 	operation := "hcsshim::ComputeSystem::Properties"
 	computeSystem.logOperationBegin(operation)
-	defer func() { computeSystem.logOperationEnd(err) }()
+	defer func() { computeSystem.logOperationEnd(operation, err) }()
 
 	queryj, err := json.Marshal(schema1.PropertyQuery{types})
 	if err != nil {
@ -429,7 +448,7 @@ func (computeSystem *System) Pause() (err error) {
 
 	operation := "hcsshim::ComputeSystem::Pause"
 	computeSystem.logOperationBegin(operation)
-	defer func() { computeSystem.logOperationEnd(err) }()
+	defer func() { computeSystem.logOperationEnd(operation, err) }()
 
 	if computeSystem.handle == 0 {
 		return makeSystemError(computeSystem, "Pause", "", ErrAlreadyClosed, nil)
@ -454,7 +473,7 @@ func (computeSystem *System) Resume() (err error) {
 
 	operation := "hcsshim::ComputeSystem::Resume"
 	computeSystem.logOperationBegin(operation)
-	defer func() { computeSystem.logOperationEnd(err) }()
+	defer func() { computeSystem.logOperationEnd(operation, err) }()
 
 	if computeSystem.handle == 0 {
 		return makeSystemError(computeSystem, "Resume", "", ErrAlreadyClosed, nil)
@ -479,7 +498,7 @@ func (computeSystem *System) CreateProcess(c interface{}) (_ *Process, err error
 
 	operation := "hcsshim::ComputeSystem::CreateProcess"
 	computeSystem.logOperationBegin(operation)
-	defer func() { computeSystem.logOperationEnd(err) }()
+	defer func() { computeSystem.logOperationEnd(operation, err) }()
 
 	var (
 		processInfo hcsProcessInformation
@ -524,6 +543,7 @@ func (computeSystem *System) CreateProcess(c interface{}) (_ *Process, err error
 	if err = process.registerCallback(); err != nil {
 		return nil, makeSystemError(computeSystem, "CreateProcess", "", err, nil)
 	}
+	go process.waitBackground()
 
 	return process, nil
 }
@ -539,7 +559,7 @@ func (computeSystem *System) OpenProcess(pid int) (_ *Process, err error) {
 
 	operation := "hcsshim::ComputeSystem::OpenProcess"
 	computeSystem.logOperationBegin(operation)
-	defer func() { computeSystem.logOperationEnd(err) }()
+	defer func() { computeSystem.logOperationEnd(operation, err) }()
 
 	var (
 		processHandle hcsProcess
@ -562,6 +582,7 @@ func (computeSystem *System) OpenProcess(pid int) (_ *Process, err error) {
 	if err = process.registerCallback(); err != nil {
 		return nil, makeSystemError(computeSystem, "OpenProcess", "", err, nil)
 	}
+	go process.waitBackground()
 
 	return process, nil
 }
@ -573,7 +594,7 @@ func (computeSystem *System) Close() (err error) {
 
 	operation := "hcsshim::ComputeSystem::Close"
 	computeSystem.logOperationBegin(operation)
-	defer func() { computeSystem.logOperationEnd(err) }()
+	defer func() { computeSystem.logOperationEnd(operation, err) }()
 
 	// Don't double free this
 	if computeSystem.handle == 0 {
@ -645,7 +666,7 @@ func (computeSystem *System) unregisterCallback() error {
 	closeChannels(context.channels)
 
 	callbackMapLock.Lock()
-	callbackMap[callbackNumber] = nil
+	delete(callbackMap, callbackNumber)
 	callbackMapLock.Unlock()
 
 	handle = 0
@ -660,7 +681,7 @@ func (computeSystem *System) Modify(config interface{}) (err error) {
 
 	operation := "hcsshim::ComputeSystem::Modify"
 	computeSystem.logOperationBegin(operation)
-	defer func() { computeSystem.logOperationEnd(err) }()
+	defer func() { computeSystem.logOperationEnd(operation, err) }()
 
 	if computeSystem.handle == 0 {
 		return makeSystemError(computeSystem, "Modify", "", ErrAlreadyClosed, nil)
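The new WaitTimeout implementations above multiplex the completion channel against time.After. A minimal, self-contained sketch of that select-with-timeout idiom (illustrative only, no hcsshim types):

package main

import (
	"errors"
	"fmt"
	"time"
)

var errTimeout = errors.New("timeout")

// waitWithTimeout blocks on an already-armed completion channel, but gives up
// after d; result is stored by the notifier before done is closed.
func waitWithTimeout(done <-chan struct{}, result *error, d time.Duration) error {
	select {
	case <-done:
		return *result
	case <-time.After(d):
		return errTimeout
	}
}

func main() {
	done := make(chan struct{})
	var result error
	go func() { result = nil; close(done) }()
	fmt.Println(waitWithTimeout(done, &result, time.Second))
}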
5
vendor/github.com/Microsoft/hcsshim/internal/hcs/waithelper.go
generated
vendored
@ -17,6 +17,11 @@ func processAsyncHcsResult(err error, resultp *uint16, callbackNumber uintptr, e
 func waitForNotification(callbackNumber uintptr, expectedNotification hcsNotification, timeout *time.Duration) error {
 	callbackMapLock.RLock()
+	if _, ok := callbackMap[callbackNumber]; !ok {
+		callbackMapLock.RUnlock()
+		logrus.Errorf("failed to waitForNotification: callbackNumber %d does not exist in callbackMap", callbackNumber)
+		return ErrHandleClose
+	}
 	channels := callbackMap[callbackNumber].channels
 	callbackMapLock.RUnlock()
 
10
vendor/github.com/Microsoft/hcsshim/internal/hns/hnsnetwork.go
generated
vendored
@ -2,9 +2,9 @@ package hns
 
 import (
 	"encoding/json"
-	"net"
+	"errors"
 
 	"github.com/sirupsen/logrus"
+	"net"
 )
 
 // Subnet is associated with a network and represents a list
@ -98,6 +98,12 @@ func (network *HNSNetwork) Create() (*HNSNetwork, error) {
 	title := "hcsshim::HNSNetwork::" + operation
 	logrus.Debugf(title+" id=%s", network.Id)
 
+	for _, subnet := range network.Subnets {
+		if (subnet.AddressPrefix != "") && (subnet.GatewayAddress == "") {
+			return nil, errors.New("network create error, subnet has address prefix but no gateway specified")
+		}
+	}
+
 	jsonString, err := json.Marshal(network)
 	if err != nil {
 		return nil, err
5
vendor/github.com/Microsoft/hcsshim/internal/logfields/fields.go
generated
vendored
@ -26,11 +26,6 @@ const (
 	Uint32 = "uint32"
 	Uint64 = "uint64"
 
-	// HCS
-
-	HCSOperation       = "hcs-op"
-	HCSOperationResult = "hcs-op-result"
-
 	// runhcs
 
 	VMShimOperation = "vmshim-op"
2
vendor/github.com/Microsoft/hcsshim/internal/safefile/safeopen.go
generated
vendored
@ -87,7 +87,7 @@ func OpenRoot(path string) (*os.File, error) {
 
 func ntRelativePath(path string) ([]uint16, error) {
 	path = filepath.Clean(path)
-	if strings.Contains(":", path) {
+	if strings.Contains(path, ":") {
 		// Since alternate data streams must follow the file they
 		// are attached to, finding one here (out of order) is invalid.
 		return nil, errors.New("path contains invalid character `:`")
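The fix above corrects the argument order of strings.Contains, which takes the string to search first and the substring second; the old call asked whether the whole path was contained in ":". A quick illustration:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// strings.Contains(s, substr) reports whether substr is within s.
	fmt.Println(strings.Contains(`dir\file.txt:stream`, ":")) // true  – corrected call
	fmt.Println(strings.Contains(":", `dir\file.txt:stream`)) // false – the old, inverted call
}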
3
vendor/github.com/Microsoft/hcsshim/internal/schema2/plan9_share.go
generated
vendored
@ -10,7 +10,6 @@
 package hcsschema
 
 type Plan9Share struct {
-
 	Name string `json:"Name,omitempty"`
 
 	// The name by which the guest operating system can access this share, via the aname parameter in the Plan9 protocol.
@ -30,4 +29,6 @@ type Plan9Share struct {
 	ReadOnly bool `json:"ReadOnly,omitempty"`
 
 	UseShareRootIdentity bool `json:"UseShareRootIdentity,omitempty"`
+
+	AllowedFiles []string `json:"AllowedFiles,omitempty"`
 }
16
vendor/github.com/Microsoft/hcsshim/vendor.conf
generated
vendored
@ -1,13 +1,20 @@
 github.com/blang/semver v3.1.0
 github.com/containerd/console c12b1e7919c14469339a5d38f2f8ed9b64a9de23
+github.com/containerd/containerd faec567304bbdf6864b1663d4f813641b5880a4a
 github.com/containerd/go-runc 5a6d9f37cfa36b15efba46dc7ea349fa9b7143c3
+github.com/containerd/ttrpc 2a805f71863501300ae1976d29f0454ae003e85a
+github.com/containerd/typeurl a93fcdb778cd272c6e9b3028b2f42d813e785d40
+github.com/gogo/protobuf v1.0.0
+github.com/golang/protobuf v1.1.0
 github.com/hashicorp/errwrap 7554cd9344cec97297fa6649b055a8c98c2a1e55
 github.com/hashicorp/go-multierror ed905158d87462226a13fe39ddf685ea65f1c11f
 github.com/konsorten/go-windows-terminal-sequences v1.0.1
 github.com/linuxkit/virtsock 8e79449dea0735c1c056d814934dd035734cc97c
-github.com/Microsoft/go-winio 16cfc975803886a5e47c4257a24c8d8c52e178b2
+github.com/Microsoft/go-winio c599b533b43b1363d7d7c6cfda5ede70ed73ff13
 github.com/Microsoft/opengcs v0.3.9
-github.com/opencontainers/runtime-spec eba862dc2470385a233c7507392675cbeadf7353
+github.com/opencontainers/go-digest c9281466c8b2f606084ac71339773efd177436e7
+github.com/opencontainers/runc 12f6a991201fdb8f82579582d5e00e28fba06d0a
+github.com/opencontainers/runtime-spec 29686dbc5559d93fb1ef402eeda3e35c38d75af4
 github.com/opencontainers/runtime-tools 1d69bd0f9c39677d0630e50664fbc3154ae61b88
 github.com/pkg/errors v0.8.1
 github.com/sirupsen/logrus v1.3.0
@ -17,5 +24,10 @@ github.com/xeipuuv/gojsonpointer 4e3ac2762d5f479393488629ee9370b50873b3a6
 github.com/xeipuuv/gojsonreference bd5ef7bd5415a7ac448318e64f11a24cd21e594b
 github.com/xeipuuv/gojsonschema 1d523034197ff1f222f6429836dd36a2457a1874
 golang.org/x/crypto ff983b9c42bc9fbf91556e191cc8efb585c16908
+golang.org/x/net ed066c81e75eba56dd9bd2139ade88125b855585
 golang.org/x/sync 37e7f081c4d4c64e13b10787722085407fe5d15f
 golang.org/x/sys e5ecc2a6747ce8d4af18ed98b3de5ae30eb3a5bb
+golang.org/x/text d14c52b222ee852cdba8b07206ca0c614b389876
+google.golang.org/genproto d80a6e20e776b0b17a324d0ba1ab50a39c8e8944
+google.golang.org/grpc v1.12.0
+k8s.io/kubernetes v1.13.0
183
vendor/github.com/containerd/containerd/api/types/descriptor.pb.go
generated
vendored
@ -28,6 +28,7 @@ import github_com_opencontainers_go_digest "github.com/opencontainers/go-digest"
 
 import strings "strings"
 import reflect "reflect"
+import sortkeys "github.com/gogo/protobuf/sortkeys"
 
 import io "io"
 
@ -51,6 +52,7 @@ type Descriptor struct {
 	MediaType string `protobuf:"bytes,1,opt,name=media_type,json=mediaType,proto3" json:"media_type,omitempty"`
 	Digest github_com_opencontainers_go_digest.Digest `protobuf:"bytes,2,opt,name=digest,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"digest"`
 	Size_ int64 `protobuf:"varint,3,opt,name=size,proto3" json:"size,omitempty"`
+	Annotations map[string]string `protobuf:"bytes,5,rep,name=annotations" json:"annotations,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
 }
 
 func (m *Descriptor) Reset() { *m = Descriptor{} }
@ -92,6 +94,23 @@ func (m *Descriptor) MarshalTo(dAtA []byte) (int, error) {
 		i++
 		i = encodeVarintDescriptor(dAtA, i, uint64(m.Size_))
 	}
+	if len(m.Annotations) > 0 {
+		for k, _ := range m.Annotations {
+			dAtA[i] = 0x2a
+			i++
+			v := m.Annotations[k]
+			mapSize := 1 + len(k) + sovDescriptor(uint64(len(k))) + 1 + len(v) + sovDescriptor(uint64(len(v)))
+			i = encodeVarintDescriptor(dAtA, i, uint64(mapSize))
+			dAtA[i] = 0xa
+			i++
+			i = encodeVarintDescriptor(dAtA, i, uint64(len(k)))
+			i += copy(dAtA[i:], k)
+			dAtA[i] = 0x12
+			i++
+			i = encodeVarintDescriptor(dAtA, i, uint64(len(v)))
+			i += copy(dAtA[i:], v)
+		}
+	}
 	return i, nil
 }
 
@ -118,6 +137,14 @@ func (m *Descriptor) Size() (n int) {
 	if m.Size_ != 0 {
 		n += 1 + sovDescriptor(uint64(m.Size_))
 	}
+	if len(m.Annotations) > 0 {
+		for k, v := range m.Annotations {
+			_ = k
+			_ = v
+			mapEntrySize := 1 + len(k) + sovDescriptor(uint64(len(k))) + 1 + len(v) + sovDescriptor(uint64(len(v)))
+			n += mapEntrySize + 1 + sovDescriptor(uint64(mapEntrySize))
+		}
+	}
 	return n
 }
 
@ -138,10 +165,21 @@ func (this *Descriptor) String() string {
 	if this == nil {
 		return "nil"
 	}
+	keysForAnnotations := make([]string, 0, len(this.Annotations))
+	for k, _ := range this.Annotations {
+		keysForAnnotations = append(keysForAnnotations, k)
+	}
+	sortkeys.Strings(keysForAnnotations)
+	mapStringForAnnotations := "map[string]string{"
+	for _, k := range keysForAnnotations {
+		mapStringForAnnotations += fmt.Sprintf("%v: %v,", k, this.Annotations[k])
+	}
+	mapStringForAnnotations += "}"
 	s := strings.Join([]string{`&Descriptor{`,
 		`MediaType:` + fmt.Sprintf("%v", this.MediaType) + `,`,
 		`Digest:` + fmt.Sprintf("%v", this.Digest) + `,`,
 		`Size_:` + fmt.Sprintf("%v", this.Size_) + `,`,
+		`Annotations:` + mapStringForAnnotations + `,`,
 		`}`,
 	}, "")
 	return s
@ -260,6 +298,124 @@ func (m *Descriptor) Unmarshal(dAtA []byte) error {
 					break
 				}
 			}
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowDescriptor
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthDescriptor
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Annotations == nil {
+				m.Annotations = make(map[string]string)
+			}
+			var mapkey string
+			var mapvalue string
+			for iNdEx < postIndex {
+				entryPreIndex := iNdEx
+				var wire uint64
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowDescriptor
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					wire |= (uint64(b) & 0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				fieldNum := int32(wire >> 3)
+				if fieldNum == 1 {
+					var stringLenmapkey uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowDescriptor
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapkey |= (uint64(b) & 0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					intStringLenmapkey := int(stringLenmapkey)
+					if intStringLenmapkey < 0 {
+						return ErrInvalidLengthDescriptor
+					}
+					postStringIndexmapkey := iNdEx + intStringLenmapkey
+					if postStringIndexmapkey > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+					iNdEx = postStringIndexmapkey
+				} else if fieldNum == 2 {
+					var stringLenmapvalue uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowDescriptor
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapvalue |= (uint64(b) & 0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					intStringLenmapvalue := int(stringLenmapvalue)
+					if intStringLenmapvalue < 0 {
+						return ErrInvalidLengthDescriptor
+					}
+					postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+					if postStringIndexmapvalue > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
+					iNdEx = postStringIndexmapvalue
+				} else {
+					iNdEx = entryPreIndex
+					skippy, err := skipDescriptor(dAtA[iNdEx:])
+					if err != nil {
+						return err
+					}
+					if skippy < 0 {
+						return ErrInvalidLengthDescriptor
+					}
+					if (iNdEx + skippy) > postIndex {
+						return io.ErrUnexpectedEOF
+					}
+					iNdEx += skippy
+				}
+			}
+			m.Annotations[mapkey] = mapvalue
+			iNdEx = postIndex
 		default:
 			iNdEx = preIndex
 			skippy, err := skipDescriptor(dAtA[iNdEx:])
@@ -391,20 +547,25 @@ func init() {
 }
 
 var fileDescriptorDescriptor = []byte{
-	// 234 bytes of a gzipped FileDescriptorProto
+	// 311 bytes of a gzipped FileDescriptorProto
 	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xb2, 0x4e, 0xcf, 0x2c, 0xc9,
 	0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d,
 	0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0x97, 0x54, 0x16, 0xa4, 0x16, 0xeb, 0xa7, 0xa4, 0x16,
 	0x27, 0x17, 0x65, 0x16, 0x94, 0xe4, 0x17, 0xe9, 0x15, 0x14, 0xe5, 0x97, 0xe4, 0x0b, 0x09, 0x20,
 	0x94, 0xe9, 0x81, 0x95, 0x48, 0x89, 0xa4, 0xe7, 0xa7, 0xe7, 0x83, 0x25, 0xf5, 0x41, 0x2c, 0x88,
-	0x3a, 0xa5, 0x6e, 0x46, 0x2e, 0x2e, 0x17, 0xb8, 0x66, 0x21, 0x59, 0x2e, 0xae, 0xdc, 0xd4, 0x94,
-	0xcc, 0xc4, 0x78, 0x90, 0x1e, 0x09, 0x46, 0x05, 0x46, 0x0d, 0xce, 0x20, 0x4e, 0xb0, 0x48, 0x48,
-	0x65, 0x41, 0xaa, 0x90, 0x17, 0x17, 0x5b, 0x4a, 0x66, 0x7a, 0x6a, 0x71, 0x89, 0x04, 0x13, 0x48,
-	0xca, 0xc9, 0xe8, 0xc4, 0x3d, 0x79, 0x86, 0x5b, 0xf7, 0xe4, 0xb5, 0x90, 0x9c, 0x9a, 0x5f, 0x90,
-	0x9a, 0x07, 0xb7, 0xbc, 0x58, 0x3f, 0x3d, 0x5f, 0x17, 0xa2, 0x45, 0xcf, 0x05, 0x4c, 0x05, 0x41,
-	0x4d, 0x10, 0x12, 0xe2, 0x62, 0x29, 0xce, 0xac, 0x4a, 0x95, 0x60, 0x56, 0x60, 0xd4, 0x60, 0x0e,
-	0x02, 0xb3, 0x9d, 0xbc, 0x4e, 0x3c, 0x94, 0x63, 0xb8, 0xf1, 0x50, 0x8e, 0xa1, 0xe1, 0x91, 0x1c,
-	0xe3, 0x89, 0x47, 0x72, 0x8c, 0x17, 0x1e, 0xc9, 0x31, 0x3e, 0x78, 0x24, 0xc7, 0x18, 0x65, 0x40,
-	0x7c, 0x60, 0x58, 0x83, 0xc9, 0x08, 0x86, 0x24, 0x36, 0xb0, 0x17, 0x8d, 0x01, 0x01, 0x00, 0x00,
-	0xff, 0xff, 0xea, 0xac, 0x78, 0x9a, 0x49, 0x01, 0x00, 0x00,
+	0x3a, 0xa5, 0x39, 0x4c, 0x5c, 0x5c, 0x2e, 0x70, 0xcd, 0x42, 0xb2, 0x5c, 0x5c, 0xb9, 0xa9, 0x29,
+	0x99, 0x89, 0xf1, 0x20, 0x3d, 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0x9c, 0x41, 0x9c, 0x60, 0x91, 0x90,
+	0xca, 0x82, 0x54, 0x21, 0x2f, 0x2e, 0xb6, 0x94, 0xcc, 0xf4, 0xd4, 0xe2, 0x12, 0x09, 0x26, 0x90,
+	0x94, 0x93, 0xd1, 0x89, 0x7b, 0xf2, 0x0c, 0xb7, 0xee, 0xc9, 0x6b, 0x21, 0x39, 0x35, 0xbf, 0x20,
+	0x35, 0x0f, 0x6e, 0x79, 0xb1, 0x7e, 0x7a, 0xbe, 0x2e, 0x44, 0x8b, 0x9e, 0x0b, 0x98, 0x0a, 0x82,
+	0x9a, 0x20, 0x24, 0xc4, 0xc5, 0x52, 0x9c, 0x59, 0x95, 0x2a, 0xc1, 0xac, 0xc0, 0xa8, 0xc1, 0x1c,
+	0x04, 0x66, 0x0b, 0xf9, 0x73, 0x71, 0x27, 0xe6, 0xe5, 0xe5, 0x97, 0x24, 0x96, 0x64, 0xe6, 0xe7,
+	0x15, 0x4b, 0xb0, 0x2a, 0x30, 0x6b, 0x70, 0x1b, 0xe9, 0xea, 0xa1, 0xfb, 0x45, 0x0f, 0xe1, 0x62,
+	0x3d, 0x47, 0x84, 0x7a, 0xd7, 0xbc, 0x92, 0xa2, 0xca, 0x20, 0x64, 0x13, 0xa4, 0xec, 0xb8, 0x04,
+	0xd0, 0x15, 0x08, 0x09, 0x70, 0x31, 0x67, 0xa7, 0x56, 0x42, 0x3d, 0x07, 0x62, 0x0a, 0x89, 0x70,
+	0xb1, 0x96, 0x25, 0xe6, 0x94, 0xa6, 0x42, 0x7c, 0x15, 0x04, 0xe1, 0x58, 0x31, 0x59, 0x30, 0x3a,
+	0x79, 0x9d, 0x78, 0x28, 0xc7, 0x70, 0xe3, 0xa1, 0x1c, 0x43, 0xc3, 0x23, 0x39, 0xc6, 0x13, 0x8f,
+	0xe4, 0x18, 0x2f, 0x3c, 0x92, 0x63, 0x7c, 0xf0, 0x48, 0x8e, 0x31, 0xca, 0x80, 0xf8, 0xd8, 0xb1,
+	0x06, 0x93, 0x11, 0x0c, 0x49, 0x6c, 0xe0, 0x30, 0x37, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0x22,
+	0x8a, 0x20, 0x4a, 0xda, 0x01, 0x00, 0x00,
 }
vendor/github.com/containerd/containerd/api/types/descriptor.proto (generated, vendored, 1 change)
@@ -15,4 +15,5 @@ message Descriptor {
 	string media_type = 1;
 	string digest = 2 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
 	int64 size = 3;
+	map<string, string> annotations = 5;
 }
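The new field carries the optional annotations map from the OCI image spec, so arbitrary key/value metadata on an image target can now survive the round trip through the containerd API types. A minimal sketch of a descriptor carrying annotations on the Go side; the digest value and annotation key below are placeholders, not values from this commit:

package main

import (
	"fmt"

	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	// A target descriptor whose Annotations map is now preserved by the
	// generated types.Descriptor conversion helpers.
	desc := ocispec.Descriptor{
		MediaType: ocispec.MediaTypeImageManifest,
		Digest:    "sha256:0000000000000000000000000000000000000000000000000000000000000000", // placeholder
		Size:      1024,
		Annotations: map[string]string{
			"org.example.note": "hypothetical annotation", // illustrative key
		},
	}
	fmt.Println(desc.Annotations["org.example.note"])
}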
vendor/github.com/containerd/containerd/cmd/ctr/commands/images/images.go (generated, vendored, 4 changes)
@@ -54,7 +54,7 @@ var listCommand = cli.Command{
 	Name:        "list",
 	Aliases:     []string{"ls"},
 	Usage:       "list images known to containerd",
-	ArgsUsage:   "[flags] <ref>",
+	ArgsUsage:   "[flags] [<filter>, ...]",
 	Description: "list images registered with containerd",
 	Flags: []cli.Flag{
 		cli.BoolFlag{
@@ -196,7 +196,7 @@ var setLabelsCommand = cli.Command{
 var checkCommand = cli.Command{
 	Name:        "check",
 	Usage:       "check that an image has all content available locally",
-	ArgsUsage:   "[flags] <ref> [<ref>, ...]",
+	ArgsUsage:   "[flags] [<filter>, ...]",
 	Description: "check that an image has all content available locally",
 	Flags:       commands.SnapshotterFlags,
 	Action: func(context *cli.Context) error {
vendor/github.com/containerd/containerd/cmd/ctr/commands/images/import.go (generated, vendored, 6 changes)
@@ -64,6 +64,10 @@ If foobar.tar contains an OCI ref named "latest" and anonymous ref "sha256:deadb
 			Name:  "index-name",
 			Usage: "image name to keep index as, by default index is discarded",
 		},
+		cli.BoolFlag{
+			Name:  "all-platforms",
+			Usage: "imports content for all platforms, false by default",
+		},
 	}, commands.SnapshotterFlags...),
 
 	Action: func(context *cli.Context) error {
@@ -89,6 +93,8 @@ If foobar.tar contains an OCI ref named "latest" and anonymous ref "sha256:deadb
 			opts = append(opts, containerd.WithIndexName(idxName))
 		}
 
+		opts = append(opts, containerd.WithAllPlatforms(context.Bool("all-platforms")))
+
 		client, ctx, cancel, err := commands.NewClient(context)
 		if err != nil {
 			return err
vendor/github.com/containerd/containerd/container_checkpoint_opts.go (generated, vendored, 1 change)
@@ -74,6 +74,7 @@ func WithCheckpointTask(ctx context.Context, client *Client, c *containers.Conta
 			Size:     d.Size_,
 			Digest:   d.Digest,
 			Platform: &platformSpec,
+			Annotations: d.Annotations,
 		})
 	}
 	// save copts
vendor/github.com/containerd/containerd/contrib/seccomp/seccomp_default.go (generated, vendored, 2 changes)
@@ -161,6 +161,7 @@ func DefaultProfile(sp *specs.Spec) *specs.LinuxSeccomp {
 		"ioctl",
 		"io_destroy",
 		"io_getevents",
+		"io_pgetevents",
 		"ioprio_get",
 		"ioprio_set",
 		"io_setup",
@@ -319,6 +320,7 @@ func DefaultProfile(sp *specs.Spec) *specs.LinuxSeccomp {
 		"stat64",
 		"statfs",
 		"statfs64",
+		"statx",
 		"symlink",
 		"symlinkat",
 		"sync",
vendor/github.com/containerd/containerd/defaults/defaults_windows.go (generated, vendored, 4 changes)
@@ -26,10 +26,10 @@ import (
 var (
 	// DefaultRootDir is the default location used by containerd to store
 	// persistent data
-	DefaultRootDir = filepath.Join(os.Getenv("programfiles"), "containerd", "root")
+	DefaultRootDir = filepath.Join(os.Getenv("ProgramData"), "containerd", "root")
 	// DefaultStateDir is the default location used by containerd to store
 	// transient data
-	DefaultStateDir = filepath.Join(os.Getenv("programfiles"), "containerd", "state")
+	DefaultStateDir = filepath.Join(os.Getenv("ProgramData"), "containerd", "state")
 )
 
 const (
vendor/github.com/containerd/containerd/diff.go (generated, vendored, 2 changes)
@@ -83,6 +83,7 @@ func toDescriptor(d *types.Descriptor) ocispec.Descriptor {
 		MediaType: d.MediaType,
 		Digest:    d.Digest,
 		Size:      d.Size_,
+		Annotations: d.Annotations,
 	}
 }
 
@@ -91,6 +92,7 @@ func fromDescriptor(d ocispec.Descriptor) *types.Descriptor {
 		MediaType: d.MediaType,
 		Digest:    d.Digest,
 		Size_:     d.Size,
+		Annotations: d.Annotations,
 	}
 }
vendor/github.com/containerd/containerd/image_store.go (generated, vendored, 2 changes)
@@ -140,6 +140,7 @@ func descFromProto(desc *types.Descriptor) ocispec.Descriptor {
 		MediaType: desc.MediaType,
 		Size:      desc.Size_,
 		Digest:    desc.Digest,
+		Annotations: desc.Annotations,
 	}
 }
 
@@ -148,5 +149,6 @@ func descToProto(desc *ocispec.Descriptor) types.Descriptor {
 		MediaType: desc.MediaType,
 		Size_:     desc.Size,
 		Digest:    desc.Digest,
+		Annotations: desc.Annotations,
 	}
 }
vendor/github.com/containerd/containerd/import.go (generated, vendored, 15 changes)
@@ -25,6 +25,7 @@ import (
 	"github.com/containerd/containerd/errdefs"
 	"github.com/containerd/containerd/images"
 	"github.com/containerd/containerd/images/archive"
+	"github.com/containerd/containerd/platforms"
 	digest "github.com/opencontainers/go-digest"
 	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
 )
@@ -33,6 +34,7 @@ type importOpts struct {
 	indexName string
 	imageRefT func(string) string
 	dgstRefT  func(digest.Digest) string
+	allPlatforms bool
 }
 
 // ImportOpt allows the caller to specify import specific options
@@ -64,6 +66,14 @@ func WithIndexName(name string) ImportOpt {
 	}
 }
 
+// WithAllPlatforms is used to import content for all platforms.
+func WithAllPlatforms(allPlatforms bool) ImportOpt {
+	return func(c *importOpts) error {
+		c.allPlatforms = allPlatforms
+		return nil
+	}
+}
+
 // Import imports an image from a Tar stream using reader.
 // Caller needs to specify importer. Future version may use oci.v1 as the default.
 // Note that unreferrenced blobs may be imported to the content store as well.
@@ -98,6 +108,10 @@ func (c *Client) Import(ctx context.Context, reader io.Reader, opts ...ImportOpt
 			Target: index,
 		})
 	}
+	var platformMatcher = platforms.All
+	if !iopts.allPlatforms {
+		platformMatcher = platforms.Default()
+	}
 
 	var handler images.HandlerFunc = func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
 		// Only save images at top level
@@ -141,6 +155,7 @@ func (c *Client) Import(ctx context.Context, reader io.Reader, opts ...ImportOpt
 		return idx.Manifests, nil
 	}
 
+	handler = images.FilterPlatforms(handler, platformMatcher)
 	handler = images.SetChildrenLabels(cs, handler)
 	if err := images.Walk(ctx, handler, index); err != nil {
 		return nil, err
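Downstream of these changes, a client can ask for every platform in an archive to be imported instead of only the host's default platform. A rough usage sketch, assuming a reachable containerd socket; the socket path, namespace, and tar filename are illustrative:

package main

import (
	"context"
	"log"
	"os"

	"github.com/containerd/containerd"
	"github.com/containerd/containerd/namespaces"
)

func main() {
	client, err := containerd.New("/run/containerd/containerd.sock")
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	f, err := os.Open("images.tar") // illustrative archive name
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	ctx := namespaces.WithNamespace(context.Background(), "default")
	// Import content for every platform in the archive, not just the default one.
	imgs, err := client.Import(ctx, f, containerd.WithAllPlatforms(true))
	if err != nil {
		log.Fatal(err)
	}
	for _, img := range imgs {
		log.Println("imported", img.Name)
	}
}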
vendor/github.com/containerd/containerd/metadata/adaptors.go (generated, vendored, 2 changes)
@@ -51,6 +51,8 @@ func adaptImage(o interface{}) filters.Adaptor {
 			return checkMap(fieldpath[1:], obj.Labels)
 			// TODO(stevvooe): Greater/Less than filters would be awesome for
 			// size. Let's do it!
+		case "annotations":
+			return checkMap(fieldpath[1:], obj.Target.Annotations)
 		}
 
 		return "", false
vendor/github.com/containerd/containerd/metadata/boltutil/helpers.go (generated, vendored, 28 changes)
@@ -24,6 +24,7 @@ import (
 )
 
 var (
+	bucketKeyAnnotations = []byte("annotations")
 	bucketKeyLabels    = []byte("labels")
 	bucketKeyCreatedAt = []byte("createdat")
 	bucketKeyUpdatedAt = []byte("updatedat")
@@ -32,7 +33,17 @@ var (
 // ReadLabels reads the labels key from the bucket
 // Uses the key "labels"
 func ReadLabels(bkt *bolt.Bucket) (map[string]string, error) {
-	lbkt := bkt.Bucket(bucketKeyLabels)
+	return readMap(bkt, bucketKeyLabels)
+}
+
+// ReadAnnotations reads the OCI Descriptor Annotations key from the bucket
+// Uses the key "annotations"
+func ReadAnnotations(bkt *bolt.Bucket) (map[string]string, error) {
+	return readMap(bkt, bucketKeyAnnotations)
+}
+
+func readMap(bkt *bolt.Bucket, bucketName []byte) (map[string]string, error) {
+	lbkt := bkt.Bucket(bucketName)
 	if lbkt == nil {
 		return nil, nil
 	}
@@ -53,9 +64,18 @@ func ReadLabels(bkt *bolt.Bucket) (map[string]string, error) {
 // bucket. Typically, this removes zero-value entries.
 // Uses the key "labels"
 func WriteLabels(bkt *bolt.Bucket, labels map[string]string) error {
+	return writeMap(bkt, bucketKeyLabels, labels)
+}
+
+// WriteAnnotations writes the OCI Descriptor Annotations
+func WriteAnnotations(bkt *bolt.Bucket, labels map[string]string) error {
+	return writeMap(bkt, bucketKeyAnnotations, labels)
+}
+
+func writeMap(bkt *bolt.Bucket, bucketName []byte, labels map[string]string) error {
 	// Remove existing labels to keep from merging
-	if lbkt := bkt.Bucket(bucketKeyLabels); lbkt != nil {
-		if err := bkt.DeleteBucket(bucketKeyLabels); err != nil {
+	if lbkt := bkt.Bucket(bucketName); lbkt != nil {
+		if err := bkt.DeleteBucket(bucketName); err != nil {
 			return err
 		}
 	}
@@ -64,7 +84,7 @@ func WriteLabels(bkt *bolt.Bucket, labels map[string]string) error {
 		return nil
 	}
 
-	lbkt, err := bkt.CreateBucket(bucketKeyLabels)
+	lbkt, err := bkt.CreateBucket(bucketName)
 	if err != nil {
 		return err
 	}
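The new helpers mirror the existing label persistence, storing annotations under their own nested bucket. A small round-trip sketch against a throwaway database; it assumes the bbolt fork ("go.etcd.io/bbolt") that this vendor tree appears to use for the bolt package, and the file path is illustrative:

package main

import (
	"fmt"
	"log"

	"github.com/containerd/containerd/metadata/boltutil"
	bolt "go.etcd.io/bbolt"
)

func main() {
	db, err := bolt.Open("/tmp/annotations-demo.db", 0600, nil) // illustrative path
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	err = db.Update(func(tx *bolt.Tx) error {
		bkt, err := tx.CreateBucketIfNotExists([]byte("demo"))
		if err != nil {
			return err
		}
		// Persist the map under the nested "annotations" bucket...
		if err := boltutil.WriteAnnotations(bkt, map[string]string{"org.example.note": "value"}); err != nil {
			return err
		}
		// ...and read it back through the matching helper.
		annotations, err := boltutil.ReadAnnotations(bkt)
		if err != nil {
			return err
		}
		fmt.Println(annotations["org.example.note"])
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}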
vendor/github.com/containerd/containerd/metadata/images.go (generated, vendored, 19 changes)
@@ -192,6 +192,14 @@ func (s *imageStore) Update(ctx context.Context, image images.Image, fieldpaths
 				key := strings.TrimPrefix(path, "labels.")
 				updated.Labels[key] = image.Labels[key]
 				continue
+			} else if strings.HasPrefix(path, "annotations.") {
+				if updated.Target.Annotations == nil {
+					updated.Target.Annotations = map[string]string{}
+				}
+
+				key := strings.TrimPrefix(path, "annotations.")
+				updated.Target.Annotations[key] = image.Target.Annotations[key]
+				continue
 			}
 
 			switch path {
@@ -204,6 +212,8 @@ func (s *imageStore) Update(ctx context.Context, image images.Image, fieldpaths
 				// make sense to modify the size or digest without touching the
 				// mediatype, as well, for example.
 				updated.Target = image.Target
+			case "annotations":
+				updated.Target.Annotations = image.Target.Annotations
 			default:
 				return errors.Wrapf(errdefs.ErrInvalidArgument, "cannot update %q field on image %q", path, image.Name)
 			}
@@ -298,6 +308,11 @@ func readImage(image *images.Image, bkt *bolt.Bucket) error {
 	}
 	image.Labels = labels
 
+	image.Target.Annotations, err = boltutil.ReadAnnotations(bkt)
+	if err != nil {
+		return err
+	}
+
 	tbkt := bkt.Bucket(bucketKeyTarget)
 	if tbkt == nil {
 		return errors.New("unable to read target bucket")
@@ -331,6 +346,10 @@ func writeImage(bkt *bolt.Bucket, image *images.Image) error {
 		return errors.Wrapf(err, "writing labels for image %v", image.Name)
 	}
 
+	if err := boltutil.WriteAnnotations(bkt, image.Target.Annotations); err != nil {
+		return errors.Wrapf(err, "writing Annotations for image %v", image.Name)
+	}
+
 	// write the target bucket
 	tbkt, err := bkt.CreateBucketIfNotExists(bucketKeyTarget)
 	if err != nil {
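With the store accepting "annotations" fieldpaths, a single annotation on an image's target can be updated much like a label. A sketch of the call pattern, assuming an existing *containerd.Client and a namespaced context; the image name and annotation key are placeholders:

// updateNote sets one target annotation through the metadata store's
// new fieldpath support.
func updateNote(ctx context.Context, client *containerd.Client, name string) error {
	is := client.ImageService()

	img, err := is.Get(ctx, name)
	if err != nil {
		return err
	}
	if img.Target.Annotations == nil {
		img.Target.Annotations = map[string]string{}
	}
	img.Target.Annotations["org.example.note"] = "updated" // illustrative key

	// "annotations.org.example.note" touches only that key; a bare
	// "annotations" fieldpath would replace the whole map.
	_, err = is.Update(ctx, img, "annotations.org.example.note")
	return err
}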
vendor/github.com/containerd/containerd/oci/spec_opts.go (generated, vendored, 11 changes)
@@ -741,7 +741,9 @@ func WithCapabilities(caps []string) SpecOpts {
 }
 
 // WithAllCapabilities sets all linux capabilities for the process
-var WithAllCapabilities = WithCapabilities(GetAllCapabilities())
+var WithAllCapabilities = func(ctx context.Context, client Client, c *containers.Container, s *Spec) error {
+	return WithCapabilities(GetAllCapabilities())(ctx, client, c, s)
+}
 
 // GetAllCapabilities returns all caps up to CAP_LAST_CAP
 // or CAP_BLOCK_SUSPEND on RHEL6
@@ -771,11 +773,14 @@ func capsContain(caps []string, s string) bool {
 }
 
 func removeCap(caps *[]string, s string) {
-	for i, c := range *caps {
+	var newcaps []string
+	for _, c := range *caps {
 		if c == s {
-			*caps = append((*caps)[:i], (*caps)[i+1:]...)
+			continue
 		}
+		newcaps = append(newcaps, c)
 	}
+	*caps = newcaps
 }
 
 // WithAddedCapabilities adds the provided capabilities
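Rewriting WithAllCapabilities as a function literal keeps it usable anywhere an oci.SpecOpts is accepted while resolving the capability list only when the spec is built. A sketch of passing it when creating a container, assuming an existing client, namespaced context, and pulled image; the container and snapshot IDs are illustrative:

// newAllCapsContainer builds a container whose spec grants every
// capability up to CAP_LAST_CAP via the reworked helper.
func newAllCapsContainer(ctx context.Context, client *containerd.Client, image containerd.Image) (containerd.Container, error) {
	return client.NewContainer(ctx, "all-caps-demo",
		containerd.WithNewSnapshot("all-caps-demo-snapshot", image),
		containerd.WithNewSpec(
			oci.WithImageConfig(image),
			oci.WithAllCapabilities, // still satisfies oci.SpecOpts after the change
		),
	)
}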
vendor/github.com/containerd/containerd/services/content/contentserver/contentserver.go (generated, vendored, new file, 463 lines)
@@ -0,0 +1,463 @@ (entire file added)
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package contentserver

import (
	"context"
	"io"
	"sync"

	api "github.com/containerd/containerd/api/services/content/v1"
	"github.com/containerd/containerd/content"
	"github.com/containerd/containerd/errdefs"
	"github.com/containerd/containerd/log"
	ptypes "github.com/gogo/protobuf/types"
	digest "github.com/opencontainers/go-digest"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

type service struct {
	store content.Store
}

var bufPool = sync.Pool{
	New: func() interface{} {
		buffer := make([]byte, 1<<20)
		return &buffer
	},
}

// New returns the content GRPC server
func New(cs content.Store) api.ContentServer {
	return &service{store: cs}
}

func (s *service) Register(server *grpc.Server) error {
	api.RegisterContentServer(server, s)
	return nil
}

func (s *service) Info(ctx context.Context, req *api.InfoRequest) (*api.InfoResponse, error) {
	if err := req.Digest.Validate(); err != nil {
		return nil, status.Errorf(codes.InvalidArgument, "%q failed validation", req.Digest)
	}

	bi, err := s.store.Info(ctx, req.Digest)
	if err != nil {
		return nil, errdefs.ToGRPC(err)
	}

	return &api.InfoResponse{
		Info: infoToGRPC(bi),
	}, nil
}

func (s *service) Update(ctx context.Context, req *api.UpdateRequest) (*api.UpdateResponse, error) {
	if err := req.Info.Digest.Validate(); err != nil {
		return nil, status.Errorf(codes.InvalidArgument, "%q failed validation", req.Info.Digest)
	}

	info, err := s.store.Update(ctx, infoFromGRPC(req.Info), req.UpdateMask.GetPaths()...)
	if err != nil {
		return nil, errdefs.ToGRPC(err)
	}

	return &api.UpdateResponse{
		Info: infoToGRPC(info),
	}, nil
}

func (s *service) List(req *api.ListContentRequest, session api.Content_ListServer) error {
	var (
		buffer    []api.Info
		sendBlock = func(block []api.Info) error {
			// send last block
			return session.Send(&api.ListContentResponse{
				Info: block,
			})
		}
	)

	if err := s.store.Walk(session.Context(), func(info content.Info) error {
		buffer = append(buffer, api.Info{
			Digest:    info.Digest,
			Size_:     info.Size,
			CreatedAt: info.CreatedAt,
			Labels:    info.Labels,
		})

		if len(buffer) >= 100 {
			if err := sendBlock(buffer); err != nil {
				return err
			}

			buffer = buffer[:0]
		}

		return nil
	}, req.Filters...); err != nil {
		return err
	}

	if len(buffer) > 0 {
		// send last block
		if err := sendBlock(buffer); err != nil {
			return err
		}
	}

	return nil
}

func (s *service) Delete(ctx context.Context, req *api.DeleteContentRequest) (*ptypes.Empty, error) {
	log.G(ctx).WithField("digest", req.Digest).Debugf("delete content")
	if err := req.Digest.Validate(); err != nil {
		return nil, status.Errorf(codes.InvalidArgument, err.Error())
	}

	if err := s.store.Delete(ctx, req.Digest); err != nil {
		return nil, errdefs.ToGRPC(err)
	}

	return &ptypes.Empty{}, nil
}

func (s *service) Read(req *api.ReadContentRequest, session api.Content_ReadServer) error {
	if err := req.Digest.Validate(); err != nil {
		return status.Errorf(codes.InvalidArgument, "%v: %v", req.Digest, err)
	}

	oi, err := s.store.Info(session.Context(), req.Digest)
	if err != nil {
		return errdefs.ToGRPC(err)
	}

	ra, err := s.store.ReaderAt(session.Context(), ocispec.Descriptor{Digest: req.Digest})
	if err != nil {
		return errdefs.ToGRPC(err)
	}
	defer ra.Close()

	var (
		offset = req.Offset
		// size is read size, not the expected size of the blob (oi.Size), which the caller might not be aware of.
		// offset+size can be larger than oi.Size.
		size = req.Size_

		// TODO(stevvooe): Using the global buffer pool. At 32KB, it is probably
		// little inefficient for work over a fast network. We can tune this later.
		p = bufPool.Get().(*[]byte)
	)
	defer bufPool.Put(p)

	if offset < 0 {
		offset = 0
	}

	if offset > oi.Size {
		return status.Errorf(codes.OutOfRange, "read past object length %v bytes", oi.Size)
	}

	if size <= 0 || offset+size > oi.Size {
		size = oi.Size - offset
	}

	_, err = io.CopyBuffer(
		&readResponseWriter{session: session},
		io.NewSectionReader(ra, offset, size), *p)
	return errdefs.ToGRPC(err)
}

// readResponseWriter is a writer that places the output into ReadContentRequest messages.
//
// This allows io.CopyBuffer to do the heavy lifting of chunking the responses
// into the buffer size.
type readResponseWriter struct {
	offset  int64
	session api.Content_ReadServer
}

func (rw *readResponseWriter) Write(p []byte) (n int, err error) {
	if err := rw.session.Send(&api.ReadContentResponse{
		Offset: rw.offset,
		Data:   p,
	}); err != nil {
		return 0, err
	}

	rw.offset += int64(len(p))
	return len(p), nil
}

func (s *service) Status(ctx context.Context, req *api.StatusRequest) (*api.StatusResponse, error) {
	status, err := s.store.Status(ctx, req.Ref)
	if err != nil {
		return nil, errdefs.ToGRPCf(err, "could not get status for ref %q", req.Ref)
	}

	var resp api.StatusResponse
	resp.Status = &api.Status{
		StartedAt: status.StartedAt,
		UpdatedAt: status.UpdatedAt,
		Ref:       status.Ref,
		Offset:    status.Offset,
		Total:     status.Total,
		Expected:  status.Expected,
	}

	return &resp, nil
}

func (s *service) ListStatuses(ctx context.Context, req *api.ListStatusesRequest) (*api.ListStatusesResponse, error) {
	statuses, err := s.store.ListStatuses(ctx, req.Filters...)
	if err != nil {
		return nil, errdefs.ToGRPC(err)
	}

	var resp api.ListStatusesResponse
	for _, status := range statuses {
		resp.Statuses = append(resp.Statuses, api.Status{
			StartedAt: status.StartedAt,
			UpdatedAt: status.UpdatedAt,
			Ref:       status.Ref,
			Offset:    status.Offset,
			Total:     status.Total,
			Expected:  status.Expected,
		})
	}

	return &resp, nil
}

func (s *service) Write(session api.Content_WriteServer) (err error) {
	var (
		ctx      = session.Context()
		msg      api.WriteContentResponse
		req      *api.WriteContentRequest
		ref      string
		total    int64
		expected digest.Digest
	)

	defer func(msg *api.WriteContentResponse) {
		// pump through the last message if no error was encountered
		if err != nil {
			if s, ok := status.FromError(err); ok && s.Code() != codes.AlreadyExists {
				// TODO(stevvooe): Really need a log line here to track which
				// errors are actually causing failure on the server side. May want
				// to configure the service with an interceptor to make this work
				// identically across all GRPC methods.
				//
				// This is pretty noisy, so we can remove it but leave it for now.
				log.G(ctx).WithError(err).Error("(*service).Write failed")
			}

			return
		}

		err = session.Send(msg)
	}(&msg)

	// handle the very first request!
	req, err = session.Recv()
	if err != nil {
		return err
	}

	ref = req.Ref

	if ref == "" {
		return status.Errorf(codes.InvalidArgument, "first message must have a reference")
	}

	fields := logrus.Fields{
		"ref": ref,
	}
	total = req.Total
	expected = req.Expected
	if total > 0 {
		fields["total"] = total
	}

	if expected != "" {
		fields["expected"] = expected
	}

	ctx = log.WithLogger(ctx, log.G(ctx).WithFields(fields))

	log.G(ctx).Debug("(*service).Write started")
	// this action locks the writer for the session.
	wr, err := s.store.Writer(ctx,
		content.WithRef(ref),
		content.WithDescriptor(ocispec.Descriptor{Size: total, Digest: expected}))
	if err != nil {
		return errdefs.ToGRPC(err)
	}
	defer wr.Close()

	for {
		msg.Action = req.Action
		ws, err := wr.Status()
		if err != nil {
			return errdefs.ToGRPC(err)
		}

		msg.Offset = ws.Offset // always set the offset.

		// NOTE(stevvooe): In general, there are two cases underwhich a remote
		// writer is used.
		//
		// For pull, we almost always have this before fetching large content,
		// through descriptors. We allow predeclaration of the expected size
		// and digest.
		//
		// For push, it is more complex. If we want to cut through content into
		// storage, we may have no expectation until we are done processing the
		// content. The case here is the following:
		//
		// 1. Start writing content.
		// 2. Compress inline.
		// 3. Validate digest and size (maybe).
		//
		// Supporting these two paths is quite awkward but it lets both API
		// users use the same writer style for each with a minimum of overhead.
		if req.Expected != "" {
			if expected != "" && expected != req.Expected {
				log.G(ctx).Debugf("commit digest differs from writer digest: %v != %v", req.Expected, expected)
			}
			expected = req.Expected

			if _, err := s.store.Info(session.Context(), req.Expected); err == nil {
				if err := wr.Close(); err != nil {
					log.G(ctx).WithError(err).Error("failed to close writer")
				}
				if err := s.store.Abort(session.Context(), ref); err != nil {
					log.G(ctx).WithError(err).Error("failed to abort write")
				}

				return status.Errorf(codes.AlreadyExists, "blob with expected digest %v exists", req.Expected)
			}
		}

		if req.Total > 0 {
			// Update the expected total. Typically, this could be seen at
			// negotiation time or on a commit message.
			if total > 0 && req.Total != total {
				log.G(ctx).Debugf("commit size differs from writer size: %v != %v", req.Total, total)
			}
			total = req.Total
		}

		switch req.Action {
		case api.WriteActionStat:
			msg.Digest = wr.Digest()
			msg.StartedAt = ws.StartedAt
			msg.UpdatedAt = ws.UpdatedAt
			msg.Total = total
		case api.WriteActionWrite, api.WriteActionCommit:
			if req.Offset > 0 {
				// validate the offset if provided
				if req.Offset != ws.Offset {
					return status.Errorf(codes.OutOfRange, "write @%v must occur at current offset %v", req.Offset, ws.Offset)
				}
			}

			if req.Offset == 0 && ws.Offset > 0 {
				if err := wr.Truncate(req.Offset); err != nil {
					return errors.Wrapf(err, "truncate failed")
				}
				msg.Offset = req.Offset
			}

			// issue the write if we actually have data.
			if len(req.Data) > 0 {
				// While this looks like we could use io.WriterAt here, because we
				// maintain the offset as append only, we just issue the write.
				n, err := wr.Write(req.Data)
				if err != nil {
					return errdefs.ToGRPC(err)
				}

				if n != len(req.Data) {
					// TODO(stevvooe): Perhaps, we can recover this by including it
					// in the offset on the write return.
					return status.Errorf(codes.DataLoss, "wrote %v of %v bytes", n, len(req.Data))
				}

				msg.Offset += int64(n)
			}

			if req.Action == api.WriteActionCommit {
				var opts []content.Opt
				if req.Labels != nil {
					opts = append(opts, content.WithLabels(req.Labels))
				}
				if err := wr.Commit(ctx, total, expected, opts...); err != nil {
					return errdefs.ToGRPC(err)
				}
			}

			msg.Digest = wr.Digest()
		}

		if err := session.Send(&msg); err != nil {
			return err
		}

		req, err = session.Recv()
		if err != nil {
			if err == io.EOF {
				return nil
			}

			return err
		}
	}
}

func (s *service) Abort(ctx context.Context, req *api.AbortRequest) (*ptypes.Empty, error) {
	if err := s.store.Abort(ctx, req.Ref); err != nil {
		return nil, errdefs.ToGRPC(err)
	}

	return &ptypes.Empty{}, nil
}

func infoToGRPC(info content.Info) api.Info {
	return api.Info{
		Digest:    info.Digest,
		Size_:     info.Size,
		CreatedAt: info.CreatedAt,
		UpdatedAt: info.UpdatedAt,
		Labels:    info.Labels,
	}
}

func infoFromGRPC(info api.Info) content.Info {
	return content.Info{
		Digest:    info.Digest,
		Size:      info.Size_,
		CreatedAt: info.CreatedAt,
		UpdatedAt: info.UpdatedAt,
		Labels:    info.Labels,
	}
}
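Because the service now lives in its own package with a plain constructor, any content.Store can be exposed over GRPC without going through the plugin registry. A minimal sketch of serving a local store; the content root and listen address are illustrative:

package main

import (
	"log"
	"net"

	api "github.com/containerd/containerd/api/services/content/v1"
	"github.com/containerd/containerd/content/local"
	"github.com/containerd/containerd/services/content/contentserver"
	"google.golang.org/grpc"
)

func main() {
	store, err := local.NewStore("/tmp/content") // illustrative root
	if err != nil {
		log.Fatal(err)
	}

	srv := grpc.NewServer()
	// contentserver.New returns an api.ContentServer backed by the store.
	api.RegisterContentServer(srv, contentserver.New(store))

	l, err := net.Listen("tcp", "127.0.0.1:9000") // illustrative address
	if err != nil {
		log.Fatal(err)
	}
	log.Fatal(srv.Serve(l))
}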
vendor/github.com/containerd/containerd/services/content/service.go (generated, vendored, 446 changes)
@@ -17,39 +17,13 @@
 package content
 
 import (
-	"context"
-	"io"
-	"sync"
-
-	api "github.com/containerd/containerd/api/services/content/v1"
 	"github.com/containerd/containerd/content"
-	"github.com/containerd/containerd/errdefs"
-	"github.com/containerd/containerd/log"
 	"github.com/containerd/containerd/plugin"
 	"github.com/containerd/containerd/services"
-	ptypes "github.com/gogo/protobuf/types"
-	digest "github.com/opencontainers/go-digest"
-	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/containerd/containerd/services/content/contentserver"
 	"github.com/pkg/errors"
-	"github.com/sirupsen/logrus"
-	"google.golang.org/grpc"
-	"google.golang.org/grpc/codes"
-	"google.golang.org/grpc/status"
 )
 
-type service struct {
-	store content.Store
-}
-
-var bufPool = sync.Pool{
-	New: func() interface{} {
-		buffer := make([]byte, 1<<20)
-		return &buffer
-	},
-}
-
-var _ api.ContentServer = &service{}
-
 func init() {
 	plugin.Register(&plugin.Registration{
 		Type: plugin.GRPCPlugin,
@@ -70,423 +44,7 @@ func init() {
 			if err != nil {
 				return nil, err
 			}
-			return NewService(cs.(content.Store)), nil
+			return contentserver.New(cs.(content.Store)), nil
 		},
 	})
 }
|
||||||
|
|
||||||
// NewService returns the content GRPC server
|
|
||||||
func NewService(cs content.Store) api.ContentServer {
|
|
||||||
return &service{store: cs}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *service) Register(server *grpc.Server) error {
|
|
||||||
api.RegisterContentServer(server, s)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *service) Info(ctx context.Context, req *api.InfoRequest) (*api.InfoResponse, error) {
|
|
||||||
if err := req.Digest.Validate(); err != nil {
|
|
||||||
return nil, status.Errorf(codes.InvalidArgument, "%q failed validation", req.Digest)
|
|
||||||
}
|
|
||||||
|
|
||||||
bi, err := s.store.Info(ctx, req.Digest)
|
|
||||||
if err != nil {
|
|
||||||
return nil, errdefs.ToGRPC(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return &api.InfoResponse{
|
|
||||||
Info: infoToGRPC(bi),
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *service) Update(ctx context.Context, req *api.UpdateRequest) (*api.UpdateResponse, error) {
|
|
||||||
if err := req.Info.Digest.Validate(); err != nil {
|
|
||||||
return nil, status.Errorf(codes.InvalidArgument, "%q failed validation", req.Info.Digest)
|
|
||||||
}
|
|
||||||
|
|
||||||
info, err := s.store.Update(ctx, infoFromGRPC(req.Info), req.UpdateMask.GetPaths()...)
|
|
||||||
if err != nil {
|
|
||||||
return nil, errdefs.ToGRPC(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return &api.UpdateResponse{
|
|
||||||
Info: infoToGRPC(info),
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *service) List(req *api.ListContentRequest, session api.Content_ListServer) error {
|
|
||||||
var (
|
|
||||||
buffer []api.Info
|
|
||||||
sendBlock = func(block []api.Info) error {
|
|
||||||
// send last block
|
|
||||||
return session.Send(&api.ListContentResponse{
|
|
||||||
Info: block,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
)
|
|
||||||
|
|
||||||
if err := s.store.Walk(session.Context(), func(info content.Info) error {
|
|
||||||
buffer = append(buffer, api.Info{
|
|
||||||
Digest: info.Digest,
|
|
||||||
Size_: info.Size,
|
|
||||||
CreatedAt: info.CreatedAt,
|
|
||||||
Labels: info.Labels,
|
|
||||||
})
|
|
||||||
|
|
||||||
if len(buffer) >= 100 {
|
|
||||||
if err := sendBlock(buffer); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
buffer = buffer[:0]
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}, req.Filters...); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(buffer) > 0 {
|
|
||||||
// send last block
|
|
||||||
if err := sendBlock(buffer); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *service) Delete(ctx context.Context, req *api.DeleteContentRequest) (*ptypes.Empty, error) {
|
|
||||||
log.G(ctx).WithField("digest", req.Digest).Debugf("delete content")
|
|
||||||
if err := req.Digest.Validate(); err != nil {
|
|
||||||
return nil, status.Errorf(codes.InvalidArgument, err.Error())
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := s.store.Delete(ctx, req.Digest); err != nil {
|
|
||||||
return nil, errdefs.ToGRPC(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return &ptypes.Empty{}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *service) Read(req *api.ReadContentRequest, session api.Content_ReadServer) error {
|
|
||||||
if err := req.Digest.Validate(); err != nil {
|
|
||||||
return status.Errorf(codes.InvalidArgument, "%v: %v", req.Digest, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
oi, err := s.store.Info(session.Context(), req.Digest)
|
|
||||||
if err != nil {
|
|
||||||
return errdefs.ToGRPC(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
ra, err := s.store.ReaderAt(session.Context(), ocispec.Descriptor{Digest: req.Digest})
|
|
||||||
if err != nil {
|
|
||||||
return errdefs.ToGRPC(err)
|
|
||||||
}
|
|
||||||
defer ra.Close()
|
|
||||||
|
|
||||||
var (
|
|
||||||
offset = req.Offset
|
|
||||||
// size is read size, not the expected size of the blob (oi.Size), which the caller might not be aware of.
|
|
||||||
// offset+size can be larger than oi.Size.
|
|
||||||
size = req.Size_
|
|
||||||
|
|
||||||
// TODO(stevvooe): Using the global buffer pool. At 32KB, it is probably
|
|
||||||
// little inefficient for work over a fast network. We can tune this later.
|
|
||||||
p = bufPool.Get().(*[]byte)
|
|
||||||
)
|
|
||||||
defer bufPool.Put(p)
|
|
||||||
|
|
||||||
if offset < 0 {
|
|
||||||
offset = 0
|
|
||||||
}
|
|
||||||
|
|
||||||
if offset > oi.Size {
|
|
||||||
return status.Errorf(codes.OutOfRange, "read past object length %v bytes", oi.Size)
|
|
||||||
}
|
|
||||||
|
|
||||||
if size <= 0 || offset+size > oi.Size {
|
|
||||||
size = oi.Size - offset
|
|
||||||
}
|
|
||||||
|
|
||||||
_, err = io.CopyBuffer(
|
|
||||||
&readResponseWriter{session: session},
|
|
||||||
io.NewSectionReader(ra, offset, size), *p)
|
|
||||||
return errdefs.ToGRPC(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// readResponseWriter is a writer that places the output into ReadContentRequest messages.
|
|
||||||
//
|
|
||||||
// This allows io.CopyBuffer to do the heavy lifting of chunking the responses
|
|
||||||
// into the buffer size.
|
|
||||||
type readResponseWriter struct {
|
|
||||||
offset int64
|
|
||||||
session api.Content_ReadServer
|
|
||||||
}
|
|
||||||
|
|
||||||
func (rw *readResponseWriter) Write(p []byte) (n int, err error) {
|
|
||||||
if err := rw.session.Send(&api.ReadContentResponse{
|
|
||||||
Offset: rw.offset,
|
|
||||||
Data: p,
|
|
||||||
}); err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
|
|
||||||
rw.offset += int64(len(p))
|
|
||||||
return len(p), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *service) Status(ctx context.Context, req *api.StatusRequest) (*api.StatusResponse, error) {
|
|
||||||
status, err := s.store.Status(ctx, req.Ref)
|
|
||||||
if err != nil {
|
|
||||||
return nil, errdefs.ToGRPCf(err, "could not get status for ref %q", req.Ref)
|
|
||||||
}
|
|
||||||
|
|
||||||
var resp api.StatusResponse
|
|
||||||
resp.Status = &api.Status{
|
|
||||||
StartedAt: status.StartedAt,
|
|
||||||
UpdatedAt: status.UpdatedAt,
|
|
||||||
Ref: status.Ref,
|
|
||||||
Offset: status.Offset,
|
|
||||||
Total: status.Total,
|
|
||||||
Expected: status.Expected,
|
|
||||||
}
|
|
||||||
|
|
||||||
return &resp, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *service) ListStatuses(ctx context.Context, req *api.ListStatusesRequest) (*api.ListStatusesResponse, error) {
|
|
||||||
statuses, err := s.store.ListStatuses(ctx, req.Filters...)
|
|
||||||
if err != nil {
|
|
||||||
return nil, errdefs.ToGRPC(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
var resp api.ListStatusesResponse
|
|
||||||
for _, status := range statuses {
|
|
||||||
resp.Statuses = append(resp.Statuses, api.Status{
|
|
||||||
StartedAt: status.StartedAt,
|
|
||||||
UpdatedAt: status.UpdatedAt,
|
|
||||||
Ref: status.Ref,
|
|
||||||
Offset: status.Offset,
|
|
||||||
Total: status.Total,
|
|
||||||
Expected: status.Expected,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
return &resp, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *service) Write(session api.Content_WriteServer) (err error) {
|
|
||||||
var (
|
|
||||||
ctx = session.Context()
|
|
||||||
msg api.WriteContentResponse
|
|
||||||
req *api.WriteContentRequest
|
|
||||||
ref string
|
|
||||||
total int64
|
|
||||||
expected digest.Digest
|
|
||||||
)
|
|
||||||
|
|
||||||
defer func(msg *api.WriteContentResponse) {
|
|
||||||
// pump through the last message if no error was encountered
|
|
||||||
if err != nil {
|
|
||||||
if s, ok := status.FromError(err); ok && s.Code() != codes.AlreadyExists {
|
|
||||||
// TODO(stevvooe): Really need a log line here to track which
|
|
||||||
// errors are actually causing failure on the server side. May want
|
|
||||||
// to configure the service with an interceptor to make this work
|
|
||||||
// identically across all GRPC methods.
|
|
||||||
//
|
|
||||||
// This is pretty noisy, so we can remove it but leave it for now.
|
|
||||||
log.G(ctx).WithError(err).Error("(*service).Write failed")
|
|
||||||
}
|
|
||||||
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
err = session.Send(msg)
|
|
||||||
}(&msg)
|
|
||||||
|
|
||||||
// handle the very first request!
|
|
||||||
req, err = session.Recv()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
ref = req.Ref
|
|
||||||
|
|
||||||
if ref == "" {
|
|
||||||
return status.Errorf(codes.InvalidArgument, "first message must have a reference")
|
|
||||||
}
|
|
||||||
|
|
||||||
fields := logrus.Fields{
|
|
||||||
"ref": ref,
|
|
||||||
}
|
|
||||||
total = req.Total
|
|
||||||
expected = req.Expected
|
|
||||||
if total > 0 {
|
|
||||||
fields["total"] = total
|
|
||||||
}
|
|
||||||
|
|
||||||
if expected != "" {
|
|
||||||
fields["expected"] = expected
|
|
||||||
}
|
|
||||||
|
|
||||||
ctx = log.WithLogger(ctx, log.G(ctx).WithFields(fields))
|
|
||||||
|
|
||||||
log.G(ctx).Debug("(*service).Write started")
|
|
||||||
// this action locks the writer for the session.
|
|
||||||
wr, err := s.store.Writer(ctx,
|
|
||||||
content.WithRef(ref),
|
|
||||||
content.WithDescriptor(ocispec.Descriptor{Size: total, Digest: expected}))
|
|
||||||
if err != nil {
|
|
||||||
return errdefs.ToGRPC(err)
|
|
||||||
}
|
|
||||||
defer wr.Close()
|
|
||||||
|
|
||||||
for {
|
|
||||||
msg.Action = req.Action
|
|
||||||
ws, err := wr.Status()
|
|
||||||
if err != nil {
|
|
||||||
return errdefs.ToGRPC(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
msg.Offset = ws.Offset // always set the offset.
|
|
||||||
|
|
||||||
// NOTE(stevvooe): In general, there are two cases underwhich a remote
|
|
||||||
// writer is used.
|
|
||||||
//
|
|
||||||
// For pull, we almost always have this before fetching large content,
|
|
||||||
// through descriptors. We allow predeclaration of the expected size
|
|
||||||
// and digest.
|
|
||||||
//
|
|
||||||
// For push, it is more complex. If we want to cut through content into
|
|
||||||
// storage, we may have no expectation until we are done processing the
|
|
||||||
// content. The case here is the following:
|
|
||||||
//
|
|
||||||
// 1. Start writing content.
|
|
||||||
// 2. Compress inline.
|
|
||||||
// 3. Validate digest and size (maybe).
|
|
||||||
//
|
|
||||||
// Supporting these two paths is quite awkward but it lets both API
|
|
||||||
// users use the same writer style for each with a minimum of overhead.
|
|
||||||
if req.Expected != "" {
|
|
||||||
if expected != "" && expected != req.Expected {
|
|
||||||
log.G(ctx).Debugf("commit digest differs from writer digest: %v != %v", req.Expected, expected)
|
|
||||||
}
|
|
||||||
expected = req.Expected
|
|
||||||
|
|
||||||
if _, err := s.store.Info(session.Context(), req.Expected); err == nil {
|
|
||||||
if err := wr.Close(); err != nil {
|
|
||||||
log.G(ctx).WithError(err).Error("failed to close writer")
|
|
||||||
}
|
|
||||||
if err := s.store.Abort(session.Context(), ref); err != nil {
|
|
||||||
log.G(ctx).WithError(err).Error("failed to abort write")
|
|
||||||
}
|
|
||||||
|
|
||||||
return status.Errorf(codes.AlreadyExists, "blob with expected digest %v exists", req.Expected)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if req.Total > 0 {
|
|
||||||
// Update the expected total. Typically, this could be seen at
|
|
||||||
// negotiation time or on a commit message.
|
|
||||||
if total > 0 && req.Total != total {
|
|
||||||
log.G(ctx).Debugf("commit size differs from writer size: %v != %v", req.Total, total)
|
|
||||||
}
|
|
||||||
total = req.Total
|
|
||||||
}
|
|
||||||
|
|
||||||
switch req.Action {
|
|
||||||
case api.WriteActionStat:
|
|
||||||
msg.Digest = wr.Digest()
|
|
||||||
msg.StartedAt = ws.StartedAt
|
|
||||||
msg.UpdatedAt = ws.UpdatedAt
|
|
||||||
msg.Total = total
|
|
||||||
case api.WriteActionWrite, api.WriteActionCommit:
|
|
||||||
if req.Offset > 0 {
|
|
||||||
// validate the offset if provided
|
|
||||||
if req.Offset != ws.Offset {
|
|
||||||
return status.Errorf(codes.OutOfRange, "write @%v must occur at current offset %v", req.Offset, ws.Offset)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if req.Offset == 0 && ws.Offset > 0 {
|
|
||||||
if err := wr.Truncate(req.Offset); err != nil {
|
|
||||||
return errors.Wrapf(err, "truncate failed")
|
|
||||||
}
|
|
||||||
msg.Offset = req.Offset
|
|
||||||
}
|
|
||||||
|
|
||||||
// issue the write if we actually have data.
|
|
||||||
if len(req.Data) > 0 {
|
|
||||||
// While this looks like we could use io.WriterAt here, because we
|
|
||||||
// maintain the offset as append only, we just issue the write.
|
|
||||||
n, err := wr.Write(req.Data)
|
|
||||||
if err != nil {
|
|
||||||
return errdefs.ToGRPC(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if n != len(req.Data) {
|
|
||||||
// TODO(stevvooe): Perhaps, we can recover this by including it
|
|
||||||
// in the offset on the write return.
|
|
||||||
return status.Errorf(codes.DataLoss, "wrote %v of %v bytes", n, len(req.Data))
|
|
||||||
}
|
|
||||||
|
|
||||||
msg.Offset += int64(n)
|
|
||||||
}
|
|
||||||
|
|
||||||
if req.Action == api.WriteActionCommit {
|
|
||||||
var opts []content.Opt
|
|
||||||
if req.Labels != nil {
|
|
||||||
opts = append(opts, content.WithLabels(req.Labels))
|
|
||||||
}
|
|
||||||
if err := wr.Commit(ctx, total, expected, opts...); err != nil {
|
|
||||||
return errdefs.ToGRPC(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
msg.Digest = wr.Digest()
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := session.Send(&msg); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
req, err = session.Recv()
|
|
||||||
if err != nil {
|
|
||||||
if err == io.EOF {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *service) Abort(ctx context.Context, req *api.AbortRequest) (*ptypes.Empty, error) {
|
|
||||||
if err := s.store.Abort(ctx, req.Ref); err != nil {
|
|
||||||
return nil, errdefs.ToGRPC(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return &ptypes.Empty{}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func infoToGRPC(info content.Info) api.Info {
|
|
||||||
return api.Info{
|
|
||||||
Digest: info.Digest,
|
|
||||||
Size_: info.Size,
|
|
||||||
CreatedAt: info.CreatedAt,
|
|
||||||
UpdatedAt: info.UpdatedAt,
|
|
||||||
Labels: info.Labels,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func infoFromGRPC(info api.Info) content.Info {
|
|
||||||
return content.Info{
|
|
||||||
Digest: info.Digest,
|
|
||||||
Size: info.Size_,
|
|
||||||
CreatedAt: info.CreatedAt,
|
|
||||||
UpdatedAt: info.UpdatedAt,
|
|
||||||
Labels: info.Labels,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
vendor/github.com/containerd/containerd/services/diff/local.go (generated, vendored, 2 changes)
@@ -167,6 +167,7 @@ func toDescriptor(d *types.Descriptor) ocispec.Descriptor {
 		MediaType: d.MediaType,
 		Digest:    d.Digest,
 		Size:      d.Size_,
+		Annotations: d.Annotations,
 	}
 }
 
@@ -175,5 +176,6 @@ func fromDescriptor(d ocispec.Descriptor) *types.Descriptor {
 		MediaType: d.MediaType,
 		Digest:    d.Digest,
 		Size_:     d.Size,
+		Annotations: d.Annotations,
 	}
 }
vendor/github.com/containerd/containerd/services/images/helpers.go (generated, vendored, 2 changes)
@@ -58,6 +58,7 @@ func descFromProto(desc *types.Descriptor) ocispec.Descriptor {
 		MediaType: desc.MediaType,
 		Size:      desc.Size_,
 		Digest:    desc.Digest,
+		Annotations: desc.Annotations,
 	}
 }
 
@@ -66,5 +67,6 @@ func descToProto(desc *ocispec.Descriptor) types.Descriptor {
 		MediaType: desc.MediaType,
 		Size_:     desc.Size,
 		Digest:    desc.Digest,
+		Annotations: desc.Annotations,
 	}
 }
vendor/github.com/containerd/containerd/services/server/server.go (generated, vendored, 5 changes)
@ -43,6 +43,7 @@ import (
|
|||||||
srvconfig "github.com/containerd/containerd/services/server/config"
|
srvconfig "github.com/containerd/containerd/services/server/config"
|
||||||
"github.com/containerd/containerd/snapshots"
|
"github.com/containerd/containerd/snapshots"
|
||||||
ssproxy "github.com/containerd/containerd/snapshots/proxy"
|
ssproxy "github.com/containerd/containerd/snapshots/proxy"
|
||||||
|
"github.com/containerd/containerd/sys"
|
||||||
metrics "github.com/docker/go-metrics"
|
metrics "github.com/docker/go-metrics"
|
||||||
grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
|
grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
@ -61,10 +62,10 @@ func CreateTopLevelDirectories(config *srvconfig.Config) error {
|
|||||||
return errors.New("root and state must be different paths")
|
return errors.New("root and state must be different paths")
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := os.MkdirAll(config.Root, 0711); err != nil {
|
if err := sys.MkdirAllWithACL(config.Root, 0711); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if err := os.MkdirAll(config.State, 0711); err != nil {
|
if err := sys.MkdirAllWithACL(config.State, 0711); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
|
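The switch from os.MkdirAll to sys.MkdirAllWithACL makes creation of the root and state directories platform-aware: the Unix implementation added further down is a plain os.MkdirAll, while the Windows one ACLs the directories for Builtin Administrators and Local System. A minimal sketch of the same call, assuming the vendored sys package and hypothetical paths:

    package main

    import (
    	"log"

    	"github.com/containerd/containerd/sys"
    )

    func main() {
    	// Hypothetical root/state paths; containerd derives these from its config.
    	for _, dir := range []string{"/var/lib/containerd-test", "/run/containerd-test"} {
    		// On Unix this falls through to os.MkdirAll; on Windows the mode is
    		// ignored and the directory gets the Administrators+System ACL.
    		if err := sys.MkdirAllWithACL(dir, 0711); err != nil {
    			log.Fatalf("creating %s: %v", dir, err)
    		}
    	}
    }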
2  vendor/github.com/containerd/containerd/services/tasks/local.go  generated vendored
@@ -147,6 +147,7 @@ func (l *local) Create(ctx context.Context, r *api.CreateTaskRequest, _ ...grpc.
 			MediaType: r.Checkpoint.MediaType,
 			Digest:    r.Checkpoint.Digest,
 			Size:      r.Checkpoint.Size_,
+			Annotations: r.Checkpoint.Annotations,
 		})
 		if err != nil {
 			return nil, err
@@ -628,6 +629,7 @@ func (l *local) writeContent(ctx context.Context, mediaType, ref string, r io.Re
 		MediaType: mediaType,
 		Digest:    writer.Digest(),
 		Size_:     size,
+		Annotations: make(map[string]string),
 	}, nil
 }
 
5  vendor/github.com/containerd/containerd/sys/filesys_unix.go  generated vendored
@@ -24,3 +24,8 @@ import "os"
 func ForceRemoveAll(path string) error {
 	return os.RemoveAll(path)
 }
+
+// MkdirAllWithACL is a wrapper for os.MkdirAll on Unix systems.
+func MkdirAllWithACL(path string, perm os.FileMode) error {
+	return os.MkdirAll(path, perm)
+}
10  vendor/github.com/containerd/containerd/sys/filesys_windows.go  generated vendored
@@ -30,6 +30,11 @@ import (
 	"github.com/Microsoft/hcsshim"
 )
 
+const (
+	// SddlAdministratorsLocalSystem is local administrators plus NT AUTHORITY\System
+	SddlAdministratorsLocalSystem = "D:P(A;OICI;GA;;;BA)(A;OICI;GA;;;SY)"
+)
+
 // MkdirAllWithACL is a wrapper for MkdirAll that creates a directory
 // ACL'd for Builtin Administrators and Local System.
 func MkdirAllWithACL(path string, perm os.FileMode) error {
@@ -78,7 +83,7 @@ func mkdirall(path string, adminAndLocalSystem bool) error {
 
 	if j > 1 {
 		// Create parent
-		err = mkdirall(path[0:j-1], false)
+		err = mkdirall(path[0:j-1], adminAndLocalSystem)
 		if err != nil {
 			return err
 		}
@@ -112,8 +117,7 @@ func mkdirall(path string, adminAndLocalSystem bool) error {
 // and Local System.
 func mkdirWithACL(name string) error {
 	sa := syscall.SecurityAttributes{Length: 0}
-	sddl := "D:P(A;OICI;GA;;;BA)(A;OICI;GA;;;SY)"
-	sd, err := winio.SddlToSecurityDescriptor(sddl)
+	sd, err := winio.SddlToSecurityDescriptor(SddlAdministratorsLocalSystem)
 	if err != nil {
 		return &os.PathError{Op: "mkdir", Path: name, Err: err}
 	}
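The SDDL string now lives behind a named constant. Read left to right it declares a protected DACL (D:P) with two ACEs granting generic-all access, inheritable by child objects and containers (OICI;GA), to Builtin Administrators (BA) and Local System (SY). A minimal Windows-only sketch of turning that SDDL into a binary security descriptor with go-winio, assuming the vendored winio package:

    // +build windows

    package main

    import (
    	"fmt"
    	"log"

    	winio "github.com/Microsoft/go-winio"
    )

    // Same SDDL the vendored sys package now exposes as SddlAdministratorsLocalSystem.
    const sddlAdminsLocalSystem = "D:P(A;OICI;GA;;;BA)(A;OICI;GA;;;SY)"

    func main() {
    	// SddlToSecurityDescriptor returns the self-relative binary form that
    	// mkdirWithACL hands to CreateDirectory via SECURITY_ATTRIBUTES.
    	sd, err := winio.SddlToSecurityDescriptor(sddlAdminsLocalSystem)
    	if err != nil {
    		log.Fatal(err)
    	}
    	fmt.Printf("security descriptor is %d bytes\n", len(sd))
    }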
4  vendor/github.com/containerd/containerd/task.go  generated vendored
@@ -521,6 +521,9 @@ func (t *task) Update(ctx context.Context, opts ...UpdateTaskOpts) error {
 }
 
 func (t *task) LoadProcess(ctx context.Context, id string, ioAttach cio.Attach) (Process, error) {
+	if id == t.id && ioAttach == nil {
+		return t, nil
+	}
 	response, err := t.client.TaskService().Get(ctx, &tasks.GetRequest{
 		ContainerID: t.id,
 		ExecID:      id,
@@ -582,6 +585,7 @@ func (t *task) checkpointTask(ctx context.Context, index *v1.Index, request *tas
 			OS:           goruntime.GOOS,
 			Architecture: goruntime.GOARCH,
 		},
+		Annotations: d.Annotations,
 		})
 	}
 	return nil
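With the new short-circuit, asking a task for its own primary process with no IO attach no longer requires a round trip to the task service. A client-side sketch of the call, assuming a local containerd socket and an existing container ID (both hypothetical):

    package main

    import (
    	"context"
    	"log"

    	"github.com/containerd/containerd"
    	"github.com/containerd/containerd/namespaces"
    )

    func main() {
    	client, err := containerd.New("/run/containerd/containerd.sock")
    	if err != nil {
    		log.Fatal(err)
    	}
    	defer client.Close()

    	ctx := namespaces.WithNamespace(context.Background(), "default")

    	container, err := client.LoadContainer(ctx, "example-container") // hypothetical ID
    	if err != nil {
    		log.Fatal(err)
    	}
    	task, err := container.Task(ctx, nil)
    	if err != nil {
    		log.Fatal(err)
    	}

    	// Passing the task's own ID with a nil IO attacher now returns the
    	// task itself without hitting the task service.
    	proc, err := task.LoadProcess(ctx, container.ID(), nil)
    	if err != nil {
    		log.Fatal(err)
    	}
    	log.Printf("loaded process %s", proc.ID())
    }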
1  vendor/github.com/containerd/containerd/task_opts.go  generated vendored
@@ -62,6 +62,7 @@ func WithTaskCheckpoint(im Image) NewTaskOpts {
 				MediaType: m.MediaType,
 				Size_:     m.Size,
 				Digest:    m.Digest,
+				Annotations: m.Annotations,
 			}
 			return nil
 		}
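WithTaskCheckpoint now forwards the checkpoint descriptor's annotations into the create request as well. A hedged sketch of restoring a task from a checkpoint image, assuming an already-created container and a checkpoint image reference (both hypothetical):

    package main

    import (
    	"context"
    	"log"

    	"github.com/containerd/containerd"
    	"github.com/containerd/containerd/cio"
    	"github.com/containerd/containerd/namespaces"
    )

    func main() {
    	client, err := containerd.New("/run/containerd/containerd.sock")
    	if err != nil {
    		log.Fatal(err)
    	}
    	defer client.Close()

    	ctx := namespaces.WithNamespace(context.Background(), "default")

    	container, err := client.LoadContainer(ctx, "example-container") // hypothetical ID
    	if err != nil {
    		log.Fatal(err)
    	}
    	checkpoint, err := client.GetImage(ctx, "example.com/checkpoints/example:latest") // hypothetical ref
    	if err != nil {
    		log.Fatal(err)
    	}

    	// WithTaskCheckpoint looks up the checkpoint descriptor in the image
    	// and attaches it (annotations included) to the new task request.
    	task, err := container.NewTask(ctx, cio.NewCreator(cio.WithStdio),
    		containerd.WithTaskCheckpoint(checkpoint))
    	if err != nil {
    		log.Fatal(err)
    	}
    	defer task.Delete(ctx)

    	if err := task.Start(ctx); err != nil {
    		log.Fatal(err)
    	}
    }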
4  vendor/github.com/containerd/containerd/vendor.conf  generated vendored
@@ -33,8 +33,8 @@ github.com/opencontainers/image-spec v1.0.1
 golang.org/x/sync 42b317875d0fa942474b76e1b46a6060d720ae6e
 github.com/BurntSushi/toml a368813c5e648fee92e5f6c30e3944ff9d5e8895
 github.com/grpc-ecosystem/go-grpc-prometheus 6b7015e65d366bf3f19b2b2a000a831940f0f7e0
-github.com/Microsoft/go-winio v0.4.12
-github.com/Microsoft/hcsshim v0.8.5
+github.com/Microsoft/go-winio c599b533b43b1363d7d7c6cfda5ede70ed73ff13
+github.com/Microsoft/hcsshim 8abdbb8205e4192c68b5f84c31197156f31be517
 google.golang.org/genproto d80a6e20e776b0b17a324d0ba1ab50a39c8e8944
 golang.org/x/text 19e51611da83d6be54ddafce4a4af510cb3e9ea4
 github.com/containerd/ttrpc f02858b1457c5ca3aaec3a0803eb0d59f96e41d6