Merge pull request #7320 from dmcgowan/transfer-service

Transfer service
Phil Estes 2022-11-30 18:51:16 -08:00 committed by GitHub
commit ae6c244995
53 changed files with 7316 additions and 21 deletions


@@ -4972,6 +4972,35 @@ file {
}
syntax: "proto3"
}
file {
name: "github.com/containerd/containerd/api/services/streaming/v1/streaming.proto"
package: "containerd.services.streaming.v1"
dependency: "google/protobuf/any.proto"
message_type {
name: "StreamInit"
field {
name: "id"
number: 1
label: LABEL_OPTIONAL
type: TYPE_STRING
json_name: "id"
}
}
service {
name: "Streaming"
method {
name: "Stream"
input_type: ".google.protobuf.Any"
output_type: ".google.protobuf.Any"
client_streaming: true
server_streaming: true
}
}
options {
go_package: "github.com/containerd/containerd/api/services/streaming/v1;streaming"
}
syntax: "proto3"
}
file {
name: "github.com/containerd/containerd/api/types/metrics.proto"
package: "containerd.types"
@@ -5653,6 +5682,61 @@ file {
}
syntax: "proto3"
}
file {
name: "github.com/containerd/containerd/api/services/transfer/v1/transfer.proto"
package: "containerd.services.transfer.v1"
dependency: "google/protobuf/any.proto"
dependency: "google/protobuf/empty.proto"
message_type {
name: "TransferRequest"
field {
name: "source"
number: 1
label: LABEL_OPTIONAL
type: TYPE_MESSAGE
type_name: ".google.protobuf.Any"
json_name: "source"
}
field {
name: "destination"
number: 2
label: LABEL_OPTIONAL
type: TYPE_MESSAGE
type_name: ".google.protobuf.Any"
json_name: "destination"
}
field {
name: "options"
number: 3
label: LABEL_OPTIONAL
type: TYPE_MESSAGE
type_name: ".containerd.services.transfer.v1.TransferOptions"
json_name: "options"
}
}
message_type {
name: "TransferOptions"
field {
name: "progress_stream"
number: 1
label: LABEL_OPTIONAL
type: TYPE_STRING
json_name: "progressStream"
}
}
service {
name: "Transfer"
method {
name: "Transfer"
input_type: ".containerd.services.transfer.v1.TransferRequest"
output_type: ".google.protobuf.Empty"
}
}
options {
go_package: "github.com/containerd/containerd/api/services/transfer/v1;transfer"
}
syntax: "proto3"
}
file {
name: "github.com/containerd/containerd/api/services/ttrpc/events/v1/events.proto"
package: "containerd.services.events.ttrpc.v1"
@@ -5754,3 +5838,390 @@ file {
}
syntax: "proto3"
}
file {
name: "github.com/containerd/containerd/api/types/transfer/imagestore.proto"
package: "containerd.types.transfer"
dependency: "github.com/containerd/containerd/api/types/platform.proto"
message_type {
name: "ImageStore"
field {
name: "name"
number: 1
label: LABEL_OPTIONAL
type: TYPE_STRING
json_name: "name"
}
field {
name: "labels"
number: 2
label: LABEL_REPEATED
type: TYPE_MESSAGE
type_name: ".containerd.types.transfer.ImageStore.LabelsEntry"
json_name: "labels"
}
field {
name: "platforms"
number: 3
label: LABEL_REPEATED
type: TYPE_MESSAGE
type_name: ".containerd.types.Platform"
json_name: "platforms"
}
field {
name: "all_metadata"
number: 4
label: LABEL_OPTIONAL
type: TYPE_BOOL
json_name: "allMetadata"
}
field {
name: "manifest_limit"
number: 5
label: LABEL_OPTIONAL
type: TYPE_UINT32
json_name: "manifestLimit"
}
field {
name: "prefix"
number: 6
label: LABEL_OPTIONAL
type: TYPE_STRING
json_name: "prefix"
}
field {
name: "check_prefix"
number: 7
label: LABEL_OPTIONAL
type: TYPE_BOOL
json_name: "checkPrefix"
}
field {
name: "digest_refs"
number: 8
label: LABEL_OPTIONAL
type: TYPE_BOOL
json_name: "digestRefs"
}
field {
name: "always_digest"
number: 9
label: LABEL_OPTIONAL
type: TYPE_BOOL
json_name: "alwaysDigest"
}
field {
name: "unpacks"
number: 10
label: LABEL_REPEATED
type: TYPE_MESSAGE
type_name: ".containerd.types.transfer.UnpackConfiguration"
json_name: "unpacks"
}
nested_type {
name: "LabelsEntry"
field {
name: "key"
number: 1
label: LABEL_OPTIONAL
type: TYPE_STRING
json_name: "key"
}
field {
name: "value"
number: 2
label: LABEL_OPTIONAL
type: TYPE_STRING
json_name: "value"
}
options {
map_entry: true
}
}
}
message_type {
name: "UnpackConfiguration"
field {
name: "platform"
number: 1
label: LABEL_OPTIONAL
type: TYPE_MESSAGE
type_name: ".containerd.types.Platform"
json_name: "platform"
}
field {
name: "snapshotter"
number: 2
label: LABEL_OPTIONAL
type: TYPE_STRING
json_name: "snapshotter"
}
}
options {
go_package: "github.com/containerd/containerd/api/types/transfer"
}
syntax: "proto3"
}
file {
name: "github.com/containerd/containerd/api/types/transfer/importexport.proto"
package: "containerd.types.transfer"
message_type {
name: "ImageImportStream"
field {
name: "stream"
number: 1
label: LABEL_OPTIONAL
type: TYPE_STRING
json_name: "stream"
}
field {
name: "media_type"
number: 2
label: LABEL_OPTIONAL
type: TYPE_STRING
json_name: "mediaType"
}
field {
name: "force_compress"
number: 3
label: LABEL_OPTIONAL
type: TYPE_BOOL
json_name: "forceCompress"
}
}
message_type {
name: "ImageExportStream"
field {
name: "stream"
number: 1
label: LABEL_OPTIONAL
type: TYPE_STRING
json_name: "stream"
}
field {
name: "media_type"
number: 2
label: LABEL_OPTIONAL
type: TYPE_STRING
json_name: "mediaType"
}
}
options {
go_package: "github.com/containerd/containerd/api/types/transfer"
}
syntax: "proto3"
}
file {
name: "github.com/containerd/containerd/api/types/transfer/progress.proto"
package: "containerd.types.transfer"
message_type {
name: "Progress"
field {
name: "event"
number: 1
label: LABEL_OPTIONAL
type: TYPE_STRING
json_name: "event"
}
field {
name: "name"
number: 2
label: LABEL_OPTIONAL
type: TYPE_STRING
json_name: "name"
}
field {
name: "parents"
number: 3
label: LABEL_REPEATED
type: TYPE_STRING
json_name: "parents"
}
field {
name: "progress"
number: 4
label: LABEL_OPTIONAL
type: TYPE_INT64
json_name: "progress"
}
field {
name: "total"
number: 5
label: LABEL_OPTIONAL
type: TYPE_INT64
json_name: "total"
}
}
options {
go_package: "github.com/containerd/containerd/api/types/transfer"
}
syntax: "proto3"
}
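
The Progress message above is the unit of progress reporting for transfer operations: an event label, the object name, parent references, and current/total counts. As a rough sketch, assuming the generated Go type for containerd.types.transfer.Progress in github.com/containerd/containerd/api/types/transfer, a consumer receiving these as google.protobuf.Any payloads (for example over a progress stream named in TransferOptions) could unpack them like this:

package main

import (
	"fmt"
	"log"

	transfertypes "github.com/containerd/containerd/api/types/transfer"
	"google.golang.org/protobuf/types/known/anypb"
)

func main() {
	// Pack a Progress event the way a sender would (values are illustrative).
	evt, err := anypb.New(&transfertypes.Progress{
		Event:    "downloading",
		Name:     "layer-0",
		Progress: 1 << 20,
		Total:    10 << 20,
	})
	if err != nil {
		log.Fatal(err)
	}

	// Unpack it the way a progress consumer would.
	var p transfertypes.Progress
	if err := evt.UnmarshalTo(&p); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s %s: %d/%d bytes\n", p.Event, p.Name, p.Progress, p.Total)
}
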
file {
name: "github.com/containerd/containerd/api/types/transfer/registry.proto"
package: "containerd.types.transfer"
dependency: "google/protobuf/timestamp.proto"
message_type {
name: "OCIRegistry"
field {
name: "reference"
number: 1
label: LABEL_OPTIONAL
type: TYPE_STRING
json_name: "reference"
}
field {
name: "resolver"
number: 2
label: LABEL_OPTIONAL
type: TYPE_MESSAGE
type_name: ".containerd.types.transfer.RegistryResolver"
json_name: "resolver"
}
}
message_type {
name: "RegistryResolver"
field {
name: "auth_stream"
number: 1
label: LABEL_OPTIONAL
type: TYPE_STRING
json_name: "authStream"
}
field {
name: "headers"
number: 2
label: LABEL_REPEATED
type: TYPE_MESSAGE
type_name: ".containerd.types.transfer.RegistryResolver.HeadersEntry"
json_name: "headers"
}
nested_type {
name: "HeadersEntry"
field {
name: "key"
number: 1
label: LABEL_OPTIONAL
type: TYPE_STRING
json_name: "key"
}
field {
name: "value"
number: 2
label: LABEL_OPTIONAL
type: TYPE_STRING
json_name: "value"
}
options {
map_entry: true
}
}
}
message_type {
name: "AuthRequest"
field {
name: "host"
number: 1
label: LABEL_OPTIONAL
type: TYPE_STRING
json_name: "host"
}
field {
name: "reference"
number: 2
label: LABEL_OPTIONAL
type: TYPE_STRING
json_name: "reference"
}
field {
name: "wwwauthenticate"
number: 3
label: LABEL_REPEATED
type: TYPE_STRING
json_name: "wwwauthenticate"
}
}
message_type {
name: "AuthResponse"
field {
name: "authType"
number: 1
label: LABEL_OPTIONAL
type: TYPE_ENUM
type_name: ".containerd.types.transfer.AuthType"
json_name: "authType"
}
field {
name: "secret"
number: 2
label: LABEL_OPTIONAL
type: TYPE_STRING
json_name: "secret"
}
field {
name: "username"
number: 3
label: LABEL_OPTIONAL
type: TYPE_STRING
json_name: "username"
}
field {
name: "expire_at"
number: 4
label: LABEL_OPTIONAL
type: TYPE_MESSAGE
type_name: ".google.protobuf.Timestamp"
json_name: "expireAt"
}
}
enum_type {
name: "AuthType"
value {
name: "NONE"
number: 0
}
value {
name: "CREDENTIALS"
number: 1
}
value {
name: "REFRESH"
number: 2
}
value {
name: "HEADER"
number: 3
}
}
options {
go_package: "github.com/containerd/containerd/api/types/transfer"
}
syntax: "proto3"
}
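
The registry descriptor above introduces OCIRegistry/RegistryResolver as a transfer source or destination, plus the AuthRequest/AuthResponse pair that flows over the resolver's auth_stream. The sketch below assumes standard protoc-gen-go naming for these types (for example AuthType_CREDENTIALS), since their generated Go file is not part of this excerpt; it shows a credential helper answering an AuthRequest with static placeholder credentials:

package main

import (
	"fmt"
	"log"

	transfertypes "github.com/containerd/containerd/api/types/transfer"
	"google.golang.org/protobuf/types/known/anypb"
)

// answerAuth builds the AuthResponse a credential helper could send back
// over the auth stream when the daemon asks for registry credentials.
// The username/secret values here are placeholders.
func answerAuth(req *transfertypes.AuthRequest) (*anypb.Any, error) {
	fmt.Printf("auth requested for host %q (ref %q)\n", req.Host, req.Reference)
	return anypb.New(&transfertypes.AuthResponse{
		AuthType: transfertypes.AuthType_CREDENTIALS,
		Username: "example-user",
		Secret:   "example-token",
	})
}

func main() {
	resp, err := answerAuth(&transfertypes.AuthRequest{
		Host:      "registry.example.com",
		Reference: "registry.example.com/library/busybox:latest",
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("response type:", resp.TypeUrl)
}
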
file {
name: "github.com/containerd/containerd/api/types/transfer/streaming.proto"
package: "containerd.types.transfer"
message_type {
name: "Data"
field {
name: "data"
number: 1
label: LABEL_OPTIONAL
type: TYPE_BYTES
json_name: "data"
}
}
message_type {
name: "WindowUpdate"
field {
name: "update"
number: 1
label: LABEL_OPTIONAL
type: TYPE_INT32
json_name: "update"
}
}
options {
go_package: "github.com/containerd/containerd/api/types/transfer"
}
syntax: "proto3"
}
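
Data and WindowUpdate above are the low-level framing messages for moving raw bytes over the generic streaming service; the update counter looks like a receiver-driven flow-control credit, similar in spirit to an HTTP/2 window update. A minimal sketch of packing both into google.protobuf.Any payloads ready for a Streaming.Stream call, assuming the generated Go types in api/types/transfer:

package main

import (
	"fmt"
	"log"

	transfertypes "github.com/containerd/containerd/api/types/transfer"
	"google.golang.org/protobuf/types/known/anypb"
)

// frameChunk wraps one chunk of raw bytes in a Data message and packs it
// into an Any, the payload type of the Streaming.Stream RPC.
func frameChunk(chunk []byte) (*anypb.Any, error) {
	return anypb.New(&transfertypes.Data{Data: chunk})
}

// creditWindow builds a WindowUpdate telling the sender it may transmit
// n more bytes; the exact accounting is up to the stream implementation.
func creditWindow(n int32) (*anypb.Any, error) {
	return anypb.New(&transfertypes.WindowUpdate{Update: n})
}

func main() {
	d, err := frameChunk([]byte("hello"))
	if err != nil {
		log.Fatal(err)
	}
	w, err := creditWindow(32 * 1024)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(d.TypeUrl, w.TypeUrl)
}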


@@ -0,0 +1,17 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package streaming


@@ -0,0 +1,175 @@
//
//Copyright The containerd Authors.
//
//Licensed under the Apache License, Version 2.0 (the "License");
//you may not use this file except in compliance with the License.
//You may obtain a copy of the License at
//
//http://www.apache.org/licenses/LICENSE-2.0
//
//Unless required by applicable law or agreed to in writing, software
//distributed under the License is distributed on an "AS IS" BASIS,
//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//See the License for the specific language governing permissions and
//limitations under the License.
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.28.1
// protoc v3.20.1
// source: github.com/containerd/containerd/api/services/streaming/v1/streaming.proto
package streaming
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
anypb "google.golang.org/protobuf/types/known/anypb"
reflect "reflect"
sync "sync"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
type StreamInit struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
}
func (x *StreamInit) Reset() {
*x = StreamInit{}
if protoimpl.UnsafeEnabled {
mi := &file_github_com_containerd_containerd_api_services_streaming_v1_streaming_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *StreamInit) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*StreamInit) ProtoMessage() {}
func (x *StreamInit) ProtoReflect() protoreflect.Message {
mi := &file_github_com_containerd_containerd_api_services_streaming_v1_streaming_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use StreamInit.ProtoReflect.Descriptor instead.
func (*StreamInit) Descriptor() ([]byte, []int) {
return file_github_com_containerd_containerd_api_services_streaming_v1_streaming_proto_rawDescGZIP(), []int{0}
}
func (x *StreamInit) GetID() string {
if x != nil {
return x.ID
}
return ""
}
var File_github_com_containerd_containerd_api_services_streaming_v1_streaming_proto protoreflect.FileDescriptor
var file_github_com_containerd_containerd_api_services_streaming_v1_streaming_proto_rawDesc = []byte{
0x0a, 0x4a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e,
0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65,
0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f,
0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x31, 0x2f, 0x73, 0x74, 0x72,
0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x20, 0x63, 0x6f,
0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
0x73, 0x2e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x31, 0x1a, 0x19,
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f,
0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x1c, 0x0a, 0x0a, 0x53, 0x74, 0x72,
0x65, 0x61, 0x6d, 0x49, 0x6e, 0x69, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20,
0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x32, 0x45, 0x0a, 0x09, 0x53, 0x74, 0x72, 0x65, 0x61,
0x6d, 0x69, 0x6e, 0x67, 0x12, 0x38, 0x0a, 0x06, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, 0x14,
0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
0x2e, 0x41, 0x6e, 0x79, 0x1a, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x28, 0x01, 0x30, 0x01, 0x42, 0x46,
0x5a, 0x44, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e,
0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65,
0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f,
0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x31, 0x3b, 0x73, 0x74, 0x72,
0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
file_github_com_containerd_containerd_api_services_streaming_v1_streaming_proto_rawDescOnce sync.Once
file_github_com_containerd_containerd_api_services_streaming_v1_streaming_proto_rawDescData = file_github_com_containerd_containerd_api_services_streaming_v1_streaming_proto_rawDesc
)
func file_github_com_containerd_containerd_api_services_streaming_v1_streaming_proto_rawDescGZIP() []byte {
file_github_com_containerd_containerd_api_services_streaming_v1_streaming_proto_rawDescOnce.Do(func() {
file_github_com_containerd_containerd_api_services_streaming_v1_streaming_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_containerd_containerd_api_services_streaming_v1_streaming_proto_rawDescData)
})
return file_github_com_containerd_containerd_api_services_streaming_v1_streaming_proto_rawDescData
}
var file_github_com_containerd_containerd_api_services_streaming_v1_streaming_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
var file_github_com_containerd_containerd_api_services_streaming_v1_streaming_proto_goTypes = []interface{}{
(*StreamInit)(nil), // 0: containerd.services.streaming.v1.StreamInit
(*anypb.Any)(nil), // 1: google.protobuf.Any
}
var file_github_com_containerd_containerd_api_services_streaming_v1_streaming_proto_depIdxs = []int32{
1, // 0: containerd.services.streaming.v1.Streaming.Stream:input_type -> google.protobuf.Any
1, // 1: containerd.services.streaming.v1.Streaming.Stream:output_type -> google.protobuf.Any
1, // [1:2] is the sub-list for method output_type
0, // [0:1] is the sub-list for method input_type
0, // [0:0] is the sub-list for extension type_name
0, // [0:0] is the sub-list for extension extendee
0, // [0:0] is the sub-list for field type_name
}
func init() { file_github_com_containerd_containerd_api_services_streaming_v1_streaming_proto_init() }
func file_github_com_containerd_containerd_api_services_streaming_v1_streaming_proto_init() {
if File_github_com_containerd_containerd_api_services_streaming_v1_streaming_proto != nil {
return
}
if !protoimpl.UnsafeEnabled {
file_github_com_containerd_containerd_api_services_streaming_v1_streaming_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*StreamInit); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_github_com_containerd_containerd_api_services_streaming_v1_streaming_proto_rawDesc,
NumEnums: 0,
NumMessages: 1,
NumExtensions: 0,
NumServices: 1,
},
GoTypes: file_github_com_containerd_containerd_api_services_streaming_v1_streaming_proto_goTypes,
DependencyIndexes: file_github_com_containerd_containerd_api_services_streaming_v1_streaming_proto_depIdxs,
MessageInfos: file_github_com_containerd_containerd_api_services_streaming_v1_streaming_proto_msgTypes,
}.Build()
File_github_com_containerd_containerd_api_services_streaming_v1_streaming_proto = out.File
file_github_com_containerd_containerd_api_services_streaming_v1_streaming_proto_rawDesc = nil
file_github_com_containerd_containerd_api_services_streaming_v1_streaming_proto_goTypes = nil
file_github_com_containerd_containerd_api_services_streaming_v1_streaming_proto_depIdxs = nil
}


@@ -0,0 +1,31 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
syntax = "proto3";
package containerd.services.streaming.v1;
import "google/protobuf/any.proto";
option go_package = "github.com/containerd/containerd/api/services/streaming/v1;streaming";
service Streaming {
rpc Stream(stream google.protobuf.Any) returns (stream google.protobuf.Any);
}
message StreamInit {
string id = 1;
}
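
The service is a single bidirectional Stream RPC over google.protobuf.Any, with StreamInit identifying the stream. A minimal client sketch against the generated StreamingClient follows; it dials the conventional containerd socket, omits details such as the containerd namespace metadata, and packs StreamInit with plain anypb (the real client goes through containerd's typeurl helpers and stream manager), so treat it as an illustration of the message flow only:

package main

import (
	"context"
	"log"

	streamingapi "github.com/containerd/containerd/api/services/streaming/v1"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/protobuf/types/known/anypb"
)

func main() {
	// The socket path is the conventional containerd address; adjust as needed.
	conn, err := grpc.Dial("unix:///run/containerd/containerd.sock",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := streamingapi.NewStreamingClient(conn)
	stream, err := client.Stream(context.Background())
	if err != nil {
		log.Fatal(err)
	}

	// First message identifies the stream; the id would normally be agreed
	// with the service that consumes it (e.g. a transfer progress stream).
	init, err := anypb.New(&streamingapi.StreamInit{ID: "example-stream-id"})
	if err != nil {
		log.Fatal(err)
	}
	if err := stream.Send(init); err != nil {
		log.Fatal(err)
	}

	// Subsequent payloads are arbitrary Any messages defined by the consumer.
	if msg, err := stream.Recv(); err == nil {
		log.Printf("received %s", msg.TypeUrl)
	}
}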


@@ -0,0 +1,138 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
// - protoc-gen-go-grpc v1.2.0
// - protoc v3.20.1
// source: github.com/containerd/containerd/api/services/streaming/v1/streaming.proto
package streaming
import (
context "context"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
anypb "google.golang.org/protobuf/types/known/anypb"
)
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
// Requires gRPC-Go v1.32.0 or later.
const _ = grpc.SupportPackageIsVersion7
// StreamingClient is the client API for Streaming service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
type StreamingClient interface {
Stream(ctx context.Context, opts ...grpc.CallOption) (Streaming_StreamClient, error)
}
type streamingClient struct {
cc grpc.ClientConnInterface
}
func NewStreamingClient(cc grpc.ClientConnInterface) StreamingClient {
return &streamingClient{cc}
}
func (c *streamingClient) Stream(ctx context.Context, opts ...grpc.CallOption) (Streaming_StreamClient, error) {
stream, err := c.cc.NewStream(ctx, &Streaming_ServiceDesc.Streams[0], "/containerd.services.streaming.v1.Streaming/Stream", opts...)
if err != nil {
return nil, err
}
x := &streamingStreamClient{stream}
return x, nil
}
type Streaming_StreamClient interface {
Send(*anypb.Any) error
Recv() (*anypb.Any, error)
grpc.ClientStream
}
type streamingStreamClient struct {
grpc.ClientStream
}
func (x *streamingStreamClient) Send(m *anypb.Any) error {
return x.ClientStream.SendMsg(m)
}
func (x *streamingStreamClient) Recv() (*anypb.Any, error) {
m := new(anypb.Any)
if err := x.ClientStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
// StreamingServer is the server API for Streaming service.
// All implementations must embed UnimplementedStreamingServer
// for forward compatibility
type StreamingServer interface {
Stream(Streaming_StreamServer) error
mustEmbedUnimplementedStreamingServer()
}
// UnimplementedStreamingServer must be embedded to have forward compatible implementations.
type UnimplementedStreamingServer struct {
}
func (UnimplementedStreamingServer) Stream(Streaming_StreamServer) error {
return status.Errorf(codes.Unimplemented, "method Stream not implemented")
}
func (UnimplementedStreamingServer) mustEmbedUnimplementedStreamingServer() {}
// UnsafeStreamingServer may be embedded to opt out of forward compatibility for this service.
// Use of this interface is not recommended, as added methods to StreamingServer will
// result in compilation errors.
type UnsafeStreamingServer interface {
mustEmbedUnimplementedStreamingServer()
}
func RegisterStreamingServer(s grpc.ServiceRegistrar, srv StreamingServer) {
s.RegisterService(&Streaming_ServiceDesc, srv)
}
func _Streaming_Stream_Handler(srv interface{}, stream grpc.ServerStream) error {
return srv.(StreamingServer).Stream(&streamingStreamServer{stream})
}
type Streaming_StreamServer interface {
Send(*anypb.Any) error
Recv() (*anypb.Any, error)
grpc.ServerStream
}
type streamingStreamServer struct {
grpc.ServerStream
}
func (x *streamingStreamServer) Send(m *anypb.Any) error {
return x.ServerStream.SendMsg(m)
}
func (x *streamingStreamServer) Recv() (*anypb.Any, error) {
m := new(anypb.Any)
if err := x.ServerStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
// Streaming_ServiceDesc is the grpc.ServiceDesc for Streaming service.
// It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy)
var Streaming_ServiceDesc = grpc.ServiceDesc{
ServiceName: "containerd.services.streaming.v1.Streaming",
HandlerType: (*StreamingServer)(nil),
Methods: []grpc.MethodDesc{},
Streams: []grpc.StreamDesc{
{
StreamName: "Stream",
Handler: _Streaming_Stream_Handler,
ServerStreams: true,
ClientStreams: true,
},
},
Metadata: "github.com/containerd/containerd/api/services/streaming/v1/streaming.proto",
}
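
Implementing the server side of the generated code above only requires a Stream method plus the embedded UnimplementedStreamingServer. A toy echo server as a sketch (not how containerd itself registers the plugin):

package main

import (
	"io"
	"log"
	"net"

	streamingapi "github.com/containerd/containerd/api/services/streaming/v1"
	"google.golang.org/grpc"
)

// echoService echoes every Any payload back to the sender.
type echoService struct {
	streamingapi.UnimplementedStreamingServer
}

func (echoService) Stream(srv streamingapi.Streaming_StreamServer) error {
	for {
		msg, err := srv.Recv()
		if err == io.EOF {
			return nil
		}
		if err != nil {
			return err
		}
		if err := srv.Send(msg); err != nil {
			return err
		}
	}
}

func main() {
	l, err := net.Listen("tcp", "127.0.0.1:9000") // illustrative address
	if err != nil {
		log.Fatal(err)
	}
	s := grpc.NewServer()
	streamingapi.RegisterStreamingServer(s, echoService{})
	log.Fatal(s.Serve(l))
}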


@@ -0,0 +1,17 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package transfer


@@ -0,0 +1,274 @@
//
//Copyright The containerd Authors.
//
//Licensed under the Apache License, Version 2.0 (the "License");
//you may not use this file except in compliance with the License.
//You may obtain a copy of the License at
//
//http://www.apache.org/licenses/LICENSE-2.0
//
//Unless required by applicable law or agreed to in writing, software
//distributed under the License is distributed on an "AS IS" BASIS,
//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//See the License for the specific language governing permissions and
//limitations under the License.
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.28.1
// protoc v3.20.1
// source: github.com/containerd/containerd/api/services/transfer/v1/transfer.proto
package transfer
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
anypb "google.golang.org/protobuf/types/known/anypb"
emptypb "google.golang.org/protobuf/types/known/emptypb"
reflect "reflect"
sync "sync"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
type TransferRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Source *anypb.Any `protobuf:"bytes,1,opt,name=source,proto3" json:"source,omitempty"`
Destination *anypb.Any `protobuf:"bytes,2,opt,name=destination,proto3" json:"destination,omitempty"`
Options *TransferOptions `protobuf:"bytes,3,opt,name=options,proto3" json:"options,omitempty"`
}
func (x *TransferRequest) Reset() {
*x = TransferRequest{}
if protoimpl.UnsafeEnabled {
mi := &file_github_com_containerd_containerd_api_services_transfer_v1_transfer_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *TransferRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*TransferRequest) ProtoMessage() {}
func (x *TransferRequest) ProtoReflect() protoreflect.Message {
mi := &file_github_com_containerd_containerd_api_services_transfer_v1_transfer_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use TransferRequest.ProtoReflect.Descriptor instead.
func (*TransferRequest) Descriptor() ([]byte, []int) {
return file_github_com_containerd_containerd_api_services_transfer_v1_transfer_proto_rawDescGZIP(), []int{0}
}
func (x *TransferRequest) GetSource() *anypb.Any {
if x != nil {
return x.Source
}
return nil
}
func (x *TransferRequest) GetDestination() *anypb.Any {
if x != nil {
return x.Destination
}
return nil
}
func (x *TransferRequest) GetOptions() *TransferOptions {
if x != nil {
return x.Options
}
return nil
}
type TransferOptions struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
ProgressStream string `protobuf:"bytes,1,opt,name=progress_stream,json=progressStream,proto3" json:"progress_stream,omitempty"` // Progress min interval
}
func (x *TransferOptions) Reset() {
*x = TransferOptions{}
if protoimpl.UnsafeEnabled {
mi := &file_github_com_containerd_containerd_api_services_transfer_v1_transfer_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *TransferOptions) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*TransferOptions) ProtoMessage() {}
func (x *TransferOptions) ProtoReflect() protoreflect.Message {
mi := &file_github_com_containerd_containerd_api_services_transfer_v1_transfer_proto_msgTypes[1]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use TransferOptions.ProtoReflect.Descriptor instead.
func (*TransferOptions) Descriptor() ([]byte, []int) {
return file_github_com_containerd_containerd_api_services_transfer_v1_transfer_proto_rawDescGZIP(), []int{1}
}
func (x *TransferOptions) GetProgressStream() string {
if x != nil {
return x.ProgressStream
}
return ""
}
var File_github_com_containerd_containerd_api_services_transfer_v1_transfer_proto protoreflect.FileDescriptor
var file_github_com_containerd_containerd_api_services_transfer_v1_transfer_proto_rawDesc = []byte{
0x0a, 0x48, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e,
0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65,
0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f,
0x74, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x2f, 0x76, 0x31, 0x2f, 0x74, 0x72, 0x61, 0x6e,
0x73, 0x66, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x1f, 0x63, 0x6f, 0x6e, 0x74,
0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e,
0x74, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x1a, 0x19, 0x67, 0x6f, 0x6f,
0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79,
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70,
0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72,
0x6f, 0x74, 0x6f, 0x22, 0xc3, 0x01, 0x0a, 0x0f, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72,
0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2c, 0x0a, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63,
0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x06, 0x73,
0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x36, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61,
0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f,
0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79,
0x52, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4a, 0x0a,
0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30,
0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76,
0x69, 0x63, 0x65, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x2e, 0x76, 0x31,
0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x3a, 0x0a, 0x0f, 0x54, 0x72, 0x61,
0x6e, 0x73, 0x66, 0x65, 0x72, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x27, 0x0a, 0x0f,
0x70, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x18,
0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x70, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x53,
0x74, 0x72, 0x65, 0x61, 0x6d, 0x32, 0x60, 0x0a, 0x08, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65,
0x72, 0x12, 0x54, 0x0a, 0x08, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x12, 0x30, 0x2e,
0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69,
0x63, 0x65, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e,
0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x42, 0x44, 0x5a, 0x42, 0x67, 0x69, 0x74, 0x68, 0x75,
0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64,
0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f,
0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65,
0x72, 0x2f, 0x76, 0x31, 0x3b, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x62, 0x06, 0x70,
0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
file_github_com_containerd_containerd_api_services_transfer_v1_transfer_proto_rawDescOnce sync.Once
file_github_com_containerd_containerd_api_services_transfer_v1_transfer_proto_rawDescData = file_github_com_containerd_containerd_api_services_transfer_v1_transfer_proto_rawDesc
)
func file_github_com_containerd_containerd_api_services_transfer_v1_transfer_proto_rawDescGZIP() []byte {
file_github_com_containerd_containerd_api_services_transfer_v1_transfer_proto_rawDescOnce.Do(func() {
file_github_com_containerd_containerd_api_services_transfer_v1_transfer_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_containerd_containerd_api_services_transfer_v1_transfer_proto_rawDescData)
})
return file_github_com_containerd_containerd_api_services_transfer_v1_transfer_proto_rawDescData
}
var file_github_com_containerd_containerd_api_services_transfer_v1_transfer_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
var file_github_com_containerd_containerd_api_services_transfer_v1_transfer_proto_goTypes = []interface{}{
(*TransferRequest)(nil), // 0: containerd.services.transfer.v1.TransferRequest
(*TransferOptions)(nil), // 1: containerd.services.transfer.v1.TransferOptions
(*anypb.Any)(nil), // 2: google.protobuf.Any
(*emptypb.Empty)(nil), // 3: google.protobuf.Empty
}
var file_github_com_containerd_containerd_api_services_transfer_v1_transfer_proto_depIdxs = []int32{
2, // 0: containerd.services.transfer.v1.TransferRequest.source:type_name -> google.protobuf.Any
2, // 1: containerd.services.transfer.v1.TransferRequest.destination:type_name -> google.protobuf.Any
1, // 2: containerd.services.transfer.v1.TransferRequest.options:type_name -> containerd.services.transfer.v1.TransferOptions
0, // 3: containerd.services.transfer.v1.Transfer.Transfer:input_type -> containerd.services.transfer.v1.TransferRequest
3, // 4: containerd.services.transfer.v1.Transfer.Transfer:output_type -> google.protobuf.Empty
4, // [4:5] is the sub-list for method output_type
3, // [3:4] is the sub-list for method input_type
3, // [3:3] is the sub-list for extension type_name
3, // [3:3] is the sub-list for extension extendee
0, // [0:3] is the sub-list for field type_name
}
func init() { file_github_com_containerd_containerd_api_services_transfer_v1_transfer_proto_init() }
func file_github_com_containerd_containerd_api_services_transfer_v1_transfer_proto_init() {
if File_github_com_containerd_containerd_api_services_transfer_v1_transfer_proto != nil {
return
}
if !protoimpl.UnsafeEnabled {
file_github_com_containerd_containerd_api_services_transfer_v1_transfer_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*TransferRequest); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_github_com_containerd_containerd_api_services_transfer_v1_transfer_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*TransferOptions); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_github_com_containerd_containerd_api_services_transfer_v1_transfer_proto_rawDesc,
NumEnums: 0,
NumMessages: 2,
NumExtensions: 0,
NumServices: 1,
},
GoTypes: file_github_com_containerd_containerd_api_services_transfer_v1_transfer_proto_goTypes,
DependencyIndexes: file_github_com_containerd_containerd_api_services_transfer_v1_transfer_proto_depIdxs,
MessageInfos: file_github_com_containerd_containerd_api_services_transfer_v1_transfer_proto_msgTypes,
}.Build()
File_github_com_containerd_containerd_api_services_transfer_v1_transfer_proto = out.File
file_github_com_containerd_containerd_api_services_transfer_v1_transfer_proto_rawDesc = nil
file_github_com_containerd_containerd_api_services_transfer_v1_transfer_proto_goTypes = nil
file_github_com_containerd_containerd_api_services_transfer_v1_transfer_proto_depIdxs = nil
}


@@ -0,0 +1,39 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
syntax = "proto3";
package containerd.services.transfer.v1;
import "google/protobuf/any.proto";
import "google/protobuf/empty.proto";
option go_package = "github.com/containerd/containerd/api/services/transfer/v1;transfer";
service Transfer {
rpc Transfer(TransferRequest) returns (google.protobuf.Empty);
}
message TransferRequest {
google.protobuf.Any source = 1;
google.protobuf.Any destination = 2;
TransferOptions options = 3;
}
message TransferOptions {
string progress_stream = 1;
// Progress min interval
}
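
The Transfer RPC takes opaque source and destination objects packed as google.protobuf.Any; typed objects such as OCIRegistry and ImageStore from api/types/transfer (added in this PR) are plugged into those slots. Below is a hedged sketch of a pull-like request built directly against the generated client; the real containerd client goes through its own transfer plugin, typeurl packing, and namespace metadata, so this only illustrates the request shape:

package main

import (
	"context"
	"log"

	transferapi "github.com/containerd/containerd/api/services/transfer/v1"
	transfertypes "github.com/containerd/containerd/api/types/transfer"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/protobuf/types/known/anypb"
)

func main() {
	conn, err := grpc.Dial("unix:///run/containerd/containerd.sock",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// Source: a registry reference; Destination: the local image store.
	src, err := anypb.New(&transfertypes.OCIRegistry{
		Reference: "docker.io/library/busybox:latest",
	})
	if err != nil {
		log.Fatal(err)
	}
	dst, err := anypb.New(&transfertypes.ImageStore{
		Name: "docker.io/library/busybox:latest",
	})
	if err != nil {
		log.Fatal(err)
	}

	_, err = transferapi.NewTransferClient(conn).Transfer(context.Background(),
		&transferapi.TransferRequest{
			Source:      src,
			Destination: dst,
			// Options.ProgressStream could name a stream opened via the
			// streaming service to receive Progress events.
		})
	if err != nil {
		log.Fatal(err)
	}
}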


@@ -0,0 +1,106 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
// - protoc-gen-go-grpc v1.2.0
// - protoc v3.20.1
// source: github.com/containerd/containerd/api/services/transfer/v1/transfer.proto
package transfer
import (
context "context"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
emptypb "google.golang.org/protobuf/types/known/emptypb"
)
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
// Requires gRPC-Go v1.32.0 or later.
const _ = grpc.SupportPackageIsVersion7
// TransferClient is the client API for Transfer service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
type TransferClient interface {
Transfer(ctx context.Context, in *TransferRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
}
type transferClient struct {
cc grpc.ClientConnInterface
}
func NewTransferClient(cc grpc.ClientConnInterface) TransferClient {
return &transferClient{cc}
}
func (c *transferClient) Transfer(ctx context.Context, in *TransferRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
out := new(emptypb.Empty)
err := c.cc.Invoke(ctx, "/containerd.services.transfer.v1.Transfer/Transfer", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// TransferServer is the server API for Transfer service.
// All implementations must embed UnimplementedTransferServer
// for forward compatibility
type TransferServer interface {
Transfer(context.Context, *TransferRequest) (*emptypb.Empty, error)
mustEmbedUnimplementedTransferServer()
}
// UnimplementedTransferServer must be embedded to have forward compatible implementations.
type UnimplementedTransferServer struct {
}
func (UnimplementedTransferServer) Transfer(context.Context, *TransferRequest) (*emptypb.Empty, error) {
return nil, status.Errorf(codes.Unimplemented, "method Transfer not implemented")
}
func (UnimplementedTransferServer) mustEmbedUnimplementedTransferServer() {}
// UnsafeTransferServer may be embedded to opt out of forward compatibility for this service.
// Use of this interface is not recommended, as added methods to TransferServer will
// result in compilation errors.
type UnsafeTransferServer interface {
mustEmbedUnimplementedTransferServer()
}
func RegisterTransferServer(s grpc.ServiceRegistrar, srv TransferServer) {
s.RegisterService(&Transfer_ServiceDesc, srv)
}
func _Transfer_Transfer_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(TransferRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(TransferServer).Transfer(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/containerd.services.transfer.v1.Transfer/Transfer",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(TransferServer).Transfer(ctx, req.(*TransferRequest))
}
return interceptor(ctx, in, info, handler)
}
// Transfer_ServiceDesc is the grpc.ServiceDesc for Transfer service.
// It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy)
var Transfer_ServiceDesc = grpc.ServiceDesc{
ServiceName: "containerd.services.transfer.v1.Transfer",
HandlerType: (*TransferServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "Transfer",
Handler: _Transfer_Transfer_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "github.com/containerd/containerd/api/services/transfer/v1/transfer.proto",
}
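
On the server side, the generated code above only asks for a Transfer method plus the embedded UnimplementedTransferServer; dispatch happens on the Any type URLs of source and destination. A toy sketch that only recognises a registry-to-image-store pairing (containerd's real service resolves many more combinations):

package main

import (
	"context"
	"log"
	"net"

	transferapi "github.com/containerd/containerd/api/services/transfer/v1"
	transfertypes "github.com/containerd/containerd/api/types/transfer"
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
	"google.golang.org/protobuf/types/known/emptypb"
)

// transferService is a toy server that only recognises registry -> image
// store transfers.
type transferService struct {
	transferapi.UnimplementedTransferServer
}

func (transferService) Transfer(ctx context.Context, req *transferapi.TransferRequest) (*emptypb.Empty, error) {
	if req.Source == nil || req.Destination == nil {
		return nil, status.Error(codes.InvalidArgument, "source and destination required")
	}
	if req.Source.MessageIs(&transfertypes.OCIRegistry{}) &&
		req.Destination.MessageIs(&transfertypes.ImageStore{}) {
		log.Printf("pull-like transfer requested: %s -> %s", req.Source.TypeUrl, req.Destination.TypeUrl)
		// ... perform the actual transfer here ...
		return &emptypb.Empty{}, nil
	}
	return nil, status.Error(codes.Unimplemented, "unsupported source/destination pair")
}

func main() {
	l, err := net.Listen("tcp", "127.0.0.1:9001") // illustrative address
	if err != nil {
		log.Fatal(err)
	}
	s := grpc.NewServer()
	transferapi.RegisterTransferServer(s, transferService{})
	log.Fatal(s.Serve(l))
}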

api/types/transfer/doc.go

@@ -0,0 +1,18 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package transfer defines the transfer types.
package transfer


@@ -0,0 +1,358 @@
//
//Copyright The containerd Authors.
//
//Licensed under the Apache License, Version 2.0 (the "License");
//you may not use this file except in compliance with the License.
//You may obtain a copy of the License at
//
//http://www.apache.org/licenses/LICENSE-2.0
//
//Unless required by applicable law or agreed to in writing, software
//distributed under the License is distributed on an "AS IS" BASIS,
//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//See the License for the specific language governing permissions and
//limitations under the License.
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.28.1
// protoc v3.20.1
// source: github.com/containerd/containerd/api/types/transfer/imagestore.proto
package transfer
import (
types "github.com/containerd/containerd/api/types"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
type ImageStore struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
Labels map[string]string `protobuf:"bytes,2,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
Platforms []*types.Platform `protobuf:"bytes,3,rep,name=platforms,proto3" json:"platforms,omitempty"`
AllMetadata bool `protobuf:"varint,4,opt,name=all_metadata,json=allMetadata,proto3" json:"all_metadata,omitempty"`
ManifestLimit uint32 `protobuf:"varint,5,opt,name=manifest_limit,json=manifestLimit,proto3" json:"manifest_limit,omitempty"`
// prefix is the intended image name prefix for imported images
Prefix string `protobuf:"bytes,6,opt,name=prefix,proto3" json:"prefix,omitempty"`
// check_prefix only stores images with the prefix
CheckPrefix bool `protobuf:"varint,7,opt,name=check_prefix,json=checkPrefix,proto3" json:"check_prefix,omitempty"`
// digest_refs adds digest references for images using prefix
DigestRefs bool `protobuf:"varint,8,opt,name=digest_refs,json=digestRefs,proto3" json:"digest_refs,omitempty"`
// always_digest includes a digest image even when a non-digest image is stored
AlwaysDigest bool `protobuf:"varint,9,opt,name=always_digest,json=alwaysDigest,proto3" json:"always_digest,omitempty"`
Unpacks []*UnpackConfiguration `protobuf:"bytes,10,rep,name=unpacks,proto3" json:"unpacks,omitempty"`
}
func (x *ImageStore) Reset() {
*x = ImageStore{}
if protoimpl.UnsafeEnabled {
mi := &file_github_com_containerd_containerd_api_types_transfer_imagestore_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *ImageStore) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ImageStore) ProtoMessage() {}
func (x *ImageStore) ProtoReflect() protoreflect.Message {
mi := &file_github_com_containerd_containerd_api_types_transfer_imagestore_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ImageStore.ProtoReflect.Descriptor instead.
func (*ImageStore) Descriptor() ([]byte, []int) {
return file_github_com_containerd_containerd_api_types_transfer_imagestore_proto_rawDescGZIP(), []int{0}
}
func (x *ImageStore) GetName() string {
if x != nil {
return x.Name
}
return ""
}
func (x *ImageStore) GetLabels() map[string]string {
if x != nil {
return x.Labels
}
return nil
}
func (x *ImageStore) GetPlatforms() []*types.Platform {
if x != nil {
return x.Platforms
}
return nil
}
func (x *ImageStore) GetAllMetadata() bool {
if x != nil {
return x.AllMetadata
}
return false
}
func (x *ImageStore) GetManifestLimit() uint32 {
if x != nil {
return x.ManifestLimit
}
return 0
}
func (x *ImageStore) GetPrefix() string {
if x != nil {
return x.Prefix
}
return ""
}
func (x *ImageStore) GetCheckPrefix() bool {
if x != nil {
return x.CheckPrefix
}
return false
}
func (x *ImageStore) GetDigestRefs() bool {
if x != nil {
return x.DigestRefs
}
return false
}
func (x *ImageStore) GetAlwaysDigest() bool {
if x != nil {
return x.AlwaysDigest
}
return false
}
func (x *ImageStore) GetUnpacks() []*UnpackConfiguration {
if x != nil {
return x.Unpacks
}
return nil
}
type UnpackConfiguration struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// platform is the platform to unpack for, used for resolving manifest and snapshotter
// if not provided
Platform *types.Platform `protobuf:"bytes,1,opt,name=platform,proto3" json:"platform,omitempty"`
// snapshotter to unpack to, if not provided default for platform should be used
Snapshotter string `protobuf:"bytes,2,opt,name=snapshotter,proto3" json:"snapshotter,omitempty"`
}
func (x *UnpackConfiguration) Reset() {
*x = UnpackConfiguration{}
if protoimpl.UnsafeEnabled {
mi := &file_github_com_containerd_containerd_api_types_transfer_imagestore_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *UnpackConfiguration) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*UnpackConfiguration) ProtoMessage() {}
func (x *UnpackConfiguration) ProtoReflect() protoreflect.Message {
mi := &file_github_com_containerd_containerd_api_types_transfer_imagestore_proto_msgTypes[1]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use UnpackConfiguration.ProtoReflect.Descriptor instead.
func (*UnpackConfiguration) Descriptor() ([]byte, []int) {
return file_github_com_containerd_containerd_api_types_transfer_imagestore_proto_rawDescGZIP(), []int{1}
}
func (x *UnpackConfiguration) GetPlatform() *types.Platform {
if x != nil {
return x.Platform
}
return nil
}
func (x *UnpackConfiguration) GetSnapshotter() string {
if x != nil {
return x.Snapshotter
}
return ""
}
var File_github_com_containerd_containerd_api_types_transfer_imagestore_proto protoreflect.FileDescriptor
var file_github_com_containerd_containerd_api_types_transfer_imagestore_proto_rawDesc = []byte{
0x0a, 0x44, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e,
0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65,
0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x74, 0x72, 0x61,
0x6e, 0x73, 0x66, 0x65, 0x72, 0x2f, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65,
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x19, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65,
0x72, 0x64, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65,
0x72, 0x1a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f,
0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e,
0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x70, 0x6c,
0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xf5, 0x03, 0x0a,
0x0a, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e,
0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12,
0x49, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32,
0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70,
0x65, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x2e, 0x49, 0x6d, 0x61, 0x67,
0x65, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74,
0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x38, 0x0a, 0x09, 0x70, 0x6c,
0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e,
0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73,
0x2e, 0x50, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x52, 0x09, 0x70, 0x6c, 0x61, 0x74, 0x66,
0x6f, 0x72, 0x6d, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x61, 0x6c, 0x6c, 0x5f, 0x6d, 0x65, 0x74, 0x61,
0x64, 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x61, 0x6c, 0x6c, 0x4d,
0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x25, 0x0a, 0x0e, 0x6d, 0x61, 0x6e, 0x69, 0x66,
0x65, 0x73, 0x74, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0d, 0x52,
0x0d, 0x6d, 0x61, 0x6e, 0x69, 0x66, 0x65, 0x73, 0x74, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x16,
0x0a, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06,
0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f,
0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x63, 0x68,
0x65, 0x63, 0x6b, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x1f, 0x0a, 0x0b, 0x64, 0x69, 0x67,
0x65, 0x73, 0x74, 0x5f, 0x72, 0x65, 0x66, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a,
0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x52, 0x65, 0x66, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x61, 0x6c,
0x77, 0x61, 0x79, 0x73, 0x5f, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28,
0x08, 0x52, 0x0c, 0x61, 0x6c, 0x77, 0x61, 0x79, 0x73, 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x12,
0x48, 0x0a, 0x07, 0x75, 0x6e, 0x70, 0x61, 0x63, 0x6b, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b,
0x32, 0x2e, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x79,
0x70, 0x65, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x2e, 0x55, 0x6e, 0x70,
0x61, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
0x52, 0x07, 0x75, 0x6e, 0x70, 0x61, 0x63, 0x6b, 0x73, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62,
0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18,
0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61,
0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
0x3a, 0x02, 0x38, 0x01, 0x22, 0x6f, 0x0a, 0x13, 0x55, 0x6e, 0x70, 0x61, 0x63, 0x6b, 0x43, 0x6f,
0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x36, 0x0a, 0x08, 0x70,
0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e,
0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73,
0x2e, 0x50, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x52, 0x08, 0x70, 0x6c, 0x61, 0x74, 0x66,
0x6f, 0x72, 0x6d, 0x12, 0x20, 0x0a, 0x0b, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x74,
0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68,
0x6f, 0x74, 0x74, 0x65, 0x72, 0x42, 0x35, 0x5a, 0x33, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e,
0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63,
0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79,
0x70, 0x65, 0x73, 0x2f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x62, 0x06, 0x70, 0x72,
0x6f, 0x74, 0x6f, 0x33,
}
var (
file_github_com_containerd_containerd_api_types_transfer_imagestore_proto_rawDescOnce sync.Once
file_github_com_containerd_containerd_api_types_transfer_imagestore_proto_rawDescData = file_github_com_containerd_containerd_api_types_transfer_imagestore_proto_rawDesc
)
func file_github_com_containerd_containerd_api_types_transfer_imagestore_proto_rawDescGZIP() []byte {
file_github_com_containerd_containerd_api_types_transfer_imagestore_proto_rawDescOnce.Do(func() {
file_github_com_containerd_containerd_api_types_transfer_imagestore_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_containerd_containerd_api_types_transfer_imagestore_proto_rawDescData)
})
return file_github_com_containerd_containerd_api_types_transfer_imagestore_proto_rawDescData
}
var file_github_com_containerd_containerd_api_types_transfer_imagestore_proto_msgTypes = make([]protoimpl.MessageInfo, 3)
var file_github_com_containerd_containerd_api_types_transfer_imagestore_proto_goTypes = []interface{}{
(*ImageStore)(nil), // 0: containerd.types.transfer.ImageStore
(*UnpackConfiguration)(nil), // 1: containerd.types.transfer.UnpackConfiguration
nil, // 2: containerd.types.transfer.ImageStore.LabelsEntry
(*types.Platform)(nil), // 3: containerd.types.Platform
}
var file_github_com_containerd_containerd_api_types_transfer_imagestore_proto_depIdxs = []int32{
2, // 0: containerd.types.transfer.ImageStore.labels:type_name -> containerd.types.transfer.ImageStore.LabelsEntry
3, // 1: containerd.types.transfer.ImageStore.platforms:type_name -> containerd.types.Platform
1, // 2: containerd.types.transfer.ImageStore.unpacks:type_name -> containerd.types.transfer.UnpackConfiguration
3, // 3: containerd.types.transfer.UnpackConfiguration.platform:type_name -> containerd.types.Platform
4, // [4:4] is the sub-list for method output_type
4, // [4:4] is the sub-list for method input_type
4, // [4:4] is the sub-list for extension type_name
4, // [4:4] is the sub-list for extension extendee
0, // [0:4] is the sub-list for field type_name
}
func init() { file_github_com_containerd_containerd_api_types_transfer_imagestore_proto_init() }
func file_github_com_containerd_containerd_api_types_transfer_imagestore_proto_init() {
if File_github_com_containerd_containerd_api_types_transfer_imagestore_proto != nil {
return
}
if !protoimpl.UnsafeEnabled {
file_github_com_containerd_containerd_api_types_transfer_imagestore_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ImageStore); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_github_com_containerd_containerd_api_types_transfer_imagestore_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*UnpackConfiguration); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_github_com_containerd_containerd_api_types_transfer_imagestore_proto_rawDesc,
NumEnums: 0,
NumMessages: 3,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_github_com_containerd_containerd_api_types_transfer_imagestore_proto_goTypes,
DependencyIndexes: file_github_com_containerd_containerd_api_types_transfer_imagestore_proto_depIdxs,
MessageInfos: file_github_com_containerd_containerd_api_types_transfer_imagestore_proto_msgTypes,
}.Build()
File_github_com_containerd_containerd_api_types_transfer_imagestore_proto = out.File
file_github_com_containerd_containerd_api_types_transfer_imagestore_proto_rawDesc = nil
file_github_com_containerd_containerd_api_types_transfer_imagestore_proto_goTypes = nil
file_github_com_containerd_containerd_api_types_transfer_imagestore_proto_depIdxs = nil
}


@@ -0,0 +1,58 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
syntax = "proto3";
package containerd.types.transfer;
import "github.com/containerd/containerd/api/types/platform.proto";
option go_package = "github.com/containerd/containerd/api/types/transfer";
message ImageStore {
string name = 1;
map<string, string> labels = 2;
// Content filters
repeated types.Platform platforms = 3;
bool all_metadata = 4;
uint32 manifest_limit = 5;
// Import naming
// prefix is the intended image name prefix for imported images
string prefix = 6;
// check_prefix only stores images with the prefix
bool check_prefix = 7;
// digest_refs adds digest references for images using prefix
bool digest_refs = 8;
// always_digest includes a digest image even when a non-digest image is stored
bool always_digest = 9;
// Unpack Configuration, multiple allowed
repeated UnpackConfiguration unpacks = 10;
}
message UnpackConfiguration {
// platform is the platform to unpack for, used for resolving manifest and snapshotter
// if not provided
types.Platform platform = 1;
// snapshotter to unpack to, if not provided the default for the platform should be used
string snapshotter = 2;
}
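A minimal sketch, assuming only the generated Go bindings declared by the go_package option above, of how a client might fill in an ImageStore destination with one unpack configuration; the image name and snapshotter are placeholder values, not anything the proto mandates.
package main

import (
	"fmt"

	transfertypes "github.com/containerd/containerd/api/types/transfer"
)

func main() {
	// Destination that names the imported image, limits manifest fetching,
	// and asks for one unpack onto the overlayfs snapshotter. Leaving
	// UnpackConfiguration.Platform unset falls back to the default platform
	// resolution described in the comment above.
	dst := &transfertypes.ImageStore{
		Name:          "docker.io/library/example:latest", // placeholder reference
		ManifestLimit: 1,
		Unpacks: []*transfertypes.UnpackConfiguration{
			{Snapshotter: "overlayfs"},
		},
	}
	fmt.Println(dst.GetName(), len(dst.GetUnpacks()))
}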

View File

@ -0,0 +1,264 @@
//
//Copyright The containerd Authors.
//
//Licensed under the Apache License, Version 2.0 (the "License");
//you may not use this file except in compliance with the License.
//You may obtain a copy of the License at
//
//http://www.apache.org/licenses/LICENSE-2.0
//
//Unless required by applicable law or agreed to in writing, software
//distributed under the License is distributed on an "AS IS" BASIS,
//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//See the License for the specific language governing permissions and
//limitations under the License.
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.28.1
// protoc v3.20.1
// source: github.com/containerd/containerd/api/types/transfer/importexport.proto
package transfer
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
type ImageImportStream struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// Stream is used to identify the binary input stream for the import operation.
// The stream uses the transfer binary stream protocol with the client as the sender.
// The binary data is expected to be a raw tar stream.
Stream string `protobuf:"bytes,1,opt,name=stream,proto3" json:"stream,omitempty"`
MediaType string `protobuf:"bytes,2,opt,name=media_type,json=mediaType,proto3" json:"media_type,omitempty"`
ForceCompress bool `protobuf:"varint,3,opt,name=force_compress,json=forceCompress,proto3" json:"force_compress,omitempty"`
}
func (x *ImageImportStream) Reset() {
*x = ImageImportStream{}
if protoimpl.UnsafeEnabled {
mi := &file_github_com_containerd_containerd_api_types_transfer_importexport_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *ImageImportStream) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ImageImportStream) ProtoMessage() {}
func (x *ImageImportStream) ProtoReflect() protoreflect.Message {
mi := &file_github_com_containerd_containerd_api_types_transfer_importexport_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ImageImportStream.ProtoReflect.Descriptor instead.
func (*ImageImportStream) Descriptor() ([]byte, []int) {
return file_github_com_containerd_containerd_api_types_transfer_importexport_proto_rawDescGZIP(), []int{0}
}
func (x *ImageImportStream) GetStream() string {
if x != nil {
return x.Stream
}
return ""
}
func (x *ImageImportStream) GetMediaType() string {
if x != nil {
return x.MediaType
}
return ""
}
func (x *ImageImportStream) GetForceCompress() bool {
if x != nil {
return x.ForceCompress
}
return false
}
type ImageExportStream struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// Stream is used to identify the binary output stream for the export operation.
// The stream uses the transfer binary stream protocol with the server as the sender.
// The binary data is expected to be a raw tar stream.
Stream string `protobuf:"bytes,1,opt,name=stream,proto3" json:"stream,omitempty"`
MediaType string `protobuf:"bytes,2,opt,name=media_type,json=mediaType,proto3" json:"media_type,omitempty"`
}
func (x *ImageExportStream) Reset() {
*x = ImageExportStream{}
if protoimpl.UnsafeEnabled {
mi := &file_github_com_containerd_containerd_api_types_transfer_importexport_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *ImageExportStream) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ImageExportStream) ProtoMessage() {}
func (x *ImageExportStream) ProtoReflect() protoreflect.Message {
mi := &file_github_com_containerd_containerd_api_types_transfer_importexport_proto_msgTypes[1]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ImageExportStream.ProtoReflect.Descriptor instead.
func (*ImageExportStream) Descriptor() ([]byte, []int) {
return file_github_com_containerd_containerd_api_types_transfer_importexport_proto_rawDescGZIP(), []int{1}
}
func (x *ImageExportStream) GetStream() string {
if x != nil {
return x.Stream
}
return ""
}
func (x *ImageExportStream) GetMediaType() string {
if x != nil {
return x.MediaType
}
return ""
}
var File_github_com_containerd_containerd_api_types_transfer_importexport_proto protoreflect.FileDescriptor
var file_github_com_containerd_containerd_api_types_transfer_importexport_proto_rawDesc = []byte{
0x0a, 0x46, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e,
0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65,
0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x74, 0x72, 0x61,
0x6e, 0x73, 0x66, 0x65, 0x72, 0x2f, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x78, 0x70, 0x6f,
0x72, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x19, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69,
0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73,
0x66, 0x65, 0x72, 0x22, 0x71, 0x0a, 0x11, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x49, 0x6d, 0x70, 0x6f,
0x72, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x72, 0x65,
0x61, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d,
0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02,
0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x54, 0x79, 0x70, 0x65, 0x12,
0x25, 0x0a, 0x0e, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73,
0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x43, 0x6f,
0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x22, 0x4a, 0x0a, 0x11, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x45,
0x78, 0x70, 0x6f, 0x72, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, 0x16, 0x0a, 0x06, 0x73,
0x74, 0x72, 0x65, 0x61, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x72,
0x65, 0x61, 0x6d, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x5f, 0x74, 0x79, 0x70,
0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x54, 0x79,
0x70, 0x65, 0x42, 0x35, 0x5a, 0x33, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d,
0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74,
0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73,
0x2f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x33,
}
var (
file_github_com_containerd_containerd_api_types_transfer_importexport_proto_rawDescOnce sync.Once
file_github_com_containerd_containerd_api_types_transfer_importexport_proto_rawDescData = file_github_com_containerd_containerd_api_types_transfer_importexport_proto_rawDesc
)
func file_github_com_containerd_containerd_api_types_transfer_importexport_proto_rawDescGZIP() []byte {
file_github_com_containerd_containerd_api_types_transfer_importexport_proto_rawDescOnce.Do(func() {
file_github_com_containerd_containerd_api_types_transfer_importexport_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_containerd_containerd_api_types_transfer_importexport_proto_rawDescData)
})
return file_github_com_containerd_containerd_api_types_transfer_importexport_proto_rawDescData
}
var file_github_com_containerd_containerd_api_types_transfer_importexport_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
var file_github_com_containerd_containerd_api_types_transfer_importexport_proto_goTypes = []interface{}{
(*ImageImportStream)(nil), // 0: containerd.types.transfer.ImageImportStream
(*ImageExportStream)(nil), // 1: containerd.types.transfer.ImageExportStream
}
var file_github_com_containerd_containerd_api_types_transfer_importexport_proto_depIdxs = []int32{
0, // [0:0] is the sub-list for method output_type
0, // [0:0] is the sub-list for method input_type
0, // [0:0] is the sub-list for extension type_name
0, // [0:0] is the sub-list for extension extendee
0, // [0:0] is the sub-list for field type_name
}
func init() { file_github_com_containerd_containerd_api_types_transfer_importexport_proto_init() }
func file_github_com_containerd_containerd_api_types_transfer_importexport_proto_init() {
if File_github_com_containerd_containerd_api_types_transfer_importexport_proto != nil {
return
}
if !protoimpl.UnsafeEnabled {
file_github_com_containerd_containerd_api_types_transfer_importexport_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ImageImportStream); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_github_com_containerd_containerd_api_types_transfer_importexport_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ImageExportStream); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_github_com_containerd_containerd_api_types_transfer_importexport_proto_rawDesc,
NumEnums: 0,
NumMessages: 2,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_github_com_containerd_containerd_api_types_transfer_importexport_proto_goTypes,
DependencyIndexes: file_github_com_containerd_containerd_api_types_transfer_importexport_proto_depIdxs,
MessageInfos: file_github_com_containerd_containerd_api_types_transfer_importexport_proto_msgTypes,
}.Build()
File_github_com_containerd_containerd_api_types_transfer_importexport_proto = out.File
file_github_com_containerd_containerd_api_types_transfer_importexport_proto_rawDesc = nil
file_github_com_containerd_containerd_api_types_transfer_importexport_proto_goTypes = nil
file_github_com_containerd_containerd_api_types_transfer_importexport_proto_depIdxs = nil
}

View File

@ -0,0 +1,41 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
syntax = "proto3";
package containerd.types.transfer;
option go_package = "github.com/containerd/containerd/api/types/transfer";
message ImageImportStream {
// Stream is used to identify the binary input stream for the import operation.
// The stream uses the transfer binary stream protocol with the client as the sender.
// The binary data is expected to be a raw tar stream.
string stream = 1;
string media_type = 2;
bool force_compress = 3;
}
message ImageExportStream {
// Stream is used to identify the binary output stream for the export operation.
// The stream uses the transfer binary stream protocol with the server as the sender.
// The binary data is expected to be a raw tar stream.
string stream = 1;
string media_type = 2;
}
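For orientation, a hedged sketch using the generated ImageImportStream and ImageExportStream structs shown earlier in this diff; the stream ids and media type are placeholders for values that would normally come from the streaming service.
package main

import (
	"fmt"

	transfertypes "github.com/containerd/containerd/api/types/transfer"
)

func main() {
	// Import source: the client pushes a raw tar stream over the transfer
	// binary stream protocol under the given (placeholder) stream id.
	src := &transfertypes.ImageImportStream{
		Stream:        "import-stream-1", // placeholder stream id
		MediaType:     "application/x-tar",
		ForceCompress: false,
	}

	// Export destination: the server sends the tar stream back on this id.
	dst := &transfertypes.ImageExportStream{
		Stream:    "export-stream-1", // placeholder stream id
		MediaType: "application/x-tar",
	}

	fmt.Println(src.GetStream(), dst.GetStream())
}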

View File

@ -0,0 +1,202 @@
//
//Copyright The containerd Authors.
//
//Licensed under the Apache License, Version 2.0 (the "License");
//you may not use this file except in compliance with the License.
//You may obtain a copy of the License at
//
//http://www.apache.org/licenses/LICENSE-2.0
//
//Unless required by applicable law or agreed to in writing, software
//distributed under the License is distributed on an "AS IS" BASIS,
//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//See the License for the specific language governing permissions and
//limitations under the License.
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.28.1
// protoc v3.20.1
// source: github.com/containerd/containerd/api/types/transfer/progress.proto
package transfer
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
type Progress struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Event string `protobuf:"bytes,1,opt,name=event,proto3" json:"event,omitempty"`
Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
Parents []string `protobuf:"bytes,3,rep,name=parents,proto3" json:"parents,omitempty"`
Progress int64 `protobuf:"varint,4,opt,name=progress,proto3" json:"progress,omitempty"`
Total int64 `protobuf:"varint,5,opt,name=total,proto3" json:"total,omitempty"`
}
func (x *Progress) Reset() {
*x = Progress{}
if protoimpl.UnsafeEnabled {
mi := &file_github_com_containerd_containerd_api_types_transfer_progress_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *Progress) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*Progress) ProtoMessage() {}
func (x *Progress) ProtoReflect() protoreflect.Message {
mi := &file_github_com_containerd_containerd_api_types_transfer_progress_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Progress.ProtoReflect.Descriptor instead.
func (*Progress) Descriptor() ([]byte, []int) {
return file_github_com_containerd_containerd_api_types_transfer_progress_proto_rawDescGZIP(), []int{0}
}
func (x *Progress) GetEvent() string {
if x != nil {
return x.Event
}
return ""
}
func (x *Progress) GetName() string {
if x != nil {
return x.Name
}
return ""
}
func (x *Progress) GetParents() []string {
if x != nil {
return x.Parents
}
return nil
}
func (x *Progress) GetProgress() int64 {
if x != nil {
return x.Progress
}
return 0
}
func (x *Progress) GetTotal() int64 {
if x != nil {
return x.Total
}
return 0
}
var File_github_com_containerd_containerd_api_types_transfer_progress_proto protoreflect.FileDescriptor
var file_github_com_containerd_containerd_api_types_transfer_progress_proto_rawDesc = []byte{
0x0a, 0x42, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e,
0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65,
0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x74, 0x72, 0x61,
0x6e, 0x73, 0x66, 0x65, 0x72, 0x2f, 0x70, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x2e, 0x70,
0x72, 0x6f, 0x74, 0x6f, 0x12, 0x19, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64,
0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x22,
0x80, 0x01, 0x0a, 0x08, 0x50, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x12, 0x14, 0x0a, 0x05,
0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x76, 0x65,
0x6e, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74,
0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x73,
0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x18, 0x04, 0x20, 0x01,
0x28, 0x03, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x12, 0x14, 0x0a, 0x05,
0x74, 0x6f, 0x74, 0x61, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x74, 0x6f, 0x74,
0x61, 0x6c, 0x42, 0x35, 0x5a, 0x33, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d,
0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74,
0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73,
0x2f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x33,
}
var (
file_github_com_containerd_containerd_api_types_transfer_progress_proto_rawDescOnce sync.Once
file_github_com_containerd_containerd_api_types_transfer_progress_proto_rawDescData = file_github_com_containerd_containerd_api_types_transfer_progress_proto_rawDesc
)
func file_github_com_containerd_containerd_api_types_transfer_progress_proto_rawDescGZIP() []byte {
file_github_com_containerd_containerd_api_types_transfer_progress_proto_rawDescOnce.Do(func() {
file_github_com_containerd_containerd_api_types_transfer_progress_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_containerd_containerd_api_types_transfer_progress_proto_rawDescData)
})
return file_github_com_containerd_containerd_api_types_transfer_progress_proto_rawDescData
}
var file_github_com_containerd_containerd_api_types_transfer_progress_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
var file_github_com_containerd_containerd_api_types_transfer_progress_proto_goTypes = []interface{}{
(*Progress)(nil), // 0: containerd.types.transfer.Progress
}
var file_github_com_containerd_containerd_api_types_transfer_progress_proto_depIdxs = []int32{
0, // [0:0] is the sub-list for method output_type
0, // [0:0] is the sub-list for method input_type
0, // [0:0] is the sub-list for extension type_name
0, // [0:0] is the sub-list for extension extendee
0, // [0:0] is the sub-list for field type_name
}
func init() { file_github_com_containerd_containerd_api_types_transfer_progress_proto_init() }
func file_github_com_containerd_containerd_api_types_transfer_progress_proto_init() {
if File_github_com_containerd_containerd_api_types_transfer_progress_proto != nil {
return
}
if !protoimpl.UnsafeEnabled {
file_github_com_containerd_containerd_api_types_transfer_progress_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Progress); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_github_com_containerd_containerd_api_types_transfer_progress_proto_rawDesc,
NumEnums: 0,
NumMessages: 1,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_github_com_containerd_containerd_api_types_transfer_progress_proto_goTypes,
DependencyIndexes: file_github_com_containerd_containerd_api_types_transfer_progress_proto_depIdxs,
MessageInfos: file_github_com_containerd_containerd_api_types_transfer_progress_proto_msgTypes,
}.Build()
File_github_com_containerd_containerd_api_types_transfer_progress_proto = out.File
file_github_com_containerd_containerd_api_types_transfer_progress_proto_rawDesc = nil
file_github_com_containerd_containerd_api_types_transfer_progress_proto_goTypes = nil
file_github_com_containerd_containerd_api_types_transfer_progress_proto_depIdxs = nil
}

View File

@ -0,0 +1,29 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
syntax = "proto3";
package containerd.types.transfer;
option go_package = "github.com/containerd/containerd/api/types/transfer";
message Progress {
string event = 1;
string name = 2;
repeated string parents = 3;
int64 progress = 4;
int64 total = 5;
}
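Since Progress carries only an event label, a name, parent names, and progress/total counters, one plausible client-side rendering of an update looks like the sketch below; the formatting is illustrative, not part of the API.
package main

import (
	"fmt"

	transfertypes "github.com/containerd/containerd/api/types/transfer"
)

// formatProgress renders one Progress update as a single log line,
// printing a percentage only when a total is known.
func formatProgress(p *transfertypes.Progress) string {
	if p.GetTotal() > 0 {
		pct := float64(p.GetProgress()) / float64(p.GetTotal()) * 100
		return fmt.Sprintf("%s %s %.1f%%", p.GetEvent(), p.GetName(), pct)
	}
	return fmt.Sprintf("%s %s", p.GetEvent(), p.GetName())
}

func main() {
	fmt.Println(formatProgress(&transfertypes.Progress{
		Event:    "downloading",
		Name:     "example-layer", // placeholder name
		Progress: 512,
		Total:    1024,
	}))
}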

View File

@ -0,0 +1,518 @@
//
//Copyright The containerd Authors.
//
//Licensed under the Apache License, Version 2.0 (the "License");
//you may not use this file except in compliance with the License.
//You may obtain a copy of the License at
//
//http://www.apache.org/licenses/LICENSE-2.0
//
//Unless required by applicable law or agreed to in writing, software
//distributed under the License is distributed on an "AS IS" BASIS,
//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//See the License for the specific language governing permissions and
//limitations under the License.
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.28.1
// protoc v3.20.1
// source: github.com/containerd/containerd/api/types/transfer/registry.proto
package transfer
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
timestamppb "google.golang.org/protobuf/types/known/timestamppb"
reflect "reflect"
sync "sync"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
type AuthType int32
const (
AuthType_NONE AuthType = 0
// CREDENTIALS is used to exchange username/password for access token
// using an oauth or "Docker Registry Token" server
AuthType_CREDENTIALS AuthType = 1
// REFRESH is used to exchange secret for access token using an oauth
// or "Docker Registry Token" server
AuthType_REFRESH AuthType = 2
// HEADER is used to set the HTTP Authorization header to secret
// directly for the registry.
// Value should be `<auth-scheme> <authorization-parameters>`
AuthType_HEADER AuthType = 3
)
// Enum value maps for AuthType.
var (
AuthType_name = map[int32]string{
0: "NONE",
1: "CREDENTIALS",
2: "REFRESH",
3: "HEADER",
}
AuthType_value = map[string]int32{
"NONE": 0,
"CREDENTIALS": 1,
"REFRESH": 2,
"HEADER": 3,
}
)
func (x AuthType) Enum() *AuthType {
p := new(AuthType)
*p = x
return p
}
func (x AuthType) String() string {
return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}
func (AuthType) Descriptor() protoreflect.EnumDescriptor {
return file_github_com_containerd_containerd_api_types_transfer_registry_proto_enumTypes[0].Descriptor()
}
func (AuthType) Type() protoreflect.EnumType {
return &file_github_com_containerd_containerd_api_types_transfer_registry_proto_enumTypes[0]
}
func (x AuthType) Number() protoreflect.EnumNumber {
return protoreflect.EnumNumber(x)
}
// Deprecated: Use AuthType.Descriptor instead.
func (AuthType) EnumDescriptor() ([]byte, []int) {
return file_github_com_containerd_containerd_api_types_transfer_registry_proto_rawDescGZIP(), []int{0}
}
type OCIRegistry struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Reference string `protobuf:"bytes,1,opt,name=reference,proto3" json:"reference,omitempty"`
Resolver *RegistryResolver `protobuf:"bytes,2,opt,name=resolver,proto3" json:"resolver,omitempty"`
}
func (x *OCIRegistry) Reset() {
*x = OCIRegistry{}
if protoimpl.UnsafeEnabled {
mi := &file_github_com_containerd_containerd_api_types_transfer_registry_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *OCIRegistry) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*OCIRegistry) ProtoMessage() {}
func (x *OCIRegistry) ProtoReflect() protoreflect.Message {
mi := &file_github_com_containerd_containerd_api_types_transfer_registry_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use OCIRegistry.ProtoReflect.Descriptor instead.
func (*OCIRegistry) Descriptor() ([]byte, []int) {
return file_github_com_containerd_containerd_api_types_transfer_registry_proto_rawDescGZIP(), []int{0}
}
func (x *OCIRegistry) GetReference() string {
if x != nil {
return x.Reference
}
return ""
}
func (x *OCIRegistry) GetResolver() *RegistryResolver {
if x != nil {
return x.Resolver
}
return nil
}
type RegistryResolver struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// auth_stream is used to refer to a stream on which auth callbacks may be
// made.
AuthStream string `protobuf:"bytes,1,opt,name=auth_stream,json=authStream,proto3" json:"auth_stream,omitempty"`
// Headers
Headers map[string]string `protobuf:"bytes,2,rep,name=headers,proto3" json:"headers,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
}
func (x *RegistryResolver) Reset() {
*x = RegistryResolver{}
if protoimpl.UnsafeEnabled {
mi := &file_github_com_containerd_containerd_api_types_transfer_registry_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *RegistryResolver) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*RegistryResolver) ProtoMessage() {}
func (x *RegistryResolver) ProtoReflect() protoreflect.Message {
mi := &file_github_com_containerd_containerd_api_types_transfer_registry_proto_msgTypes[1]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RegistryResolver.ProtoReflect.Descriptor instead.
func (*RegistryResolver) Descriptor() ([]byte, []int) {
return file_github_com_containerd_containerd_api_types_transfer_registry_proto_rawDescGZIP(), []int{1}
}
func (x *RegistryResolver) GetAuthStream() string {
if x != nil {
return x.AuthStream
}
return ""
}
func (x *RegistryResolver) GetHeaders() map[string]string {
if x != nil {
return x.Headers
}
return nil
}
// AuthRequest is sent as a callback on a stream
type AuthRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// host is the registry host
Host string `protobuf:"bytes,1,opt,name=host,proto3" json:"host,omitempty"`
// reference is the namespace and repository name requested from the registry
Reference string `protobuf:"bytes,2,opt,name=reference,proto3" json:"reference,omitempty"`
// wwwauthenticate contains the HTTP WWW-Authenticate header values returned from the registry
Wwwauthenticate []string `protobuf:"bytes,3,rep,name=wwwauthenticate,proto3" json:"wwwauthenticate,omitempty"`
}
func (x *AuthRequest) Reset() {
*x = AuthRequest{}
if protoimpl.UnsafeEnabled {
mi := &file_github_com_containerd_containerd_api_types_transfer_registry_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *AuthRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*AuthRequest) ProtoMessage() {}
func (x *AuthRequest) ProtoReflect() protoreflect.Message {
mi := &file_github_com_containerd_containerd_api_types_transfer_registry_proto_msgTypes[2]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use AuthRequest.ProtoReflect.Descriptor instead.
func (*AuthRequest) Descriptor() ([]byte, []int) {
return file_github_com_containerd_containerd_api_types_transfer_registry_proto_rawDescGZIP(), []int{2}
}
func (x *AuthRequest) GetHost() string {
if x != nil {
return x.Host
}
return ""
}
func (x *AuthRequest) GetReference() string {
if x != nil {
return x.Reference
}
return ""
}
func (x *AuthRequest) GetWwwauthenticate() []string {
if x != nil {
return x.Wwwauthenticate
}
return nil
}
type AuthResponse struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
AuthType AuthType `protobuf:"varint,1,opt,name=authType,proto3,enum=containerd.types.transfer.AuthType" json:"authType,omitempty"`
Secret string `protobuf:"bytes,2,opt,name=secret,proto3" json:"secret,omitempty"`
Username string `protobuf:"bytes,3,opt,name=username,proto3" json:"username,omitempty"`
ExpireAt *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=expire_at,json=expireAt,proto3" json:"expire_at,omitempty"` // TODO: Stream error
}
func (x *AuthResponse) Reset() {
*x = AuthResponse{}
if protoimpl.UnsafeEnabled {
mi := &file_github_com_containerd_containerd_api_types_transfer_registry_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *AuthResponse) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*AuthResponse) ProtoMessage() {}
func (x *AuthResponse) ProtoReflect() protoreflect.Message {
mi := &file_github_com_containerd_containerd_api_types_transfer_registry_proto_msgTypes[3]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use AuthResponse.ProtoReflect.Descriptor instead.
func (*AuthResponse) Descriptor() ([]byte, []int) {
return file_github_com_containerd_containerd_api_types_transfer_registry_proto_rawDescGZIP(), []int{3}
}
func (x *AuthResponse) GetAuthType() AuthType {
if x != nil {
return x.AuthType
}
return AuthType_NONE
}
func (x *AuthResponse) GetSecret() string {
if x != nil {
return x.Secret
}
return ""
}
func (x *AuthResponse) GetUsername() string {
if x != nil {
return x.Username
}
return ""
}
func (x *AuthResponse) GetExpireAt() *timestamppb.Timestamp {
if x != nil {
return x.ExpireAt
}
return nil
}
var File_github_com_containerd_containerd_api_types_transfer_registry_proto protoreflect.FileDescriptor
var file_github_com_containerd_containerd_api_types_transfer_registry_proto_rawDesc = []byte{
0x0a, 0x42, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e,
0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65,
0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x74, 0x72, 0x61,
0x6e, 0x73, 0x66, 0x65, 0x72, 0x2f, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x2e, 0x70,
0x72, 0x6f, 0x74, 0x6f, 0x12, 0x19, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64,
0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x1a,
0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x22, 0x74, 0x0a, 0x0b, 0x4f, 0x43, 0x49, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x12,
0x1c, 0x0a, 0x09, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01,
0x28, 0x09, 0x52, 0x09, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x47, 0x0a,
0x08, 0x72, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32,
0x2b, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70,
0x65, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x2e, 0x52, 0x65, 0x67, 0x69,
0x73, 0x74, 0x72, 0x79, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x52, 0x08, 0x72, 0x65,
0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x22, 0xc3, 0x01, 0x0a, 0x10, 0x52, 0x65, 0x67, 0x69, 0x73,
0x74, 0x72, 0x79, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x12, 0x1f, 0x0a, 0x0b, 0x61,
0x75, 0x74, 0x68, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
0x52, 0x0a, 0x61, 0x75, 0x74, 0x68, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, 0x52, 0x0a, 0x07,
0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x38, 0x2e,
0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73,
0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74,
0x72, 0x79, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65,
0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73,
0x1a, 0x3a, 0x0a, 0x0c, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79,
0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b,
0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x69, 0x0a, 0x0b,
0x41, 0x75, 0x74, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x68,
0x6f, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x12,
0x1c, 0x0a, 0x09, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01,
0x28, 0x09, 0x52, 0x09, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x28, 0x0a,
0x0f, 0x77, 0x77, 0x77, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65,
0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, 0x77, 0x77, 0x77, 0x61, 0x75, 0x74, 0x68, 0x65,
0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x22, 0xbc, 0x01, 0x0a, 0x0c, 0x41, 0x75, 0x74, 0x68,
0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3f, 0x0a, 0x08, 0x61, 0x75, 0x74, 0x68,
0x54, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x63, 0x6f, 0x6e,
0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x74, 0x72,
0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x54, 0x79, 0x70, 0x65, 0x52,
0x08, 0x61, 0x75, 0x74, 0x68, 0x54, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x65, 0x63,
0x72, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x65, 0x63, 0x72, 0x65,
0x74, 0x12, 0x1a, 0x0a, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20,
0x01, 0x28, 0x09, 0x52, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a,
0x09, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x5f, 0x61, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b,
0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x08, 0x65, 0x78,
0x70, 0x69, 0x72, 0x65, 0x41, 0x74, 0x2a, 0x3e, 0x0a, 0x08, 0x41, 0x75, 0x74, 0x68, 0x54, 0x79,
0x70, 0x65, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b,
0x43, 0x52, 0x45, 0x44, 0x45, 0x4e, 0x54, 0x49, 0x41, 0x4c, 0x53, 0x10, 0x01, 0x12, 0x0b, 0x0a,
0x07, 0x52, 0x45, 0x46, 0x52, 0x45, 0x53, 0x48, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x48, 0x45,
0x41, 0x44, 0x45, 0x52, 0x10, 0x03, 0x42, 0x35, 0x5a, 0x33, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62,
0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f,
0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74,
0x79, 0x70, 0x65, 0x73, 0x2f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x62, 0x06, 0x70,
0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
file_github_com_containerd_containerd_api_types_transfer_registry_proto_rawDescOnce sync.Once
file_github_com_containerd_containerd_api_types_transfer_registry_proto_rawDescData = file_github_com_containerd_containerd_api_types_transfer_registry_proto_rawDesc
)
func file_github_com_containerd_containerd_api_types_transfer_registry_proto_rawDescGZIP() []byte {
file_github_com_containerd_containerd_api_types_transfer_registry_proto_rawDescOnce.Do(func() {
file_github_com_containerd_containerd_api_types_transfer_registry_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_containerd_containerd_api_types_transfer_registry_proto_rawDescData)
})
return file_github_com_containerd_containerd_api_types_transfer_registry_proto_rawDescData
}
var file_github_com_containerd_containerd_api_types_transfer_registry_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
var file_github_com_containerd_containerd_api_types_transfer_registry_proto_msgTypes = make([]protoimpl.MessageInfo, 5)
var file_github_com_containerd_containerd_api_types_transfer_registry_proto_goTypes = []interface{}{
(AuthType)(0), // 0: containerd.types.transfer.AuthType
(*OCIRegistry)(nil), // 1: containerd.types.transfer.OCIRegistry
(*RegistryResolver)(nil), // 2: containerd.types.transfer.RegistryResolver
(*AuthRequest)(nil), // 3: containerd.types.transfer.AuthRequest
(*AuthResponse)(nil), // 4: containerd.types.transfer.AuthResponse
nil, // 5: containerd.types.transfer.RegistryResolver.HeadersEntry
(*timestamppb.Timestamp)(nil), // 6: google.protobuf.Timestamp
}
var file_github_com_containerd_containerd_api_types_transfer_registry_proto_depIdxs = []int32{
2, // 0: containerd.types.transfer.OCIRegistry.resolver:type_name -> containerd.types.transfer.RegistryResolver
5, // 1: containerd.types.transfer.RegistryResolver.headers:type_name -> containerd.types.transfer.RegistryResolver.HeadersEntry
0, // 2: containerd.types.transfer.AuthResponse.authType:type_name -> containerd.types.transfer.AuthType
6, // 3: containerd.types.transfer.AuthResponse.expire_at:type_name -> google.protobuf.Timestamp
4, // [4:4] is the sub-list for method output_type
4, // [4:4] is the sub-list for method input_type
4, // [4:4] is the sub-list for extension type_name
4, // [4:4] is the sub-list for extension extendee
0, // [0:4] is the sub-list for field type_name
}
func init() { file_github_com_containerd_containerd_api_types_transfer_registry_proto_init() }
func file_github_com_containerd_containerd_api_types_transfer_registry_proto_init() {
if File_github_com_containerd_containerd_api_types_transfer_registry_proto != nil {
return
}
if !protoimpl.UnsafeEnabled {
file_github_com_containerd_containerd_api_types_transfer_registry_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*OCIRegistry); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_github_com_containerd_containerd_api_types_transfer_registry_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*RegistryResolver); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_github_com_containerd_containerd_api_types_transfer_registry_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*AuthRequest); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_github_com_containerd_containerd_api_types_transfer_registry_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*AuthResponse); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_github_com_containerd_containerd_api_types_transfer_registry_proto_rawDesc,
NumEnums: 1,
NumMessages: 5,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_github_com_containerd_containerd_api_types_transfer_registry_proto_goTypes,
DependencyIndexes: file_github_com_containerd_containerd_api_types_transfer_registry_proto_depIdxs,
EnumInfos: file_github_com_containerd_containerd_api_types_transfer_registry_proto_enumTypes,
MessageInfos: file_github_com_containerd_containerd_api_types_transfer_registry_proto_msgTypes,
}.Build()
File_github_com_containerd_containerd_api_types_transfer_registry_proto = out.File
file_github_com_containerd_containerd_api_types_transfer_registry_proto_rawDesc = nil
file_github_com_containerd_containerd_api_types_transfer_registry_proto_goTypes = nil
file_github_com_containerd_containerd_api_types_transfer_registry_proto_depIdxs = nil
}

View File

@ -0,0 +1,79 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
syntax = "proto3";
package containerd.types.transfer;
import "google/protobuf/timestamp.proto";
option go_package = "github.com/containerd/containerd/api/types/transfer";
message OCIRegistry {
string reference = 1;
RegistryResolver resolver = 2;
}
message RegistryResolver {
// auth_stream is used to refer to a stream on which auth callbacks may be
// made.
string auth_stream = 1;
// Headers
map<string, string> headers = 2;
// Allow custom hosts dir?
// Force skip verify
// Force HTTP
// CA callback? Client TLS callback?
}
// AuthRequest is sent as a callback on a stream
message AuthRequest {
// host is the registry host
string host = 1;
// reference is the namespace and repository name requested from the registry
string reference = 2;
// wwwauthenticate contains the HTTP WWW-Authenticate header values returned from the registry
repeated string wwwauthenticate = 3;
}
enum AuthType {
NONE = 0;
// CREDENTIALS is used to exchange username/password for access token
// using an oauth or "Docker Registry Token" server
CREDENTIALS = 1;
// REFRESH is used to exchange secret for access token using an oauth
// or "Docker Registry Token" server
REFRESH = 2;
// HEADER is used to set the HTTP Authorization header to secret
// directly for the registry.
// Value should be `<auth-scheme> <authorization-parameters>`
HEADER = 3;
}
message AuthResponse {
AuthType authType = 1;
string secret = 2;
string username = 3;
google.protobuf.Timestamp expire_at = 4;
// TODO: Stream error
}
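A hedged sketch of the callback exchange described above, built only from the generated AuthRequest and AuthResponse types shown earlier in this diff; the host, reference, and credentials are placeholders.
package main

import (
	"fmt"

	transfertypes "github.com/containerd/containerd/api/types/transfer"
)

// answerAuth replies to an AuthRequest received over the auth_stream with a
// CREDENTIALS response; a HEADER response would instead carry the full
// "<auth-scheme> <authorization-parameters>" value in Secret.
func answerAuth(req *transfertypes.AuthRequest) *transfertypes.AuthResponse {
	fmt.Println("auth requested for", req.GetHost(), req.GetReference())
	return &transfertypes.AuthResponse{
		AuthType: transfertypes.AuthType_CREDENTIALS,
		Username: "example-user",     // placeholder credential
		Secret:   "example-password", // placeholder credential
	}
}

func main() {
	resp := answerAuth(&transfertypes.AuthRequest{
		Host:      "registry.example.com",
		Reference: "library/example",
	})
	fmt.Println(resp.GetAuthType(), resp.GetUsername())
}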

View File

@ -0,0 +1,226 @@
//
//Copyright The containerd Authors.
//
//Licensed under the Apache License, Version 2.0 (the "License");
//you may not use this file except in compliance with the License.
//You may obtain a copy of the License at
//
//http://www.apache.org/licenses/LICENSE-2.0
//
//Unless required by applicable law or agreed to in writing, software
//distributed under the License is distributed on an "AS IS" BASIS,
//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//See the License for the specific language governing permissions and
//limitations under the License.
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.28.1
// protoc v3.20.1
// source: github.com/containerd/containerd/api/types/transfer/streaming.proto
package transfer
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
type Data struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"`
}
func (x *Data) Reset() {
*x = Data{}
if protoimpl.UnsafeEnabled {
mi := &file_github_com_containerd_containerd_api_types_transfer_streaming_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *Data) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*Data) ProtoMessage() {}
func (x *Data) ProtoReflect() protoreflect.Message {
mi := &file_github_com_containerd_containerd_api_types_transfer_streaming_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Data.ProtoReflect.Descriptor instead.
func (*Data) Descriptor() ([]byte, []int) {
return file_github_com_containerd_containerd_api_types_transfer_streaming_proto_rawDescGZIP(), []int{0}
}
func (x *Data) GetData() []byte {
if x != nil {
return x.Data
}
return nil
}
type WindowUpdate struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Update int32 `protobuf:"varint,1,opt,name=update,proto3" json:"update,omitempty"`
}
func (x *WindowUpdate) Reset() {
*x = WindowUpdate{}
if protoimpl.UnsafeEnabled {
mi := &file_github_com_containerd_containerd_api_types_transfer_streaming_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *WindowUpdate) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*WindowUpdate) ProtoMessage() {}
func (x *WindowUpdate) ProtoReflect() protoreflect.Message {
mi := &file_github_com_containerd_containerd_api_types_transfer_streaming_proto_msgTypes[1]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use WindowUpdate.ProtoReflect.Descriptor instead.
func (*WindowUpdate) Descriptor() ([]byte, []int) {
return file_github_com_containerd_containerd_api_types_transfer_streaming_proto_rawDescGZIP(), []int{1}
}
func (x *WindowUpdate) GetUpdate() int32 {
if x != nil {
return x.Update
}
return 0
}
var File_github_com_containerd_containerd_api_types_transfer_streaming_proto protoreflect.FileDescriptor
var file_github_com_containerd_containerd_api_types_transfer_streaming_proto_rawDesc = []byte{
0x0a, 0x43, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e,
0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65,
0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x74, 0x72, 0x61,
0x6e, 0x73, 0x66, 0x65, 0x72, 0x2f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x2e,
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x19, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72,
0x64, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72,
0x22, 0x1a, 0x0a, 0x04, 0x44, 0x61, 0x74, 0x61, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61,
0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x22, 0x26, 0x0a, 0x0c,
0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, 0x16, 0x0a, 0x06,
0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x75, 0x70,
0x64, 0x61, 0x74, 0x65, 0x42, 0x35, 0x5a, 0x33, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63,
0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f,
0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70,
0x65, 0x73, 0x2f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x62, 0x06, 0x70, 0x72, 0x6f,
0x74, 0x6f, 0x33,
}
var (
file_github_com_containerd_containerd_api_types_transfer_streaming_proto_rawDescOnce sync.Once
file_github_com_containerd_containerd_api_types_transfer_streaming_proto_rawDescData = file_github_com_containerd_containerd_api_types_transfer_streaming_proto_rawDesc
)
func file_github_com_containerd_containerd_api_types_transfer_streaming_proto_rawDescGZIP() []byte {
file_github_com_containerd_containerd_api_types_transfer_streaming_proto_rawDescOnce.Do(func() {
file_github_com_containerd_containerd_api_types_transfer_streaming_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_containerd_containerd_api_types_transfer_streaming_proto_rawDescData)
})
return file_github_com_containerd_containerd_api_types_transfer_streaming_proto_rawDescData
}
var file_github_com_containerd_containerd_api_types_transfer_streaming_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
var file_github_com_containerd_containerd_api_types_transfer_streaming_proto_goTypes = []interface{}{
(*Data)(nil), // 0: containerd.types.transfer.Data
(*WindowUpdate)(nil), // 1: containerd.types.transfer.WindowUpdate
}
var file_github_com_containerd_containerd_api_types_transfer_streaming_proto_depIdxs = []int32{
0, // [0:0] is the sub-list for method output_type
0, // [0:0] is the sub-list for method input_type
0, // [0:0] is the sub-list for extension type_name
0, // [0:0] is the sub-list for extension extendee
0, // [0:0] is the sub-list for field type_name
}
func init() { file_github_com_containerd_containerd_api_types_transfer_streaming_proto_init() }
func file_github_com_containerd_containerd_api_types_transfer_streaming_proto_init() {
if File_github_com_containerd_containerd_api_types_transfer_streaming_proto != nil {
return
}
if !protoimpl.UnsafeEnabled {
file_github_com_containerd_containerd_api_types_transfer_streaming_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Data); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_github_com_containerd_containerd_api_types_transfer_streaming_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*WindowUpdate); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_github_com_containerd_containerd_api_types_transfer_streaming_proto_rawDesc,
NumEnums: 0,
NumMessages: 2,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_github_com_containerd_containerd_api_types_transfer_streaming_proto_goTypes,
DependencyIndexes: file_github_com_containerd_containerd_api_types_transfer_streaming_proto_depIdxs,
MessageInfos: file_github_com_containerd_containerd_api_types_transfer_streaming_proto_msgTypes,
}.Build()
File_github_com_containerd_containerd_api_types_transfer_streaming_proto = out.File
file_github_com_containerd_containerd_api_types_transfer_streaming_proto_rawDesc = nil
file_github_com_containerd_containerd_api_types_transfer_streaming_proto_goTypes = nil
file_github_com_containerd_containerd_api_types_transfer_streaming_proto_depIdxs = nil
}

View File

@ -0,0 +1,29 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
syntax = "proto3";
package containerd.types.transfer;
option go_package = "github.com/containerd/containerd/api/types/transfer";
message Data {
bytes data = 1;
}
message WindowUpdate {
int32 update = 1;
}
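These frame types are intentionally small; the sketch below only shows how the messages are populated when splitting a payload for the transfer binary stream protocol, while the real windowing logic lives in the transfer streaming packages, not here.
package main

import (
	"fmt"

	transfertypes "github.com/containerd/containerd/api/types/transfer"
)

// chunk splits a payload into Data frames of at most chunkSize bytes each.
func chunk(payload []byte, chunkSize int) []*transfertypes.Data {
	var frames []*transfertypes.Data
	for len(payload) > 0 {
		n := chunkSize
		if n > len(payload) {
			n = len(payload)
		}
		frames = append(frames, &transfertypes.Data{Data: payload[:n]})
		payload = payload[n:]
	}
	return frames
}

func main() {
	frames := chunk([]byte("example payload"), 4)
	// A receiver could acknowledge consumed bytes with a WindowUpdate.
	ack := &transfertypes.WindowUpdate{Update: int32(len(frames[0].GetData()))}
	fmt.Println(len(frames), ack.GetUpdate())
}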

View File

@ -24,6 +24,8 @@ import (
_ "github.com/containerd/containerd/leases/plugin"
_ "github.com/containerd/containerd/metadata/plugin"
_ "github.com/containerd/containerd/pkg/nri/plugin"
_ "github.com/containerd/containerd/plugins/streaming"
_ "github.com/containerd/containerd/plugins/transfer"
_ "github.com/containerd/containerd/runtime/restart/monitor"
_ "github.com/containerd/containerd/runtime/v2"
_ "github.com/containerd/containerd/services/containers"
@ -38,6 +40,8 @@ import (
_ "github.com/containerd/containerd/services/opt"
_ "github.com/containerd/containerd/services/sandbox"
_ "github.com/containerd/containerd/services/snapshots"
_ "github.com/containerd/containerd/services/streaming"
_ "github.com/containerd/containerd/services/tasks"
_ "github.com/containerd/containerd/services/transfer"
_ "github.com/containerd/containerd/services/version"
)

View File

@ -26,6 +26,9 @@ import (
"github.com/containerd/containerd/cmd/ctr/commands"
"github.com/containerd/containerd/images/archive"
"github.com/containerd/containerd/log"
"github.com/containerd/containerd/pkg/transfer"
tarchive "github.com/containerd/containerd/pkg/transfer/archive"
"github.com/containerd/containerd/pkg/transfer/image"
"github.com/containerd/containerd/platforms"
"github.com/urfave/cli"
)
@ -81,6 +84,10 @@ If foobar.tar contains an OCI ref named "latest" and anonymous ref "sha256:deadb
Name: "no-unpack",
Usage: "skip unpacking the images, cannot be used with --discard-unpacked-layers, false by default",
},
cli.BoolTFlag{
Name: "local",
Usage: "run import locally rather than through transfer API",
},
cli.BoolFlag{
Name: "compress-blobs",
Usage: "compress uncompressed blobs when creating manifest (Docker format only)",
@ -98,6 +105,65 @@ If foobar.tar contains an OCI ref named "latest" and anonymous ref "sha256:deadb
platformMatcher platforms.MatchComparer
)
client, ctx, cancel, err := commands.NewClient(context)
if err != nil {
return err
}
defer cancel()
if !context.BoolT("local") {
var opts []image.StoreOpt
prefix := context.String("base-name")
if prefix == "" {
prefix = fmt.Sprintf("import-%s", time.Now().Format("2006-01-02"))
opts = append(opts, image.WithNamePrefix(prefix, false))
} else {
// When provided, filter out references which do not match
opts = append(opts, image.WithNamePrefix(prefix, true))
}
if context.Bool("digests") {
opts = append(opts, image.WithDigestRefs(!context.Bool("skip-digest-for-named")))
}
// TODO: Add platform options
// TODO: Add unpack options
is := image.NewStore(context.String("index-name"), opts...)
var iopts []tarchive.ImportOpt
if context.Bool("compress-blobs") {
iopts = append(iopts, tarchive.WithForceCompression)
}
var r io.ReadCloser
if in == "-" {
r = os.Stdin
} else {
var err error
r, err = os.Open(in)
if err != nil {
return err
}
}
iis := tarchive.NewImageImportStream(r, "", iopts...)
pf, done := ProgressHandler(ctx, os.Stdout)
defer done()
err := client.Transfer(ctx, iis, is, transfer.WithProgress(pf))
closeErr := r.Close()
if err != nil {
return err
}
return closeErr
}
// Local logic
prefix := context.String("base-name")
if prefix == "" {
prefix = fmt.Sprintf("import-%s", time.Now().Format("2006-01-02"))
@ -143,12 +209,6 @@ If foobar.tar contains an OCI ref named "latest" and anonymous ref "sha256:deadb
opts = append(opts, containerd.WithDiscardUnpackedLayers())
}
client, ctx, cancel, err := commands.NewClient(context)
if err != nil {
return err
}
defer cancel()
ctx, done, err := client.WithLease(ctx)
if err != nil {
return err

View File

@ -17,7 +17,11 @@
package images
import (
"context"
"fmt"
"io"
"os"
"strings"
"time"
"github.com/containerd/containerd"
@ -25,6 +29,9 @@ import (
"github.com/containerd/containerd/cmd/ctr/commands/content"
"github.com/containerd/containerd/images"
"github.com/containerd/containerd/log"
"github.com/containerd/containerd/pkg/progress"
"github.com/containerd/containerd/pkg/transfer"
"github.com/containerd/containerd/pkg/transfer/image"
"github.com/containerd/containerd/platforms"
"github.com/opencontainers/image-spec/identity"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
@ -66,6 +73,10 @@ command. As part of this process, we do the following:
Name: "max-concurrent-downloads",
Usage: "Set the max concurrent downloads for each pull",
},
cli.BoolTFlag{
Name: "local",
Usage: "Fetch content from local client rather than using transfer service",
},
),
Action: func(context *cli.Context) error {
var (
@ -81,12 +92,55 @@ command. As part of this process, we do the following:
}
defer cancel()
if !context.BoolT("local") {
ch, err := commands.NewStaticCredentials(ctx, context, ref)
if err != nil {
return err
}
var sopts []image.StoreOpt
if !context.Bool("all-platforms") {
var p []ocispec.Platform
for _, s := range context.StringSlice("platform") {
ps, err := platforms.Parse(s)
if err != nil {
return fmt.Errorf("unable to parse platform %s: %w", s, err)
}
p = append(p, ps)
}
if len(p) == 0 {
p = append(p, platforms.DefaultSpec())
}
sopts = append(sopts, image.WithPlatforms(p...))
}
// TODO: Support unpack for all platforms..?
// Pass in a *?
if context.Bool("metadata-only") {
sopts = append(sopts, image.WithAllMetadata)
// Any with an empty set is None
// TODO: Specify a way to request a non-default platform
//config.PlatformMatcher = platforms.Any()
} else if context.Bool("all-metadata") {
sopts = append(sopts, image.WithAllMetadata)
}
reg := image.NewOCIRegistry(ref, nil, ch)
is := image.NewStore(ref, sopts...)
pf, done := ProgressHandler(ctx, os.Stdout)
defer done()
return client.Transfer(ctx, reg, is, transfer.WithProgress(pf))
}
ctx, done, err := client.WithLease(ctx)
if err != nil {
return err
}
defer done(ctx)
// TODO: Handle this locally via transfer config
config, err := content.NewFetchConfig(ctx, context)
if err != nil {
return err
@ -141,3 +195,265 @@ command. As part of this process, we do the following:
return nil
},
}
type progressNode struct {
transfer.Progress
children []*progressNode
root bool
}
// ProgressHandler returns a transfer.ProgressFunc that continuously renders
// transfer job progress to the given writer, and a function to stop the handler.
func ProgressHandler(ctx context.Context, out io.Writer) (transfer.ProgressFunc, func()) {
ctx, cancel := context.WithCancel(ctx)
var (
fw = progress.NewWriter(out)
start = time.Now()
statuses = map[string]*progressNode{}
roots = []*progressNode{}
progress transfer.ProgressFunc
pc = make(chan transfer.Progress, 1)
status string
closeC = make(chan struct{})
)
progress = func(p transfer.Progress) {
select {
case pc <- p:
case <-ctx.Done():
}
}
done := func() {
cancel()
<-closeC
}
go func() {
defer close(closeC)
for {
select {
case p := <-pc:
if p.Name == "" {
status = p.Event
continue
}
if node, ok := statuses[p.Name]; !ok {
node = &progressNode{
Progress: p,
root: true,
}
if len(p.Parents) == 0 {
roots = append(roots, node)
} else {
var parents []string
for _, parent := range p.Parents {
pStatus, ok := statuses[parent]
if ok {
parents = append(parents, parent)
pStatus.children = append(pStatus.children, node)
node.root = false
}
}
node.Progress.Parents = parents
if node.root {
roots = append(roots, node)
}
}
statuses[p.Name] = node
} else {
if len(node.Progress.Parents) != len(p.Parents) {
var parents []string
var removeRoot bool
for _, parent := range p.Parents {
pStatus, ok := statuses[parent]
if ok {
parents = append(parents, parent)
var found bool
for _, child := range pStatus.children {
if child.Progress.Name == p.Name {
found = true
break
}
}
if !found {
pStatus.children = append(pStatus.children, node)
}
if node.root {
removeRoot = true
}
node.root = false
}
}
p.Parents = parents
// Check if needs to remove from root
if removeRoot {
for i := range roots {
if roots[i] == node {
roots = append(roots[:i], roots[i+1:]...)
break
}
}
}
}
node.Progress = p
}
/*
all := make([]transfer.Progress, 0, len(statuses))
for _, p := range statuses {
all = append(all, p.Progress)
}
sort.Slice(all, func(i, j int) bool {
return all[i].Name < all[j].Name
})
Display(fw, status, all, start)
*/
DisplayHierarchy(fw, status, roots, start)
fw.Flush()
case <-ctx.Done():
return
}
}
}()
return progress, done
}
func DisplayHierarchy(w io.Writer, status string, roots []*progressNode, start time.Time) {
total := displayNode(w, "", roots)
// Print the Status line
fmt.Fprintf(w, "%s\telapsed: %-4.1fs\ttotal: %7.6v\t(%v)\t\n",
status,
time.Since(start).Seconds(),
// TODO(stevvooe): These calculations are actually way off.
// Need to account for previously downloaded data. These
// will basically be right for a download the first time
// but will be skewed if restarting, as it includes the
// data into the start time before.
progress.Bytes(total),
progress.NewBytesPerSecond(total, time.Since(start)))
}
func displayNode(w io.Writer, prefix string, nodes []*progressNode) int64 {
var total int64
for i, node := range nodes {
status := node.Progress
total += status.Progress
pf, cpf := prefixes(i, len(nodes))
if node.root {
pf, cpf = "", ""
}
name := prefix + pf + displayName(status.Name)
switch status.Event {
case "downloading", "uploading":
var bar progress.Bar
if status.Total > 0.0 {
bar = progress.Bar(float64(status.Progress) / float64(status.Total))
}
fmt.Fprintf(w, "%-40.40s\t%-11s\t%40r\t%8.8s/%s\t\n",
name,
status.Event,
bar,
progress.Bytes(status.Progress), progress.Bytes(status.Total))
case "resolving", "waiting":
bar := progress.Bar(0.0)
fmt.Fprintf(w, "%-40.40s\t%-11s\t%40r\t\n",
name,
status.Event,
bar)
case "complete":
bar := progress.Bar(1.0)
fmt.Fprintf(w, "%-40.40s\t%-11s\t%40r\t\n",
name,
status.Event,
bar)
default:
fmt.Fprintf(w, "%-40.40s\t%s\t\n",
name,
status.Event)
}
total += displayNode(w, prefix+cpf, node.children)
}
return total
}
func prefixes(index, length int) (prefix string, childPrefix string) {
if index+1 == length {
prefix = "└──"
childPrefix = " "
} else {
prefix = "├──"
childPrefix = "│ "
}
return
}
func displayName(name string) string {
parts := strings.Split(name, "-")
for i := range parts {
parts[i] = shortenName(parts[i])
}
return strings.Join(parts, " ")
}
func shortenName(name string) string {
if strings.HasPrefix(name, "sha256:") && len(name) == 71 {
return "(" + name[7:19] + ")"
}
return name
}
// Display pretty-prints the download or upload progress statuses.
func Display(w io.Writer, status string, statuses []transfer.Progress, start time.Time) {
var total int64
for _, status := range statuses {
total += status.Progress
switch status.Event {
case "downloading", "uploading":
var bar progress.Bar
if status.Total > 0.0 {
bar = progress.Bar(float64(status.Progress) / float64(status.Total))
}
fmt.Fprintf(w, "%s:\t%s\t%40r\t%8.8s/%s\t\n",
status.Name,
status.Event,
bar,
progress.Bytes(status.Progress), progress.Bytes(status.Total))
case "resolving", "waiting":
bar := progress.Bar(0.0)
fmt.Fprintf(w, "%s:\t%s\t%40r\t\n",
status.Name,
status.Event,
bar)
case "complete", "done":
bar := progress.Bar(1.0)
fmt.Fprintf(w, "%s:\t%s\t%40r\t\n",
status.Name,
status.Event,
bar)
default:
fmt.Fprintf(w, "%s:\t%s\t\n",
status.Name,
status.Event)
}
}
// Print the Status line
fmt.Fprintf(w, "%s\telapsed: %-4.1fs\ttotal: %7.6v\t(%v)\t\n",
status,
time.Since(start).Seconds(),
// TODO(stevvooe): These calculations are actually way off.
// Need to account for previously downloaded data. These
// will basically be right for a download the first time
// but will be skewed if restarting, as it includes the
// data into the start time before.
progress.Bytes(total),
progress.NewBytesPerSecond(total, time.Since(start)))
}

View File

@ -32,6 +32,8 @@ import (
"github.com/containerd/containerd/images"
"github.com/containerd/containerd/log"
"github.com/containerd/containerd/pkg/progress"
"github.com/containerd/containerd/pkg/transfer"
"github.com/containerd/containerd/pkg/transfer/image"
"github.com/containerd/containerd/platforms"
"github.com/containerd/containerd/remotes"
"github.com/containerd/containerd/remotes/docker"
@ -68,6 +70,9 @@ var pushCommand = cli.Command{
}, cli.IntFlag{
Name: "max-concurrent-uploaded-layers",
Usage: "set the max concurrent uploaded layers for each push",
}, cli.BoolTFlag{
Name: "local",
Usage: "push content from local client rather than using transfer service",
}, cli.BoolFlag{
Name: "allow-non-distributable-blobs",
Usage: "allow pushing blobs that are marked as non-distributable",
@ -89,6 +94,24 @@ var pushCommand = cli.Command{
}
defer cancel()
if !context.BoolT("local") {
ch, err := commands.NewStaticCredentials(ctx, context, ref)
if err != nil {
return err
}
if local == "" {
local = ref
}
reg := image.NewOCIRegistry(ref, nil, ch)
is := image.NewStore(local)
pf, done := ProgressHandler(ctx, os.Stdout)
defer done()
return client.Transfer(ctx, is, reg, transfer.WithProgress(pf))
}
if manifest := context.String("manifest"); manifest != "" {
desc.Digest, err = digest.Parse(manifest)
if err != nil {

View File

@ -32,6 +32,7 @@ import (
"github.com/containerd/console"
"github.com/containerd/containerd/log"
"github.com/containerd/containerd/pkg/transfer/image"
"github.com/containerd/containerd/remotes"
"github.com/containerd/containerd/remotes/docker"
"github.com/containerd/containerd/remotes/docker/config"
@ -209,3 +210,50 @@ func NewDebugClientTrace(ctx gocontext.Context) *httptrace.ClientTrace {
},
}
}
type staticCredentials struct {
ref string
username string
secret string
}
// NewStaticCredentials gets credentials from passing in cli context
func NewStaticCredentials(ctx gocontext.Context, clicontext *cli.Context, ref string) (image.CredentialHelper, error) {
username := clicontext.String("user")
var secret string
if i := strings.IndexByte(username, ':'); i > 0 {
secret = username[i+1:]
username = username[0:i]
}
if username != "" {
if secret == "" {
fmt.Printf("Password: ")
var err error
secret, err = passwordPrompt()
if err != nil {
return nil, err
}
fmt.Print("\n")
}
} else if rt := clicontext.String("refresh"); rt != "" {
secret = rt
}
return &staticCredentials{
ref: ref,
username: username,
secret: secret,
}, nil
}
func (sc *staticCredentials) GetCredentials(ctx gocontext.Context, ref, host string) (image.Credentials, error) {
if ref == sc.ref {
return image.Credentials{
Username: sc.username,
Secret: sc.secret,
}, nil
}
return image.Credentials{}, nil
}

207
docs/transfer.md Normal file
View File

@ -0,0 +1,207 @@
# Transfer Service
The transfer service is a simple, flexible service which can be used to transfer artifact objects between a source and destination. The flexible API allows each implementation of the transfer interface to determine whether the transfer between the source and destination is possible. This allows new functionality to be added directly by implementations without versioning the API or requiring other implementations to handle an interface change.
The transfer service is built upon the core ideas put forth by the libchan project: an API with binary streams and data channels as first-class objects is more flexible and opens up a wider variety of use cases without requiring constant protocol and API updates. To accomplish this, the transfer service makes use of the streaming service to allow binary and object streams to be accessible by transfer objects even when going over gRPC and ttrpc.
## Transfer API
The transfer API consists of a single operation which can be called with different objects depending on the intended operation.
In Go, the API looks like:
```go
type Transferrer interface {
Transfer(ctx context.Context, source interface{}, destination interface{}, opts ...Opt) error
}
```
The proto API looks like:
```proto
service Transfer {
rpc Transfer(TransferRequest) returns (google.protobuf.Empty);
}
message TransferRequest {
google.protobuf.Any source = 1;
google.protobuf.Any destination = 2;
// + options
}
```
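As a rough sketch of how a Go client might drive this API, a pull is simply an OCI registry source transferred into an image store destination. This sketch assumes the `image` and `transfer` helper packages added alongside the service and a containerd daemon at the default socket; it is illustrative rather than canonical client usage.
```go
package main

import (
	"context"
	"log"

	"github.com/containerd/containerd"
	"github.com/containerd/containerd/namespaces"
	"github.com/containerd/containerd/pkg/transfer"
	"github.com/containerd/containerd/pkg/transfer/image"
)

func main() {
	client, err := containerd.New("/run/containerd/containerd.sock")
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	ctx := namespaces.WithNamespace(context.Background(), "default")
	ref := "docker.io/library/busybox:latest"

	// A pull expressed as a transfer: registry source -> image store destination.
	reg := image.NewOCIRegistry(ref, nil, nil) // no extra headers or credential helper
	store := image.NewStore(ref)

	// Optional progress callback delivered over the streaming service.
	pf := func(p transfer.Progress) { log.Printf("%s %s", p.Event, p.Name) }

	if err := client.Transfer(ctx, reg, store, transfer.WithProgress(pf)); err != nil {
		log.Fatal(err)
	}
}
```
The same call shape covers push, import, and export; only the source and destination objects change.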
## Transfer Objects (Sources and Destinations)
## Transfer Operations
| Source | Destination | Description | Local Implementation Version |
|-------------|-------------|-------------|-----------------------|
| Registry | Image Store | "pull" | 1.7 |
| Image Store | Registry | "push" | 1.7 |
| Object stream (Archive) | Image Store | "import" | 1.7 |
| Image Store | Object stream (Archive) | "export" | 1.7 (in progress) |
| Object stream (Layer) | Mount/Snapshot | "unpack" | Not implemented |
| Mount/Snapshot | Object stream (Layer) | "diff" | Not implemented |
| Image Store | Image Store | "tag" | Not implemented |
| Registry | Registry | mirror registry image | Not implemented |
### Local containerd daemon support
containerd has a single built-in transfer plugin which implements the most basic transfer operations. The local plugin can be configured the same way as other containerd plugins:
```
[plugins]
[plugins."io.containerd.transfer.v1"]
```
## Diagram
Pull Components
```mermaid
flowchart TD
subgraph containerd Client
Client(Client)
end
subgraph containerd
subgraph Service
Streaming(Streaming Service)
Transfer(Transfer Service)
end
subgraph Transfer objects
RS(Registry Source)
ISD(Image Store Destination)
end
subgraph Backend
R(Resolver)
CS(ContentStore)
IS(Image Store)
S(Snapshotter)
end
end
Reg(((Remote Registry)))
Client-- Create Stream --> Streaming
Client-- Pull via Transfer --> Transfer
Transfer-- Get Stream --> Streaming
Transfer-- Progress via Stream--> Client
Transfer-->RS
Transfer-->ISD
Transfer-->CS
RS-->R
ISD-->IS
R-->Reg
ISD-->CS
ISD-->S
```
## Streaming
Streaming is used by the transfer service to send or receive data streams as part of an operation as well as to handle callbacks (synchronous or asynchronous). The streaming protocol should be invisible to the client Go interface. Object types such as funcs, readers, and writers can be transparently converted to the streaming protocol when going over RPC. The client and server interfaces can remain unchanged while the proto marshaling and unmarshaling need to be aware of the streaming protocol and have access to the stream manager. Streams are created by clients using the client-side stream manager and sent over proto RPC as string stream identifiers. Server implementations of services can then look up the streams by identifier using the server-side stream manager.
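A minimal sketch of the identifier exchange, using the `StreamCreator` and `StreamGetter` interfaces from `github.com/containerd/containerd/pkg/streaming` added in this change (the proto plumbing that carries the id is omitted):
```go
// Client side: create the stream up front; its id is then embedded in the
// request proto as a plain string field.
func attachStream(ctx context.Context, sc streaming.StreamCreator, id string) (streaming.Stream, error) {
	return sc.Create(ctx, id)
}

// Server side: resolve the id carried in the request back into a stream.
func resolveStream(ctx context.Context, sg streaming.StreamGetter, id string) (streaming.Stream, error) {
	return sg.Get(ctx, id)
}
```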
### Progress
Progress is an asynchronous callback sent from the server to the client. It is normally represented in the Go interface as a simple callback function, which the client implements and the server calls.
In Go, progress uses the following types:
```go
type ProgressFunc func(Progress)
type Progress struct {
Event string
Name string
Parents []string
Progress int64
Total int64
}
```
The proto message type sent over the stream is:
```proto
message Progress {
string event = 1;
string name = 2;
repeated string parents = 3;
int64 progress = 4;
int64 total = 5;
}
```
Progress can be passed along as a transfer option to receive progress updates for any transfer operation. The progress events may differ based on the operation.
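For example, a minimal sketch of wiring a progress callback into a transfer; `client`, `source`, and `destination` are placeholders for any transfer client and objects:
```go
pf := func(p transfer.Progress) {
	// Event describes the state (e.g. "waiting", "downloading", "complete"),
	// Name identifies the object, and Parents carries the hierarchy.
	fmt.Printf("%-12s %s (%d/%d)\n", p.Event, p.Name, p.Progress, p.Total)
}
err := client.Transfer(ctx, source, destination, transfer.WithProgress(pf))
```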
### Binary Streams
Transfer objects may also use `io.Reader` and `io.WriteCloser` directly.
The bytes are transferred over the stream using two simple proto message types:
```proto
message Data {
bytes data = 1;
}
message WindowUpdate {
int32 update = 1;
}
```
The sender sends the `Data` message and the receiver sends the `WindowUpdate` message. When the client is sending an `io.Reader`, the client is the sender and server is the receiver. When a client sends an `io.WriteCloser`, the server is the sender and the client is the receiver.
Binary streams are used for import (sending an `io.Reader`) and export (sending an `io.WriteCloser`).
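A minimal sketch combining both directions, mirroring the echo integration test added in this PR (`archive` here is `github.com/containerd/containerd/pkg/transfer/archive`):
```go
// echo imports a tar stream from r and exports it back into w,
// round-tripping the bytes through the transfer service.
func echo(ctx context.Context, client *containerd.Client, r io.Reader, w io.WriteCloser) error {
	in := archive.NewImageImportStream(r, "application/octet-stream")
	out := archive.NewImageExportStream(w, "application/octet-stream")
	return client.Transfer(ctx, in, out)
}
```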
### Credentials
Credentials are handled as a synchronous callback from the server to the client. The callback is made when the server encounters an authorization request from a registry.
The Go interface for a credential helper used by a transfer object looks like:
```go
type CredentialHelper interface {
GetCredentials(ctx context.Context, ref, host string) (Credentials, error)
}
type Credentials struct {
Host string
Username string
Secret string
Header string
}
```
It is sent over a stream using these proto messages:
```proto
// AuthRequest is sent as a callback on a stream
message AuthRequest {
// host is the registry host
string host = 1;
// reference is the namespace and repository name requested from the registry
string reference = 2;
// wwwauthenticate is the HTTP WWW-Authenticate header values returned from the registry
repeated string wwwauthenticate = 3;
}
enum AuthType {
NONE = 0;
// CREDENTIALS is used to exchange username/password for access token
// using an oauth or "Docker Registry Token" server
CREDENTIALS = 1;
// REFRESH is used to exchange secret for access token using an oauth
// or "Docker Registry Token" server
REFRESH = 2;
// HEADER is used to set the HTTP Authorization header to secret
// directly for the registry.
// Value should be `<auth-scheme> <authorization-parameters>`
HEADER = 3;
}
message AuthResponse {
AuthType authType = 1;
string secret = 2;
string username = 3;
google.protobuf.Timestamp expire_at = 4;
}
```
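A credential helper is simply an implementation of the Go interface above. A minimal sketch using the `image` package types (the `ctr` changes in this PR ship a similar static helper):
```go
type staticCreds struct {
	ref, username, secret string
}

func (s *staticCreds) GetCredentials(ctx context.Context, ref, host string) (image.Credentials, error) {
	if ref == s.ref {
		return image.Credentials{Username: s.username, Secret: s.secret}, nil
	}
	return image.Credentials{}, nil
}
```
The helper is passed to the registry source, for example `image.NewOCIRegistry(ref, nil, &staticCreds{...})`, and the server calls back over the auth stream whenever the registry requires authorization.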

View File

@ -41,6 +41,9 @@ func AddRefPrefix(image string) func(string) string {
// a full reference.
func refTranslator(image string, checkPrefix bool) func(string) string {
return func(ref string) string {
if image == "" {
return ""
}
// Check if ref is full reference
if strings.ContainsAny(ref, "/:@") {
// If not prefixed, don't include image

View File

@ -0,0 +1,95 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package client
import (
"bytes"
"context"
"fmt"
"io"
"testing"
"github.com/containerd/containerd"
"github.com/containerd/containerd/pkg/transfer/archive"
)
func TestTransferEcho(t *testing.T) {
client, err := newClient(t, address)
if err != nil {
t.Fatal(err)
}
defer client.Close()
ctx, cancel := testContext(t)
defer cancel()
t.Run("ImportExportEchoBig", newImportExportEcho(ctx, client, bytes.Repeat([]byte("somecontent"), 17*1024)))
t.Run("ImportExportEchoSmall", newImportExportEcho(ctx, client, []byte("somecontent")))
t.Run("ImportExportEchoEmpty", newImportExportEcho(ctx, client, []byte("")))
}
func newImportExportEcho(ctx context.Context, client *containerd.Client, expected []byte) func(*testing.T) {
return func(t *testing.T) {
testBuf := newWaitBuffer()
err := client.Transfer(ctx, archive.NewImageImportStream(bytes.NewReader(expected), "application/octet-stream"), archive.NewImageExportStream(testBuf, "application/octet-stream"))
if err != nil {
t.Fatal(err)
}
if !bytes.Equal(expected, testBuf.Bytes()) {
t.Fatalf("Bytes did not match\n\tActual: %v\n\tExpected: %v", displayBytes(testBuf.Bytes()), displayBytes(expected))
}
}
}
type WriteBytesCloser interface {
io.WriteCloser
Bytes() []byte
}
func newWaitBuffer() WriteBytesCloser {
return &waitBuffer{
Buffer: bytes.NewBuffer(nil),
closed: make(chan struct{}),
}
}
type waitBuffer struct {
*bytes.Buffer
closed chan struct{}
}
func (wb *waitBuffer) Close() error {
select {
case <-wb.closed:
default:
close(wb.closed)
}
return nil
}
func (wb *waitBuffer) Bytes() []byte {
<-wb.closed
return wb.Buffer.Bytes()
}
func displayBytes(b []byte) string {
if len(b) > 40 {
skipped := len(b) - 20
return fmt.Sprintf("%s...(skipped %d)...%s", b[:10], skipped, b[len(b)-10:])
}
return string(b)
}

View File

@ -46,6 +46,8 @@ const (
ResourceIngest
// resourceEnd is the end of specified resource types
resourceEnd
// ResourceStream specifies a stream
ResourceStream
)
const (

View File

@ -0,0 +1,47 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package streaming
import (
"context"
"github.com/containerd/typeurl"
)
type StreamManager interface {
StreamGetter
Register(context.Context, string, Stream) error
}
type StreamGetter interface {
Get(context.Context, string) (Stream, error)
}
type StreamCreator interface {
Create(context.Context, string) (Stream, error)
}
type Stream interface {
// Send sends the object on the stream
Send(typeurl.Any) error
// Recv receives an object on the stream
Recv() (typeurl.Any, error)
// Close closes the stream
Close() error
}

View File

@ -0,0 +1,94 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package archive
import (
"context"
"io"
transfertypes "github.com/containerd/containerd/api/types/transfer"
"github.com/containerd/containerd/log"
"github.com/containerd/containerd/pkg/streaming"
"github.com/containerd/containerd/pkg/transfer/plugins"
tstreaming "github.com/containerd/containerd/pkg/transfer/streaming"
"github.com/containerd/typeurl"
)
func init() {
// TODO: Move this to separate package?
plugins.Register(&transfertypes.ImageExportStream{}, &ImageExportStream{})
plugins.Register(&transfertypes.ImageImportStream{}, &ImageImportStream{})
}
// NewImageExportStream returns an image exporter via tar stream
// TODO: Add export options
func NewImageExportStream(stream io.WriteCloser, mediaType string) *ImageExportStream {
return &ImageExportStream{
stream: stream,
mediaType: mediaType,
}
}
type ImageExportStream struct {
stream io.WriteCloser
mediaType string
}
func (iis *ImageExportStream) ExportStream(context.Context) (io.WriteCloser, string, error) {
return iis.stream, iis.mediaType, nil
}
func (iis *ImageExportStream) MarshalAny(ctx context.Context, sm streaming.StreamCreator) (typeurl.Any, error) {
sid := tstreaming.GenerateID("export")
stream, err := sm.Create(ctx, sid)
if err != nil {
return nil, err
}
// Receive stream and copy to writer
go func() {
if _, err := io.Copy(iis.stream, tstreaming.ReceiveStream(ctx, stream)); err != nil {
log.G(ctx).WithError(err).WithField("streamid", sid).Errorf("error copying stream")
}
iis.stream.Close()
}()
s := &transfertypes.ImageExportStream{
Stream: sid,
MediaType: iis.mediaType,
}
return typeurl.MarshalAny(s)
}
func (iis *ImageExportStream) UnmarshalAny(ctx context.Context, sm streaming.StreamGetter, any typeurl.Any) error {
var s transfertypes.ImageExportStream
if err := typeurl.UnmarshalTo(any, &s); err != nil {
return err
}
stream, err := sm.Get(ctx, s.Stream)
if err != nil {
log.G(ctx).WithError(err).WithField("stream", s.Stream).Debug("failed to get export stream")
return err
}
iis.stream = tstreaming.WriteByteStream(ctx, stream)
iis.mediaType = s.MediaType
return nil
}

View File

@ -0,0 +1,103 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package archive
import (
"context"
"io"
transferapi "github.com/containerd/containerd/api/types/transfer"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/images/archive"
"github.com/containerd/containerd/log"
"github.com/containerd/containerd/pkg/streaming"
tstreaming "github.com/containerd/containerd/pkg/transfer/streaming"
"github.com/containerd/typeurl"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)
type ImportOpt func(*ImageImportStream)
func WithForceCompression(s *ImageImportStream) {
s.forceCompress = true
}
// NewImageImportStream returns an image importer via tar stream
func NewImageImportStream(stream io.Reader, mediaType string, opts ...ImportOpt) *ImageImportStream {
s := &ImageImportStream{
stream: stream,
mediaType: mediaType,
}
for _, opt := range opts {
opt(s)
}
return s
}
type ImageImportStream struct {
stream io.Reader
mediaType string
forceCompress bool
}
func (iis *ImageImportStream) ImportStream(context.Context) (io.Reader, string, error) {
return iis.stream, iis.mediaType, nil
}
func (iis *ImageImportStream) Import(ctx context.Context, store content.Store) (ocispec.Descriptor, error) {
var opts []archive.ImportOpt
if iis.forceCompress {
opts = append(opts, archive.WithImportCompression())
}
return archive.ImportIndex(ctx, store, iis.stream, opts...)
}
func (iis *ImageImportStream) MarshalAny(ctx context.Context, sm streaming.StreamCreator) (typeurl.Any, error) {
sid := tstreaming.GenerateID("import")
stream, err := sm.Create(ctx, sid)
if err != nil {
return nil, err
}
tstreaming.SendStream(ctx, iis.stream, stream)
s := &transferapi.ImageImportStream{
Stream: sid,
MediaType: iis.mediaType,
ForceCompress: iis.forceCompress,
}
return typeurl.MarshalAny(s)
}
func (iis *ImageImportStream) UnmarshalAny(ctx context.Context, sm streaming.StreamGetter, any typeurl.Any) error {
var s transferapi.ImageImportStream
if err := typeurl.UnmarshalTo(any, &s); err != nil {
return err
}
stream, err := sm.Get(ctx, s.Stream)
if err != nil {
log.G(ctx).WithError(err).WithField("stream", s.Stream).Debug("failed to get import stream")
return err
}
iis.stream = tstreaming.ReceiveStream(ctx, stream)
iis.mediaType = s.MediaType
iis.forceCompress = s.ForceCompress
return nil
}

View File

@ -0,0 +1,341 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package image
import (
"context"
"fmt"
"github.com/containerd/containerd/api/types"
transfertypes "github.com/containerd/containerd/api/types/transfer"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/images"
"github.com/containerd/containerd/images/archive"
"github.com/containerd/containerd/pkg/streaming"
"github.com/containerd/containerd/pkg/transfer/plugins"
"github.com/containerd/containerd/pkg/unpack"
"github.com/containerd/containerd/platforms"
"github.com/containerd/containerd/remotes"
"github.com/containerd/typeurl"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)
func init() {
// TODO: Move this to separate package?
plugins.Register(&transfertypes.ImageStore{}, &Store{}) // TODO: Rename ImageStoreDestination
}
type Store struct {
imageName string
imageLabels map[string]string
platforms []ocispec.Platform
allMetadata bool
labelMap func(ocispec.Descriptor) []string
manifestLimit int
//import image options
namePrefix string
checkPrefix bool
digestRefs bool
alwaysDigest bool
unpacks []UnpackConfiguration
}
// UnpackConfiguration specifies the platform and snapshotter to use for unpacking.
// If the snapshotter is not specified, the default for the platform will be used.
type UnpackConfiguration struct {
Platform ocispec.Platform
Snapshotter string
}
// StoreOpt defines options when configuring an image store source or destination
type StoreOpt func(*Store)
// WithImageLabels are the image labels to apply to a new image
func WithImageLabels(labels map[string]string) StoreOpt {
return func(s *Store) {
s.imageLabels = labels
}
}
// WithPlatforms specifies which platforms to fetch content for
func WithPlatforms(p ...ocispec.Platform) StoreOpt {
return func(s *Store) {
s.platforms = append(s.platforms, p...)
}
}
// WithManifestLimit defines the max number of manifests to fetch
func WithManifestLimit(limit int) StoreOpt {
return func(s *Store) {
s.manifestLimit = limit
}
}
func WithAllMetadata(s *Store) {
s.allMetadata = true
}
// WithNamePrefix sets the name prefix for imported images. If check
// is enabled, only images matching the prefix are stored.
func WithNamePrefix(prefix string, check bool) StoreOpt {
return func(s *Store) {
s.namePrefix = prefix
s.checkPrefix = check
}
}
// WithDigestRefs enables digest refs for imported images. If always
// is enabled, digest refs are added even when a non-digest image name
// is also added for the same image.
func WithDigestRefs(always bool) StoreOpt {
return func(s *Store) {
s.digestRefs = true
s.alwaysDigest = always
}
}
// WithUnpack specifies a platform to unpack for and an optional snapshotter to use
func WithUnpack(p ocispec.Platform, snapshotter string) StoreOpt {
return func(s *Store) {
s.unpacks = append(s.unpacks, UnpackConfiguration{
Platform: p,
Snapshotter: snapshotter,
})
}
}
// NewStore creates a new image store source or destination
func NewStore(image string, opts ...StoreOpt) *Store {
s := &Store{
imageName: image,
}
for _, opt := range opts {
opt(s)
}
return s
}
func (is *Store) String() string {
return fmt.Sprintf("Local Image Store (%s)", is.imageName)
}
func (is *Store) ImageFilter(h images.HandlerFunc, cs content.Store) images.HandlerFunc {
var p platforms.MatchComparer
if len(is.platforms) == 0 {
p = platforms.All
} else {
p = platforms.Ordered(is.platforms...)
}
h = images.SetChildrenMappedLabels(cs, h, is.labelMap)
if is.allMetadata {
// Filter manifests by platforms but allow to handle manifest
// and configuration for not-target platforms
h = remotes.FilterManifestByPlatformHandler(h, p)
} else {
// Filter children by platforms if specified.
h = images.FilterPlatforms(h, p)
}
// Sort and limit manifests if a finite number is needed
if is.manifestLimit > 0 {
h = images.LimitManifests(h, p, is.manifestLimit)
}
return h
}
func (is *Store) Store(ctx context.Context, desc ocispec.Descriptor, store images.Store) (images.Image, error) {
img := images.Image{
Name: is.imageName,
Target: desc,
Labels: is.imageLabels,
}
// Handle imported image names
if refType, ok := desc.Annotations["io.containerd.import.ref-type"]; ok {
var nameT func(string) string
if is.checkPrefix {
nameT = archive.FilterRefPrefix(is.namePrefix)
} else {
nameT = archive.AddRefPrefix(is.namePrefix)
}
name := imageName(desc.Annotations, nameT)
switch refType {
case "name":
if name == "" {
return images.Image{}, fmt.Errorf("no image name: %w", errdefs.ErrNotFound)
}
img.Name = name
case "digest":
if !is.digestRefs || (!is.alwaysDigest && name != "") {
return images.Image{}, fmt.Errorf("no digest refs: %w", errdefs.ErrNotFound)
}
img.Name = fmt.Sprintf("%s@%s", is.namePrefix, desc.Digest)
default:
return images.Image{}, fmt.Errorf("ref type not supported: %w", errdefs.ErrInvalidArgument)
}
delete(desc.Annotations, "io.containerd.import.ref-type")
} else if img.Name == "" {
// No valid image combination found
return images.Image{}, fmt.Errorf("no image name found: %w", errdefs.ErrNotFound)
}
for {
if created, err := store.Create(ctx, img); err != nil {
if !errdefs.IsAlreadyExists(err) {
return images.Image{}, err
}
updated, err := store.Update(ctx, img)
if err != nil {
// if image was removed, try create again
if errdefs.IsNotFound(err) {
continue
}
return images.Image{}, err
}
img = updated
} else {
img = created
}
return img, nil
}
}
func (is *Store) Get(ctx context.Context, store images.Store) (images.Image, error) {
return store.Get(ctx, is.imageName)
}
func (is *Store) UnpackPlatforms() []unpack.Platform {
unpacks := make([]unpack.Platform, len(is.unpacks))
for i, uc := range is.unpacks {
unpacks[i].SnapshotterKey = uc.Snapshotter
unpacks[i].Platform = platforms.Only(uc.Platform)
}
return unpacks
}
func (is *Store) MarshalAny(context.Context, streaming.StreamCreator) (typeurl.Any, error) {
//unpack.Platform
s := &transfertypes.ImageStore{
Name: is.imageName,
Labels: is.imageLabels,
ManifestLimit: uint32(is.manifestLimit),
AllMetadata: is.allMetadata,
Platforms: platformsToProto(is.platforms),
Prefix: is.namePrefix,
CheckPrefix: is.checkPrefix,
DigestRefs: is.digestRefs,
AlwaysDigest: is.alwaysDigest,
Unpacks: unpackToProto(is.unpacks),
}
return typeurl.MarshalAny(s)
}
func (is *Store) UnmarshalAny(ctx context.Context, sm streaming.StreamGetter, a typeurl.Any) error {
var s transfertypes.ImageStore
if err := typeurl.UnmarshalTo(a, &s); err != nil {
return err
}
is.imageName = s.Name
is.imageLabels = s.Labels
is.manifestLimit = int(s.ManifestLimit)
is.allMetadata = s.AllMetadata
is.platforms = platformFromProto(s.Platforms)
is.namePrefix = s.Prefix
is.checkPrefix = s.CheckPrefix
is.digestRefs = s.DigestRefs
is.alwaysDigest = s.AlwaysDigest
is.unpacks = unpackFromProto(s.Unpacks)
return nil
}
func platformsToProto(platforms []ocispec.Platform) []*types.Platform {
ap := make([]*types.Platform, len(platforms))
for i := range platforms {
p := types.Platform{
OS: platforms[i].OS,
Architecture: platforms[i].Architecture,
Variant: platforms[i].Variant,
}
ap[i] = &p
}
return ap
}
func platformFromProto(platforms []*types.Platform) []ocispec.Platform {
op := make([]ocispec.Platform, len(platforms))
for i := range platforms {
op[i].OS = platforms[i].OS
op[i].Architecture = platforms[i].Architecture
op[i].Variant = platforms[i].Variant
}
return op
}
func unpackToProto(uc []UnpackConfiguration) []*transfertypes.UnpackConfiguration {
auc := make([]*transfertypes.UnpackConfiguration, len(uc))
for i := range uc {
p := types.Platform{
OS: uc[i].Platform.OS,
Architecture: uc[i].Platform.Architecture,
Variant: uc[i].Platform.Variant,
}
auc[i] = &transfertypes.UnpackConfiguration{
Platform: &p,
Snapshotter: uc[i].Snapshotter,
}
}
return auc
}
func unpackFromProto(auc []*transfertypes.UnpackConfiguration) []UnpackConfiguration {
uc := make([]UnpackConfiguration, len(auc))
for i := range auc {
uc[i].Snapshotter = auc[i].Snapshotter
if auc[i].Platform != nil {
uc[i].Platform.OS = auc[i].Platform.OS
uc[i].Platform.Architecture = auc[i].Platform.Architecture
uc[i].Platform.Variant = auc[i].Platform.Variant
}
}
return uc
}
func imageName(annotations map[string]string, ociCleanup func(string) string) string {
name := annotations[images.AnnotationImageName]
if name != "" {
return name
}
name = annotations[ocispec.AnnotationRefName]
if name != "" {
if ociCleanup != nil {
name = ociCleanup(name)
}
}
return name
}

View File

@ -0,0 +1,293 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package image
import (
"context"
"errors"
"fmt"
"io"
"net/http"
"strings"
"sync"
transfertypes "github.com/containerd/containerd/api/types/transfer"
"github.com/containerd/containerd/log"
"github.com/containerd/containerd/pkg/streaming"
"github.com/containerd/containerd/pkg/transfer"
"github.com/containerd/containerd/pkg/transfer/plugins"
tstreaming "github.com/containerd/containerd/pkg/transfer/streaming"
"github.com/containerd/containerd/remotes"
"github.com/containerd/containerd/remotes/docker"
"github.com/containerd/typeurl"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)
func init() {
// TODO: Move this to separate package?
plugins.Register(&transfertypes.OCIRegistry{}, &OCIRegistry{})
}
// NewOCIRegistry initializes an OCIRegistry with hosts, an authorizer callback, and headers
func NewOCIRegistry(ref string, headers http.Header, creds CredentialHelper) *OCIRegistry {
// Create an authorizer
var aopts []docker.AuthorizerOpt
if creds != nil {
// TODO: Support bearer
aopts = append(aopts, docker.WithAuthCreds(func(host string) (string, string, error) {
c, err := creds.GetCredentials(context.Background(), ref, host)
if err != nil {
return "", "", err
}
return c.Username, c.Secret, nil
}))
}
ropts := []docker.RegistryOpt{
docker.WithAuthorizer(docker.NewDockerAuthorizer(aopts...)),
}
// TODO: Apply local configuration, maybe dynamically create resolver when requested
resolver := docker.NewResolver(docker.ResolverOptions{
Hosts: docker.ConfigureDefaultRegistries(ropts...),
Headers: headers,
})
return &OCIRegistry{
reference: ref,
headers: headers,
creds: creds,
resolver: resolver,
}
}
// From stream
type CredentialHelper interface {
GetCredentials(ctx context.Context, ref, host string) (Credentials, error)
}
type Credentials struct {
Host string
Username string
Secret string
Header string
}
// OCI
type OCIRegistry struct {
reference string
headers http.Header
creds CredentialHelper
resolver remotes.Resolver
// This could be an interface which returns resolver?
// Resolver could also be a plug-able interface, to call out to a program to fetch?
}
func (r *OCIRegistry) String() string {
return fmt.Sprintf("OCI Registry (%s)", r.reference)
}
func (r *OCIRegistry) Image() string {
return r.reference
}
func (r *OCIRegistry) Resolve(ctx context.Context) (name string, desc ocispec.Descriptor, err error) {
return r.resolver.Resolve(ctx, r.reference)
}
func (r *OCIRegistry) Fetcher(ctx context.Context, ref string) (transfer.Fetcher, error) {
return r.resolver.Fetcher(ctx, ref)
}
func (r *OCIRegistry) Pusher(ctx context.Context, desc ocispec.Descriptor) (transfer.Pusher, error) {
var ref = r.reference
// Annotate ref with digest to push only the tag for a single digest
if !strings.Contains(ref, "@") {
ref = ref + "@" + desc.Digest.String()
}
return r.resolver.Pusher(ctx, ref)
}
func (r *OCIRegistry) MarshalAny(ctx context.Context, sm streaming.StreamCreator) (typeurl.Any, error) {
res := &transfertypes.RegistryResolver{}
if r.headers != nil {
res.Headers = map[string]string{}
for k := range r.headers {
res.Headers[k] = r.headers.Get(k)
}
}
if r.creds != nil {
sid := tstreaming.GenerateID("creds")
stream, err := sm.Create(ctx, sid)
if err != nil {
return nil, err
}
go func() {
// Check for context cancellation as well
for {
select {
case <-ctx.Done():
return
default:
}
req, err := stream.Recv()
if err != nil {
// If not EOF, log error
return
}
var s transfertypes.AuthRequest
if err := typeurl.UnmarshalTo(req, &s); err != nil {
log.G(ctx).WithError(err).Error("failed to unmarshal credential request")
continue
}
creds, err := r.creds.GetCredentials(ctx, s.Reference, s.Host)
if err != nil {
log.G(ctx).WithError(err).Error("failed to get credentials")
continue
}
var resp transfertypes.AuthResponse
if creds.Header != "" {
resp.AuthType = transfertypes.AuthType_HEADER
resp.Secret = creds.Header
} else if creds.Username != "" {
resp.AuthType = transfertypes.AuthType_CREDENTIALS
resp.Username = creds.Username
resp.Secret = creds.Secret
} else {
resp.AuthType = transfertypes.AuthType_REFRESH
resp.Secret = creds.Secret
}
a, err := typeurl.MarshalAny(&resp)
if err != nil {
log.G(ctx).WithError(err).Error("failed to marshal credential response")
continue
}
if err := stream.Send(a); err != nil {
if !errors.Is(err, io.EOF) {
log.G(ctx).WithError(err).Error("unexpected send failure")
}
return
}
}
}()
res.AuthStream = sid
}
s := &transfertypes.OCIRegistry{
Reference: r.reference,
Resolver: res,
}
return typeurl.MarshalAny(s)
}
func (r *OCIRegistry) UnmarshalAny(ctx context.Context, sm streaming.StreamGetter, a typeurl.Any) error {
var (
s transfertypes.OCIRegistry
ropts []docker.RegistryOpt
aopts []docker.AuthorizerOpt
)
if err := typeurl.UnmarshalTo(a, &s); err != nil {
return err
}
if s.Resolver != nil {
if sid := s.Resolver.AuthStream; sid != "" {
stream, err := sm.Get(ctx, sid)
if err != nil {
log.G(ctx).WithError(err).WithField("stream", sid).Debug("failed to get auth stream")
return err
}
r.creds = &credCallback{
stream: stream,
}
aopts = append(aopts, docker.WithAuthCreds(func(host string) (string, string, error) {
c, err := r.creds.GetCredentials(context.Background(), s.Reference, host)
if err != nil {
return "", "", err
}
return c.Username, c.Secret, nil
}))
}
r.headers = http.Header{}
for k, v := range s.Resolver.Headers {
r.headers.Add(k, v)
}
}
authorizer := docker.NewDockerAuthorizer(aopts...)
ropts = append(ropts, docker.WithAuthorizer(authorizer))
r.reference = s.Reference
r.resolver = docker.NewResolver(docker.ResolverOptions{
Hosts: docker.ConfigureDefaultRegistries(ropts...),
Headers: r.headers,
})
return nil
}
type credCallback struct {
sync.Mutex
stream streaming.Stream
}
func (cc *credCallback) GetCredentials(ctx context.Context, ref, host string) (Credentials, error) {
cc.Lock()
defer cc.Unlock()
ar := &transfertypes.AuthRequest{
Host: host,
Reference: ref,
}
any, err := typeurl.MarshalAny(ar)
if err != nil {
return Credentials{}, err
}
if err := cc.stream.Send(any); err != nil {
return Credentials{}, err
}
resp, err := cc.stream.Recv()
if err != nil {
return Credentials{}, err
}
var s transfertypes.AuthResponse
if err := typeurl.UnmarshalTo(resp, &s); err != nil {
return Credentials{}, err
}
creds := Credentials{
Host: host,
}
switch s.AuthType {
case transfertypes.AuthType_CREDENTIALS:
creds.Username = s.Username
creds.Secret = s.Secret
case transfertypes.AuthType_REFRESH:
creds.Secret = s.Secret
case transfertypes.AuthType_HEADER:
creds.Header = s.Secret
}
return creds, nil
}

View File

@ -0,0 +1,126 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package local
import (
"context"
"encoding/json"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/images"
"github.com/containerd/containerd/pkg/transfer"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)
func (ts *localTransferService) importStream(ctx context.Context, i transfer.ImageImporter, is transfer.ImageStorer, tops *transfer.Config) error {
ctx, done, err := ts.withLease(ctx)
if err != nil {
return err
}
defer done(ctx)
if tops.Progress != nil {
tops.Progress(transfer.Progress{
Event: "Importing",
})
}
index, err := i.Import(ctx, ts.content)
if err != nil {
return err
}
var descriptors []ocispec.Descriptor
// If save index, add index
descriptors = append(descriptors, index)
var handler images.HandlerFunc = func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
// Only save images at top level
if desc.Digest != index.Digest {
return images.Children(ctx, ts.content, desc)
}
p, err := content.ReadBlob(ctx, ts.content, desc)
if err != nil {
return nil, err
}
var idx ocispec.Index
if err := json.Unmarshal(p, &idx); err != nil {
return nil, err
}
for _, m := range idx.Manifests {
m1 := m
m1.Annotations = mergeMap(m.Annotations, map[string]string{"io.containerd.import.ref-type": "name"})
descriptors = append(descriptors, m1)
// If add digest references, add twice
m2 := m
m2.Annotations = mergeMap(m.Annotations, map[string]string{"io.containerd.import.ref-type": "digest"})
descriptors = append(descriptors, m2)
}
return idx.Manifests, nil
}
if f, ok := is.(transfer.ImageFilterer); ok {
handler = f.ImageFilter(handler, ts.content)
}
if err := images.WalkNotEmpty(ctx, handler, index); err != nil {
return err
}
for _, desc := range descriptors {
img, err := is.Store(ctx, desc, ts.images)
if err != nil {
if errdefs.IsNotFound(err) {
continue
}
return err
}
if tops.Progress != nil {
tops.Progress(transfer.Progress{
Event: "saved",
Name: img.Name,
})
}
}
if tops.Progress != nil {
tops.Progress(transfer.Progress{
Event: "Completed import",
})
}
return nil
}
func mergeMap(m1, m2 map[string]string) map[string]string {
merged := make(map[string]string, len(m1)+len(m2))
for k, v := range m1 {
merged[k] = v
}
for k, v := range m2 {
merged[k] = v
}
return merged
}

View File

@ -0,0 +1,276 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package local
import (
"context"
"sort"
"sync"
"time"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/log"
"github.com/containerd/containerd/pkg/transfer"
"github.com/containerd/containerd/remotes"
"github.com/opencontainers/go-digest"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)
type ProgressTracker struct {
root string
transferState string
added chan jobUpdate
waitC chan struct{}
parents map[digest.Digest][]ocispec.Descriptor
parentL sync.Mutex
}
type jobState uint8
const (
jobAdded jobState = iota
jobInProgress
jobComplete
)
type jobStatus struct {
state jobState
name string
parents []string
progress int64
desc ocispec.Descriptor
}
type jobUpdate struct {
desc ocispec.Descriptor
exists bool
//children []ocispec.Descriptor
}
type ActiveJobs interface {
Status(string) (content.Status, bool)
}
type StatusTracker interface {
Active(context.Context, ...string) (ActiveJobs, error)
Check(context.Context, digest.Digest) (bool, error)
}
// NewProgressTracker tracks content download progress
func NewProgressTracker(root, transferState string) *ProgressTracker {
return &ProgressTracker{
root: root,
transferState: transferState,
added: make(chan jobUpdate, 1),
waitC: make(chan struct{}),
parents: map[digest.Digest][]ocispec.Descriptor{},
}
}
func (j *ProgressTracker) HandleProgress(ctx context.Context, pf transfer.ProgressFunc, pt StatusTracker) {
defer close(j.waitC)
// Instead of ticker, just delay
jobs := map[digest.Digest]*jobStatus{}
tc := time.NewTicker(time.Millisecond * 300)
update := func() {
// TODO: Filter by references
active, err := pt.Active(ctx)
if err != nil {
log.G(ctx).WithError(err).Error("failed to get statuses for progress")
}
for dgst, job := range jobs {
if job.state != jobComplete {
status, ok := active.Status(job.name)
if ok {
if status.Offset > job.progress {
pf(transfer.Progress{
Event: j.transferState,
Name: job.name,
Parents: job.parents,
Progress: status.Offset,
Total: status.Total,
})
job.progress = status.Offset
job.state = jobInProgress
jobs[dgst] = job
}
} else {
ok, err := pt.Check(ctx, job.desc.Digest)
if err != nil {
log.G(ctx).WithError(err).Error("failed to get statuses for progress")
} else if ok {
pf(transfer.Progress{
Event: "complete",
Name: job.name,
Parents: job.parents,
Progress: job.desc.Size,
Total: job.desc.Size,
})
}
job.state = jobComplete
jobs[dgst] = job
}
}
}
}
for {
select {
case update := <-j.added:
job, ok := jobs[update.desc.Digest]
if !ok {
// Only captures the parents defined before,
// could handle parent updates in same thread
// if there is a synchronization issue
var parents []string
j.parentL.Lock()
for _, parent := range j.parents[update.desc.Digest] {
parents = append(parents, remotes.MakeRefKey(ctx, parent))
}
j.parentL.Unlock()
if len(parents) == 0 {
parents = []string{j.root}
}
name := remotes.MakeRefKey(ctx, update.desc)
job = &jobStatus{
state: jobAdded,
name: name,
parents: parents,
desc: update.desc,
}
jobs[update.desc.Digest] = job
pf(transfer.Progress{
Event: "waiting",
Name: name,
Parents: parents,
//Digest: desc.Digest.String(),
Progress: 0,
Total: update.desc.Size,
})
}
if update.exists {
pf(transfer.Progress{
Event: "already exists",
Name: remotes.MakeRefKey(ctx, update.desc),
Progress: update.desc.Size,
Total: update.desc.Size,
})
job.state = jobComplete
job.progress = job.desc.Size
}
case <-tc.C:
update()
// Next timer?
case <-ctx.Done():
update()
return
}
}
}
// Add adds a descriptor to be tracked
func (j *ProgressTracker) Add(desc ocispec.Descriptor) {
if j == nil {
return
}
j.added <- jobUpdate{
desc: desc,
}
}
func (j *ProgressTracker) MarkExists(desc ocispec.Descriptor) {
if j == nil {
return
}
j.added <- jobUpdate{
desc: desc,
exists: true,
}
}
// AddChildren adds hierarchy information for progress reporting
func (j *ProgressTracker) AddChildren(desc ocispec.Descriptor, children []ocispec.Descriptor) {
if j == nil || len(children) == 0 {
return
}
j.parentL.Lock()
defer j.parentL.Unlock()
for _, child := range children {
j.parents[child.Digest] = append(j.parents[child.Digest], desc)
}
}
func (j *ProgressTracker) Wait() {
// timeout rather than rely on cancel
timeout := time.After(10 * time.Second)
select {
case <-timeout:
case <-j.waitC:
}
}
type contentActive struct {
active []content.Status
}
func (c *contentActive) Status(ref string) (content.Status, bool) {
idx := sort.Search(len(c.active), func(i int) bool { return c.active[i].Ref >= ref })
if idx < len(c.active) && c.active[idx].Ref == ref {
return c.active[idx], true
}
return content.Status{}, false
}
type contentStatusTracker struct {
cs content.Store
}
func NewContentStatusTracker(cs content.Store) StatusTracker {
return &contentStatusTracker{
cs: cs,
}
}
func (c *contentStatusTracker) Active(ctx context.Context, _ ...string) (ActiveJobs, error) {
active, err := c.cs.ListStatuses(ctx)
if err != nil {
log.G(ctx).WithError(err).Error("failed to list statuses for progress")
}
sort.Slice(active, func(i, j int) bool {
return active[i].Ref < active[j].Ref
})
return &contentActive{
active: active,
}, nil
}
func (c *contentStatusTracker) Check(ctx context.Context, dgst digest.Digest) (bool, error) {
_, err := c.cs.Info(ctx, dgst)
if err == nil {
return true, nil
}
return false, nil
}

243
pkg/transfer/local/pull.go Normal file
View File

@ -0,0 +1,243 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package local
import (
"context"
"fmt"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/images"
"github.com/containerd/containerd/log"
"github.com/containerd/containerd/pkg/transfer"
"github.com/containerd/containerd/pkg/unpack"
"github.com/containerd/containerd/remotes"
"github.com/containerd/containerd/remotes/docker"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/sirupsen/logrus"
)
func (ts *localTransferService) pull(ctx context.Context, ir transfer.ImageFetcher, is transfer.ImageStorer, tops *transfer.Config) error {
ctx, done, err := ts.withLease(ctx)
if err != nil {
return err
}
defer done(ctx)
if tops.Progress != nil {
tops.Progress(transfer.Progress{
Event: fmt.Sprintf("Resolving from %s", ir),
})
}
name, desc, err := ir.Resolve(ctx)
if err != nil {
return fmt.Errorf("failed to resolve image: %w", err)
}
if desc.MediaType == images.MediaTypeDockerSchema1Manifest {
// Explicitly call out schema 1 as deprecated and not supported
return fmt.Errorf("schema 1 image manifests are no longer supported: %w", errdefs.ErrInvalidArgument)
}
// TODO: Handle already exists
if tops.Progress != nil {
tops.Progress(transfer.Progress{
Event: fmt.Sprintf("Pulling from %s", ir),
})
tops.Progress(transfer.Progress{
Event: "fetching image content",
Name: name,
//Digest: img.Target.Digest.String(),
})
}
fetcher, err := ir.Fetcher(ctx, name)
if err != nil {
return fmt.Errorf("failed to get fetcher for %q: %w", name, err)
}
var (
handler images.Handler
unpacker *unpack.Unpacker
// has a config media type bug (distribution#1622)
hasMediaTypeBug1622 bool
store = ts.content
progressTracker *ProgressTracker
)
ctx, cancel := context.WithCancel(ctx)
if tops.Progress != nil {
progressTracker = NewProgressTracker(name, "downloading") //Pass in first name as root
go progressTracker.HandleProgress(ctx, tops.Progress, NewContentStatusTracker(store))
defer progressTracker.Wait()
}
defer cancel()
// Get all the children for a descriptor
childrenHandler := images.ChildrenHandler(store)
if f, ok := is.(transfer.ImageFilterer); ok {
childrenHandler = f.ImageFilter(childrenHandler, store)
}
// Sort and limit manifests if a finite number is needed
//if limit > 0 {
// childrenHandler = images.LimitManifests(childrenHandler, rCtx.PlatformMatcher, limit)
//}
//SetChildrenMappedLabels(manager content.Manager, f HandlerFunc, labelMap func(ocispec.Descriptor) []string) HandlerFunc {
checkNeedsFix := images.HandlerFunc(
func(_ context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
// set to true if there is application/octet-stream media type
if desc.MediaType == docker.LegacyConfigMediaType {
hasMediaTypeBug1622 = true
}
return []ocispec.Descriptor{}, nil
},
)
appendDistSrcLabelHandler, err := docker.AppendDistributionSourceLabel(store, name)
if err != nil {
return err
}
// TODO: Allow initialization from configuration
baseHandlers := []images.Handler{}
if tops.Progress != nil {
baseHandlers = append(baseHandlers, images.HandlerFunc(
func(_ context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
progressTracker.Add(desc)
return []ocispec.Descriptor{}, nil
},
))
baseChildrenHandler := childrenHandler
childrenHandler = images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) (children []ocispec.Descriptor, err error) {
children, err = baseChildrenHandler(ctx, desc)
if err != nil {
return
}
progressTracker.AddChildren(desc, children)
return
})
}
handler = images.Handlers(append(baseHandlers,
fetchHandler(store, fetcher, progressTracker),
checkNeedsFix,
childrenHandler, // List children to track hierarchy
appendDistSrcLabelHandler,
)...)
// TODO: Should available platforms be a configuration of the service?
// First find suitable platforms to unpack into
//if unpacker, ok := is.
if iu, ok := is.(transfer.ImageUnpacker); ok {
unpacks := iu.UnpackPlatforms()
if len(unpacks) > 0 {
uopts := []unpack.UnpackerOpt{}
for _, u := range unpacks {
uopts = append(uopts, unpack.WithUnpackPlatform(u))
}
if ts.limiter != nil {
uopts = append(uopts, unpack.WithLimiter(ts.limiter))
}
//if uconfig.DuplicationSuppressor != nil {
// uopts = append(uopts, unpack.WithDuplicationSuppressor(uconfig.DuplicationSuppressor))
//}
unpacker, err = unpack.NewUnpacker(ctx, ts.content, uopts...)
if err != nil {
return fmt.Errorf("unable to initialize unpacker: %w", err)
}
handler = unpacker.Unpack(handler)
}
}
if err := images.Dispatch(ctx, handler, ts.limiter, desc); err != nil {
if unpacker != nil {
// wait for unpacker to cleanup
unpacker.Wait()
}
return err
}
// NOTE(fuweid): unpacker defers blobs download. before create image
// record in ImageService, should wait for unpacking(including blobs
// download).
if unpacker != nil {
if _, err = unpacker.Wait(); err != nil {
return err
}
// TODO: Check results to make sure unpack was successful
}
if hasMediaTypeBug1622 {
if desc, err = docker.ConvertManifest(ctx, store, desc); err != nil {
return err
}
}
img, err := is.Store(ctx, desc, ts.images)
if err != nil {
return err
}
if tops.Progress != nil {
tops.Progress(transfer.Progress{
Event: "saved",
Name: img.Name,
//Digest: img.Target.Digest.String(),
})
}
if tops.Progress != nil {
tops.Progress(transfer.Progress{
Event: fmt.Sprintf("Completed pull from %s", ir),
})
}
return nil
}
func fetchHandler(ingester content.Ingester, fetcher remotes.Fetcher, pt *ProgressTracker) images.HandlerFunc {
return func(ctx context.Context, desc ocispec.Descriptor) (subdescs []ocispec.Descriptor, err error) {
ctx = log.WithLogger(ctx, log.G(ctx).WithFields(logrus.Fields{
"digest": desc.Digest,
"mediatype": desc.MediaType,
"size": desc.Size,
}))
switch desc.MediaType {
case images.MediaTypeDockerSchema1Manifest:
return nil, fmt.Errorf("%v not supported", desc.MediaType)
default:
err := remotes.Fetch(ctx, ingester, fetcher, desc)
if errdefs.IsAlreadyExists(err) {
pt.MarkExists(desc)
return nil, nil
}
return nil, err
}
}
}

270
pkg/transfer/local/push.go Normal file
View File

@ -0,0 +1,270 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package local
import (
"context"
"fmt"
"sync"
"time"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/images"
"github.com/containerd/containerd/pkg/transfer"
"github.com/containerd/containerd/platforms"
"github.com/containerd/containerd/remotes"
"github.com/opencontainers/go-digest"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)
func (ts *localTransferService) push(ctx context.Context, ig transfer.ImageGetter, p transfer.ImagePusher, tops *transfer.Config) error {
/*
// TODO: Platform matching
if pushCtx.PlatformMatcher == nil {
if len(pushCtx.Platforms) > 0 {
var ps []ocispec.Platform
for _, platform := range pushCtx.Platforms {
p, err := platforms.Parse(platform)
if err != nil {
return fmt.Errorf("invalid platform %s: %w", platform, err)
}
ps = append(ps, p)
}
pushCtx.PlatformMatcher = platforms.Any(ps...)
} else {
pushCtx.PlatformMatcher = platforms.All
}
}
*/
matcher := platforms.All
// Filter push
img, err := ig.Get(ctx, ts.images)
if err != nil {
return err
}
if tops.Progress != nil {
tops.Progress(transfer.Progress{
Event: fmt.Sprintf("Pushing to %s", p),
})
tops.Progress(transfer.Progress{
Event: "pushing content",
Name: img.Name,
//Digest: img.Target.Digest.String(),
})
}
var pusher remotes.Pusher
pusher, err = p.Pusher(ctx, img.Target)
if err != nil {
return err
}
var wrapper func(images.Handler) images.Handler
ctx, cancel := context.WithCancel(ctx)
if tops.Progress != nil {
progressTracker := NewProgressTracker(img.Name, "uploading") // Pass in the first name as the root
p := newProgressPusher(pusher, progressTracker)
go progressTracker.HandleProgress(ctx, tops.Progress, p)
defer progressTracker.Wait()
wrapper = p.WrapHandler
pusher = p
}
defer cancel()
// TODO: Add handler to track parents
/*
// TODO: Add handlers
if len(pushCtx.BaseHandlers) > 0 {
wrapper = func(h images.Handler) images.Handler {
h = images.Handlers(append(pushCtx.BaseHandlers, h)...)
if pushCtx.HandlerWrapper != nil {
h = pushCtx.HandlerWrapper(h)
}
return h
}
} else if pushCtx.HandlerWrapper != nil {
wrapper = pushCtx.HandlerWrapper
}
*/
if err := remotes.PushContent(ctx, pusher, img.Target, ts.content, ts.limiter, matcher, wrapper); err != nil {
return err
}
if tops.Progress != nil {
tops.Progress(transfer.Progress{
Event: "pushed content",
Name: img.Name,
//Digest: img.Target.Digest.String(),
})
tops.Progress(transfer.Progress{
Event: fmt.Sprintf("Completed push to %s", p),
})
}
return nil
}
type progressPusher struct {
remotes.Pusher
progress *ProgressTracker
status *pushStatus
}
type pushStatus struct {
l sync.Mutex
statuses map[string]content.Status
complete map[digest.Digest]struct{}
}
func newProgressPusher(pusher remotes.Pusher, progress *ProgressTracker) *progressPusher {
return &progressPusher{
Pusher: pusher,
progress: progress,
status: &pushStatus{
statuses: map[string]content.Status{},
complete: map[digest.Digest]struct{}{},
},
}
}
func (p *progressPusher) WrapHandler(h images.Handler) images.Handler {
return images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) (subdescs []ocispec.Descriptor, err error) {
p.progress.Add(desc)
subdescs, err = h.Handle(ctx, desc)
p.progress.AddChildren(desc, subdescs)
return
})
}
func (p *progressPusher) Push(ctx context.Context, d ocispec.Descriptor) (content.Writer, error) {
ref := remotes.MakeRefKey(ctx, d)
p.status.add(ref, d)
cw, err := p.Pusher.Push(ctx, d)
if err != nil {
if errdefs.IsAlreadyExists(err) {
p.progress.MarkExists(d)
p.status.markComplete(ref, d)
}
return nil, err
}
return &progressWriter{
Writer: cw,
ref: ref,
desc: d,
status: p.status,
progress: p.progress,
}, nil
}
func (ps *pushStatus) update(ref string, delta int) {
ps.l.Lock()
status, ok := ps.statuses[ref]
if ok {
if delta > 0 {
status.Offset += int64(delta)
} else if delta < 0 {
status.Offset = 0
}
ps.statuses[ref] = status
}
ps.l.Unlock()
}
func (ps *pushStatus) add(ref string, d ocispec.Descriptor) {
status := content.Status{
Ref: ref,
Offset: 0,
Total: d.Size,
StartedAt: time.Now(),
}
ps.l.Lock()
_, ok := ps.statuses[ref]
_, complete := ps.complete[d.Digest]
if !ok && !complete {
ps.statuses[ref] = status
}
ps.l.Unlock()
}
func (ps *pushStatus) markComplete(ref string, d ocispec.Descriptor) {
ps.l.Lock()
_, ok := ps.statuses[ref]
if ok {
delete(ps.statuses, ref)
}
ps.complete[d.Digest] = struct{}{}
ps.l.Unlock()
}
func (ps *pushStatus) Status(name string) (content.Status, bool) {
ps.l.Lock()
status, ok := ps.statuses[name]
ps.l.Unlock()
return status, ok
}
func (ps *pushStatus) Check(ctx context.Context, dgst digest.Digest) (bool, error) {
ps.l.Lock()
_, ok := ps.complete[dgst]
ps.l.Unlock()
return ok, nil
}
func (p *progressPusher) Active(ctx context.Context, _ ...string) (ActiveJobs, error) {
return p.status, nil
}
func (p *progressPusher) Check(ctx context.Context, dgst digest.Digest) (bool, error) {
return p.status.Check(ctx, dgst)
}
type progressWriter struct {
content.Writer
ref string
desc ocispec.Descriptor
status *pushStatus
progress *ProgressTracker
}
func (pw *progressWriter) Write(p []byte) (n int, err error) {
n, err = pw.Writer.Write(p)
if err != nil {
// TODO: Handle reset error to reset progress
return
}
pw.status.update(pw.ref, n)
return
}
func (pw *progressWriter) Commit(ctx context.Context, size int64, expected digest.Digest, opts ...content.Opt) error {
err := pw.Writer.Commit(ctx, size, expected, opts...)
if err != nil {
if errdefs.IsAlreadyExists(err) {
pw.progress.MarkExists(pw.desc)
}
// TODO: Handle reset error to reset progress
}
pw.status.markComplete(pw.ref, pw.desc)
return err
}

View File

@ -0,0 +1,152 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package local
import (
"context"
"fmt"
"io"
"time"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/images"
"github.com/containerd/containerd/leases"
"github.com/containerd/containerd/pkg/transfer"
"github.com/containerd/typeurl"
"golang.org/x/sync/semaphore"
)
type localTransferService struct {
leases leases.Manager
content content.Store
images images.Store
// semaphore.NewWeighted(int64(rCtx.MaxConcurrentDownloads))
limiter *semaphore.Weighted
// TODO: Duplication suppressor
// Configuration
// - Max downloads
// - Max uploads
// Supported platforms
// - Platform -> snapshotter defaults?
}
func NewTransferService(lm leases.Manager, cs content.Store, is images.Store) transfer.Transferrer {
return &localTransferService{
leases: lm,
content: cs,
images: is,
}
}
func (ts *localTransferService) Transfer(ctx context.Context, src interface{}, dest interface{}, opts ...transfer.Opt) error {
topts := &transfer.Config{}
for _, opt := range opts {
opt(topts)
}
// Determine whether the source/destination combination is supported
switch s := src.(type) {
case transfer.ImageFetcher:
switch d := dest.(type) {
case transfer.ImageStorer:
return ts.pull(ctx, s, d, topts)
}
case transfer.ImageGetter:
switch d := dest.(type) {
case transfer.ImagePusher:
return ts.push(ctx, s, d, topts)
}
case transfer.ImageImporter:
switch d := dest.(type) {
case transfer.ImageExportStreamer:
return ts.echo(ctx, s, d, topts)
case transfer.ImageStorer:
return ts.importStream(ctx, s, d, topts)
}
}
return fmt.Errorf("unable to transfer from %s to %s: %w", name(src), name(dest), errdefs.ErrNotImplemented)
}
func name(t interface{}) string {
switch s := t.(type) {
case fmt.Stringer:
return s.String()
case typeurl.Any:
return s.GetTypeUrl()
default:
return fmt.Sprintf("%T", t)
}
}
// echo is mostly used for testing; it implements an import->export path
// that is a no-op, simply round-tripping the raw bytes.
func (ts *localTransferService) echo(ctx context.Context, i transfer.ImageImporter, e transfer.ImageExportStreamer, tops *transfer.Config) error {
iis, ok := i.(transfer.ImageImportStreamer)
if !ok {
return fmt.Errorf("echo requires access to raw stream: %w", errdefs.ErrNotImplemented)
}
r, _, err := iis.ImportStream(ctx)
if err != nil {
return err
}
wc, _, err := e.ExportStream(ctx)
if err != nil {
return err
}
// TODO: Use fixed buffer? Send write progress?
_, err = io.Copy(wc, r)
if werr := wc.Close(); werr != nil && err == nil {
err = werr
}
return err
}
// withLease attaches a lease to the context if one is not already set
func (ts *localTransferService) withLease(ctx context.Context, opts ...leases.Opt) (context.Context, func(context.Context) error, error) {
nop := func(context.Context) error { return nil }
_, ok := leases.FromContext(ctx)
if ok {
return ctx, nop, nil
}
ls := ts.leases
if len(opts) == 0 {
// Use default lease configuration if no options provided
opts = []leases.Opt{
leases.WithRandomID(),
leases.WithExpiration(24 * time.Hour),
}
}
l, err := ls.Create(ctx, opts...)
if err != nil {
return ctx, nop, err
}
ctx = leases.WithLease(ctx, l.ID)
return ctx, func(ctx context.Context) error {
return ls.Delete(ctx, l)
}, nil
}
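Transfer only succeeds when the source and destination implement one of the interface pairs in the switch above; everything else falls through to ErrNotImplemented. As a minimal usage sketch (not part of this change), assuming lm, cs, and is are existing leases.Manager, content.Store, and images.Store instances, and src/dst are concrete ImageFetcher/ImageStorer values from packages outside this excerpt:
import (
	"context"
	"fmt"
	"github.com/containerd/containerd/content"
	"github.com/containerd/containerd/images"
	"github.com/containerd/containerd/leases"
	"github.com/containerd/containerd/pkg/transfer"
	"github.com/containerd/containerd/pkg/transfer/local"
)
// pullSketch wires the local service and prints progress events as they arrive.
func pullSketch(ctx context.Context, lm leases.Manager, cs content.Store, is images.Store,
	src transfer.ImageFetcher, dst transfer.ImageStorer) error {
	ts := local.NewTransferService(lm, cs, is)
	return ts.Transfer(ctx, src, dst, transfer.WithProgress(func(p transfer.Progress) {
		fmt.Printf("%s %s %d/%d\n", p.Event, p.Name, p.Progress, p.Total)
	}))
}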

View File

@ -0,0 +1,63 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package plugins
import (
"fmt"
"reflect"
"sync"
"github.com/containerd/containerd/errdefs"
"github.com/containerd/typeurl"
)
var register = struct {
sync.RWMutex
r map[string]reflect.Type
}{}
func Register(apiObject, transferObject interface{}) {
url, err := typeurl.TypeURL(apiObject)
if err != nil {
panic(err)
}
// Lock
register.Lock()
defer register.Unlock()
if register.r == nil {
register.r = map[string]reflect.Type{}
}
if _, ok := register.r[url]; ok {
panic(fmt.Sprintf("url already registered: %v", url))
}
t := reflect.TypeOf(transferObject)
if t.Kind() == reflect.Ptr {
t = t.Elem()
}
register.r[url] = t
}
func ResolveType(any typeurl.Any) (interface{}, error) {
register.RLock()
defer register.RUnlock()
if register.r != nil {
if t, ok := register.r[any.GetTypeUrl()]; ok {
return reflect.New(t).Interface(), nil
}
}
return nil, fmt.Errorf("%v not registered: %w", any.GetTypeUrl(), errdefs.ErrNotFound)
}
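The registry maps the type URL of an API (wire) object to the reflect.Type of its local counterpart, and ResolveType hands back a freshly allocated value of that local type for the service to unmarshal into. A minimal sketch of the contract, using hypothetical wireThing/localThing types (the real pairs are registered by the transfer image and archive packages loaded elsewhere in this PR):
import (
	"github.com/containerd/containerd/pkg/transfer/plugins"
	"github.com/containerd/typeurl"
)
// Hypothetical types used only to illustrate the registry contract.
type wireThing struct{ Name string }  // travels over the API as an Any
type localThing struct{ Name string } // what the service operates on
func init() {
	// The wire type needs a type URL before plugins.Register can resolve it.
	typeurl.Register(&wireThing{}, "example.dev/transfer", "WireThing")
	plugins.Register(&wireThing{}, &localThing{})
}
// resolveSketch turns a received Any into an empty *localThing, ready for
// typeurl.UnmarshalTo or a custom UnmarshalAny.
func resolveSketch(a typeurl.Any) (*localThing, error) {
	obj, err := plugins.ResolveType(a)
	if err != nil {
		return nil, err
	}
	return obj.(*localThing), nil
}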

View File

@ -0,0 +1,121 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package proxy
import (
"context"
"errors"
"io"
transferapi "github.com/containerd/containerd/api/services/transfer/v1"
transfertypes "github.com/containerd/containerd/api/types/transfer"
"github.com/containerd/containerd/log"
"github.com/containerd/containerd/pkg/streaming"
"github.com/containerd/containerd/pkg/transfer"
tstreaming "github.com/containerd/containerd/pkg/transfer/streaming"
"github.com/containerd/typeurl"
"google.golang.org/protobuf/types/known/anypb"
)
type proxyTransferrer struct {
client transferapi.TransferClient
streamCreator streaming.StreamCreator
}
// NewTransferrer returns a new transferrer which communicates over a gRPC
// connection using the containerd transfer API
func NewTransferrer(client transferapi.TransferClient, sc streaming.StreamCreator) transfer.Transferrer {
return &proxyTransferrer{
client: client,
streamCreator: sc,
}
}
func (p *proxyTransferrer) Transfer(ctx context.Context, src interface{}, dst interface{}, opts ...transfer.Opt) error {
o := &transfer.Config{}
for _, opt := range opts {
opt(o)
}
apiOpts := &transferapi.TransferOptions{}
if o.Progress != nil {
sid := tstreaming.GenerateID("progress")
stream, err := p.streamCreator.Create(ctx, sid)
if err != nil {
return err
}
apiOpts.ProgressStream = sid
go func() {
for {
a, err := stream.Recv()
if err != nil {
if !errors.Is(err, io.EOF) {
log.G(ctx).WithError(err).Error("progress stream failed to recv")
}
return
}
i, err := typeurl.UnmarshalAny(a)
if err != nil {
log.G(ctx).WithError(err).Warnf("failed to unmarshal progress object: %v", a.GetTypeUrl())
}
switch v := i.(type) {
case *transfertypes.Progress:
o.Progress(transfer.Progress{
Event: v.Event,
Name: v.Name,
Parents: v.Parents,
Progress: v.Progress,
Total: v.Total,
})
default:
log.G(ctx).Warnf("unhandled progress object %T: %v", i, a.GetTypeUrl())
}
}
}()
}
asrc, err := p.marshalAny(ctx, src)
if err != nil {
return err
}
adst, err := p.marshalAny(ctx, dst)
if err != nil {
return err
}
req := &transferapi.TransferRequest{
Source: &anypb.Any{
TypeUrl: asrc.GetTypeUrl(),
Value: asrc.GetValue(),
},
Destination: &anypb.Any{
TypeUrl: adst.GetTypeUrl(),
Value: adst.GetValue(),
},
Options: apiOpts,
}
_, err = p.client.Transfer(ctx, req)
return err
}
func (p *proxyTransferrer) marshalAny(ctx context.Context, i interface{}) (typeurl.Any, error) {
switch m := i.(type) {
case streamMarshaler:
return m.MarshalAny(ctx, p.streamCreator)
}
return typeurl.MarshalAny(i)
}
type streamMarshaler interface {
MarshalAny(context.Context, streaming.StreamCreator) (typeurl.Any, error)
}

View File

@ -0,0 +1,210 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package streaming
import (
"context"
"crypto/rand"
"encoding/base64"
"errors"
"fmt"
"io"
"sync"
"time"
transferapi "github.com/containerd/containerd/api/types/transfer"
"github.com/containerd/containerd/log"
"github.com/containerd/containerd/pkg/streaming"
"github.com/containerd/typeurl"
)
const maxRead = 32 * 1024
const windowSize = 2 * maxRead
var bufPool = &sync.Pool{
New: func() interface{} {
buffer := make([]byte, maxRead)
return &buffer
},
}
func SendStream(ctx context.Context, r io.Reader, stream streaming.Stream) {
window := make(chan int32)
go func() {
defer close(window)
for {
select {
case <-ctx.Done():
return
default:
}
any, err := stream.Recv()
if err != nil {
if !errors.Is(err, io.EOF) && !errors.Is(err, context.Canceled) {
log.G(ctx).WithError(err).Error("send stream ended without EOF")
}
return
}
i, err := typeurl.UnmarshalAny(any)
if err != nil {
log.G(ctx).WithError(err).Error("failed to unmarshal stream object")
continue
}
switch v := i.(type) {
case *transferapi.WindowUpdate:
select {
case <-ctx.Done():
return
case window <- v.Update:
}
default:
log.G(ctx).Errorf("unexpected stream object of type %T", i)
}
}
}()
go func() {
defer stream.Close()
buf := bufPool.Get().(*[]byte)
defer bufPool.Put(buf)
var remaining int32
for {
if remaining > 0 {
// Don't block on a window update since there is remaining credit
select {
case <-ctx.Done():
// TODO: Send error message on stream before close to allow remote side to return error
return
case update := <-window:
remaining += update
default:
}
} else {
// Block until window updated
select {
case <-ctx.Done():
// TODO: Send error message on stream before close to allow remote side to return error
return
case update := <-window:
remaining = update
}
}
var max int32 = maxRead
if max > remaining {
max = remaining
}
b := (*buf)[:max]
n, err := r.Read(b)
if err != nil {
if !errors.Is(err, io.EOF) {
log.G(ctx).WithError(err).Errorf("failed to read stream source")
// TODO: Send error message on stream before close to allow remote side to return error
}
return
}
remaining = remaining - int32(n)
data := &transferapi.Data{
Data: b[:n],
}
any, err := typeurl.MarshalAny(data)
if err != nil {
log.G(ctx).WithError(err).Errorf("failed to marshal data for send")
// TODO: Send error message on stream before close to allow remote side to return error
return
}
if err := stream.Send(any); err != nil {
log.G(ctx).WithError(err).Errorf("send failed")
return
}
}
}()
}
func ReceiveStream(ctx context.Context, stream streaming.Stream) io.Reader {
r, w := io.Pipe()
go func() {
defer stream.Close()
var window int32
for {
var werr error
if window < windowSize {
update := &transferapi.WindowUpdate{
Update: windowSize,
}
any, err := typeurl.MarshalAny(update)
if err != nil {
w.CloseWithError(fmt.Errorf("failed to marshal window update: %w", err))
return
}
// check window update error after recv, stream may be complete
if werr = stream.Send(any); werr == nil {
window += windowSize
} else if werr == io.EOF {
// TODO: Why does send return EOF here
werr = nil
}
}
any, err := stream.Recv()
if err != nil {
if err == io.EOF {
err = nil
} else {
err = fmt.Errorf("received failed: %w", err)
}
w.CloseWithError(err)
return
} else if werr != nil {
// Try receive before erroring out
w.CloseWithError(fmt.Errorf("failed to send window update: %w", werr))
return
}
i, err := typeurl.UnmarshalAny(any)
if err != nil {
w.CloseWithError(fmt.Errorf("failed to unmarshal received object: %w", err))
return
}
switch v := i.(type) {
case *transferapi.Data:
n, err := w.Write(v.Data)
if err != nil {
w.CloseWithError(fmt.Errorf("failed to unmarshal received object: %w", err))
// Close will error out sender
return
}
window = window - int32(n)
// TODO: Handle error case
default:
log.G(ctx).Warnf("Ignoring unknown stream object of type %T", i)
continue
}
}
}()
return r
}
func GenerateID(prefix string) string {
t := time.Now()
var b [3]byte
rand.Read(b[:])
return fmt.Sprintf("%s-%d-%s", prefix, t.Nanosecond(), base64.URLEncoding.EncodeToString(b[:]))
}
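SendStream and ReceiveStream implement a simple credit-based flow control: the receiver grants windowSize bytes of credit through WindowUpdate messages, and the sender chunks the reader into Data messages of at most maxRead bytes, never exceeding the outstanding credit. A minimal pairing sketch, assuming each side already holds its end of an established streaming.Stream (the fuzz test below exercises the same pairing through pipeStream):
// Producer side: SendStream returns immediately and pumps the bytes from r
// onto the stream in the background, throttled by the peer's window updates.
func produceSketch(ctx context.Context, r io.Reader, s streaming.Stream) {
	SendStream(ctx, r, s)
}
// Consumer side: ReceiveStream issues window credit and reassembles the Data
// frames into a plain io.Reader.
func consumeSketch(ctx context.Context, s streaming.Stream) ([]byte, error) {
	return io.ReadAll(ReceiveStream(ctx, s))
}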

View File

@ -0,0 +1,165 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package streaming
import (
"bytes"
"context"
"io"
"testing"
"github.com/containerd/containerd/pkg/streaming"
"github.com/containerd/typeurl"
)
func FuzzSendAndReceive(f *testing.F) {
f.Add([]byte{})
f.Add([]byte{0})
f.Add(bytes.Repeat([]byte{0}, windowSize+1))
f.Add([]byte("hello"))
f.Add(bytes.Repeat([]byte("hello"), windowSize+1))
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
f.Fuzz(func(t *testing.T, expected []byte) {
runSendAndReceiveFuzz(ctx, t, expected)
runSendAndReceiveChainFuzz(ctx, t, expected)
runWriterFuzz(ctx, t, expected)
})
}
func runSendAndReceiveFuzz(ctx context.Context, t *testing.T, expected []byte) {
rs, ws := pipeStream()
r, w := io.Pipe()
SendStream(ctx, r, ws)
or := ReceiveStream(ctx, rs)
go func() {
io.Copy(w, bytes.NewBuffer(expected))
w.Close()
}()
actual, err := io.ReadAll(or)
if err != nil {
t.Fatal(err)
}
if !bytes.Equal(expected, actual) {
t.Fatalf("received bytes are not equal\n\tactual: %v\n\texpected:%v", actual, expected)
}
}
func runSendAndReceiveChainFuzz(ctx context.Context, t *testing.T, expected []byte) {
r, w := io.Pipe()
or := chainStreams(ctx, chainStreams(ctx, chainStreams(ctx, r)))
go func() {
io.Copy(w, bytes.NewBuffer(expected))
w.Close()
}()
actual, err := io.ReadAll(or)
if err != nil {
t.Fatal(err)
}
if !bytes.Equal(expected, actual) {
t.Fatalf("received bytes are not equal\n\tactual: %v\n\texpected:%v", actual, expected)
}
}
func runWriterFuzz(ctx context.Context, t *testing.T, expected []byte) {
rs, ws := pipeStream()
wc := WriteByteStream(ctx, ws)
or := ReceiveStream(ctx, rs)
go func() {
io.Copy(wc, bytes.NewBuffer(expected))
wc.Close()
}()
actual, err := io.ReadAll(or)
if err != nil {
t.Fatal(err)
}
if !bytes.Equal(expected, actual) {
t.Fatalf("received bytes are not equal\n\tactual: %v\n\texpected:%v", actual, expected)
}
}
func chainStreams(ctx context.Context, r io.Reader) io.Reader {
rs, ws := pipeStream()
SendStream(ctx, r, ws)
return ReceiveStream(ctx, rs)
}
func pipeStream() (streaming.Stream, streaming.Stream) {
r := make(chan typeurl.Any)
rc := make(chan struct{})
w := make(chan typeurl.Any)
wc := make(chan struct{})
rs := &testStream{
send: w,
recv: r,
closer: wc,
remote: rc,
}
ws := &testStream{
send: r,
recv: w,
closer: rc,
remote: wc,
}
return rs, ws
}
type testStream struct {
send chan<- typeurl.Any
recv <-chan typeurl.Any
closer chan struct{}
remote <-chan struct{}
}
func (ts *testStream) Send(a typeurl.Any) error {
select {
case <-ts.remote:
return io.ErrClosedPipe
case ts.send <- a:
}
return nil
}
func (ts *testStream) Recv() (typeurl.Any, error) {
select {
case <-ts.remote:
return nil, io.EOF
case a := <-ts.recv:
return a, nil
}
}
func (ts *testStream) Close() error {
select {
case <-ts.closer:
return nil
default:
}
close(ts.closer)
return nil
}

View File

@ -0,0 +1,130 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package streaming
import (
"context"
"errors"
"io"
"sync/atomic"
transferapi "github.com/containerd/containerd/api/types/transfer"
"github.com/containerd/containerd/log"
"github.com/containerd/containerd/pkg/streaming"
"github.com/containerd/typeurl"
)
func WriteByteStream(ctx context.Context, stream streaming.Stream) io.WriteCloser {
wbs := &writeByteStream{
ctx: ctx,
stream: stream,
updated: make(chan struct{}, 1),
}
go func() {
for {
select {
case <-ctx.Done():
return
default:
}
any, err := stream.Recv()
if err != nil {
if !errors.Is(err, io.EOF) && !errors.Is(err, context.Canceled) {
log.G(ctx).WithError(err).Error("send byte stream ended without EOF")
}
return
}
i, err := typeurl.UnmarshalAny(any)
if err != nil {
log.G(ctx).WithError(err).Error("failed to unmarshal stream object")
continue
}
switch v := i.(type) {
case *transferapi.WindowUpdate:
atomic.AddInt32(&wbs.remaining, v.Update)
select {
case <-ctx.Done():
return
case wbs.updated <- struct{}{}:
default:
// Don't block if no writes are waiting
}
default:
log.G(ctx).Errorf("unexpected stream object of type %T", i)
}
}
}()
return wbs
}
type writeByteStream struct {
ctx context.Context
stream streaming.Stream
remaining int32
updated chan struct{}
}
func (wbs *writeByteStream) Write(p []byte) (n int, err error) {
for len(p) > 0 {
remaining := atomic.LoadInt32(&wbs.remaining)
if remaining == 0 {
// No credit available; block until the window is updated
select {
case <-wbs.ctx.Done():
// TODO: Send error message on stream before close to allow remote side to return error
err = io.ErrShortWrite
return
case <-wbs.updated:
continue
}
}
var max int32 = maxRead
if max > int32(len(p)) {
max = int32(len(p))
}
if max > remaining {
max = remaining
}
// TODO: continue
//remaining = remaining - int32(n)
data := &transferapi.Data{
Data: p[:max],
}
var any typeurl.Any
any, err = typeurl.MarshalAny(data)
if err != nil {
log.G(wbs.ctx).WithError(err).Errorf("failed to marshal data for send")
// TODO: Send error message on stream before close to allow remote side to return error
return
}
if err = wbs.stream.Send(any); err != nil {
log.G(wbs.ctx).WithError(err).Errorf("send failed")
return
}
n += int(max)
p = p[max:]
atomic.AddInt32(&wbs.remaining, -1*max)
}
return
}
func (wbs *writeByteStream) Close() error {
return wbs.stream.Close()
}
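WriteByteStream is the push-style counterpart of SendStream: instead of pulling from an io.Reader, it exposes an io.WriteCloser that blocks whenever the peer's window credit is exhausted. A small sketch, assuming s is one end of an established streaming.Stream whose peer is consuming with ReceiveStream:
// pushBytesSketch copies src onto the stream using the writer-style API and
// closes the stream when done.
func pushBytesSketch(ctx context.Context, src io.Reader, s streaming.Stream) error {
	wc := WriteByteStream(ctx, s)
	if _, err := io.Copy(wc, src); err != nil {
		wc.Close()
		return err
	}
	return wc.Close()
}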

117
pkg/transfer/transfer.go Normal file
View File

@ -0,0 +1,117 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package transfer
import (
"context"
"io"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/images"
"github.com/containerd/containerd/pkg/unpack"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)
type Transferrer interface {
Transfer(ctx context.Context, source interface{}, destination interface{}, opts ...Opt) error
}
type ImageResolver interface {
Resolve(ctx context.Context) (name string, desc ocispec.Descriptor, err error)
}
type ImageFetcher interface {
ImageResolver
Fetcher(ctx context.Context, ref string) (Fetcher, error)
}
type ImagePusher interface {
Pusher(context.Context, ocispec.Descriptor) (Pusher, error)
}
type Fetcher interface {
Fetch(context.Context, ocispec.Descriptor) (io.ReadCloser, error)
}
type Pusher interface {
Push(context.Context, ocispec.Descriptor) (content.Writer, error)
}
// ImageFilterer is used to filter out child objects of an image
type ImageFilterer interface {
ImageFilter(images.HandlerFunc, content.Store) images.HandlerFunc
}
// ImageStorer is a type which is capable of storing an image for
// a provided descriptor
type ImageStorer interface {
Store(context.Context, ocispec.Descriptor, images.Store) (images.Image, error)
}
// ImageGetter is a type which returns an image from an image store
type ImageGetter interface {
Get(context.Context, images.Store) (images.Image, error)
}
// ImageImporter imports an image into a content store
type ImageImporter interface {
Import(context.Context, content.Store) (ocispec.Descriptor, error)
}
// ImageImportStreamer returns an import streamer based on OCI or
// Docker image tar archives. The stream should be a raw,
// uncompressed tar stream.
type ImageImportStreamer interface {
ImportStream(context.Context) (io.Reader, string, error)
}
type ImageExportStreamer interface {
ExportStream(context.Context) (io.WriteCloser, string, error)
}
type ImageUnpacker interface {
// TODO: consider using unpack options
UnpackPlatforms() []unpack.Platform
}
type ProgressFunc func(Progress)
type Config struct {
Progress ProgressFunc
}
type Opt func(*Config)
func WithProgress(f ProgressFunc) Opt {
return func(opts *Config) {
opts.Progress = f
}
}
// Progress is used to represent a particular progress event or incremental
// update for the provided named object. The parents represent the names of
// the objects which initiated the progress for the provided named object.
// The name, and what object it represents, are determined by the implementation.
type Progress struct {
Event string
Name string
Parents []string
Progress int64
Total int64
// Descriptor?
}
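The interfaces above are intentionally small; the echo path in the local service, for example, only needs the two raw-stream variants. A minimal, hypothetical streamer pair backed by a plain reader and writer (the real implementations live in the transfer archive package, which is not part of this excerpt):
import (
	"context"
	"io"
	"github.com/containerd/containerd/content"
	"github.com/containerd/containerd/errdefs"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)
// readerImport satisfies ImageImporter and ImageImportStreamer; the echo path
// only calls ImportStream and ignores the second return value.
type readerImport struct{ r io.Reader }
func (ri readerImport) Import(ctx context.Context, _ content.Store) (ocispec.Descriptor, error) {
	return ocispec.Descriptor{}, errdefs.ErrNotImplemented // raw-stream use only
}
func (ri readerImport) ImportStream(ctx context.Context) (io.Reader, string, error) {
	return ri.r, "", nil
}
// writerExport satisfies ImageExportStreamer.
type writerExport struct{ wc io.WriteCloser }
func (we writerExport) ExportStream(ctx context.Context) (io.WriteCloser, string, error) {
	return we.wc, "", nil
}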

View File

@ -78,10 +78,14 @@ const (
EventPlugin Type = "io.containerd.event.v1"
// LeasePlugin implements lease manager
LeasePlugin Type = "io.containerd.lease.v1"
// StreamingPlugin implements a stream manager
StreamingPlugin Type = "io.containerd.streaming.v1"
// TracingProcessorPlugin implements a open telemetry span processor
TracingProcessorPlugin Type = "io.containerd.tracing.processor.v1"
// NRIApiPlugin implements the NRI adaptation interface for containerd.
NRIApiPlugin Type = "io.containerd.nri.v1"
// TransferPlugin implements a transfer service
TransferPlugin Type = "io.containerd.transfer.v1"
)
const (

View File

@ -0,0 +1,257 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package streaming
import (
"context"
"sync"
"github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/gc"
"github.com/containerd/containerd/leases"
"github.com/containerd/containerd/metadata"
"github.com/containerd/containerd/namespaces"
"github.com/containerd/containerd/pkg/streaming"
"github.com/containerd/containerd/plugin"
)
func init() {
plugin.Register(&plugin.Registration{
Type: plugin.StreamingPlugin,
ID: "manager",
Requires: []plugin.Type{
plugin.MetadataPlugin,
},
InitFn: func(ic *plugin.InitContext) (interface{}, error) {
md, err := ic.Get(plugin.MetadataPlugin)
if err != nil {
return nil, err
}
sm := &streamManager{
streams: map[string]map[string]*managedStream{},
byLease: map[string]map[string]map[string]struct{}{},
}
md.(*metadata.DB).RegisterCollectibleResource(metadata.ResourceStream, sm)
return sm, nil
},
})
}
type streamManager struct {
// streams maps namespace -> name -> stream
streams map[string]map[string]*managedStream
byLease map[string]map[string]map[string]struct{}
rwlock sync.RWMutex
}
func (sm *streamManager) Register(ctx context.Context, name string, stream streaming.Stream) error {
ns, _ := namespaces.Namespace(ctx)
ls, _ := leases.FromContext(ctx)
ms := &managedStream{
Stream: stream,
ns: ns,
name: name,
lease: ls,
manager: sm,
}
sm.rwlock.Lock()
defer sm.rwlock.Unlock()
nsMap, ok := sm.streams[ns]
if !ok {
nsMap = make(map[string]*managedStream)
sm.streams[ns] = nsMap
}
if _, ok := nsMap[name]; ok {
return errdefs.ErrAlreadyExists
}
nsMap[name] = ms
if ls != "" {
nsMap, ok := sm.byLease[ns]
if !ok {
nsMap = make(map[string]map[string]struct{})
sm.byLease[ns] = nsMap
}
lsMap, ok := nsMap[ls]
if !ok {
lsMap = make(map[string]struct{})
nsMap[ls] = lsMap
}
lsMap[name] = struct{}{}
}
return nil
}
func (sm *streamManager) Get(ctx context.Context, name string) (streaming.Stream, error) {
ns, _ := namespaces.Namespace(ctx)
sm.rwlock.RLock()
defer sm.rwlock.RUnlock()
nsMap, ok := sm.streams[ns]
if !ok {
return nil, errdefs.ErrNotFound
}
stream, ok := nsMap[name]
if !ok {
return nil, errdefs.ErrNotFound
}
return stream, nil
}
func (sm *streamManager) StartCollection(context.Context) (metadata.CollectionContext, error) {
// Lock now; the returned collection context will unlock
sm.rwlock.Lock()
return &collectionContext{
manager: sm,
}, nil
}
func (sm *streamManager) ReferenceLabel() string {
return "stream"
}
type managedStream struct {
streaming.Stream
ns string
name string
lease string
manager *streamManager
}
func (m *managedStream) Close() error {
m.manager.rwlock.Lock()
if nsMap, ok := m.manager.streams[m.ns]; ok {
delete(nsMap, m.name)
if len(nsMap) == 0 {
delete(m.manager.streams, m.ns)
}
}
if m.lease != "" {
if nsMap, ok := m.manager.byLease[m.ns]; ok {
if lsMap, ok := nsMap[m.lease]; ok {
delete(lsMap, m.name)
if len(lsMap) == 0 {
delete(nsMap, m.lease)
}
}
if len(nsMap) == 0 {
delete(m.manager.byLease, m.ns)
}
}
}
m.manager.rwlock.Unlock()
return m.Stream.Close()
}
type collectionContext struct {
manager *streamManager
removed []gc.Node
}
func (cc *collectionContext) All(fn func(gc.Node)) {
for ns, nsMap := range cc.manager.streams {
for name := range nsMap {
fn(gc.Node{
Type: metadata.ResourceStream,
Namespace: ns,
Key: name,
})
}
}
}
func (cc *collectionContext) Active(ns string, fn func(gc.Node)) {
if nsMap, ok := cc.manager.streams[ns]; ok {
for name, stream := range nsMap {
// Don't consider leased streams as active; the lease
// will determine their status
// TODO: expire non-active streams
if stream.lease == "" {
fn(gc.Node{
Type: metadata.ResourceStream,
Namespace: ns,
Key: name,
})
}
}
}
}
func (cc *collectionContext) Leased(ns, lease string, fn func(gc.Node)) {
if nsMap, ok := cc.manager.byLease[ns]; ok {
if lsMap, ok := nsMap[lease]; ok {
for name := range lsMap {
fn(gc.Node{
Type: metadata.ResourceStream,
Namespace: ns,
Key: name,
})
}
}
}
}
func (cc *collectionContext) Remove(n gc.Node) {
cc.removed = append(cc.removed, n)
}
func (cc *collectionContext) Cancel() error {
cc.manager.rwlock.Unlock()
return nil
}
func (cc *collectionContext) Finish() error {
defer cc.manager.rwlock.Unlock()
for _, node := range cc.removed {
var lease string
if nsMap, ok := cc.manager.streams[node.Namespace]; ok {
if ms, ok := nsMap[node.Key]; ok {
delete(nsMap, node.Key)
ms.Close()
lease = ms.lease
}
if len(nsMap) == 0 {
delete(cc.manager.streams, node.Namespace)
}
}
if lease != "" {
if nsMap, ok := cc.manager.byLease[node.Namespace]; ok {
if lsMap, ok := nsMap[lease]; ok {
delete(lsMap, node.Key)
if len(lsMap) == 0 {
delete(nsMap, lease)
}
}
if len(nsMap) == 0 {
delete(cc.manager.byLease, node.Namespace)
}
}
}
}
return nil
}

View File

@ -0,0 +1,58 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package transfer
import (
"github.com/containerd/containerd/leases"
"github.com/containerd/containerd/metadata"
"github.com/containerd/containerd/pkg/transfer/local"
"github.com/containerd/containerd/plugin"
// Load packages with type registrations
_ "github.com/containerd/containerd/pkg/transfer/archive"
_ "github.com/containerd/containerd/pkg/transfer/image"
)
func init() {
plugin.Register(&plugin.Registration{
Type: plugin.TransferPlugin,
ID: "local",
Requires: []plugin.Type{
plugin.LeasePlugin,
plugin.MetadataPlugin,
},
Config: &transferConfig{},
InitFn: func(ic *plugin.InitContext) (interface{}, error) {
m, err := ic.Get(plugin.MetadataPlugin)
if err != nil {
return nil, err
}
ms := m.(*metadata.DB)
l, err := ic.Get(plugin.LeasePlugin)
if err != nil {
return nil, err
}
return local.NewTransferService(l.(leases.Manager), ms.ContentStore(), metadata.NewImageStore(ms)), nil
},
})
}
type transferConfig struct {
// Max concurrent downloads
// Snapshotter platforms
}

View File

@ -100,20 +100,21 @@ func FetchHandler(ingester content.Ingester, fetcher Fetcher) images.HandlerFunc
case images.MediaTypeDockerSchema1Manifest:
return nil, fmt.Errorf("%v not supported", desc.MediaType)
default:
err := fetch(ctx, ingester, fetcher, desc)
err := Fetch(ctx, ingester, fetcher, desc)
if errdefs.IsAlreadyExists(err) {
return nil, nil
}
return nil, err
}
}
}
func fetch(ctx context.Context, ingester content.Ingester, fetcher Fetcher, desc ocispec.Descriptor) error {
// Fetch fetches the given digest into the provided ingester
func Fetch(ctx context.Context, ingester content.Ingester, fetcher Fetcher, desc ocispec.Descriptor) error {
log.G(ctx).Debug("fetch")
cw, err := content.OpenWriter(ctx, ingester, content.WithRef(MakeRefKey(ctx, desc)), content.WithDescriptor(desc))
if err != nil {
if errdefs.IsAlreadyExists(err) {
return nil
}
return err
}
defer cw.Close()
@ -135,7 +136,7 @@ func fetch(ctx context.Context, ingester content.Ingester, fetcher Fetcher, desc
if err != nil && !errdefs.IsAlreadyExists(err) {
return fmt.Errorf("failed commit on ref %q: %w", ws.Ref, err)
}
return nil
return err
}
rc, err := fetcher.Fetch(ctx, desc)
@ -200,14 +201,19 @@ func push(ctx context.Context, provider content.Provider, pusher Pusher, desc oc
func PushContent(ctx context.Context, pusher Pusher, desc ocispec.Descriptor, store content.Store, limiter *semaphore.Weighted, platform platforms.MatchComparer, wrapper func(h images.Handler) images.Handler) error {
var m sync.Mutex
manifestStack := []ocispec.Descriptor{}
manifests := []ocispec.Descriptor{}
indexStack := []ocispec.Descriptor{}
filterHandler := images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
switch desc.MediaType {
case images.MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest,
images.MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex:
case images.MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest:
m.Lock()
manifestStack = append(manifestStack, desc)
manifests = append(manifests, desc)
m.Unlock()
return nil, images.ErrStopHandler
case images.MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex:
m.Lock()
indexStack = append(indexStack, desc)
m.Unlock()
return nil, images.ErrStopHandler
default:
@ -234,16 +240,18 @@ func PushContent(ctx context.Context, pusher Pusher, desc ocispec.Descriptor, st
return err
}
if err := images.Dispatch(ctx, pushHandler, limiter, manifests...); err != nil {
return err
}
// Iterate in reverse order as seen, parent always uploaded after child
for i := len(manifestStack) - 1; i >= 0; i-- {
_, err := pushHandler(ctx, manifestStack[i])
for i := len(indexStack) - 1; i >= 0; i-- {
err := images.Dispatch(ctx, pushHandler, limiter, indexStack[i])
if err != nil {
// TODO(estesp): until we have a more complete method for index push, we need to report
// missing dependencies in an index/manifest list by sensing the "400 Bad Request"
// as a marker for this problem
if (manifestStack[i].MediaType == ocispec.MediaTypeImageIndex ||
manifestStack[i].MediaType == images.MediaTypeDockerSchema2ManifestList) &&
errors.Unwrap(err) != nil && strings.Contains(errors.Unwrap(err).Error(), "400 Bad Request") {
if errors.Unwrap(err) != nil && strings.Contains(errors.Unwrap(err).Error(), "400 Bad Request") {
return fmt.Errorf("manifest list/index references to blobs and/or manifests are missing in your target registry: %w", err)
}
return err

View File

@ -37,4 +37,6 @@ const (
SandboxStoreService = "sandbox-store-service"
// SandboxControllerService is the id of Sandbox's controller service
SandboxControllerService = "sandbox-controller-service"
// StreamingService is the id of the streaming service
StreamingService = "streaming-service"
)

View File

@ -0,0 +1,124 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package streaming
import (
"errors"
"io"
api "github.com/containerd/containerd/api/services/streaming/v1"
"github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/log"
"github.com/containerd/containerd/pkg/streaming"
"github.com/containerd/containerd/plugin"
"github.com/containerd/containerd/protobuf"
ptypes "github.com/containerd/containerd/protobuf/types"
"github.com/containerd/typeurl"
"google.golang.org/grpc"
)
func init() {
plugin.Register(&plugin.Registration{
Type: plugin.GRPCPlugin,
ID: "streaming",
Requires: []plugin.Type{
plugin.StreamingPlugin,
},
InitFn: func(ic *plugin.InitContext) (interface{}, error) {
i, err := ic.GetByID(plugin.StreamingPlugin, "manager")
if err != nil {
return nil, err
}
return &service{manager: i.(streaming.StreamManager)}, nil
},
})
}
type service struct {
manager streaming.StreamManager
api.UnimplementedStreamingServer
}
func (s *service) Register(server *grpc.Server) error {
api.RegisterStreamingServer(server, s)
return nil
}
func (s *service) Stream(srv api.Streaming_StreamServer) error {
// TODO: Timeout waiting
a, err := srv.Recv()
if err != nil {
return err
}
var i api.StreamInit
if err := typeurl.UnmarshalTo(a, &i); err != nil {
return err
}
// TODO: Save this response to avoid marshaling every time
response, err := typeurl.MarshalAny(&ptypes.Empty{})
if err != nil {
return err
}
if err := srv.Send(protobuf.FromAny(response)); err != nil {
return err
}
cc := make(chan struct{})
ss := &serviceStream{
s: srv,
cc: cc,
}
log.G(srv.Context()).WithField("stream", i.ID).Debug("registering stream")
if err := s.manager.Register(srv.Context(), i.ID, ss); err != nil {
return err
}
select {
case <-srv.Context().Done():
// TODO: Should return error if not cancelled?
case <-cc:
}
return nil
}
type serviceStream struct {
s api.Streaming_StreamServer
cc chan struct{}
}
func (ss *serviceStream) Send(a typeurl.Any) error {
return errdefs.FromGRPC(ss.s.Send(protobuf.FromAny(a)))
}
func (ss *serviceStream) Recv() (a typeurl.Any, err error) {
a, err = ss.s.Recv()
if !errors.Is(err, io.EOF) {
err = errdefs.FromGRPC(err)
}
return
}
func (ss *serviceStream) Close() error {
select {
case <-ss.cc:
default:
close(ss.cc)
}
return nil
}

View File

@ -0,0 +1,157 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package transfer
import (
"context"
transferapi "github.com/containerd/containerd/api/services/transfer/v1"
transferTypes "github.com/containerd/containerd/api/types/transfer"
"github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/log"
"github.com/containerd/containerd/pkg/streaming"
"github.com/containerd/containerd/pkg/transfer"
"github.com/containerd/containerd/pkg/transfer/plugins"
"github.com/containerd/containerd/plugin"
ptypes "github.com/containerd/containerd/protobuf/types"
"github.com/containerd/typeurl"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/types/known/emptypb"
)
func init() {
plugin.Register(&plugin.Registration{
Type: plugin.GRPCPlugin,
ID: "transfer",
Requires: []plugin.Type{
plugin.TransferPlugin,
plugin.StreamingPlugin,
},
InitFn: newService,
})
}
type service struct {
transferrers []transfer.Transferrer
streamManager streaming.StreamManager
transferapi.UnimplementedTransferServer
}
func newService(ic *plugin.InitContext) (interface{}, error) {
plugins, err := ic.GetByType(plugin.TransferPlugin)
if err != nil {
return nil, err
}
// TODO: how to determine order?
t := make([]transfer.Transferrer, 0, len(plugins))
for _, p := range plugins {
i, err := p.Instance()
if err != nil {
return nil, err
}
t = append(t, i.(transfer.Transferrer))
}
sp, err := ic.GetByID(plugin.StreamingPlugin, "manager")
if err != nil {
return nil, err
}
return &service{
transferrers: t,
streamManager: sp.(streaming.StreamManager),
}, nil
}
func (s *service) Register(gs *grpc.Server) error {
transferapi.RegisterTransferServer(gs, s)
return nil
}
func (s *service) Transfer(ctx context.Context, req *transferapi.TransferRequest) (*emptypb.Empty, error) {
var transferOpts []transfer.Opt
if req.Options != nil {
if req.Options.ProgressStream != "" {
stream, err := s.streamManager.Get(ctx, req.Options.ProgressStream)
if err != nil {
return nil, errdefs.ToGRPC(err)
}
defer stream.Close()
pf := func(p transfer.Progress) {
any, err := typeurl.MarshalAny(&transferTypes.Progress{
Event: p.Event,
Name: p.Name,
Parents: p.Parents,
Progress: p.Progress,
Total: p.Total,
})
if err != nil {
log.G(ctx).WithError(err).Warnf("event could not be marshaled: %v/%v", p.Event, p.Name)
return
}
if err := stream.Send(any); err != nil {
log.G(ctx).WithError(err).Warnf("event not sent: %v/%v", p.Event, p.Name)
return
}
}
transferOpts = append(transferOpts, transfer.WithProgress(pf))
}
}
src, err := s.convertAny(ctx, req.Source)
if err != nil {
return nil, errdefs.ToGRPC(err)
}
dst, err := s.convertAny(ctx, req.Destination)
if err != nil {
return nil, errdefs.ToGRPC(err)
}
for _, t := range s.transferrers {
if err := t.Transfer(ctx, src, dst, transferOpts...); err == nil {
return &ptypes.Empty{}, nil
} else if !errdefs.IsNotImplemented(err) {
return nil, errdefs.ToGRPC(err)
}
}
return nil, status.Errorf(codes.Unimplemented, "method Transfer not implemented for %s to %s", req.Source.GetTypeUrl(), req.Destination.GetTypeUrl())
}
func (s *service) convertAny(ctx context.Context, a typeurl.Any) (interface{}, error) {
obj, err := plugins.ResolveType(a)
if err != nil {
if errdefs.IsNotFound(err) {
return typeurl.UnmarshalAny(a)
}
return nil, err
}
switch v := obj.(type) {
case streamUnmarshaler:
err = v.UnmarshalAny(ctx, s.streamManager, a)
return obj, err
default:
log.G(ctx).Debug("unmarshling to..")
err = typeurl.UnmarshalTo(a, obj)
return obj, err
}
}
type streamUnmarshaler interface {
UnmarshalAny(context.Context, streaming.StreamGetter, typeurl.Any) error
}

86
transfer.go Normal file
View File

@ -0,0 +1,86 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package containerd
import (
"context"
streamingapi "github.com/containerd/containerd/api/services/streaming/v1"
transferapi "github.com/containerd/containerd/api/services/transfer/v1"
"github.com/containerd/containerd/pkg/streaming"
"github.com/containerd/containerd/pkg/transfer"
"github.com/containerd/containerd/pkg/transfer/proxy"
"github.com/containerd/containerd/protobuf"
"github.com/containerd/typeurl"
)
func (c *Client) Transfer(ctx context.Context, src interface{}, dest interface{}, opts ...transfer.Opt) error {
return proxy.NewTransferrer(transferapi.NewTransferClient(c.conn), c.streamCreator()).Transfer(ctx, src, dest, opts...)
}
func (c *Client) streamCreator() streaming.StreamCreator {
return &streamCreator{
client: streamingapi.NewStreamingClient(c.conn),
}
}
type streamCreator struct {
client streamingapi.StreamingClient
}
func (sc *streamCreator) Create(ctx context.Context, id string) (streaming.Stream, error) {
stream, err := sc.client.Stream(ctx)
if err != nil {
return nil, err
}
a, err := typeurl.MarshalAny(&streamingapi.StreamInit{
ID: id,
})
if err != nil {
return nil, err
}
err = stream.Send(protobuf.FromAny(a))
if err != nil {
return nil, err
}
// Receive an ack that the stream has been initialized and is ready
if _, err = stream.Recv(); err != nil {
return nil, err
}
return &clientStream{
s: stream,
}, nil
}
type clientStream struct {
s streamingapi.Streaming_StreamClient
}
func (cs *clientStream) Send(a typeurl.Any) error {
return cs.s.Send(protobuf.FromAny(a))
}
func (cs *clientStream) Recv() (typeurl.Any, error) {
return cs.s.Recv()
}
func (cs *clientStream) Close() error {
return cs.s.CloseSend()
}
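From the client's point of view the whole flow reduces to a single call: Transfer marshals the source and destination, opens a progress stream through the streaming service when requested, and lets the daemon pick a transferrer. A short sketch, assuming client is a connected *containerd.Client and src/dst are wire-marshalable source and destination values from packages outside this excerpt:
import (
	"context"
	"fmt"
	"github.com/containerd/containerd"
	"github.com/containerd/containerd/pkg/transfer"
)
// transferSketch drives a transfer through the gRPC proxy with progress reporting.
func transferSketch(ctx context.Context, client *containerd.Client, src, dst interface{}) error {
	return client.Transfer(ctx, src, dst, transfer.WithProgress(func(p transfer.Progress) {
		fmt.Printf("%s %s %d/%d\n", p.Event, p.Name, p.Progress, p.Total)
	}))
}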