commit 372cdfac3b
@@ -2193,6 +2193,13 @@ file {
        type: TYPE_STRING
        json_name: "name"
      }
+     field {
+       name: "sync"
+       number: 2
+       label: LABEL_OPTIONAL
+       type: TYPE_BOOL
+       json_name: "sync"
+     }
    }
    service {
      name: "Images"
@@ -163,6 +163,11 @@ func (*ListImagesResponse) Descriptor() ([]byte, []int) { return fileDescriptorI

 type DeleteImageRequest struct {
 	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	// Sync indicates that the delete and cleanup should be done
+	// synchronously before returning to the caller
+	//
+	// Default is false
+	Sync bool `protobuf:"varint,2,opt,name=sync,proto3" json:"sync,omitempty"`
 }

 func (m *DeleteImageRequest) Reset() { *m = DeleteImageRequest{} }
@@ -717,6 +722,16 @@ func (m *DeleteImageRequest) MarshalTo(dAtA []byte) (int, error) {
 		i = encodeVarintImages(dAtA, i, uint64(len(m.Name)))
 		i += copy(dAtA[i:], m.Name)
 	}
+	if m.Sync {
+		dAtA[i] = 0x10
+		i++
+		if m.Sync {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i++
+	}
 	return i, nil
 }

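A note on the constant 0x10 written above: a protobuf key byte is (field_number << 3) | wire_type, and sync is field 2 with varint wire type 0. A minimal check (illustration only, not part of the commit):

	package main

	import "fmt"

	func main() {
		const fieldNumber, wireType = 2, 0 // "sync" is field 2, varint wire type
		fmt.Printf("0x%02x\n", fieldNumber<<3|wireType) // prints 0x10
	}
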
@@ -840,6 +855,9 @@ func (m *DeleteImageRequest) Size() (n int) {
 	if l > 0 {
 		n += 1 + l + sovImages(uint64(l))
 	}
+	if m.Sync {
+		n += 2
+	}
 	return n
 }

@@ -967,6 +985,7 @@ func (this *DeleteImageRequest) String() string {
 	}
 	s := strings.Join([]string{`&DeleteImageRequest{`,
 		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+		`Sync:` + fmt.Sprintf("%v", this.Sync) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -1999,6 +2018,26 @@ func (m *DeleteImageRequest) Unmarshal(dAtA []byte) error {
 			}
 			m.Name = string(dAtA[iNdEx:postIndex])
 			iNdEx = postIndex
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Sync", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowImages
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Sync = bool(v != 0)
 		default:
 			iNdEx = preIndex
 			skippy, err := skipImages(dAtA[iNdEx:])
@@ -2130,46 +2169,47 @@ func init() {
 }

 var fileDescriptorImages = []byte{
-	// 650 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x55, 0x4f, 0x6f, 0xd3, 0x4e,
-	0x10, 0x8d, 0x93, 0xd4, 0x6d, 0x27, 0x87, 0x5f, 0x7f, 0x4b, 0x85, 0x2c, 0x03, 0x69, 0x14, 0x81,
-	0x94, 0x0b, 0x6b, 0x1a, 0x2e, 0xd0, 0x4a, 0x88, 0xa6, 0x2d, 0x05, 0xa9, 0x70, 0x30, 0xff, 0x2a,
-	0x2e, 0xd5, 0x26, 0x99, 0x18, 0x2b, 0x76, 0x6c, 0xbc, 0x9b, 0x48, 0xb9, 0xf1, 0x11, 0x90, 0xe0,
-	0x43, 0xf5, 0xc8, 0x91, 0x13, 0xd0, 0x1c, 0xf8, 0x1c, 0xc8, 0xbb, 0x1b, 0x9a, 0x26, 0x11, 0x6e,
-	0x4a, 0x6f, 0xe3, 0xf8, 0xbd, 0x79, 0x33, 0x6f, 0x66, 0x62, 0xd8, 0xf3, 0x7c, 0xf1, 0xbe, 0xdf,
-	0xa4, 0xad, 0x28, 0x74, 0x5a, 0x51, 0x4f, 0x30, 0xbf, 0x87, 0x49, 0x7b, 0x32, 0x64, 0xb1, 0xef,
-	0x70, 0x4c, 0x06, 0x7e, 0x0b, 0xb9, 0xe3, 0x87, 0xcc, 0x43, 0xee, 0x0c, 0x36, 0x75, 0x44, 0xe3,
-	0x24, 0x12, 0x11, 0xb9, 0x75, 0x86, 0xa7, 0x63, 0x2c, 0xd5, 0x88, 0xc1, 0xa6, 0xbd, 0xee, 0x45,
-	0x5e, 0x24, 0x91, 0x4e, 0x1a, 0x29, 0x92, 0x7d, 0xc3, 0x8b, 0x22, 0x2f, 0x40, 0x47, 0x3e, 0x35,
-	0xfb, 0x1d, 0x07, 0xc3, 0x58, 0x0c, 0xf5, 0xcb, 0xca, 0xf4, 0xcb, 0x8e, 0x8f, 0x41, 0xfb, 0x38,
-	0x64, 0xbc, 0xab, 0x11, 0x1b, 0xd3, 0x08, 0xe1, 0x87, 0xc8, 0x05, 0x0b, 0x63, 0x0d, 0xd8, 0xbe,
-	0x50, 0x6b, 0x62, 0x18, 0x23, 0x77, 0xda, 0xc8, 0x5b, 0x89, 0x1f, 0x8b, 0x28, 0x51, 0xe4, 0xea,
-	0xaf, 0x3c, 0x2c, 0x3d, 0x4b, 0x1b, 0x20, 0x04, 0x8a, 0x3d, 0x16, 0xa2, 0x65, 0x54, 0x8c, 0xda,
-	0xaa, 0x2b, 0x63, 0xf2, 0x14, 0xcc, 0x80, 0x35, 0x31, 0xe0, 0x56, 0xbe, 0x52, 0xa8, 0x95, 0xea,
-	0xf7, 0xe8, 0x5f, 0x0d, 0xa0, 0x32, 0x13, 0x3d, 0x94, 0x94, 0xfd, 0x9e, 0x48, 0x86, 0xae, 0xe6,
-	0x93, 0x2d, 0x30, 0x05, 0x4b, 0x3c, 0x14, 0x56, 0xa1, 0x62, 0xd4, 0x4a, 0xf5, 0x9b, 0x93, 0x99,
-	0x64, 0x6d, 0x74, 0xef, 0x4f, 0x6d, 0x8d, 0xe2, 0xc9, 0xf7, 0x8d, 0x9c, 0xab, 0x19, 0x64, 0x17,
-	0xa0, 0x95, 0x20, 0x13, 0xd8, 0x3e, 0x66, 0xc2, 0x5a, 0x96, 0x7c, 0x9b, 0x2a, 0x5b, 0xe8, 0xd8,
-	0x16, 0xfa, 0x6a, 0x6c, 0x4b, 0x63, 0x25, 0x65, 0x7f, 0xfa, 0xb1, 0x61, 0xb8, 0xab, 0x9a, 0xb7,
-	0x23, 0x93, 0xf4, 0xe3, 0xf6, 0x38, 0xc9, 0xca, 0x22, 0x49, 0x34, 0x6f, 0x47, 0xd8, 0x0f, 0xa1,
-	0x34, 0xd1, 0x1c, 0x59, 0x83, 0x42, 0x17, 0x87, 0xda, 0xb1, 0x34, 0x24, 0xeb, 0xb0, 0x34, 0x60,
-	0x41, 0x1f, 0xad, 0xbc, 0xfc, 0x4d, 0x3d, 0x6c, 0xe5, 0x1f, 0x18, 0xd5, 0x3b, 0xf0, 0xdf, 0x01,
-	0x0a, 0x69, 0x90, 0x8b, 0x1f, 0xfa, 0xc8, 0xc5, 0x3c, 0xc7, 0xab, 0x2f, 0x60, 0xed, 0x0c, 0xc6,
-	0xe3, 0xa8, 0xc7, 0x91, 0x6c, 0xc1, 0x92, 0xb4, 0x58, 0x02, 0x4b, 0xf5, 0xdb, 0x17, 0x19, 0x82,
-	0xab, 0x28, 0xd5, 0x37, 0x40, 0x76, 0xa5, 0x07, 0xe7, 0x94, 0x1f, 0x5f, 0x22, 0xa3, 0x1e, 0x8a,
-	0xce, 0xfb, 0x16, 0xae, 0x9d, 0xcb, 0xab, 0x4b, 0xfd, 0xf7, 0xc4, 0x9f, 0x0d, 0x20, 0xaf, 0xa5,
-	0xe1, 0x57, 0x5b, 0x31, 0xd9, 0x86, 0x92, 0x1a, 0xa4, 0x3c, 0x2e, 0x39, 0xa0, 0x79, 0x1b, 0xf0,
-	0x24, 0xbd, 0xbf, 0xe7, 0x8c, 0x77, 0x5d, 0xbd, 0x2f, 0x69, 0x9c, 0xb6, 0x7b, 0xae, 0xa8, 0x2b,
-	0x6b, 0xf7, 0x2e, 0xfc, 0x7f, 0xe8, 0x73, 0x35, 0x70, 0x3e, 0x6e, 0xd6, 0x82, 0xe5, 0x8e, 0x1f,
-	0x08, 0x4c, 0xb8, 0x65, 0x54, 0x0a, 0xb5, 0x55, 0x77, 0xfc, 0x58, 0x3d, 0x02, 0x32, 0x09, 0xd7,
-	0x65, 0x34, 0xc0, 0x54, 0x22, 0x12, 0xbe, 0x58, 0x1d, 0x9a, 0x59, 0xad, 0x01, 0xd9, 0xc3, 0x00,
-	0xa7, 0x6c, 0x9f, 0xb3, 0xa2, 0xf5, 0x2f, 0x45, 0x30, 0x55, 0x01, 0xa4, 0x03, 0x85, 0x03, 0x14,
-	0x84, 0x66, 0xe8, 0x4d, 0x2d, 0xbe, 0xed, 0x5c, 0x18, 0xaf, 0x1b, 0xec, 0x42, 0x31, 0x6d, 0x9b,
-	0x64, 0xfd, 0xff, 0xcc, 0x58, 0x69, 0x6f, 0x2e, 0xc0, 0xd0, 0x62, 0x11, 0x98, 0x6a, 0xb5, 0x49,
-	0x16, 0x79, 0xf6, 0xb2, 0xec, 0xfa, 0x22, 0x94, 0x33, 0x41, 0xb5, 0x5c, 0x99, 0x82, 0xb3, 0x87,
-	0x91, 0x29, 0x38, 0x6f, 0x6d, 0x5f, 0x82, 0xa9, 0x66, 0x9d, 0x29, 0x38, 0xbb, 0x12, 0xf6, 0xf5,
-	0x99, 0x93, 0xd9, 0x4f, 0xbf, 0x67, 0x8d, 0xa3, 0x93, 0xd3, 0x72, 0xee, 0xdb, 0x69, 0x39, 0xf7,
-	0x71, 0x54, 0x36, 0x4e, 0x46, 0x65, 0xe3, 0xeb, 0xa8, 0x6c, 0xfc, 0x1c, 0x95, 0x8d, 0x77, 0x8f,
-	0x2e, 0xf9, 0xed, 0xdd, 0x56, 0xd1, 0x51, 0xae, 0x69, 0x4a, 0xad, 0xfb, 0xbf, 0x03, 0x00, 0x00,
-	0xff, 0xff, 0x86, 0xe6, 0x32, 0x0a, 0xc6, 0x07, 0x00, 0x00,
+	// 659 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x55, 0xcd, 0x6e, 0xd3, 0x40,
+	0x10, 0x8e, 0x93, 0xd4, 0x6d, 0x27, 0x07, 0xca, 0x52, 0x21, 0xcb, 0x40, 0x1a, 0x45, 0x20, 0xe5,
+	0xc2, 0x9a, 0x86, 0x0b, 0xb4, 0x08, 0xd1, 0xb4, 0xa5, 0x20, 0x15, 0x0e, 0xe6, 0xaf, 0xe2, 0x52,
+	0x6d, 0x92, 0x89, 0xb1, 0x62, 0xc7, 0xc6, 0xbb, 0x89, 0x94, 0x1b, 0x8f, 0x80, 0x04, 0x0f, 0xd5,
+	0x23, 0x47, 0x4e, 0x40, 0x73, 0xe0, 0x39, 0x90, 0x77, 0x37, 0x34, 0x4d, 0x22, 0x92, 0x94, 0xde,
+	0x66, 0xed, 0xef, 0x9b, 0x9f, 0x6f, 0x66, 0x76, 0x61, 0xcf, 0xf3, 0xc5, 0x87, 0x6e, 0x9d, 0x36,
+	0xa2, 0xd0, 0x69, 0x44, 0x1d, 0xc1, 0xfc, 0x0e, 0x26, 0xcd, 0x51, 0x93, 0xc5, 0xbe, 0xc3, 0x31,
+	0xe9, 0xf9, 0x0d, 0xe4, 0x8e, 0x1f, 0x32, 0x0f, 0xb9, 0xd3, 0xdb, 0xd4, 0x16, 0x8d, 0x93, 0x48,
+	0x44, 0xe4, 0xd6, 0x19, 0x9e, 0x0e, 0xb1, 0x54, 0x23, 0x7a, 0x9b, 0xf6, 0xba, 0x17, 0x79, 0x91,
+	0x44, 0x3a, 0xa9, 0xa5, 0x48, 0xf6, 0x0d, 0x2f, 0x8a, 0xbc, 0x00, 0x1d, 0x79, 0xaa, 0x77, 0x5b,
+	0x0e, 0x86, 0xb1, 0xe8, 0xeb, 0x9f, 0xa5, 0xf1, 0x9f, 0x2d, 0x1f, 0x83, 0xe6, 0x71, 0xc8, 0x78,
+	0x5b, 0x23, 0x36, 0xc6, 0x11, 0xc2, 0x0f, 0x91, 0x0b, 0x16, 0xc6, 0x1a, 0xb0, 0x3d, 0x57, 0x69,
+	0xa2, 0x1f, 0x23, 0x77, 0x9a, 0xc8, 0x1b, 0x89, 0x1f, 0x8b, 0x28, 0x51, 0xe4, 0xf2, 0xef, 0x2c,
+	0x2c, 0x3d, 0x4f, 0x0b, 0x20, 0x04, 0xf2, 0x1d, 0x16, 0xa2, 0x65, 0x94, 0x8c, 0xca, 0xaa, 0x2b,
+	0x6d, 0xf2, 0x0c, 0xcc, 0x80, 0xd5, 0x31, 0xe0, 0x56, 0xb6, 0x94, 0xab, 0x14, 0xaa, 0xf7, 0xe8,
+	0x3f, 0x05, 0xa0, 0xd2, 0x13, 0x3d, 0x94, 0x94, 0xfd, 0x8e, 0x48, 0xfa, 0xae, 0xe6, 0x93, 0x2d,
+	0x30, 0x05, 0x4b, 0x3c, 0x14, 0x56, 0xae, 0x64, 0x54, 0x0a, 0xd5, 0x9b, 0xa3, 0x9e, 0x64, 0x6e,
+	0x74, 0xef, 0x6f, 0x6e, 0xb5, 0xfc, 0xc9, 0x8f, 0x8d, 0x8c, 0xab, 0x19, 0x64, 0x17, 0xa0, 0x91,
+	0x20, 0x13, 0xd8, 0x3c, 0x66, 0xc2, 0x5a, 0x96, 0x7c, 0x9b, 0x2a, 0x59, 0xe8, 0x50, 0x16, 0xfa,
+	0x7a, 0x28, 0x4b, 0x6d, 0x25, 0x65, 0x7f, 0xfe, 0xb9, 0x61, 0xb8, 0xab, 0x9a, 0xb7, 0x23, 0x9d,
+	0x74, 0xe3, 0xe6, 0xd0, 0xc9, 0xca, 0x22, 0x4e, 0x34, 0x6f, 0x47, 0xd8, 0x0f, 0xa1, 0x30, 0x52,
+	0x1c, 0x59, 0x83, 0x5c, 0x1b, 0xfb, 0x5a, 0xb1, 0xd4, 0x24, 0xeb, 0xb0, 0xd4, 0x63, 0x41, 0x17,
+	0xad, 0xac, 0xfc, 0xa6, 0x0e, 0x5b, 0xd9, 0x07, 0x46, 0xf9, 0x0e, 0x5c, 0x39, 0x40, 0x21, 0x05,
+	0x72, 0xf1, 0x63, 0x17, 0xb9, 0x98, 0xa6, 0x78, 0xf9, 0x25, 0xac, 0x9d, 0xc1, 0x78, 0x1c, 0x75,
+	0x38, 0x92, 0x2d, 0x58, 0x92, 0x12, 0x4b, 0x60, 0xa1, 0x7a, 0x7b, 0x9e, 0x26, 0xb8, 0x8a, 0x52,
+	0x7e, 0x0b, 0x64, 0x57, 0x6a, 0x70, 0x2e, 0xf2, 0x93, 0x0b, 0x78, 0xd4, 0x4d, 0xd1, 0x7e, 0xdf,
+	0xc1, 0xb5, 0x73, 0x7e, 0x75, 0xaa, 0xff, 0xef, 0xf8, 0x8b, 0x01, 0xe4, 0x8d, 0x14, 0xfc, 0x72,
+	0x33, 0x26, 0xdb, 0x50, 0x50, 0x8d, 0x94, 0xcb, 0x25, 0x1b, 0x34, 0x6d, 0x02, 0x9e, 0xa6, 0xfb,
+	0xf7, 0x82, 0xf1, 0xb6, 0xab, 0xe7, 0x25, 0xb5, 0xd3, 0x72, 0xcf, 0x25, 0x75, 0x69, 0xe5, 0xde,
+	0x85, 0xab, 0x87, 0x3e, 0x57, 0x0d, 0xe7, 0xc3, 0x62, 0x2d, 0x58, 0x6e, 0xf9, 0x81, 0xc0, 0x84,
+	0x5b, 0x46, 0x29, 0x57, 0x59, 0x75, 0x87, 0xc7, 0xf2, 0x11, 0x90, 0x51, 0xb8, 0x4e, 0xa3, 0x06,
+	0xa6, 0x0a, 0x22, 0xe1, 0x8b, 0xe5, 0xa1, 0x99, 0xe5, 0x47, 0x40, 0xf6, 0x30, 0xc0, 0x31, 0xd9,
+	0xa7, 0x5d, 0x0a, 0x04, 0xf2, 0xbc, 0xdf, 0x69, 0x48, 0x05, 0x57, 0x5c, 0x69, 0x57, 0xbf, 0xe6,
+	0xc1, 0x54, 0x49, 0x91, 0x16, 0xe4, 0x0e, 0x50, 0x10, 0x3a, 0x23, 0x87, 0xb1, 0x65, 0xb0, 0x9d,
+	0xb9, 0xf1, 0xba, 0xe8, 0x36, 0xe4, 0x53, 0x29, 0xc8, 0xac, 0x3b, 0x69, 0x42, 0x5e, 0x7b, 0x73,
+	0x01, 0x86, 0x0e, 0x16, 0x81, 0xa9, 0xc6, 0x9d, 0xcc, 0x22, 0x4f, 0x6e, 0x9b, 0x5d, 0x5d, 0x84,
+	0x72, 0x16, 0x50, 0x0d, 0xdc, 0xcc, 0x80, 0x93, 0xcb, 0x32, 0x33, 0xe0, 0xb4, 0x51, 0x7e, 0x05,
+	0xa6, 0xea, 0xff, 0xcc, 0x80, 0x93, 0x63, 0x62, 0x5f, 0x9f, 0x58, 0xa3, 0xfd, 0xf4, 0x8d, 0xab,
+	0x1d, 0x9d, 0x9c, 0x16, 0x33, 0xdf, 0x4f, 0x8b, 0x99, 0x4f, 0x83, 0xa2, 0x71, 0x32, 0x28, 0x1a,
+	0xdf, 0x06, 0x45, 0xe3, 0xd7, 0xa0, 0x68, 0xbc, 0x7f, 0x7c, 0xc1, 0xf7, 0x78, 0x5b, 0x59, 0x47,
+	0x99, 0xba, 0x29, 0x63, 0xdd, 0xff, 0x13, 0x00, 0x00, 0xff, 0xff, 0x24, 0x4e, 0xca, 0x64, 0xda,
+	0x07, 0x00, 0x00,
 }

@@ -115,4 +115,10 @@ message ListImagesResponse {

 message DeleteImageRequest {
 	string name = 1;
+
+	// Sync indicates that the delete and cleanup should be done
+	// synchronously before returning to the caller
+	//
+	// Default is false
+	bool sync = 2;
 }
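With the sync field in the proto, a gRPC client can request synchronous deletion directly. A minimal sketch against the generated API (assumes an already-established ImagesClient and the usual containerd namespace handling; not code from the commit):

	import (
		imagesapi "github.com/containerd/containerd/api/services/images/v1"
		"github.com/containerd/containerd/namespaces"
		"golang.org/x/net/context"
	)

	func deleteImageSync(ctx context.Context, client imagesapi.ImagesClient, name string) error {
		ctx = namespaces.WithNamespace(ctx, "default")
		// Sync asks the service to run garbage collection before returning.
		_, err := client.Delete(ctx, &imagesapi.DeleteImageRequest{
			Name: name,
			Sync: true,
		})
		return err
	}
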
@@ -3,6 +3,7 @@ package main
 // register containerd builtins here
 import (
 	_ "github.com/containerd/containerd/diff/walking"
+	_ "github.com/containerd/containerd/gc/scheduler"
 	_ "github.com/containerd/containerd/services/containers"
 	_ "github.com/containerd/containerd/services/content"
 	_ "github.com/containerd/containerd/services/diff"
@@ -270,8 +270,14 @@ var removeCommand = cli.Command{
 	Name:        "remove",
 	Aliases:     []string{"rm"},
 	Usage:       "remove one or more images by reference",
-	ArgsUsage:   "<ref> [<ref>, ...]",
+	ArgsUsage:   "[flags] <ref> [<ref>, ...]",
 	Description: "remove one or more images by reference",
+	Flags: []cli.Flag{
+		cli.BoolFlag{
+			Name:  "sync",
+			Usage: "Synchronously remove image and all associated resources",
+		},
+	},
 	Action: func(context *cli.Context) error {
 		client, ctx, cancel, err := commands.NewClient(context)
 		if err != nil {
@@ -282,8 +288,12 @@ var removeCommand = cli.Command{
 			exitErr    error
 			imageStore = client.ImageService()
 		)
-		for _, target := range context.Args() {
-			if err := imageStore.Delete(ctx, target); err != nil {
+		for i, target := range context.Args() {
+			var opts []images.DeleteOpt
+			if context.Bool("sync") && i == context.NArg()-1 {
+				opts = append(opts, images.SynchronousDelete())
+			}
+			if err := imageStore.Delete(ctx, target, opts...); err != nil {
 				if !errdefs.IsNotFound(err) {
 					if exitErr == nil {
 						exitErr = errors.Wrapf(err, "unable to delete %v", target)
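Note that the sync option is applied only when i == context.NArg()-1, so a multi-image removal deletes earlier references asynchronously and waits only on the last one, batching a single garbage collection at the end. A typical invocation (assuming the stock ctr binary) would be:

	ctr images remove --sync docker.io/library/redis:latest
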
@@ -52,6 +52,10 @@ func makeTest(t *testing.T, name string, storeFn func(ctx context.Context, root
 	}
 }

+var labels = map[string]string{
+	"containerd.io/gc.root": time.Now().UTC().Format(time.RFC3339),
+}
+
 func checkContentStoreWriter(ctx context.Context, t *testing.T, cs content.Store) {
 	c1, d1 := createContent(256, 1)
 	w1, err := cs.Writer(ctx, "c1", 0, "")
@@ -118,7 +122,7 @@ func checkContentStoreWriter(ctx context.Context, t *testing.T, cs content.Store
 	}

 	preCommit := time.Now()
-	if err := s.writer.Commit(ctx, 0, ""); err != nil {
+	if err := s.writer.Commit(ctx, 0, "", content.WithLabels(labels)); err != nil {
 		t.Fatal(err)
 	}
 	postCommit := time.Now()
@@ -130,6 +134,7 @@ func checkContentStoreWriter(ctx context.Context, t *testing.T, cs content.Store
 	info := content.Info{
 		Digest: s.digest,
 		Size:   int64(len(s.content)),
+		Labels: labels,
 	}
 	if err := checkInfo(ctx, cs, s.digest, info, preCommit, postCommit, preCommit, postCommit); err != nil {
 		t.Fatalf("Check info failed: %+v", err)
@@ -264,7 +269,7 @@ func checkUploadStatus(ctx context.Context, t *testing.T, cs content.Store) {
 	checkStatus(t, w1, expected, d1, preStart, postStart, preUpdate, postUpdate)

 	preCommit := time.Now()
-	if err := w1.Commit(ctx, 0, ""); err != nil {
+	if err := w1.Commit(ctx, 0, "", content.WithLabels(labels)); err != nil {
 		t.Fatalf("Commit failed: %+v", err)
 	}
 	postCommit := time.Now()
@@ -272,6 +277,7 @@ func checkUploadStatus(ctx context.Context, t *testing.T, cs content.Store) {
 	info := content.Info{
 		Digest: d1,
 		Size:   256,
+		Labels: labels,
 	}

 	if err := checkInfo(ctx, cs, d1, info, preCommit, postCommit, preCommit, postCommit); err != nil {
@@ -292,9 +298,11 @@ func checkLabels(ctx context.Context, t *testing.T, cs content.Store) {
 		t.Fatalf("Failed to write: %+v", err)
 	}

+	rootTime := time.Now().UTC().Format(time.RFC3339)
 	labels := map[string]string{
 		"k1": "v1",
 		"k2": "v2",
+		"containerd.io/gc.root": rootTime,
 	}

 	preCommit := time.Now()
@@ -330,6 +338,7 @@ func checkLabels(ctx context.Context, t *testing.T, cs content.Store) {

 	info.Labels = map[string]string{
 		"k1": "v1",
+		"containerd.io/gc.root": rootTime,
 	}
 	preUpdate = time.Now()
 	if _, err := cs.Update(ctx, info, "labels.k3", "labels.k1"); err != nil {
gc/scheduler/scheduler.go (new file, 332 lines)
@@ -0,0 +1,332 @@
package scheduler

import (
	"context"
	"errors"
	"fmt"
	"sync"
	"time"

	"github.com/containerd/containerd/log"
	"github.com/containerd/containerd/metadata"
	"github.com/containerd/containerd/plugin"
)

// config configures the garbage collection policies.
type config struct {
	// PauseThreshold represents the maximum amount of time garbage
	// collection should be scheduled based on the average pause time.
	// For example, a value of 0.02 means that scheduled garbage collection
	// pauses should present at most 2% of real time,
	// or 20ms of every second.
	//
	// A maximum value of .5 is enforced to prevent over scheduling of the
	// garbage collector, trigger options are available to run in a more
	// predictable time frame after mutation.
	//
	// Default is 0.02
	PauseThreshold float64 `toml:"pause_threshold"`

	// DeletionThreshold is used to guarantee that a garbage collection is
	// scheduled after configured number of deletions have occurred
	// since the previous garbage collection. A value of 0 indicates that
	// garbage collection will not be triggered by deletion count.
	//
	// Default 0
	DeletionThreshold int `toml:"deletion_threshold"`

	// MutationThreshold is used to guarantee that a garbage collection is
	// run after a configured number of database mutations have occurred
	// since the previous garbage collection. A value of 0 indicates that
	// garbage collection will only be run after a manual trigger or
	// deletion. Unlike the deletion threshold, the mutation threshold does
	// not cause scheduling of a garbage collection, but ensures GC is run
	// at the next scheduled GC.
	//
	// Default 100
	MutationThreshold int `toml:"mutation_threshold"`

	// ScheduleDelay is the duration in the future to schedule a garbage
	// collection triggered manually or by exceeding the configured
	// threshold for deletion or mutation. A zero value will immediately
	// schedule. Use suffix "ms" for millisecond and "s" for second.
	//
	// Default is "0ms"
	ScheduleDelay duration `toml:"schedule_delay"`

	// StartupDelay is the delay duration to do an initial garbage
	// collection after startup. The initial garbage collection is used to
	// set the base for pause threshold and should be scheduled in the
	// future to avoid slowing down other startup processes. Use suffix
	// "ms" for millisecond and "s" for second.
	//
	// Default is "100ms"
	StartupDelay duration `toml:"startup_delay"`
}

type duration time.Duration

func (d *duration) UnmarshalText(text []byte) error {
	ed, err := time.ParseDuration(string(text))
	if err != nil {
		return err
	}
	*d = duration(ed)
	return nil
}

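The duration type exists so human-readable strings in the daemon config decode cleanly: the TOML decoder containerd used at the time (BurntSushi/toml, assumed here) honors encoding.TextUnmarshaler, which is what UnmarshalText implements. A sketch of the decoding path, using the toml tags declared on config above (illustrative only; the exact section name in containerd's config.toml is omitted):

	import "github.com/BurntSushi/toml"

	func exampleDecode() (*config, error) {
		doc := `
	pause_threshold = 0.02
	deletion_threshold = 20
	mutation_threshold = 100
	schedule_delay = "0ms"
	startup_delay = "100ms"
	`
		var cfg config
		if _, err := toml.Decode(doc, &cfg); err != nil {
			return nil, err
		}
		return &cfg, nil // StartupDelay decodes to duration(100 * time.Millisecond)
	}
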
func init() {
	plugin.Register(&plugin.Registration{
		Type: plugin.GCPlugin,
		ID:   "scheduler",
		Requires: []plugin.Type{
			plugin.MetadataPlugin,
		},
		Config: &config{
			PauseThreshold:    0.02,
			DeletionThreshold: 0,
			MutationThreshold: 100,
			ScheduleDelay:     duration(0),
			StartupDelay:      duration(100 * time.Millisecond),
		},
		InitFn: func(ic *plugin.InitContext) (interface{}, error) {
			md, err := ic.Get(plugin.MetadataPlugin)
			if err != nil {
				return nil, err
			}

			m := newScheduler(md.(*metadata.DB), ic.Config.(*config))

			ic.Meta.Exports = map[string]string{
				"PauseThreshold":    fmt.Sprint(m.pauseThreshold),
				"DeletionThreshold": fmt.Sprint(m.deletionThreshold),
				"MutationThreshold": fmt.Sprint(m.mutationThreshold),
				"ScheduleDelay":     fmt.Sprint(m.scheduleDelay),
			}

			go m.run(ic.Context)

			return m, nil
		},
	})
}

type mutationEvent struct {
	ts       time.Time
	mutation bool
	dirty    bool
}

type collector interface {
	RegisterMutationCallback(func(bool))
	GarbageCollect(context.Context) (metadata.GCStats, error)
}

type gcScheduler struct {
	c collector

	eventC chan mutationEvent

	waiterL sync.Mutex
	waiters []chan metadata.GCStats

	pauseThreshold    float64
	deletionThreshold int
	mutationThreshold int
	scheduleDelay     time.Duration
	startupDelay      time.Duration
}

func newScheduler(c collector, cfg *config) *gcScheduler {
	eventC := make(chan mutationEvent)

	s := &gcScheduler{
		c:                 c,
		eventC:            eventC,
		pauseThreshold:    cfg.PauseThreshold,
		deletionThreshold: cfg.DeletionThreshold,
		mutationThreshold: cfg.MutationThreshold,
		scheduleDelay:     time.Duration(cfg.ScheduleDelay),
		startupDelay:      time.Duration(cfg.StartupDelay),
	}

	if s.pauseThreshold < 0.0 {
		s.pauseThreshold = 0.0
	}
	if s.pauseThreshold > 0.5 {
		s.pauseThreshold = 0.5
	}
	if s.mutationThreshold < 0 {
		s.mutationThreshold = 0
	}
	if s.scheduleDelay < 0 {
		s.scheduleDelay = 0
	}
	if s.startupDelay < 0 {
		s.startupDelay = 0
	}

	c.RegisterMutationCallback(s.mutationCallback)

	return s
}

func (s *gcScheduler) ScheduleAndWait(ctx context.Context) (metadata.GCStats, error) {
	return s.wait(ctx, true)
}

func (s *gcScheduler) wait(ctx context.Context, trigger bool) (metadata.GCStats, error) {
	wc := make(chan metadata.GCStats, 1)
	s.waiterL.Lock()
	s.waiters = append(s.waiters, wc)
	s.waiterL.Unlock()

	if trigger {
		e := mutationEvent{
			ts: time.Now(),
		}
		go func() {
			s.eventC <- e
		}()
	}

	var gcStats metadata.GCStats
	select {
	case stats, ok := <-wc:
		if !ok {
			return metadata.GCStats{}, errors.New("gc failed")
		}
		gcStats = stats
	case <-ctx.Done():
		return metadata.GCStats{}, ctx.Err()
	}

	return gcStats, nil
}

func (s *gcScheduler) mutationCallback(dirty bool) {
	e := mutationEvent{
		ts:       time.Now(),
		mutation: true,
		dirty:    dirty,
	}
	go func() {
		s.eventC <- e
	}()
}

func schedule(d time.Duration) (<-chan time.Time, *time.Time) {
	next := time.Now().Add(d)
	return time.After(d), &next
}

func (s *gcScheduler) run(ctx context.Context) {
	var (
		schedC <-chan time.Time

		lastCollection *time.Time
		nextCollection *time.Time

		interval    = time.Second
		gcTime      time.Duration
		collections int
		// TODO(dmcg): expose collection stats as metrics

		triggered bool
		deletions int
		mutations int
	)
	if s.startupDelay > 0 {
		schedC, nextCollection = schedule(s.startupDelay)
	}
	for {
		select {
		case <-schedC:
			// Check if garbage collection can be skipped because
			// it is not needed or was not requested and reschedule
			// it to attempt again after another time interval.
			if !triggered && lastCollection != nil && deletions == 0 &&
				(s.mutationThreshold == 0 || mutations < s.mutationThreshold) {
				schedC, nextCollection = schedule(interval)
				continue
			}
			break
		case e := <-s.eventC:
			if lastCollection != nil && lastCollection.After(e.ts) {
				continue
			}
			if e.dirty {
				deletions++
			}
			if e.mutation {
				mutations++
			} else {
				triggered = true
			}

			// Check if condition should cause immediate collection.
			if triggered ||
				(s.deletionThreshold > 0 && deletions >= s.deletionThreshold) ||
				(nextCollection == nil && ((s.deletionThreshold == 0 && deletions > 0) ||
					(s.mutationThreshold > 0 && mutations >= s.mutationThreshold))) {
				// Check if not already scheduled before delay threshold
				if nextCollection == nil || nextCollection.After(time.Now().Add(s.scheduleDelay)) {
					// TODO(dmcg): track re-schedules for tuning schedule config
					schedC, nextCollection = schedule(s.scheduleDelay)
				}
			}

			continue
		case <-ctx.Done():
			return
		}

		s.waiterL.Lock()

		stats, err := s.c.GarbageCollect(ctx)
		last := time.Now()
		if err != nil {
			log.G(ctx).WithError(err).Error("garbage collection failed")

			// Reschedule garbage collection for same duration + 1 second
			schedC, nextCollection = schedule(nextCollection.Sub(*lastCollection) + time.Second)

			// Update last collection time even though failure occured
			lastCollection = &last

			for _, w := range s.waiters {
				close(w)
			}
			s.waiters = nil
			s.waiterL.Unlock()
			continue
		}

		log.G(ctx).WithField("d", stats.MetaD).Debug("garbage collected")

		gcTime += stats.MetaD
		collections++
		triggered = false
		deletions = 0
		mutations = 0

		// Calculate new interval with updated times
		if s.pauseThreshold > 0.0 {
			// Set interval to average gc time divided by the pause threshold
			// This algorithm ensures that a gc is scheduled to allow enough
			// runtime in between gc to reach the pause threshold.
			// Pause threshold is always 0.0 < threshold <= 0.5
			avg := float64(gcTime) / float64(collections)
			interval = time.Duration(avg/s.pauseThreshold - avg)
		}

		lastCollection = &last
		schedC, nextCollection = schedule(interval)

		for _, w := range s.waiters {
			w <- stats
		}
		s.waiters = nil
		s.waiterL.Unlock()
	}
}
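To make the pause-threshold arithmetic in run() concrete: with the default threshold of 0.02 and collections averaging 20ms, the next collection is scheduled 980ms out, so GC occupies roughly 2% of wall time. A standalone sketch of the same computation (illustration, not part of the commit):

	package main

	import (
		"fmt"
		"time"
	)

	func main() {
		pauseThreshold := 0.02                // GC may use ~2% of real time
		avg := float64(20 * time.Millisecond) // average observed GC duration
		interval := time.Duration(avg/pauseThreshold - avg)
		fmt.Println(interval) // 980ms: 20ms of GC every ~1s stays within budget
	}
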
gc/scheduler/scheduler_test.go (new file, 188 lines)
@@ -0,0 +1,188 @@
package scheduler

import (
	"context"
	"sync"
	"testing"
	"time"

	"github.com/containerd/containerd/metadata"
)

func TestPauseThreshold(t *testing.T) {
	cfg := &config{
		// With 100μs, gc should run about every 5ms
		PauseThreshold: 0.02,
	}
	tc := &testCollector{
		d: time.Microsecond * 100,
	}

	scheduler := newScheduler(tc, cfg)

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	go scheduler.run(ctx)

	// Ensure every possible GC cycle runs
	go func() {
		tick := time.NewTicker(time.Microsecond * 100)
		for {
			select {
			case <-tick.C:
				tc.trigger(true)
			case <-ctx.Done():
				return
			}
		}
	}()

	time.Sleep(time.Millisecond * 15)
	if c := tc.runCount(); c < 3 || c > 4 {
		t.Fatalf("unexpected gc run count %d, expected between 3 and 4", c)
	}
}

func TestDeletionThreshold(t *testing.T) {
	cfg := &config{
		// Prevent GC from scheduling again before check
		PauseThreshold:    0.001,
		DeletionThreshold: 5,
	}
	tc := &testCollector{
		d: time.Second,
	}

	scheduler := newScheduler(tc, cfg)

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	go scheduler.run(ctx)

	// Block until next GC finishes
	gcWait := make(chan struct{})
	go func() {
		scheduler.wait(ctx, false)
		close(gcWait)
	}()

	// Increment deletion count 5, checking GC hasn't run in
	// between each call
	for i := 0; i < 5; i++ {
		time.Sleep(time.Millisecond)
		if c := tc.runCount(); c != 0 {
			t.Fatalf("GC ran unexpectedly")
		}
		tc.trigger(true)
	}

	select {
	case <-gcWait:
	case <-time.After(time.Millisecond * 10):
		t.Fatal("GC wait timed out")
	}

	if c := tc.runCount(); c != 1 {
		t.Fatalf("unexpected gc run count %d, expected 1", c)
	}
}

func TestTrigger(t *testing.T) {
	var (
		cfg = &config{}
		tc  = &testCollector{
			d: time.Millisecond * 10,
		}
		ctx, cancel = context.WithCancel(context.Background())
		scheduler   = newScheduler(tc, cfg)
		stats       metadata.GCStats
		err         error
	)

	defer cancel()
	go scheduler.run(ctx)

	// Block until next GC finishes
	gcWait := make(chan struct{})
	go func() {
		stats, err = scheduler.ScheduleAndWait(ctx)
		close(gcWait)
	}()

	select {
	case <-gcWait:
	case <-time.After(time.Millisecond * 10):
		t.Fatal("GC wait timed out")
	}

	if err != nil {
		t.Fatalf("GC failed: %#v", err)
	}

	if stats.MetaD != tc.d {
		t.Fatalf("unexpected gc duration: %s, expected %d", stats.MetaD, tc.d)
	}

	if c := tc.runCount(); c != 1 {
		t.Fatalf("unexpected gc run count %d, expected 1", c)
	}
}

func TestStartupDelay(t *testing.T) {
	var (
		cfg = &config{
			// Prevent GC from scheduling again before check
			PauseThreshold: 0.001,
			StartupDelay:   duration(time.Millisecond),
		}
		tc = &testCollector{
			d: time.Second,
		}
		ctx, cancel = context.WithCancel(context.Background())
		scheduler   = newScheduler(tc, cfg)
	)
	defer cancel()
	go scheduler.run(ctx)

	time.Sleep(time.Millisecond * 5)

	if c := tc.runCount(); c != 1 {
		t.Fatalf("unexpected gc run count %d, expected 1", c)
	}
}

type testCollector struct {
	d  time.Duration
	gc int
	m  sync.Mutex

	callbacks []func(bool)
}

func (tc *testCollector) trigger(delete bool) {
	for _, f := range tc.callbacks {
		f(delete)
	}
}

func (tc *testCollector) runCount() int {
	tc.m.Lock()
	c := tc.gc
	tc.m.Unlock()
	return c
}

func (tc *testCollector) RegisterMutationCallback(f func(bool)) {
	tc.callbacks = append(tc.callbacks, f)
}

func (tc *testCollector) GarbageCollect(context.Context) (metadata.GCStats, error) {
	tc.m.Lock()
	tc.gc++
	tc.m.Unlock()
	return metadata.GCStats{
		MetaD: tc.d,
	}, nil
}
@@ -74,9 +74,16 @@ func (s *remoteImages) Update(ctx context.Context, image images.Image, fieldpath
 	return imageFromProto(&updated.Image), nil
 }

-func (s *remoteImages) Delete(ctx context.Context, name string) error {
+func (s *remoteImages) Delete(ctx context.Context, name string, opts ...images.DeleteOpt) error {
+	var do images.DeleteOptions
+	for _, opt := range opts {
+		if err := opt(ctx, &do); err != nil {
+			return err
+		}
+	}
 	_, err := s.client.Delete(ctx, &imagesapi.DeleteImageRequest{
 		Name: name,
+		Sync: do.Synchronous,
 	})

 	return errdefs.FromGRPC(err)
@@ -38,6 +38,23 @@ type Image struct {
 	CreatedAt, UpdatedAt time.Time
 }

+// DeleteOptions provide options on image delete
+type DeleteOptions struct {
+	Synchronous bool
+}
+
+// DeleteOpt allows configuring a delete operation
+type DeleteOpt func(context.Context, *DeleteOptions) error
+
+// SynchronousDelete is used to indicate that an image deletion and removal of
+// the image resources should occur synchronously before returning a result.
+func SynchronousDelete() DeleteOpt {
+	return func(ctx context.Context, o *DeleteOptions) error {
+		o.Synchronous = true
+		return nil
+	}
+}
+
 // Store and interact with images
 type Store interface {
 	Get(ctx context.Context, name string) (Image, error)
@@ -48,7 +65,7 @@ type Store interface {
 	// one or more fieldpaths are provided, only those fields will be updated.
 	Update(ctx context.Context, image Image, fieldpaths ...string) (Image, error)

-	Delete(ctx context.Context, name string) error
+	Delete(ctx context.Context, name string, opts ...DeleteOpt) error
 }

 // TODO(stevvooe): Many of these functions make strong platform assumptions,
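Any caller holding an images.Store can opt in per call. A sketch of typical usage (assuming a connected containerd client, as in the ctr change above):

	import (
		"github.com/containerd/containerd/images"
		"golang.org/x/net/context"
	)

	func removeAndWait(ctx context.Context, store images.Store, ref string) error {
		// Without opts the delete returns immediately and cleanup is
		// left to the garbage collection scheduler.
		return store.Delete(ctx, ref, images.SynchronousDelete())
	}
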
@@ -530,12 +530,14 @@ func writeInfo(info *content.Info, bkt *bolt.Bucket) error {
 	return bkt.Put(bucketKeySize, sizeEncoded)
 }

-func (cs *contentStore) garbageCollect(ctx context.Context) error {
-	lt1 := time.Now()
+func (cs *contentStore) garbageCollect(ctx context.Context) (d time.Duration, err error) {
 	cs.l.Lock()
+	t1 := time.Now()
 	defer func() {
+		if err == nil {
+			d = time.Now().Sub(t1)
+		}
 		cs.l.Unlock()
-		log.G(ctx).WithField("t", time.Now().Sub(lt1)).Debugf("content garbage collected")
 	}()

 	seen := map[string]struct{}{}
@@ -570,10 +572,10 @@ func (cs *contentStore) garbageCollect(ctx context.Context) error {

 		return nil
 	}); err != nil {
-		return err
+		return 0, err
 	}

-	return cs.Store.Walk(ctx, func(info content.Info) error {
+	err = cs.Store.Walk(ctx, func(info content.Info) error {
 		if _, ok := seen[info.Digest.String()]; !ok {
 			if err := cs.Store.Delete(ctx, info.Digest); err != nil {
 				return err
@@ -582,4 +584,5 @@ func (cs *contentStore) garbageCollect(ctx context.Context) error {
 		}
 		return nil
 	})
+	return
 }
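The refactor above repeats a small pattern: take the lock, record a start time, and use a deferred closure with named returns so a duration is reported only on success. Isolated, the pattern looks like this (a generic sketch, not code from the commit; assumes the sync and time imports):

	func timed(l sync.Locker, work func() error) (d time.Duration, err error) {
		l.Lock()
		t1 := time.Now()
		defer func() {
			if err == nil {
				d = time.Now().Sub(t1) // only successful runs report a duration
			}
			l.Unlock()
		}()
		err = work()
		return
	}
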
metadata/db.go (108 lines changed)
@@ -53,8 +53,9 @@ type DB struct {
 	dirtySS map[string]struct{}
 	dirtyCS bool

-	// TODO: Keep track of stats such as pause time, number of collected objects, errors
-	lastCollection time.Time
+	// mutationCallbacks are called after each mutation with the flag
+	// set indicating whether any dirty flags are set
+	mutationCallbacks []func(bool)
 }

 // NewDB creates a new metadata database using the provided
@@ -183,29 +184,53 @@ func (m *DB) View(fn func(*bolt.Tx) error) error {
 	return m.db.View(fn)
 }

-// Update runs a writable transation on the metadata store.
+// Update runs a writable transaction on the metadata store.
 func (m *DB) Update(fn func(*bolt.Tx) error) error {
 	m.wlock.RLock()
 	defer m.wlock.RUnlock()
-	return m.db.Update(fn)
+	err := m.db.Update(fn)
+	if err == nil {
+		m.dirtyL.Lock()
+		dirty := m.dirtyCS || len(m.dirtySS) > 0
+		for _, fn := range m.mutationCallbacks {
+			fn(dirty)
+		}
+		m.dirtyL.Unlock()
+	}
+
+	return err
 }
+
+// RegisterMutationCallback registers a function to be called after a metadata
+// mutations has been performed.
+//
+// The callback function in an argument for whether a deletion has occurred
+// since the last garbage collection.
+func (m *DB) RegisterMutationCallback(fn func(bool)) {
+	m.dirtyL.Lock()
+	m.mutationCallbacks = append(m.mutationCallbacks, fn)
+	m.dirtyL.Unlock()
+}
+
+// GCStats holds the duration for the different phases of the garbage collector
+type GCStats struct {
+	MetaD     time.Duration
+	ContentD  time.Duration
+	SnapshotD map[string]time.Duration
+}

 // GarbageCollect starts garbage collection
-func (m *DB) GarbageCollect(ctx context.Context) error {
-	lt1 := time.Now()
+func (m *DB) GarbageCollect(ctx context.Context) (stats GCStats, err error) {
 	m.wlock.Lock()
-	defer func() {
-		m.wlock.Unlock()
-		log.G(ctx).WithField("d", time.Now().Sub(lt1)).Debug("metadata garbage collected")
-	}()
+	t1 := time.Now()

 	marked, err := m.getMarked(ctx)
 	if err != nil {
-		return err
+		m.wlock.Unlock()
+		return GCStats{}, err
 	}

 	m.dirtyL.Lock()
-	defer m.dirtyL.Unlock()

 	if err := m.db.Update(func(tx *bolt.Tx) error {
 		ctx, cancel := context.WithCancel(ctx)
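Any component holding the *metadata.DB can subscribe to mutations this way; in this commit the gc scheduler's mutationCallback is the only consumer. A minimal sketch of registering one (not from the commit; assumes the containerd log package):

	db.RegisterMutationCallback(func(dirty bool) {
		// dirty reports whether the mutation left snapshot or content
		// references to clean up since the last collection.
		if dirty {
			log.L.Debug("metadata mutation produced garbage")
		}
	})
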
@@ -232,26 +257,53 @@ func (m *DB) GarbageCollect(ctx context.Context) error {

 		return nil
 	}); err != nil {
-		return err
+		m.dirtyL.Unlock()
+		m.wlock.Unlock()
+		return GCStats{}, err
 	}

-	m.lastCollection = time.Now()
+	var wg sync.WaitGroup

 	if len(m.dirtySS) > 0 {
+		var sl sync.Mutex
+		stats.SnapshotD = map[string]time.Duration{}
+		wg.Add(len(m.dirtySS))
 		for snapshotterName := range m.dirtySS {
 			log.G(ctx).WithField("snapshotter", snapshotterName).Debug("scheduling snapshotter cleanup")
-			go m.cleanupSnapshotter(snapshotterName)
+			go func(snapshotterName string) {
+				st1 := time.Now()
+				m.cleanupSnapshotter(snapshotterName)
+
+				sl.Lock()
+				stats.SnapshotD[snapshotterName] = time.Now().Sub(st1)
+				sl.Unlock()
+
+				wg.Done()
+			}(snapshotterName)
 		}
 		m.dirtySS = map[string]struct{}{}
 	}

 	if m.dirtyCS {
+		wg.Add(1)
 		log.G(ctx).Debug("scheduling content cleanup")
-		go m.cleanupContent()
+		go func() {
+			ct1 := time.Now()
+			m.cleanupContent()
+			stats.ContentD = time.Now().Sub(ct1)
+			wg.Done()
+		}()
 		m.dirtyCS = false
 	}

-	return nil
+	m.dirtyL.Unlock()
+
+	stats.MetaD = time.Now().Sub(t1)
+	m.wlock.Unlock()
+
+	wg.Wait()
+
+	return
 }

 func (m *DB) getMarked(ctx context.Context) (map[gc.Node]struct{}, error) {
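Called directly, the new signature yields per-phase timings; note that wg.Wait() runs after the metadata lock is released, so MetaD covers only the blocking phase while cleanup proceeds concurrently. A hedged usage sketch (assumes a logrus import alongside containerd's log package):

	stats, err := db.GarbageCollect(ctx)
	if err != nil {
		return err
	}
	log.G(ctx).WithFields(logrus.Fields{
		"metadata": stats.MetaD,
		"content":  stats.ContentD,
	}).Debug("collection phases")
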
@@ -302,27 +354,35 @@ func (m *DB) getMarked(ctx context.Context) (map[gc.Node]struct{}, error) {
 	return marked, nil
 }

-func (m *DB) cleanupSnapshotter(name string) {
+func (m *DB) cleanupSnapshotter(name string) (time.Duration, error) {
 	ctx := context.Background()
 	sn, ok := m.ss[name]
 	if !ok {
-		return
+		return 0, nil
 	}

-	err := sn.garbageCollect(ctx)
+	d, err := sn.garbageCollect(ctx)
+	logger := log.G(ctx).WithField("snapshotter", name)
 	if err != nil {
-		log.G(ctx).WithError(err).WithField("snapshotter", name).Warn("garbage collection failed")
+		logger.WithError(err).Warn("snapshot garbage collection failed")
+	} else {
+		logger.WithField("d", d).Debugf("snapshot garbage collected")
 	}
+	return d, err
 }

-func (m *DB) cleanupContent() {
+func (m *DB) cleanupContent() (time.Duration, error) {
 	ctx := context.Background()
 	if m.cs == nil {
-		return
+		return 0, nil
 	}

-	err := m.cs.garbageCollect(ctx)
+	d, err := m.cs.garbageCollect(ctx)
 	if err != nil {
 		log.G(ctx).WithError(err).Warn("content garbage collection failed")
+	} else {
+		log.G(ctx).WithField("d", d).Debugf("content garbage collected")
 	}
+
+	return d, err
 }
@@ -235,7 +235,7 @@ func TestMetadataCollector(t *testing.T) {
 		t.Fatalf("Creation failed: %+v", err)
 	}

-	if err := mdb.GarbageCollect(ctx); err != nil {
+	if _, err := mdb.GarbageCollect(ctx); err != nil {
 		t.Fatal(err)
 	}

@@ -322,7 +322,7 @@ func benchmarkTrigger(n int) func(b *testing.B) {

 		//b.StartTimer()

-		if err := mdb.GarbageCollect(ctx); err != nil {
+		if _, err := mdb.GarbageCollect(ctx); err != nil {
 			b.Fatal(err)
 		}

@@ -183,7 +183,7 @@ func (s *imageStore) Update(ctx context.Context, image images.Image, fieldpaths
 	})
 }

-func (s *imageStore) Delete(ctx context.Context, name string) error {
+func (s *imageStore) Delete(ctx context.Context, name string, opts ...images.DeleteOpt) error {
 	namespace, err := namespaces.NamespaceRequired(ctx)
 	if err != nil {
 		return err
@@ -596,13 +596,14 @@ func validateSnapshot(info *snapshot.Info) error {
 	return nil
 }

-func (s *snapshotter) garbageCollect(ctx context.Context) error {
-	logger := log.G(ctx).WithField("snapshotter", s.name)
-	lt1 := time.Now()
+func (s *snapshotter) garbageCollect(ctx context.Context) (d time.Duration, err error) {
 	s.l.Lock()
+	t1 := time.Now()
 	defer func() {
+		if err == nil {
+			d = time.Now().Sub(t1)
+		}
 		s.l.Unlock()
-		logger.WithField("t", time.Now().Sub(lt1)).Debugf("garbage collected")
 	}()

 	seen := map[string]struct{}{}
@@ -646,23 +647,26 @@ func (s *snapshotter) garbageCollect(ctx context.Context) error {

 		return nil
 	}); err != nil {
-		return err
+		return 0, err
 	}

 	roots, err := s.walkTree(ctx, seen)
 	if err != nil {
-		return err
+		return 0, err
 	}

-	// TODO: Unlock before prune (once nodes are fully unavailable)
+	// TODO: Unlock before removal (once nodes are fully unavailable).
+	// This could be achieved through doing prune inside the lock
+	// and having a cleanup method which actually performs the
+	// deletions on the snapshotters which support it.

 	for _, node := range roots {
 		if err := s.pruneBranch(ctx, node); err != nil {
-			return err
+			return 0, err
 		}
 	}

-	return nil
+	return
 }

 type treeNode struct {
@@ -54,6 +54,8 @@ const (
 	MetadataPlugin Type = "io.containerd.metadata.v1"
 	// ContentPlugin implements a content store
 	ContentPlugin Type = "io.containerd.content.v1"
+	// GCPlugin implements garbage collection policy
+	GCPlugin Type = "io.containerd.gc.v1"
 )

 // Registration contains information for registering a plugin
@@ -10,7 +10,6 @@ import (
 	"github.com/containerd/containerd/metadata"
 	"github.com/containerd/containerd/plugin"
 	ptypes "github.com/gogo/protobuf/types"
-	"github.com/pkg/errors"
 	"golang.org/x/net/context"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/codes"
@@ -162,10 +161,6 @@ func (s *service) Delete(ctx context.Context, req *api.DeleteContainerRequest) (
 		return &ptypes.Empty{}, err
 	}

-	if err := s.db.GarbageCollect(ctx); err != nil {
-		return &ptypes.Empty{}, errdefs.ToGRPC(errors.Wrap(err, "garbage collection failed"))
-	}
-
 	return &ptypes.Empty{}, nil
 }

|
@ -1,6 +1,8 @@
|
||||
package images
|
||||
|
||||
import (
|
||||
gocontext "context"
|
||||
|
||||
"github.com/boltdb/bolt"
|
||||
eventstypes "github.com/containerd/containerd/api/events"
|
||||
imagesapi "github.com/containerd/containerd/api/services/images/v1"
|
||||
@ -10,7 +12,6 @@ import (
|
||||
"github.com/containerd/containerd/metadata"
|
||||
"github.com/containerd/containerd/plugin"
|
||||
ptypes "github.com/gogo/protobuf/types"
|
||||
"github.com/pkg/errors"
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/codes"
|
||||
@ -23,26 +24,38 @@ func init() {
|
||||
ID: "images",
|
||||
Requires: []plugin.Type{
|
||||
plugin.MetadataPlugin,
|
||||
plugin.GCPlugin,
|
||||
},
|
||||
InitFn: func(ic *plugin.InitContext) (interface{}, error) {
|
||||
m, err := ic.Get(plugin.MetadataPlugin)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return NewService(m.(*metadata.DB), ic.Events), nil
|
||||
g, err := ic.Get(plugin.GCPlugin)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return NewService(m.(*metadata.DB), ic.Events, g.(gcScheduler)), nil
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
type gcScheduler interface {
|
||||
ScheduleAndWait(gocontext.Context) (metadata.GCStats, error)
|
||||
}
|
||||
|
||||
type service struct {
|
||||
db *metadata.DB
|
||||
gc gcScheduler
|
||||
publisher events.Publisher
|
||||
}
|
||||
|
||||
// NewService returns the GRPC image server
|
||||
func NewService(db *metadata.DB, publisher events.Publisher) imagesapi.ImagesServer {
|
||||
func NewService(db *metadata.DB, publisher events.Publisher, gc gcScheduler) imagesapi.ImagesServer {
|
||||
return &service{
|
||||
db: db,
|
||||
gc: gc,
|
||||
publisher: publisher,
|
||||
}
|
||||
}
|
||||
@ -163,8 +176,10 @@ func (s *service) Delete(ctx context.Context, req *imagesapi.DeleteImageRequest)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := s.db.GarbageCollect(ctx); err != nil {
|
||||
return nil, errdefs.ToGRPC(errors.Wrap(err, "garbage collection failed"))
|
||||
if req.Sync {
|
||||
if _, err := s.gc.ScheduleAndWait(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return &ptypes.Empty{}, nil
|
||||
|
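Because the images service depends on the narrow gcScheduler interface rather than the concrete scheduler plugin, a test can substitute a stub. A sketch (not from the commit), reusing the gocontext alias from this file:

	type stubScheduler struct {
		stats metadata.GCStats
	}

	func (s stubScheduler) ScheduleAndWait(ctx gocontext.Context) (metadata.GCStats, error) {
		return s.stats, nil // pretend a collection ran instantly
	}

	// svc := NewService(db, publisher, stubScheduler{})
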
@@ -38,7 +38,7 @@ func createSnapshot(ctx context.Context, sn snapshot.Snapshotter, parent, work s
 	n := fmt.Sprintf("%p-%d", a, rand.Int())
 	prepare := fmt.Sprintf("%s-prepare", n)

-	m, err := sn.Prepare(ctx, prepare, parent)
+	m, err := sn.Prepare(ctx, prepare, parent, opt)
 	if err != nil {
 		return "", errors.Wrap(err, "failed to prepare snapshot")
 	}
@@ -47,7 +47,7 @@ func createSnapshot(ctx context.Context, sn snapshot.Snapshotter, parent, work s
 		return "", errors.Wrap(err, "failed to apply")
 	}

-	if err := sn.Commit(ctx, n, prepare); err != nil {
+	if err := sn.Commit(ctx, n, prepare, opt); err != nil {
 		return "", errors.Wrap(err, "failed to commit")
 	}

@@ -66,7 +66,7 @@ func checkSnapshot(ctx context.Context, sn snapshot.Snapshotter, work, name, che
 	}()

 	view := fmt.Sprintf("%s-view", name)
-	m, err := sn.View(ctx, view, name)
+	m, err := sn.View(ctx, view, name, opt)
 	if err != nil {
 		return errors.Wrap(err, "failed to create view")
 	}
@@ -181,22 +181,22 @@ func checkStatInWalk(ctx context.Context, t *testing.T, sn snapshot.Snapshotter,
 func createNamedSnapshots(ctx context.Context, snapshotter snapshot.Snapshotter, ns string) error {
 	c1 := fmt.Sprintf("%sc1", ns)
 	c2 := fmt.Sprintf("%sc2", ns)
-	if _, err := snapshotter.Prepare(ctx, c1+"-a", ""); err != nil {
+	if _, err := snapshotter.Prepare(ctx, c1+"-a", "", opt); err != nil {
 		return err
 	}
-	if err := snapshotter.Commit(ctx, c1, c1+"-a"); err != nil {
+	if err := snapshotter.Commit(ctx, c1, c1+"-a", opt); err != nil {
 		return err
 	}
-	if _, err := snapshotter.Prepare(ctx, c2+"-a", c1); err != nil {
+	if _, err := snapshotter.Prepare(ctx, c2+"-a", c1, opt); err != nil {
 		return err
 	}
-	if err := snapshotter.Commit(ctx, c2, c2+"-a"); err != nil {
+	if err := snapshotter.Commit(ctx, c2, c2+"-a", opt); err != nil {
 		return err
 	}
-	if _, err := snapshotter.Prepare(ctx, fmt.Sprintf("%sa1", ns), c2); err != nil {
+	if _, err := snapshotter.Prepare(ctx, fmt.Sprintf("%sa1", ns), c2, opt); err != nil {
 		return err
 	}
-	if _, err := snapshotter.View(ctx, fmt.Sprintf("%sv1", ns), c2); err != nil {
+	if _, err := snapshotter.View(ctx, fmt.Sprintf("%sv1", ns), c2, opt); err != nil {
 		return err
 	}
 	return nil
@@ -91,6 +91,10 @@ func makeTest(name string, snapshotterFn func(ctx context.Context, root string)
 	}
 }

+var opt = snapshot.WithLabels(map[string]string{
+	"containerd.io/gc.root": time.Now().UTC().Format(time.RFC3339),
+})
+
 // checkSnapshotterBasic tests the basic workflow of a snapshot snapshotter.
 func checkSnapshotterBasic(ctx context.Context, t *testing.T, snapshotter snapshot.Snapshotter, work string) {
 	initialApplier := fstest.Apply(
@@ -112,7 +116,7 @@ func checkSnapshotterBasic(ctx context.Context, t *testing.T, snapshotter snapsh
 		t.Fatalf("failure reason: %+v", err)
 	}

-	mounts, err := snapshotter.Prepare(ctx, preparing, "")
+	mounts, err := snapshotter.Prepare(ctx, preparing, "", opt)
 	if err != nil {
 		t.Fatalf("failure reason: %+v", err)
 	}
@@ -131,7 +135,7 @@ func checkSnapshotterBasic(ctx context.Context, t *testing.T, snapshotter snapsh
 	}

 	committed := filepath.Join(work, "committed")
-	if err := snapshotter.Commit(ctx, committed, preparing); err != nil {
+	if err := snapshotter.Commit(ctx, committed, preparing, opt); err != nil {
 		t.Fatalf("failure reason: %+v", err)
 	}

@@ -153,7 +157,7 @@ func checkSnapshotterBasic(ctx context.Context, t *testing.T, snapshotter snapsh
 		t.Fatalf("failure reason: %+v", err)
 	}

-	mounts, err = snapshotter.Prepare(ctx, next, committed)
+	mounts, err = snapshotter.Prepare(ctx, next, committed, opt)
 	if err != nil {
 		t.Fatalf("failure reason: %+v", err)
 	}
@@ -179,7 +183,7 @@ func checkSnapshotterBasic(ctx context.Context, t *testing.T, snapshotter snapsh
 	assert.Equal(t, snapshot.KindActive, ni.Kind)

 	nextCommitted := filepath.Join(work, "committed-next")
-	if err := snapshotter.Commit(ctx, nextCommitted, next); err != nil {
+	if err := snapshotter.Commit(ctx, nextCommitted, next, opt); err != nil {
 		t.Fatalf("failure reason: %+v", err)
 	}

@@ -220,7 +224,7 @@ func checkSnapshotterBasic(ctx context.Context, t *testing.T, snapshotter snapsh
 		t.Fatalf("failure reason: %+v", err)
 	}

-	mounts, err = snapshotter.View(ctx, nextnext, nextCommitted)
+	mounts, err = snapshotter.View(ctx, nextnext, nextCommitted, opt)
 	if err != nil {
 		t.Fatalf("failure reason: %+v", err)
 	}
@@ -248,7 +252,7 @@ func checkSnapshotterStatActive(ctx context.Context, t *testing.T, snapshotter s
 		t.Fatal(err)
 	}

-	mounts, err := snapshotter.Prepare(ctx, preparing, "")
+	mounts, err := snapshotter.Prepare(ctx, preparing, "", opt)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -282,7 +286,7 @@ func checkSnapshotterStatCommitted(ctx context.Context, t *testing.T, snapshotte
 		t.Fatal(err)
 	}

-	mounts, err := snapshotter.Prepare(ctx, preparing, "")
+	mounts, err := snapshotter.Prepare(ctx, preparing, "", opt)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -301,7 +305,7 @@ func checkSnapshotterStatCommitted(ctx context.Context, t *testing.T, snapshotte
 	}

 	committed := filepath.Join(work, "committed")
-	if err = snapshotter.Commit(ctx, committed, preparing); err != nil {
+	if err = snapshotter.Commit(ctx, committed, preparing, opt); err != nil {
 		t.Fatal(err)
 	}

@@ -321,7 +325,7 @@ func snapshotterPrepareMount(ctx context.Context, snapshotter snapshot.Snapshott
 		return "", err
 	}

-	mounts, err := snapshotter.Prepare(ctx, preparing, parent)
+	mounts, err := snapshotter.Prepare(ctx, preparing, parent, opt)
 	if err != nil {
 		return "", err
 	}
@@ -349,7 +353,7 @@ func checkSnapshotterTransitivity(ctx context.Context, t *testing.T, snapshotter
 	}

 	snapA := filepath.Join(work, "snapA")
-	if err = snapshotter.Commit(ctx, snapA, preparing); err != nil {
+	if err = snapshotter.Commit(ctx, snapA, preparing, opt); err != nil {
 		t.Fatal(err)
 	}

@@ -364,7 +368,7 @@ func checkSnapshotterTransitivity(ctx context.Context, t *testing.T, snapshotter
 	}

 	snapB := filepath.Join(work, "snapB")
-	if err = snapshotter.Commit(ctx, snapB, next); err != nil {
+	if err = snapshotter.Commit(ctx, snapB, next, opt); err != nil {
 		t.Fatal(err)
 	}

@@ -399,7 +403,7 @@ func checkSnapshotterPrepareView(ctx context.Context, t *testing.T, snapshotter
 	defer testutil.Unmount(t, preparing)

 	snapA := filepath.Join(work, "snapA")
-	if err = snapshotter.Commit(ctx, snapA, preparing); err != nil {
+	if err = snapshotter.Commit(ctx, snapA, preparing, opt); err != nil {
 		t.Fatal(err)
 	}

@@ -410,12 +414,12 @@ func checkSnapshotterPrepareView(ctx context.Context, t *testing.T, snapshotter
 	}

 	// Prepare & View with same key
-	_, err = snapshotter.Prepare(ctx, newLayer, snapA)
+	_, err = snapshotter.Prepare(ctx, newLayer, snapA, opt)
 	if err != nil {
 		t.Fatal(err)
 	}

-	_, err = snapshotter.View(ctx, newLayer, snapA)
+	_, err = snapshotter.View(ctx, newLayer, snapA, opt)
 	//must be err != nil
 	assert.NotNil(t, err)

@@ -425,12 +429,12 @@ func checkSnapshotterPrepareView(ctx context.Context, t *testing.T, snapshotter
 		t.Fatal(err)
 	}

-	_, err = snapshotter.Prepare(ctx, prepLayer, snapA)
+	_, err = snapshotter.Prepare(ctx, prepLayer, snapA, opt)
 	if err != nil {
 		t.Fatal(err)
 	}

-	_, err = snapshotter.Prepare(ctx, prepLayer, snapA)
+	_, err = snapshotter.Prepare(ctx, prepLayer, snapA, opt)
 	//must be err != nil
 	assert.NotNil(t, err)

@@ -440,12 +444,12 @@ func checkSnapshotterPrepareView(ctx context.Context, t *testing.T, snapshotter
 		t.Fatal(err)
 	}

-	_, err = snapshotter.View(ctx, viewLayer, snapA)
+	_, err = snapshotter.View(ctx, viewLayer, snapA, opt)
 	if err != nil {
 		t.Fatal(err)
 	}

-	_, err = snapshotter.View(ctx, viewLayer, snapA)
+	_, err = snapshotter.View(ctx, viewLayer, snapA, opt)
 	//must be err != nil
 	assert.NotNil(t, err)

@@ -479,24 +483,24 @@ func checkRemoveIntermediateSnapshot(ctx context.Context, t *testing.T, snapshot
 	defer testutil.Unmount(t, base)

 	committedBase := filepath.Join(work, "committed-base")
-	if err = snapshotter.Commit(ctx, committedBase, base); err != nil {
+	if err = snapshotter.Commit(ctx, committedBase, base, opt); err != nil {
 		t.Fatal(err)
 	}

 	// Create intermediate layer
 	intermediate := filepath.Join(work, "intermediate")
-	if _, err = snapshotter.Prepare(ctx, intermediate, committedBase); err != nil {
+	if _, err = snapshotter.Prepare(ctx, intermediate, committedBase, opt); err != nil {
 		t.Fatal(err)
 	}

 	committedInter := filepath.Join(work, "committed-inter")
-	if err = snapshotter.Commit(ctx, committedInter, intermediate); err != nil {
+	if err = snapshotter.Commit(ctx, committedInter, intermediate, opt); err != nil {
 		t.Fatal(err)
 	}

 	// Create top layer
 	topLayer := filepath.Join(work, "toplayer")
-	if _, err = snapshotter.Prepare(ctx, topLayer, committedInter); err != nil {
+	if _, err = snapshotter.Prepare(ctx, topLayer, committedInter, opt); err != nil {
 		t.Fatal(err)
 	}

@@ -530,28 +534,28 @@ func checkRemoveIntermediateSnapshot(ctx context.Context, t *testing.T, snapshot
 // v1 - view snapshot, v1 is parent
 // v2 - view snapshot, no parent
 func baseTestSnapshots(ctx context.Context, snapshotter snapshot.Snapshotter) error {
-	if _, err := snapshotter.Prepare(ctx, "c1-a", ""); err != nil {
+	if _, err := snapshotter.Prepare(ctx, "c1-a", "", opt); err != nil {
 		return err
 	}
-	if err := snapshotter.Commit(ctx, "c1", "c1-a"); err != nil {
+	if err := snapshotter.Commit(ctx, "c1", "c1-a", opt); err != nil {
 		return err
 	}
-	if _, err := snapshotter.Prepare(ctx, "c2-a", "c1"); err != nil {
+	if _, err := snapshotter.Prepare(ctx, "c2-a", "c1", opt); err != nil {
 		return err
 	}
-	if err := snapshotter.Commit(ctx, "c2", "c2-a"); err != nil {
+	if err := snapshotter.Commit(ctx, "c2", "c2-a", opt); err != nil {
 		return err
 	}
-	if _, err := snapshotter.Prepare(ctx, "a1", "c2"); err != nil {
+	if _, err := snapshotter.Prepare(ctx, "a1", "c2", opt); err != nil {
 		return err
 	}
-	if _, err := snapshotter.Prepare(ctx, "a2", ""); err != nil {
+	if _, err := snapshotter.Prepare(ctx, "a2", "", opt); err != nil {
 		return err
 	}
-	if _, err := snapshotter.View(ctx, "v1", "c2"); err != nil {
+	if _, err := snapshotter.View(ctx, "v1", "c2", opt); err != nil {
 		return err
 	}
-	if _, err := snapshotter.View(ctx, "v2", ""); err != nil {
+	if _, err := snapshotter.View(ctx, "v2", "", opt); err != nil {
 		return err
 	}
 	return nil
@@ -623,10 +627,13 @@ func checkUpdate(ctx context.Context, t *testing.T, snapshotter snapshot.Snapsho
 	}

 	createdAt := st.Created
+	rootTime := time.Now().UTC().Format(time.RFC3339)
 	expected := map[string]string{
 		"l1": "v1",
 		"l2": "v2",
 		"l3": "v3",
+		// Keep root label
+		"containerd.io/gc.root": rootTime,
 	}
 	st.Parent = "doesnotexist"
 	st.Labels = expected
@@ -662,6 +669,7 @@ func checkUpdate(ctx context.Context, t *testing.T, snapshotter snapshot.Snapsho
 	expected = map[string]string{
 		"l1": "updated",
 		"l3": "v3",
+		"containerd.io/gc.root": rootTime,
 	}
 	st.Labels = map[string]string{
 		"l1": "updated",
@@ -675,6 +683,7 @@ func checkUpdate(ctx context.Context, t *testing.T, snapshotter snapshot.Snapsho

 	expected = map[string]string{
 		"l4": "v4",
+		"containerd.io/gc.root": rootTime,
 	}
 	st.Labels = expected
 	st, err = snapshotter.Update(ctx, st, "labels")
@@ -709,31 +718,31 @@ func assertLabels(t *testing.T, actual, expected map[string]string) {
 }

 func checkRemove(ctx context.Context, t *testing.T, snapshotter snapshot.Snapshotter, work string) {
-	if _, err := snapshotter.Prepare(ctx, "committed-a", ""); err != nil {
+	if _, err := snapshotter.Prepare(ctx, "committed-a", "", opt); err != nil {
 		t.Fatal(err)
 	}
-	if err := snapshotter.Commit(ctx, "committed-1", "committed-a"); err != nil {
+	if err := snapshotter.Commit(ctx, "committed-1", "committed-a", opt); err != nil {
 		t.Fatal(err)
 	}
-	if _, err := snapshotter.Prepare(ctx, "reuse-1", "committed-1"); err != nil {
+	if _, err := snapshotter.Prepare(ctx, "reuse-1", "committed-1", opt); err != nil {
 		t.Fatal(err)
 	}
 	if err := snapshotter.Remove(ctx, "reuse-1"); err != nil {
 		t.Fatal(err)
 	}
-	if _, err := snapshotter.View(ctx, "reuse-1", "committed-1"); err != nil {
+	if _, err := snapshotter.View(ctx, "reuse-1", "committed-1", opt); err != nil {
 		t.Fatal(err)
 	}
 	if err := snapshotter.Remove(ctx, "reuse-1"); err != nil {
 		t.Fatal(err)
 	}
-	if _, err := snapshotter.Prepare(ctx, "reuse-1", ""); err != nil {
+	if _, err := snapshotter.Prepare(ctx, "reuse-1", "", opt); err != nil {
 		t.Fatal(err)
 	}
 	if err := snapshotter.Remove(ctx, "committed-1"); err != nil {
 		t.Fatal(err)
 	}
-	if err := snapshotter.Commit(ctx, "commited-1", "reuse-1"); err != nil {
+	if err := snapshotter.Commit(ctx, "commited-1", "reuse-1", opt); err != nil {
 		t.Fatal(err)
 	}
 }
@@ -742,15 +751,15 @@ func checkRemove(ctx context.Context, t *testing.T, snapshotter snapshot.Snapsho
 // This function is called only when WithTestViewReadonly is true.
 func checkSnapshotterViewReadonly(ctx context.Context, t *testing.T, snapshotter snapshot.Snapshotter, work string) {
 	preparing := filepath.Join(work, "preparing")
-	if _, err := snapshotter.Prepare(ctx, preparing, ""); err != nil {
+	if _, err := snapshotter.Prepare(ctx, preparing, "", opt); err != nil {
 		t.Fatal(err)
 	}
 	committed := filepath.Join(work, "commited")
-	if err := snapshotter.Commit(ctx, committed, preparing); err != nil {
+	if err := snapshotter.Commit(ctx, committed, preparing, opt); err != nil {
 		t.Fatal(err)
 	}
 	view := filepath.Join(work, "view")
-	m, err := snapshotter.View(ctx, view, committed)
+	m, err := snapshotter.View(ctx, view, committed, opt)
 	if err != nil {
 		t.Fatal(err)
 	}
