From edb38dfecce68ff48c186373a0668eb72cb40d73 Mon Sep 17 00:00:00 2001
From: Lantao Liu
Date: Tue, 13 Mar 2018 04:41:11 +0000
Subject: [PATCH] Update containerd to 3013762fc58941e33ba70e8f8d9256911f134124

Signed-off-by: Lantao Liu
---
 vendor.conf | 5 +-
 vendor/github.com/containerd/cgroups/blkio.go | 16 +
 .../github.com/containerd/cgroups/cgroup.go | 16 +
 .../github.com/containerd/cgroups/control.go | 16 +
 vendor/github.com/containerd/cgroups/cpu.go | 16 +
 .../github.com/containerd/cgroups/cpuacct.go | 16 +
 .../github.com/containerd/cgroups/cpuset.go | 16 +
 .../github.com/containerd/cgroups/devices.go | 22 +-
 .../github.com/containerd/cgroups/errors.go | 16 +
 .../github.com/containerd/cgroups/freezer.go | 16 +
 .../containerd/cgroups/hierarchy.go | 16 +
 .../github.com/containerd/cgroups/hugetlb.go | 16 +
 .../github.com/containerd/cgroups/memory.go | 16 +
 vendor/github.com/containerd/cgroups/named.go | 16 +
 .../github.com/containerd/cgroups/net_cls.go | 16 +
 .../github.com/containerd/cgroups/net_prio.go | 16 +
 vendor/github.com/containerd/cgroups/paths.go | 16 +
 .../containerd/cgroups/perf_event.go | 16 +
 vendor/github.com/containerd/cgroups/pids.go | 16 +
 vendor/github.com/containerd/cgroups/state.go | 16 +
 .../containerd/cgroups/subsystem.go | 16 +
 .../github.com/containerd/cgroups/systemd.go | 16 +
 vendor/github.com/containerd/cgroups/ticks.go | 16 +
 vendor/github.com/containerd/cgroups/utils.go | 16 +
 vendor/github.com/containerd/cgroups/v1.go | 16 +
 .../containerd/containerd/archive/strconv.go | 2 +-
 .../containerd/containerd/archive/tar.go | 2 +-
 .../containerd/containerd/archive/tar_unix.go | 25 +-
 .../containerd/archive/tar_windows.go | 2 +-
 .../containerd/containerd/client.go | 166 ++--
 .../containerd/containerd/client_opts.go | 12 +
 .../github.com/containerd/containerd/diff.go | 5 +-
 .../containerd/containerd/events.go | 119 +++
 .../containerd/containerd/images/handlers.go | 3 +-
 .../containerd/containerd/images/image.go | 4 +-
 .../github.com/containerd/containerd/lease.go | 6 +-
 .../containerd/containerd/linux/bundle.go | 23 +-
 .../containerd/containerd/linux/proc/io.go | 114 ++-
 .../containerd/containerd/linux/runtime.go | 6 +-
 .../containerd/containerd/metadata/db.go | 9 +
 .../containerd/containerd/mount/temp.go | 16 +
 .../containerd/containerd/mount/temp_unix.go | 16 +
 .../containerd/mount/temp_unsupported.go | 16 +
 .../containerd/platforms/platforms.go | 41 +-
 .../containerd/containerd/plugin/plugin.go | 2 +
 .../containerd/remotes/docker/resolver.go | 3 +-
 .../containerd/containerd/services.go | 112 +++
 .../containerd/services/containers/local.go | 189 ++++
 .../containerd/services/containers/service.go | 150 +--
 .../containerd/services/content/service.go | 38 +-
 .../containerd/services/content/store.go | 71 ++
 .../containerd/services/diff/local.go | 178 ++++
 .../containerd/services/diff/service.go | 142 +--
 .../containerd/services/images/local.go | 183 ++++
 .../containerd/services/images/service.go | 141 +--
 .../containerd/services/leases/local.go | 120 +++
 .../containerd/services/leases/service.go | 91 +-
 .../containerd/services/namespaces/local.go | 223 +++++
 .../containerd/services/namespaces/service.go | 184 +---
 .../containerd/services/services.go | 36 +
 .../containerd/services/snapshots/service.go | 46 +-
 .../services/snapshots/snapshotters.go | 98 ++
 .../containerd/services/tasks/local.go | 634 +++++++++++++
 .../containerd/services/tasks/service.go | 577 +-----------
 .../containerd/snapshots/overlay/check.go | 88 ++
 .../containerd/containerd/vendor.conf | 19 +-
vendor/github.com/dmcgowan/go-tar/LICENSE | 27 - vendor/github.com/dmcgowan/go-tar/common.go | 796 ---------------- vendor/github.com/dmcgowan/go-tar/format.go | 301 ------- vendor/github.com/dmcgowan/go-tar/reader.go | 852 ------------------ .../github.com/dmcgowan/go-tar/sparse_unix.go | 77 -- .../dmcgowan/go-tar/sparse_windows.go | 129 --- .../dmcgowan/go-tar/stat_actime1.go | 20 - .../dmcgowan/go-tar/stat_actime2.go | 20 - .../github.com/dmcgowan/go-tar/stat_unix.go | 96 -- vendor/github.com/dmcgowan/go-tar/strconv.go | 326 ------- vendor/github.com/dmcgowan/go-tar/writer.go | 629 ------------- .../pkg/util/interrupt/interrupt.go | 104 --- 78 files changed, 2922 insertions(+), 4762 deletions(-) create mode 100644 vendor/github.com/containerd/containerd/events.go create mode 100644 vendor/github.com/containerd/containerd/services.go create mode 100644 vendor/github.com/containerd/containerd/services/containers/local.go create mode 100644 vendor/github.com/containerd/containerd/services/content/store.go create mode 100644 vendor/github.com/containerd/containerd/services/diff/local.go create mode 100644 vendor/github.com/containerd/containerd/services/images/local.go create mode 100644 vendor/github.com/containerd/containerd/services/leases/local.go create mode 100644 vendor/github.com/containerd/containerd/services/namespaces/local.go create mode 100644 vendor/github.com/containerd/containerd/services/services.go create mode 100644 vendor/github.com/containerd/containerd/services/snapshots/snapshotters.go create mode 100644 vendor/github.com/containerd/containerd/services/tasks/local.go create mode 100644 vendor/github.com/containerd/containerd/snapshots/overlay/check.go delete mode 100644 vendor/github.com/dmcgowan/go-tar/LICENSE delete mode 100644 vendor/github.com/dmcgowan/go-tar/common.go delete mode 100644 vendor/github.com/dmcgowan/go-tar/format.go delete mode 100644 vendor/github.com/dmcgowan/go-tar/reader.go delete mode 100644 vendor/github.com/dmcgowan/go-tar/sparse_unix.go delete mode 100644 vendor/github.com/dmcgowan/go-tar/sparse_windows.go delete mode 100644 vendor/github.com/dmcgowan/go-tar/stat_actime1.go delete mode 100644 vendor/github.com/dmcgowan/go-tar/stat_actime2.go delete mode 100644 vendor/github.com/dmcgowan/go-tar/stat_unix.go delete mode 100644 vendor/github.com/dmcgowan/go-tar/strconv.go delete mode 100644 vendor/github.com/dmcgowan/go-tar/writer.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/util/interrupt/interrupt.go diff --git a/vendor.conf b/vendor.conf index 71fe38da7..73d63e523 100644 --- a/vendor.conf +++ b/vendor.conf @@ -2,9 +2,9 @@ github.com/beorn7/perks 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9 github.com/blang/semver v3.1.0 github.com/boltdb/bolt e9cf4fae01b5a8ff89d0ec6b32f0d9c9f79aefdd github.com/BurntSushi/toml a368813c5e648fee92e5f6c30e3944ff9d5e8895 -github.com/containerd/cgroups c0710c92e8b3a44681d1321dcfd1360fc5c6c089 +github.com/containerd/cgroups fe281dd265766145e943a034aa41086474ea6130 github.com/containerd/console 84eeaae905fa414d03e07bcd6c8d3f19e7cf180e -github.com/containerd/containerd 25c403415aa99d0f3a609043429f3d24c8b70c0c +github.com/containerd/containerd 3013762fc58941e33ba70e8f8d9256911f134124 github.com/containerd/continuity d8fb8589b0e8e85b8c8bbaa8840226d0dfeb7371 github.com/containerd/fifo fbfb6a11ec671efbe94ad1c12c2e98773f19e1e6 github.com/containerd/go-runc 4f6e87ae043f859a38255247b49c9abc262d002f @@ -14,7 +14,6 @@ github.com/containernetworking/plugins v0.6.0 github.com/coreos/go-systemd 
48702e0da86bd25e76cfef347e2adeb434a0d0a6 github.com/cri-o/ocicni 9b451e26eb7c694d564991fbf44f77d0afb9b03c github.com/davecgh/go-spew v1.1.0 -github.com/dmcgowan/go-tar go1.10 github.com/docker/distribution b38e5838b7b2f2ad48e06ec4b500011976080621 github.com/docker/docker 86f080cff0914e9694068ed78d503701667c4c00 github.com/docker/go-events 9461782956ad83b30282bf90e31fa6a70c255ba9 diff --git a/vendor/github.com/containerd/cgroups/blkio.go b/vendor/github.com/containerd/cgroups/blkio.go index 9b15b8a62..fc1e689cb 100644 --- a/vendor/github.com/containerd/cgroups/blkio.go +++ b/vendor/github.com/containerd/cgroups/blkio.go @@ -1,3 +1,19 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + package cgroups import ( diff --git a/vendor/github.com/containerd/cgroups/cgroup.go b/vendor/github.com/containerd/cgroups/cgroup.go index d1c36bde3..d4c40f749 100644 --- a/vendor/github.com/containerd/cgroups/cgroup.go +++ b/vendor/github.com/containerd/cgroups/cgroup.go @@ -1,3 +1,19 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + package cgroups import ( diff --git a/vendor/github.com/containerd/cgroups/control.go b/vendor/github.com/containerd/cgroups/control.go index 2de26732c..a554a528f 100644 --- a/vendor/github.com/containerd/cgroups/control.go +++ b/vendor/github.com/containerd/cgroups/control.go @@ -1,3 +1,19 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + package cgroups import ( diff --git a/vendor/github.com/containerd/cgroups/cpu.go b/vendor/github.com/containerd/cgroups/cpu.go index 7df1b1cdf..431cd3e51 100644 --- a/vendor/github.com/containerd/cgroups/cpu.go +++ b/vendor/github.com/containerd/cgroups/cpu.go @@ -1,3 +1,19 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + package cgroups import ( diff --git a/vendor/github.com/containerd/cgroups/cpuacct.go b/vendor/github.com/containerd/cgroups/cpuacct.go index 443692d21..42a490a87 100644 --- a/vendor/github.com/containerd/cgroups/cpuacct.go +++ b/vendor/github.com/containerd/cgroups/cpuacct.go @@ -1,3 +1,19 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + package cgroups import ( diff --git a/vendor/github.com/containerd/cgroups/cpuset.go b/vendor/github.com/containerd/cgroups/cpuset.go index f0f95f569..50d95da93 100644 --- a/vendor/github.com/containerd/cgroups/cpuset.go +++ b/vendor/github.com/containerd/cgroups/cpuset.go @@ -1,3 +1,19 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + package cgroups import ( diff --git a/vendor/github.com/containerd/cgroups/devices.go b/vendor/github.com/containerd/cgroups/devices.go index f0dca5c54..f9a118b22 100644 --- a/vendor/github.com/containerd/cgroups/devices.go +++ b/vendor/github.com/containerd/cgroups/devices.go @@ -1,3 +1,19 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + package cgroups import ( @@ -58,11 +74,11 @@ func (d *devicesController) Update(path string, resources *specs.LinuxResources) } func deviceString(device specs.LinuxDeviceCgroup) string { - return fmt.Sprintf("%c %s:%s %s", - &device.Type, + return fmt.Sprintf("%s %s:%s %s", + device.Type, deviceNumber(device.Major), deviceNumber(device.Minor), - &device.Access, + device.Access, ) } diff --git a/vendor/github.com/containerd/cgroups/errors.go b/vendor/github.com/containerd/cgroups/errors.go index a5824fe23..f1ad8315c 100644 --- a/vendor/github.com/containerd/cgroups/errors.go +++ b/vendor/github.com/containerd/cgroups/errors.go @@ -1,3 +1,19 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + package cgroups import ( diff --git a/vendor/github.com/containerd/cgroups/freezer.go b/vendor/github.com/containerd/cgroups/freezer.go index 0beefc6ea..5e668408a 100644 --- a/vendor/github.com/containerd/cgroups/freezer.go +++ b/vendor/github.com/containerd/cgroups/freezer.go @@ -1,3 +1,19 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + package cgroups import ( diff --git a/vendor/github.com/containerd/cgroups/hierarchy.go b/vendor/github.com/containerd/cgroups/hierarchy.go index b61660d41..9221bf3f1 100644 --- a/vendor/github.com/containerd/cgroups/hierarchy.go +++ b/vendor/github.com/containerd/cgroups/hierarchy.go @@ -1,3 +1,19 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + package cgroups // Hierarchy enableds both unified and split hierarchy for cgroups diff --git a/vendor/github.com/containerd/cgroups/hugetlb.go b/vendor/github.com/containerd/cgroups/hugetlb.go index af75e1b8e..3718706d7 100644 --- a/vendor/github.com/containerd/cgroups/hugetlb.go +++ b/vendor/github.com/containerd/cgroups/hugetlb.go @@ -1,3 +1,19 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + package cgroups import ( diff --git a/vendor/github.com/containerd/cgroups/memory.go b/vendor/github.com/containerd/cgroups/memory.go index 3ecb0f77f..ce15ca2b9 100644 --- a/vendor/github.com/containerd/cgroups/memory.go +++ b/vendor/github.com/containerd/cgroups/memory.go @@ -1,3 +1,19 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + package cgroups import ( diff --git a/vendor/github.com/containerd/cgroups/named.go b/vendor/github.com/containerd/cgroups/named.go index f0fbf018e..06b16c3b1 100644 --- a/vendor/github.com/containerd/cgroups/named.go +++ b/vendor/github.com/containerd/cgroups/named.go @@ -1,3 +1,19 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + package cgroups import "path/filepath" diff --git a/vendor/github.com/containerd/cgroups/net_cls.go b/vendor/github.com/containerd/cgroups/net_cls.go index 1558444bd..8f1a2651f 100644 --- a/vendor/github.com/containerd/cgroups/net_cls.go +++ b/vendor/github.com/containerd/cgroups/net_cls.go @@ -1,3 +1,19 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + package cgroups import ( diff --git a/vendor/github.com/containerd/cgroups/net_prio.go b/vendor/github.com/containerd/cgroups/net_prio.go index 0959b8e70..c77169215 100644 --- a/vendor/github.com/containerd/cgroups/net_prio.go +++ b/vendor/github.com/containerd/cgroups/net_prio.go @@ -1,3 +1,19 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + package cgroups import ( diff --git a/vendor/github.com/containerd/cgroups/paths.go b/vendor/github.com/containerd/cgroups/paths.go index 1afc24b66..455ce857f 100644 --- a/vendor/github.com/containerd/cgroups/paths.go +++ b/vendor/github.com/containerd/cgroups/paths.go @@ -1,3 +1,19 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + package cgroups import ( diff --git a/vendor/github.com/containerd/cgroups/perf_event.go b/vendor/github.com/containerd/cgroups/perf_event.go index 0fa43ec1f..648786db6 100644 --- a/vendor/github.com/containerd/cgroups/perf_event.go +++ b/vendor/github.com/containerd/cgroups/perf_event.go @@ -1,3 +1,19 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + package cgroups import "path/filepath" diff --git a/vendor/github.com/containerd/cgroups/pids.go b/vendor/github.com/containerd/cgroups/pids.go index dcf4e29e1..a1cfcb88d 100644 --- a/vendor/github.com/containerd/cgroups/pids.go +++ b/vendor/github.com/containerd/cgroups/pids.go @@ -1,3 +1,19 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + package cgroups import ( diff --git a/vendor/github.com/containerd/cgroups/state.go b/vendor/github.com/containerd/cgroups/state.go index f1d7b7b60..cfeabbbc6 100644 --- a/vendor/github.com/containerd/cgroups/state.go +++ b/vendor/github.com/containerd/cgroups/state.go @@ -1,3 +1,19 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + package cgroups // State is a type that represents the state of the current cgroup diff --git a/vendor/github.com/containerd/cgroups/subsystem.go b/vendor/github.com/containerd/cgroups/subsystem.go index 9393eec19..e1f10ec68 100644 --- a/vendor/github.com/containerd/cgroups/subsystem.go +++ b/vendor/github.com/containerd/cgroups/subsystem.go @@ -1,3 +1,19 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + package cgroups import ( diff --git a/vendor/github.com/containerd/cgroups/systemd.go b/vendor/github.com/containerd/cgroups/systemd.go index fd816e32e..8153d744c 100644 --- a/vendor/github.com/containerd/cgroups/systemd.go +++ b/vendor/github.com/containerd/cgroups/systemd.go @@ -1,3 +1,19 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + package cgroups import ( diff --git a/vendor/github.com/containerd/cgroups/ticks.go b/vendor/github.com/containerd/cgroups/ticks.go index f471c5ae7..84dc38d0c 100644 --- a/vendor/github.com/containerd/cgroups/ticks.go +++ b/vendor/github.com/containerd/cgroups/ticks.go @@ -1,3 +1,19 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + package cgroups func getClockTicks() uint64 { diff --git a/vendor/github.com/containerd/cgroups/utils.go b/vendor/github.com/containerd/cgroups/utils.go index d5c7230a0..4e6a27054 100644 --- a/vendor/github.com/containerd/cgroups/utils.go +++ b/vendor/github.com/containerd/cgroups/utils.go @@ -1,3 +1,19 @@ +/* + Copyright The containerd Authors. 
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + package cgroups import ( diff --git a/vendor/github.com/containerd/cgroups/v1.go b/vendor/github.com/containerd/cgroups/v1.go index f6608f7c7..a076d4692 100644 --- a/vendor/github.com/containerd/cgroups/v1.go +++ b/vendor/github.com/containerd/cgroups/v1.go @@ -1,3 +1,19 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + package cgroups import ( diff --git a/vendor/github.com/containerd/containerd/archive/strconv.go b/vendor/github.com/containerd/containerd/archive/strconv.go index d262e9059..13746e4b6 100644 --- a/vendor/github.com/containerd/containerd/archive/strconv.go +++ b/vendor/github.com/containerd/containerd/archive/strconv.go @@ -23,7 +23,7 @@ import ( "strings" "time" - "github.com/dmcgowan/go-tar" + "archive/tar" ) // Forked from https://github.com/golang/go/blob/master/src/archive/tar/strconv.go diff --git a/vendor/github.com/containerd/containerd/archive/tar.go b/vendor/github.com/containerd/containerd/archive/tar.go index 7e9182c8f..dafd770fb 100644 --- a/vendor/github.com/containerd/containerd/archive/tar.go +++ b/vendor/github.com/containerd/containerd/archive/tar.go @@ -17,6 +17,7 @@ package archive import ( + "archive/tar" "context" "fmt" "io" @@ -31,7 +32,6 @@ import ( "github.com/containerd/containerd/log" "github.com/containerd/continuity/fs" - "github.com/dmcgowan/go-tar" "github.com/pkg/errors" ) diff --git a/vendor/github.com/containerd/containerd/archive/tar_unix.go b/vendor/github.com/containerd/containerd/archive/tar_unix.go index f577996ee..022dd6d4f 100644 --- a/vendor/github.com/containerd/containerd/archive/tar_unix.go +++ b/vendor/github.com/containerd/containerd/archive/tar_unix.go @@ -19,13 +19,13 @@ package archive import ( + "archive/tar" "context" "os" "sync" "syscall" "github.com/containerd/continuity/sysx" - "github.com/dmcgowan/go-tar" "github.com/opencontainers/runc/libcontainer/system" "github.com/pkg/errors" "golang.org/x/sys/unix" @@ -87,10 +87,6 @@ func mkdir(path string, perm os.FileMode) error { return os.Chmod(path, perm) } -func skipFile(*tar.Header) bool { - return false -} - var ( inUserNS bool nsOnce sync.Once @@ -100,15 +96,22 @@ func setInUserNS() { inUserNS = system.RunningInUserNS() } -// handleTarTypeBlockCharFifo is an OS-specific helper function used by -// createTarFile to handle the following types of header: Block; Char; Fifo -func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error { - nsOnce.Do(setInUserNS) - if inUserNS { +func skipFile(hdr 
*tar.Header) bool { + switch hdr.Typeflag { + case tar.TypeBlock, tar.TypeChar: // cannot create a device if running in user namespace - return nil + nsOnce.Do(setInUserNS) + return inUserNS + default: + return false } +} +// handleTarTypeBlockCharFifo is an OS-specific helper function used by +// createTarFile to handle the following types of header: Block; Char; Fifo. +// This function must not be called for Block and Char when running in userns. +// (skipFile() should return true for them.) +func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error { mode := uint32(hdr.Mode & 07777) switch hdr.Typeflag { case tar.TypeBlock: diff --git a/vendor/github.com/containerd/containerd/archive/tar_windows.go b/vendor/github.com/containerd/containerd/archive/tar_windows.go index 7172e735c..80edcbbe0 100644 --- a/vendor/github.com/containerd/containerd/archive/tar_windows.go +++ b/vendor/github.com/containerd/containerd/archive/tar_windows.go @@ -19,6 +19,7 @@ package archive import ( + "archive/tar" "bufio" "context" "encoding/base64" @@ -36,7 +37,6 @@ import ( "github.com/Microsoft/hcsshim" "github.com/containerd/containerd/log" "github.com/containerd/containerd/sys" - "github.com/dmcgowan/go-tar" ) const ( diff --git a/vendor/github.com/containerd/containerd/client.go b/vendor/github.com/containerd/containerd/client.go index 2ac256dd9..1ec0eb549 100644 --- a/vendor/github.com/containerd/containerd/client.go +++ b/vendor/github.com/containerd/containerd/client.go @@ -31,6 +31,7 @@ import ( eventsapi "github.com/containerd/containerd/api/services/events/v1" imagesapi "github.com/containerd/containerd/api/services/images/v1" introspectionapi "github.com/containerd/containerd/api/services/introspection/v1" + leasesapi "github.com/containerd/containerd/api/services/leases/v1" namespacesapi "github.com/containerd/containerd/api/services/namespaces/v1" snapshotsapi "github.com/containerd/containerd/api/services/snapshots/v1" "github.com/containerd/containerd/api/services/tasks/v1" @@ -39,6 +40,7 @@ import ( "github.com/containerd/containerd/content" "github.com/containerd/containerd/dialer" "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/events" "github.com/containerd/containerd/images" "github.com/containerd/containerd/namespaces" "github.com/containerd/containerd/platforms" @@ -75,54 +77,73 @@ func New(address string, opts ...ClientOpt) (*Client, error) { return nil, err } } - gopts := []grpc.DialOption{ - grpc.WithBlock(), - grpc.WithInsecure(), - grpc.WithTimeout(60 * time.Second), - grpc.FailOnNonTempDialError(true), - grpc.WithBackoffMaxDelay(3 * time.Second), - grpc.WithDialer(dialer.Dialer), + c := &Client{ + runtime: fmt.Sprintf("%s.%s", plugin.RuntimePlugin, runtime.GOOS), } - if len(copts.dialOptions) > 0 { - gopts = copts.dialOptions + if copts.services != nil { + c.services = *copts.services } - if copts.defaultns != "" { - unary, stream := newNSInterceptors(copts.defaultns) - gopts = append(gopts, - grpc.WithUnaryInterceptor(unary), - grpc.WithStreamInterceptor(stream), - ) - } - connector := func() (*grpc.ClientConn, error) { - conn, err := grpc.Dial(dialer.DialAddress(address), gopts...) 
- if err != nil { - return nil, errors.Wrapf(err, "failed to dial %q", address) + if address != "" { + gopts := []grpc.DialOption{ + grpc.WithBlock(), + grpc.WithInsecure(), + grpc.WithTimeout(60 * time.Second), + grpc.FailOnNonTempDialError(true), + grpc.WithBackoffMaxDelay(3 * time.Second), + grpc.WithDialer(dialer.Dialer), } - return conn, nil + if len(copts.dialOptions) > 0 { + gopts = copts.dialOptions + } + if copts.defaultns != "" { + unary, stream := newNSInterceptors(copts.defaultns) + gopts = append(gopts, + grpc.WithUnaryInterceptor(unary), + grpc.WithStreamInterceptor(stream), + ) + } + connector := func() (*grpc.ClientConn, error) { + conn, err := grpc.Dial(dialer.DialAddress(address), gopts...) + if err != nil { + return nil, errors.Wrapf(err, "failed to dial %q", address) + } + return conn, nil + } + conn, err := connector() + if err != nil { + return nil, err + } + c.conn, c.connector = conn, connector } - conn, err := connector() - if err != nil { - return nil, err + if copts.services == nil && c.conn == nil { + return nil, errors.New("no grpc connection or services is available") } - return &Client{ - conn: conn, - connector: connector, - runtime: fmt.Sprintf("%s.%s", plugin.RuntimePlugin, runtime.GOOS), - }, nil + return c, nil } // NewWithConn returns a new containerd client that is connected to the containerd // instance provided by the connection func NewWithConn(conn *grpc.ClientConn, opts ...ClientOpt) (*Client, error) { - return &Client{ + var copts clientOpts + for _, o := range opts { + if err := o(&copts); err != nil { + return nil, err + } + } + c := &Client{ conn: conn, runtime: fmt.Sprintf("%s.%s", plugin.RuntimePlugin, runtime.GOOS), - }, nil + } + if copts.services != nil { + c.services = *copts.services + } + return c, nil } // Client is the client to interact with containerd and its various services // using a uniform interface type Client struct { + services conn *grpc.ClientConn runtime string connector func() (*grpc.ClientConn, error) @@ -149,6 +170,9 @@ func (c *Client) Reconnect() error { // connection. A timeout can be set in the context to ensure it returns // early. func (c *Client) IsServing(ctx context.Context) (bool, error) { + if c.conn == nil { + return false, errors.New("no grpc connection available") + } r, err := c.HealthService().Check(ctx, &grpc_health_v1.HealthCheckRequest{}, grpc.FailFast(false)) if err != nil { return false, err @@ -385,43 +409,8 @@ func (c *Client) ListImages(ctx context.Context, filters ...string) ([]Image, er // // The subscriber can stop receiving events by canceling the provided context. // The errs channel will be closed and return a nil error. -func (c *Client) Subscribe(ctx context.Context, filters ...string) (ch <-chan *eventsapi.Envelope, errs <-chan error) { - var ( - evq = make(chan *eventsapi.Envelope) - errq = make(chan error, 1) - ) - - errs = errq - ch = evq - - session, err := c.EventService().Subscribe(ctx, &eventsapi.SubscribeRequest{ - Filters: filters, - }) - if err != nil { - errq <- err - close(errq) - return - } - - go func() { - defer close(errq) - - for { - ev, err := session.Recv() - if err != nil { - errq <- err - return - } - - select { - case evq <- ev: - case <-ctx.Done(): - return - } - } - }() - - return ch, errs +func (c *Client) Subscribe(ctx context.Context, filters ...string) (ch <-chan *events.Envelope, errs <-chan error) { + return c.EventService().Subscribe(ctx, filters...) 
} // Close closes the clients connection to containerd @@ -431,36 +420,57 @@ func (c *Client) Close() error { // NamespaceService returns the underlying Namespaces Store func (c *Client) NamespaceService() namespaces.Store { + if c.namespaceStore != nil { + return c.namespaceStore + } return NewNamespaceStoreFromClient(namespacesapi.NewNamespacesClient(c.conn)) } // ContainerService returns the underlying container Store func (c *Client) ContainerService() containers.Store { + if c.containerStore != nil { + return c.containerStore + } return NewRemoteContainerStore(containersapi.NewContainersClient(c.conn)) } // ContentStore returns the underlying content Store func (c *Client) ContentStore() content.Store { + if c.contentStore != nil { + return c.contentStore + } return NewContentStoreFromClient(contentapi.NewContentClient(c.conn)) } // SnapshotService returns the underlying snapshotter for the provided snapshotter name func (c *Client) SnapshotService(snapshotterName string) snapshots.Snapshotter { + if c.snapshotters != nil { + return c.snapshotters[snapshotterName] + } return NewSnapshotterFromClient(snapshotsapi.NewSnapshotsClient(c.conn), snapshotterName) } // TaskService returns the underlying TasksClient func (c *Client) TaskService() tasks.TasksClient { + if c.taskService != nil { + return c.taskService + } return tasks.NewTasksClient(c.conn) } // ImageService returns the underlying image Store func (c *Client) ImageService() images.Store { + if c.imageStore != nil { + return c.imageStore + } return NewImageStoreFromClient(imagesapi.NewImagesClient(c.conn)) } // DiffService returns the underlying Differ func (c *Client) DiffService() DiffService { + if c.diffService != nil { + return c.diffService + } return NewDiffServiceFromClient(diffapi.NewDiffClient(c.conn)) } @@ -469,14 +479,25 @@ func (c *Client) IntrospectionService() introspectionapi.IntrospectionClient { return introspectionapi.NewIntrospectionClient(c.conn) } +// LeasesService returns the underlying Leases Client +func (c *Client) LeasesService() leasesapi.LeasesClient { + if c.leasesService != nil { + return c.leasesService + } + return leasesapi.NewLeasesClient(c.conn) +} + // HealthService returns the underlying GRPC HealthClient func (c *Client) HealthService() grpc_health_v1.HealthClient { return grpc_health_v1.NewHealthClient(c.conn) } -// EventService returns the underlying EventsClient -func (c *Client) EventService() eventsapi.EventsClient { - return eventsapi.NewEventsClient(c.conn) +// EventService returns the underlying event service +func (c *Client) EventService() EventService { + if c.eventService != nil { + return c.eventService + } + return NewEventServiceFromClient(eventsapi.NewEventsClient(c.conn)) } // VersionService returns the underlying VersionClient @@ -494,6 +515,9 @@ type Version struct { // Version returns the version of containerd that the client is connected to func (c *Client) Version(ctx context.Context) (Version, error) { + if c.conn == nil { + return Version{}, errors.New("no grpc connection available") + } response, err := c.VersionService().Version(ctx, &ptypes.Empty{}) if err != nil { return Version{}, err diff --git a/vendor/github.com/containerd/containerd/client_opts.go b/vendor/github.com/containerd/containerd/client_opts.go index dfa02ee7b..1859c4865 100644 --- a/vendor/github.com/containerd/containerd/client_opts.go +++ b/vendor/github.com/containerd/containerd/client_opts.go @@ -24,6 +24,7 @@ import ( type clientOpts struct { defaultns string + services *services dialOptions 
[]grpc.DialOption } @@ -49,6 +50,17 @@ func WithDialOpts(opts []grpc.DialOption) ClientOpt { } } +// WithServices sets services used by the client. +func WithServices(opts ...ServicesOpt) ClientOpt { + return func(c *clientOpts) error { + c.services = &services{} + for _, o := range opts { + o(c.services) + } + return nil + } +} + // RemoteOpt allows the caller to set distribution options for a remote type RemoteOpt func(*Client, *RemoteContext) error diff --git a/vendor/github.com/containerd/containerd/diff.go b/vendor/github.com/containerd/containerd/diff.go index af95e03e7..8d1219e35 100644 --- a/vendor/github.com/containerd/containerd/diff.go +++ b/vendor/github.com/containerd/containerd/diff.go @@ -22,6 +22,7 @@ import ( diffapi "github.com/containerd/containerd/api/services/diff/v1" "github.com/containerd/containerd/api/types" "github.com/containerd/containerd/diff" + "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/mount" ocispec "github.com/opencontainers/image-spec/specs-go/v1" ) @@ -51,7 +52,7 @@ func (r *diffRemote) Apply(ctx context.Context, diff ocispec.Descriptor, mounts } resp, err := r.client.Apply(ctx, req) if err != nil { - return ocispec.Descriptor{}, err + return ocispec.Descriptor{}, errdefs.FromGRPC(err) } return toDescriptor(resp.Applied), nil } @@ -72,7 +73,7 @@ func (r *diffRemote) Compare(ctx context.Context, a, b []mount.Mount, opts ...di } resp, err := r.client.Diff(ctx, req) if err != nil { - return ocispec.Descriptor{}, err + return ocispec.Descriptor{}, errdefs.FromGRPC(err) } return toDescriptor(resp.Diff), nil } diff --git a/vendor/github.com/containerd/containerd/events.go b/vendor/github.com/containerd/containerd/events.go new file mode 100644 index 000000000..92e9cd510 --- /dev/null +++ b/vendor/github.com/containerd/containerd/events.go @@ -0,0 +1,119 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package containerd + +import ( + "context" + + eventsapi "github.com/containerd/containerd/api/services/events/v1" + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/events" + "github.com/containerd/typeurl" +) + +// EventService handles the publish, forward and subscribe of events. +type EventService interface { + events.Publisher + events.Forwarder + events.Subscriber +} + +// NewEventServiceFromClient returns a new event service which communicates +// over a GRPC connection. 
+func NewEventServiceFromClient(client eventsapi.EventsClient) EventService { + return &eventRemote{ + client: client, + } +} + +type eventRemote struct { + client eventsapi.EventsClient +} + +func (e *eventRemote) Publish(ctx context.Context, topic string, event events.Event) error { + any, err := typeurl.MarshalAny(event) + if err != nil { + return err + } + req := &eventsapi.PublishRequest{ + Topic: topic, + Event: any, + } + if _, err := e.client.Publish(ctx, req); err != nil { + return errdefs.FromGRPC(err) + } + return nil +} + +func (e *eventRemote) Forward(ctx context.Context, envelope *events.Envelope) error { + req := &eventsapi.ForwardRequest{ + Envelope: &eventsapi.Envelope{ + Timestamp: envelope.Timestamp, + Namespace: envelope.Namespace, + Topic: envelope.Topic, + Event: envelope.Event, + }, + } + if _, err := e.client.Forward(ctx, req); err != nil { + return errdefs.FromGRPC(err) + } + return nil +} + +func (e *eventRemote) Subscribe(ctx context.Context, filters ...string) (ch <-chan *events.Envelope, errs <-chan error) { + var ( + evq = make(chan *events.Envelope) + errq = make(chan error, 1) + ) + + errs = errq + ch = evq + + session, err := e.client.Subscribe(ctx, &eventsapi.SubscribeRequest{ + Filters: filters, + }) + if err != nil { + errq <- err + close(errq) + return + } + + go func() { + defer close(errq) + + for { + ev, err := session.Recv() + if err != nil { + errq <- err + return + } + + select { + case evq <- &events.Envelope{ + Timestamp: ev.Timestamp, + Namespace: ev.Namespace, + Topic: ev.Topic, + Event: ev.Event, + }: + case <-ctx.Done(): + return + } + } + }() + + return ch, errs +} diff --git a/vendor/github.com/containerd/containerd/images/handlers.go b/vendor/github.com/containerd/containerd/images/handlers.go index b95251c57..f127eb27f 100644 --- a/vendor/github.com/containerd/containerd/images/handlers.go +++ b/vendor/github.com/containerd/containerd/images/handlers.go @@ -193,10 +193,11 @@ func FilterPlatform(platform string, f HandlerFunc) HandlerFunc { var descs []ocispec.Descriptor if platform != "" && isMultiPlatform(desc.MediaType) { - matcher, err := platforms.Parse(platform) + p, err := platforms.Parse(platform) if err != nil { return nil, err } + matcher := platforms.NewMatcher(p) for _, d := range children { if d.Platform == nil || matcher.Match(*d.Platform) { diff --git a/vendor/github.com/containerd/containerd/images/image.go b/vendor/github.com/containerd/containerd/images/image.go index cdcf0af34..e986fcb2f 100644 --- a/vendor/github.com/containerd/containerd/images/image.go +++ b/vendor/github.com/containerd/containerd/images/image.go @@ -131,13 +131,13 @@ func Manifest(ctx context.Context, provider content.Provider, image ocispec.Desc var ( matcher platforms.Matcher m *ocispec.Manifest - err error ) if platform != "" { - matcher, err = platforms.Parse(platform) + p, err := platforms.Parse(platform) if err != nil { return ocispec.Manifest{}, err } + matcher = platforms.NewMatcher(p) } if err := Walk(ctx, HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { diff --git a/vendor/github.com/containerd/containerd/lease.go b/vendor/github.com/containerd/containerd/lease.go index 6187c1df7..5fc7833f8 100644 --- a/vendor/github.com/containerd/containerd/lease.go +++ b/vendor/github.com/containerd/containerd/lease.go @@ -36,7 +36,7 @@ type Lease struct { // CreateLease creates a new lease func (c *Client) CreateLease(ctx context.Context) (Lease, error) { - lapi := leasesapi.NewLeasesClient(c.conn) + lapi 
:= c.LeasesService() resp, err := lapi.Create(ctx, &leasesapi.CreateRequest{}) if err != nil { return Lease{}, err @@ -50,7 +50,7 @@ func (c *Client) CreateLease(ctx context.Context) (Lease, error) { // ListLeases lists active leases func (c *Client) ListLeases(ctx context.Context) ([]Lease, error) { - lapi := leasesapi.NewLeasesClient(c.conn) + lapi := c.LeasesService() resp, err := lapi.List(ctx, &leasesapi.ListRequest{}) if err != nil { return nil, err @@ -100,7 +100,7 @@ func (l Lease) CreatedAt() time.Time { // Delete deletes the lease, removing the reference to all resources created // during the lease. func (l Lease) Delete(ctx context.Context) error { - lapi := leasesapi.NewLeasesClient(l.client.conn) + lapi := l.client.LeasesService() _, err := lapi.Delete(ctx, &leasesapi.DeleteRequest{ ID: l.id, }) diff --git a/vendor/github.com/containerd/containerd/linux/bundle.go b/vendor/github.com/containerd/containerd/linux/bundle.go index 735547c5c..e87457131 100644 --- a/vendor/github.com/containerd/containerd/linux/bundle.go +++ b/vendor/github.com/containerd/containerd/linux/bundle.go @@ -85,24 +85,25 @@ type bundle struct { type ShimOpt func(*bundle, string, *runctypes.RuncOptions) (shim.Config, client.Opt) // ShimRemote is a ShimOpt for connecting and starting a remote shim -func ShimRemote(shimBinary, daemonAddress, cgroup string, debug bool, exitHandler func()) ShimOpt { +func ShimRemote(c *Config, daemonAddress, cgroup string, exitHandler func()) ShimOpt { return func(b *bundle, ns string, ropts *runctypes.RuncOptions) (shim.Config, client.Opt) { - return b.shimConfig(ns, ropts), - client.WithStart(shimBinary, b.shimAddress(ns), daemonAddress, cgroup, debug, exitHandler) + config := b.shimConfig(ns, c, ropts) + return config, + client.WithStart(c.Shim, b.shimAddress(ns), daemonAddress, cgroup, c.ShimDebug, exitHandler) } } // ShimLocal is a ShimOpt for using an in process shim implementation -func ShimLocal(exchange *exchange.Exchange) ShimOpt { +func ShimLocal(c *Config, exchange *exchange.Exchange) ShimOpt { return func(b *bundle, ns string, ropts *runctypes.RuncOptions) (shim.Config, client.Opt) { - return b.shimConfig(ns, ropts), client.WithLocal(exchange) + return b.shimConfig(ns, c, ropts), client.WithLocal(exchange) } } // ShimConnect is a ShimOpt for connecting to an existing remote shim -func ShimConnect(onClose func()) ShimOpt { +func ShimConnect(c *Config, onClose func()) ShimOpt { return func(b *bundle, ns string, ropts *runctypes.RuncOptions) (shim.Config, client.Opt) { - return b.shimConfig(ns, ropts), client.WithConnect(b.shimAddress(ns), onClose) + return b.shimConfig(ns, c, ropts), client.WithConnect(b.shimAddress(ns), onClose) } } @@ -130,16 +131,18 @@ func (b *bundle) shimAddress(namespace string) string { return filepath.Join(string(filepath.Separator), "containerd-shim", namespace, b.id, "shim.sock") } -func (b *bundle) shimConfig(namespace string, runcOptions *runctypes.RuncOptions) shim.Config { +func (b *bundle) shimConfig(namespace string, c *Config, runcOptions *runctypes.RuncOptions) shim.Config { var ( criuPath string - runtimeRoot string + runtimeRoot = c.RuntimeRoot systemdCgroup bool ) if runcOptions != nil { criuPath = runcOptions.CriuPath systemdCgroup = runcOptions.SystemdCgroup - runtimeRoot = runcOptions.RuntimeRoot + if runcOptions.RuntimeRoot != "" { + runtimeRoot = runcOptions.RuntimeRoot + } } return shim.Config{ Path: b.path, diff --git a/vendor/github.com/containerd/containerd/linux/proc/io.go 
b/vendor/github.com/containerd/containerd/linux/proc/io.go index 335b8b89c..96b759cf9 100644 --- a/vendor/github.com/containerd/containerd/linux/proc/io.go +++ b/vendor/github.com/containerd/containerd/linux/proc/io.go @@ -22,6 +22,7 @@ import ( "context" "fmt" "io" + "os" "sync" "syscall" @@ -37,44 +38,75 @@ var bufPool = sync.Pool{ } func copyPipes(ctx context.Context, rio runc.IO, stdin, stdout, stderr string, wg, cwg *sync.WaitGroup) error { - for name, dest := range map[string]func(wc io.WriteCloser, rc io.Closer){ - stdout: func(wc io.WriteCloser, rc io.Closer) { - wg.Add(1) - cwg.Add(1) - go func() { - cwg.Done() - p := bufPool.Get().(*[]byte) - defer bufPool.Put(p) - io.CopyBuffer(wc, rio.Stdout(), *p) - wg.Done() - wc.Close() - rc.Close() - }() - }, - stderr: func(wc io.WriteCloser, rc io.Closer) { - wg.Add(1) - cwg.Add(1) - go func() { - cwg.Done() - p := bufPool.Get().(*[]byte) - defer bufPool.Put(p) - - io.CopyBuffer(wc, rio.Stderr(), *p) - wg.Done() - wc.Close() - rc.Close() - }() + var sameFile io.WriteCloser + for _, i := range []struct { + name string + dest func(wc io.WriteCloser, rc io.Closer) + }{ + { + name: stdout, + dest: func(wc io.WriteCloser, rc io.Closer) { + wg.Add(1) + cwg.Add(1) + go func() { + cwg.Done() + p := bufPool.Get().(*[]byte) + defer bufPool.Put(p) + io.CopyBuffer(wc, rio.Stdout(), *p) + wg.Done() + wc.Close() + if rc != nil { + rc.Close() + } + }() + }, + }, { + name: stderr, + dest: func(wc io.WriteCloser, rc io.Closer) { + wg.Add(1) + cwg.Add(1) + go func() { + cwg.Done() + p := bufPool.Get().(*[]byte) + defer bufPool.Put(p) + io.CopyBuffer(wc, rio.Stderr(), *p) + wg.Done() + wc.Close() + if rc != nil { + rc.Close() + } + }() + }, }, } { - fw, err := fifo.OpenFifo(ctx, name, syscall.O_WRONLY, 0) + ok, err := isFifo(i.name) if err != nil { - return fmt.Errorf("containerd-shim: opening %s failed: %s", name, err) + return err } - fr, err := fifo.OpenFifo(ctx, name, syscall.O_RDONLY, 0) - if err != nil { - return fmt.Errorf("containerd-shim: opening %s failed: %s", name, err) + var ( + fw io.WriteCloser + fr io.Closer + ) + if ok { + if fw, err = fifo.OpenFifo(ctx, i.name, syscall.O_WRONLY, 0); err != nil { + return fmt.Errorf("containerd-shim: opening %s failed: %s", i.name, err) + } + if fr, err = fifo.OpenFifo(ctx, i.name, syscall.O_RDONLY, 0); err != nil { + return fmt.Errorf("containerd-shim: opening %s failed: %s", i.name, err) + } + } else { + if sameFile != nil { + i.dest(sameFile, nil) + continue + } + if fw, err = os.OpenFile(i.name, syscall.O_WRONLY|syscall.O_APPEND, 0); err != nil { + return fmt.Errorf("containerd-shim: opening %s failed: %s", i.name, err) + } + if stdout == stderr { + sameFile = fw + } } - dest(fw, fr) + i.dest(fw, fr) } if stdin == "" { rio.Stdin().Close() @@ -96,3 +128,19 @@ func copyPipes(ctx context.Context, rio runc.IO, stdin, stdout, stderr string, w }() return nil } + +// isFifo checks if a file is a fifo +// if the file does not exist then it returns false +func isFifo(path string) (bool, error) { + stat, err := os.Stat(path) + if err != nil { + if os.IsNotExist(err) { + return false, nil + } + return false, err + } + if stat.Mode()&os.ModeNamedPipe == os.ModeNamedPipe { + return true, nil + } + return false, nil +} diff --git a/vendor/github.com/containerd/containerd/linux/runtime.go b/vendor/github.com/containerd/containerd/linux/runtime.go index 1e1b77da7..2bf697831 100644 --- a/vendor/github.com/containerd/containerd/linux/runtime.go +++ b/vendor/github.com/containerd/containerd/linux/runtime.go @@ -186,7 
+186,7 @@ func (r *Runtime) Create(ctx context.Context, id string, opts runtime.CreateOpts } }() - shimopt := ShimLocal(r.events) + shimopt := ShimLocal(r.config, r.events) if !r.config.NoShim { var cgroup string if opts.Options != nil { @@ -224,7 +224,7 @@ func (r *Runtime) Create(ctx context.Context, id string, opts runtime.CreateOpts }).Warn("failed to clen up after killed shim") } } - shimopt = ShimRemote(r.config.Shim, r.address, cgroup, r.config.ShimDebug, exitHandler) + shimopt = ShimRemote(r.config, r.address, cgroup, exitHandler) } s, err := bundle.NewShimClient(ctx, namespace, shimopt, ropts) @@ -396,7 +396,7 @@ func (r *Runtime) loadTasks(ctx context.Context, ns string) ([]*Task, error) { ) ctx = namespaces.WithNamespace(ctx, ns) pid, _ := runc.ReadPidFile(filepath.Join(bundle.path, proc.InitPidFile)) - s, err := bundle.NewShimClient(ctx, ns, ShimConnect(func() { + s, err := bundle.NewShimClient(ctx, ns, ShimConnect(r.config, func() { err := r.cleanupAfterDeadShim(ctx, bundle, ns, id, pid) if err != nil { log.G(ctx).WithError(err).WithField("bundle", bundle.path). diff --git a/vendor/github.com/containerd/containerd/metadata/db.go b/vendor/github.com/containerd/containerd/metadata/db.go index f08e1d46c..7296d8caa 100644 --- a/vendor/github.com/containerd/containerd/metadata/db.go +++ b/vendor/github.com/containerd/containerd/metadata/db.go @@ -195,6 +195,15 @@ func (m *DB) Snapshotter(name string) snapshots.Snapshotter { return sn } +// Snapshotters returns all available snapshotters. +func (m *DB) Snapshotters() map[string]snapshots.Snapshotter { + ss := make(map[string]snapshots.Snapshotter, len(m.ss)) + for n, sn := range m.ss { + ss[n] = sn + } + return ss +} + // View runs a readonly transaction on the metadata store. func (m *DB) View(fn func(*bolt.Tx) error) error { return m.db.View(fn) diff --git a/vendor/github.com/containerd/containerd/mount/temp.go b/vendor/github.com/containerd/containerd/mount/temp.go index ec7a06bcb..5994759f3 100644 --- a/vendor/github.com/containerd/containerd/mount/temp.go +++ b/vendor/github.com/containerd/containerd/mount/temp.go @@ -1,3 +1,19 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + package mount import ( diff --git a/vendor/github.com/containerd/containerd/mount/temp_unix.go b/vendor/github.com/containerd/containerd/mount/temp_unix.go index 770631362..fcf90d6fb 100644 --- a/vendor/github.com/containerd/containerd/mount/temp_unix.go +++ b/vendor/github.com/containerd/containerd/mount/temp_unix.go @@ -1,5 +1,21 @@ // +build !windows +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ See the License for the specific language governing permissions and + limitations under the License. +*/ + package mount import ( diff --git a/vendor/github.com/containerd/containerd/mount/temp_unsupported.go b/vendor/github.com/containerd/containerd/mount/temp_unsupported.go index d3a188fc0..de3f4163a 100644 --- a/vendor/github.com/containerd/containerd/mount/temp_unsupported.go +++ b/vendor/github.com/containerd/containerd/mount/temp_unsupported.go @@ -1,5 +1,21 @@ // +build windows +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + package mount // SetTempMountLocation sets the temporary mount location diff --git a/vendor/github.com/containerd/containerd/platforms/platforms.go b/vendor/github.com/containerd/containerd/platforms/platforms.go index 77b6d8410..1f91ed288 100644 --- a/vendor/github.com/containerd/containerd/platforms/platforms.go +++ b/vendor/github.com/containerd/containerd/platforms/platforms.go @@ -122,18 +122,27 @@ var ( // Matcher matches platforms specifications, provided by an image or runtime. type Matcher interface { - Spec() specs.Platform Match(platform specs.Platform) bool } +// NewMatcher returns a simple matcher based on the provided platform +// specification. The returned matcher only looks for equality based on os, +// architecture and variant. +// +// One may implement their own matcher if this doesn't provide the the required +// functionality. +// +// Applications should opt to use `Match` over directly parsing specifiers. +func NewMatcher(platform specs.Platform) Matcher { + return &matcher{ + Platform: platform, + } +} + type matcher struct { specs.Platform } -func (m *matcher) Spec() specs.Platform { - return m.Platform -} - func (m *matcher) Match(platform specs.Platform) bool { normalized := Normalize(platform) return m.OS == normalized.OS && @@ -153,19 +162,17 @@ func (m *matcher) String() string { // value will be matched against the known set of operating systems, then fall // back to the known set of architectures. The missing component will be // inferred based on the local environment. -// -// Applications should opt to use `Match` over directly parsing specifiers. 
-func Parse(specifier string) (Matcher, error) { +func Parse(specifier string) (specs.Platform, error) { if strings.Contains(specifier, "*") { // TODO(stevvooe): need to work out exact wildcard handling - return nil, errors.Wrapf(errdefs.ErrInvalidArgument, "%q: wildcards not yet supported", specifier) + return specs.Platform{}, errors.Wrapf(errdefs.ErrInvalidArgument, "%q: wildcards not yet supported", specifier) } parts := strings.Split(specifier, "/") for _, part := range parts { if !specifierRe.MatchString(part) { - return nil, errors.Wrapf(errdefs.ErrInvalidArgument, "%q is an invalid component of %q: platform specifier component must match %q", part, specifier, specifierRe.String()) + return specs.Platform{}, errors.Wrapf(errdefs.ErrInvalidArgument, "%q is an invalid component of %q: platform specifier component must match %q", part, specifier, specifierRe.String()) } } @@ -183,35 +190,35 @@ func Parse(specifier string) (Matcher, error) { p.Architecture = runtime.GOARCH if p.Architecture == "arm" { // TODO(stevvooe): Resolve arm variant, if not v6 (default) - return nil, errors.Wrapf(errdefs.ErrNotImplemented, "arm support not fully implemented") + return specs.Platform{}, errors.Wrapf(errdefs.ErrNotImplemented, "arm support not fully implemented") } - return &matcher{p}, nil + return p, nil } p.Architecture, p.Variant = normalizeArch(parts[0], "") if isKnownArch(p.Architecture) { p.OS = runtime.GOOS - return &matcher{p}, nil + return p, nil } - return nil, errors.Wrapf(errdefs.ErrInvalidArgument, "%q: unknown operating system or architecture", specifier) + return specs.Platform{}, errors.Wrapf(errdefs.ErrInvalidArgument, "%q: unknown operating system or architecture", specifier) case 2: // In this case, we treat as a regular os/arch pair. We don't care // about whether or not we know of the platform. p.OS = normalizeOS(parts[0]) p.Architecture, p.Variant = normalizeArch(parts[1], "") - return &matcher{p}, nil + return p, nil case 3: // we have a fully specified variant, this is rare p.OS = normalizeOS(parts[0]) p.Architecture, p.Variant = normalizeArch(parts[1], parts[2]) - return &matcher{p}, nil + return p, nil } - return nil, errors.Wrapf(errdefs.ErrInvalidArgument, "%q: cannot parse platform specifier", specifier) + return specs.Platform{}, errors.Wrapf(errdefs.ErrInvalidArgument, "%q: cannot parse platform specifier", specifier) } // Format returns a string specifier from the provided platform specification. 
diff --git a/vendor/github.com/containerd/containerd/plugin/plugin.go b/vendor/github.com/containerd/containerd/plugin/plugin.go index 470429a0c..584547aff 100644 --- a/vendor/github.com/containerd/containerd/plugin/plugin.go +++ b/vendor/github.com/containerd/containerd/plugin/plugin.go @@ -58,6 +58,8 @@ const ( AllPlugins Type = "*" // RuntimePlugin implements a runtime RuntimePlugin Type = "io.containerd.runtime.v1" + // ServicePlugin implements a internal service + ServicePlugin Type = "io.containerd.service.v1" // GRPCPlugin implements a grpc service GRPCPlugin Type = "io.containerd.grpc.v1" // SnapshotPlugin implements a snapshotter diff --git a/vendor/github.com/containerd/containerd/remotes/docker/resolver.go b/vendor/github.com/containerd/containerd/remotes/docker/resolver.go index 06b1724b4..371027489 100644 --- a/vendor/github.com/containerd/containerd/remotes/docker/resolver.go +++ b/vendor/github.com/containerd/containerd/remotes/docker/resolver.go @@ -498,7 +498,8 @@ func (r *dockerBase) fetchTokenWithOAuth(ctx context.Context, to tokenOptions) ( // Registries without support for POST may return 404 for POST /v2/token. // As of September 2017, GCR is known to return 404. - if (resp.StatusCode == 405 && r.username != "") || resp.StatusCode == 404 { + // As of February 2018, JFrog Artifactory is known to return 401. + if (resp.StatusCode == 405 && r.username != "") || resp.StatusCode == 404 || resp.StatusCode == 401 { return r.getToken(ctx, to) } else if resp.StatusCode < 200 || resp.StatusCode >= 400 { b, _ := ioutil.ReadAll(io.LimitReader(resp.Body, 64000)) // 64KB diff --git a/vendor/github.com/containerd/containerd/services.go b/vendor/github.com/containerd/containerd/services.go new file mode 100644 index 000000000..daa7b897e --- /dev/null +++ b/vendor/github.com/containerd/containerd/services.go @@ -0,0 +1,112 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package containerd + +import ( + containersapi "github.com/containerd/containerd/api/services/containers/v1" + "github.com/containerd/containerd/api/services/diff/v1" + imagesapi "github.com/containerd/containerd/api/services/images/v1" + "github.com/containerd/containerd/api/services/leases/v1" + namespacesapi "github.com/containerd/containerd/api/services/namespaces/v1" + "github.com/containerd/containerd/api/services/tasks/v1" + "github.com/containerd/containerd/containers" + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/images" + "github.com/containerd/containerd/namespaces" + "github.com/containerd/containerd/snapshots" +) + +type services struct { + contentStore content.Store + imageStore images.Store + containerStore containers.Store + namespaceStore namespaces.Store + snapshotters map[string]snapshots.Snapshotter + taskService tasks.TasksClient + diffService DiffService + eventService EventService + leasesService leases.LeasesClient +} + +// ServicesOpt allows callers to set options on the services +type ServicesOpt func(c *services) + +// WithContentStore sets the content store. +func WithContentStore(contentStore content.Store) ServicesOpt { + return func(s *services) { + s.contentStore = contentStore + } +} + +// WithImageService sets the image service. +func WithImageService(imageService imagesapi.ImagesClient) ServicesOpt { + return func(s *services) { + s.imageStore = NewImageStoreFromClient(imageService) + } +} + +// WithSnapshotters sets the snapshotters. +func WithSnapshotters(snapshotters map[string]snapshots.Snapshotter) ServicesOpt { + return func(s *services) { + s.snapshotters = make(map[string]snapshots.Snapshotter) + for n, sn := range snapshotters { + s.snapshotters[n] = sn + } + } +} + +// WithContainerService sets the container service. +func WithContainerService(containerService containersapi.ContainersClient) ServicesOpt { + return func(s *services) { + s.containerStore = NewRemoteContainerStore(containerService) + } +} + +// WithTaskService sets the task service. +func WithTaskService(taskService tasks.TasksClient) ServicesOpt { + return func(s *services) { + s.taskService = taskService + } +} + +// WithDiffService sets the diff service. +func WithDiffService(diffService diff.DiffClient) ServicesOpt { + return func(s *services) { + s.diffService = NewDiffServiceFromClient(diffService) + } +} + +// WithEventService sets the event service. +func WithEventService(eventService EventService) ServicesOpt { + return func(s *services) { + s.eventService = eventService + } +} + +// WithNamespaceService sets the namespace service. +func WithNamespaceService(namespaceService namespacesapi.NamespacesClient) ServicesOpt { + return func(s *services) { + s.namespaceStore = NewNamespaceStoreFromClient(namespaceService) + } +} + +// WithLeasesService sets the lease service. +func WithLeasesService(leasesService leases.LeasesClient) ServicesOpt { + return func(s *services) { + s.leasesService = leasesService + } +} diff --git a/vendor/github.com/containerd/containerd/services/containers/local.go b/vendor/github.com/containerd/containerd/services/containers/local.go new file mode 100644 index 000000000..1e59da6c3 --- /dev/null +++ b/vendor/github.com/containerd/containerd/services/containers/local.go @@ -0,0 +1,189 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package containers + +import ( + "github.com/boltdb/bolt" + eventstypes "github.com/containerd/containerd/api/events" + api "github.com/containerd/containerd/api/services/containers/v1" + "github.com/containerd/containerd/containers" + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/events" + "github.com/containerd/containerd/metadata" + "github.com/containerd/containerd/plugin" + "github.com/containerd/containerd/services" + ptypes "github.com/gogo/protobuf/types" + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +func init() { + plugin.Register(&plugin.Registration{ + Type: plugin.ServicePlugin, + ID: services.ContainersService, + Requires: []plugin.Type{ + plugin.MetadataPlugin, + }, + InitFn: func(ic *plugin.InitContext) (interface{}, error) { + m, err := ic.Get(plugin.MetadataPlugin) + if err != nil { + return nil, err + } + return &local{ + db: m.(*metadata.DB), + publisher: ic.Events, + }, nil + }, + }) +} + +type local struct { + db *metadata.DB + publisher events.Publisher +} + +var _ api.ContainersClient = &local{} + +func (l *local) Get(ctx context.Context, req *api.GetContainerRequest, _ ...grpc.CallOption) (*api.GetContainerResponse, error) { + var resp api.GetContainerResponse + + return &resp, errdefs.ToGRPC(l.withStoreView(ctx, func(ctx context.Context, store containers.Store) error { + container, err := store.Get(ctx, req.ID) + if err != nil { + return err + } + containerpb := containerToProto(&container) + resp.Container = containerpb + + return nil + })) +} + +func (l *local) List(ctx context.Context, req *api.ListContainersRequest, _ ...grpc.CallOption) (*api.ListContainersResponse, error) { + var resp api.ListContainersResponse + + return &resp, errdefs.ToGRPC(l.withStoreView(ctx, func(ctx context.Context, store containers.Store) error { + containers, err := store.List(ctx, req.Filters...) 
+ if err != nil { + return err + } + + resp.Containers = containersToProto(containers) + return nil + })) +} + +func (l *local) Create(ctx context.Context, req *api.CreateContainerRequest, _ ...grpc.CallOption) (*api.CreateContainerResponse, error) { + var resp api.CreateContainerResponse + + if err := l.withStoreUpdate(ctx, func(ctx context.Context, store containers.Store) error { + container := containerFromProto(&req.Container) + + created, err := store.Create(ctx, container) + if err != nil { + return err + } + + resp.Container = containerToProto(&created) + + return nil + }); err != nil { + return &resp, errdefs.ToGRPC(err) + } + if err := l.publisher.Publish(ctx, "/containers/create", &eventstypes.ContainerCreate{ + ID: resp.Container.ID, + Image: resp.Container.Image, + Runtime: &eventstypes.ContainerCreate_Runtime{ + Name: resp.Container.Runtime.Name, + Options: resp.Container.Runtime.Options, + }, + }); err != nil { + return &resp, err + } + + return &resp, nil +} + +func (l *local) Update(ctx context.Context, req *api.UpdateContainerRequest, _ ...grpc.CallOption) (*api.UpdateContainerResponse, error) { + if req.Container.ID == "" { + return nil, status.Errorf(codes.InvalidArgument, "Container.ID required") + } + var ( + resp api.UpdateContainerResponse + container = containerFromProto(&req.Container) + ) + + if err := l.withStoreUpdate(ctx, func(ctx context.Context, store containers.Store) error { + var fieldpaths []string + if req.UpdateMask != nil && len(req.UpdateMask.Paths) > 0 { + for _, path := range req.UpdateMask.Paths { + fieldpaths = append(fieldpaths, path) + } + } + + updated, err := store.Update(ctx, container, fieldpaths...) + if err != nil { + return err + } + + resp.Container = containerToProto(&updated) + return nil + }); err != nil { + return &resp, errdefs.ToGRPC(err) + } + + if err := l.publisher.Publish(ctx, "/containers/update", &eventstypes.ContainerUpdate{ + ID: resp.Container.ID, + Image: resp.Container.Image, + Labels: resp.Container.Labels, + SnapshotKey: resp.Container.SnapshotKey, + }); err != nil { + return &resp, err + } + + return &resp, nil +} + +func (l *local) Delete(ctx context.Context, req *api.DeleteContainerRequest, _ ...grpc.CallOption) (*ptypes.Empty, error) { + if err := l.withStoreUpdate(ctx, func(ctx context.Context, store containers.Store) error { + return store.Delete(ctx, req.ID) + }); err != nil { + return &ptypes.Empty{}, errdefs.ToGRPC(err) + } + + if err := l.publisher.Publish(ctx, "/containers/delete", &eventstypes.ContainerDelete{ + ID: req.ID, + }); err != nil { + return &ptypes.Empty{}, err + } + + return &ptypes.Empty{}, nil +} + +func (l *local) withStore(ctx context.Context, fn func(ctx context.Context, store containers.Store) error) func(tx *bolt.Tx) error { + return func(tx *bolt.Tx) error { return fn(ctx, metadata.NewContainerStore(tx)) } +} + +func (l *local) withStoreView(ctx context.Context, fn func(ctx context.Context, store containers.Store) error) error { + return l.db.View(l.withStore(ctx, fn)) +} + +func (l *local) withStoreUpdate(ctx context.Context, fn func(ctx context.Context, store containers.Store) error) error { + return l.db.Update(l.withStore(ctx, fn)) +} diff --git a/vendor/github.com/containerd/containerd/services/containers/service.go b/vendor/github.com/containerd/containerd/services/containers/service.go index 162cae630..f25c0b0ff 100644 --- a/vendor/github.com/containerd/containerd/services/containers/service.go +++ b/vendor/github.com/containerd/containerd/services/containers/service.go @@ -17,19 
+17,13 @@ package containers import ( - "github.com/boltdb/bolt" - eventstypes "github.com/containerd/containerd/api/events" api "github.com/containerd/containerd/api/services/containers/v1" - "github.com/containerd/containerd/containers" - "github.com/containerd/containerd/errdefs" - "github.com/containerd/containerd/events" - "github.com/containerd/containerd/metadata" "github.com/containerd/containerd/plugin" + "github.com/containerd/containerd/services" ptypes "github.com/gogo/protobuf/types" + "github.com/pkg/errors" "golang.org/x/net/context" "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" ) func init() { @@ -37,27 +31,31 @@ func init() { Type: plugin.GRPCPlugin, ID: "containers", Requires: []plugin.Type{ - plugin.MetadataPlugin, + plugin.ServicePlugin, }, InitFn: func(ic *plugin.InitContext) (interface{}, error) { - m, err := ic.Get(plugin.MetadataPlugin) + plugins, err := ic.GetByType(plugin.ServicePlugin) if err != nil { return nil, err } - return NewService(m.(*metadata.DB), ic.Events), nil + p, ok := plugins[services.ContainersService] + if !ok { + return nil, errors.New("containers service not found") + } + i, err := p.Instance() + if err != nil { + return nil, err + } + return &service{local: i.(api.ContainersClient)}, nil }, }) } type service struct { - db *metadata.DB - publisher events.Publisher + local api.ContainersClient } -// NewService returns the container GRPC server -func NewService(db *metadata.DB, publisher events.Publisher) api.ContainersServer { - return &service{db: db, publisher: publisher} -} +var _ api.ContainersServer = &service{} func (s *service) Register(server *grpc.Server) error { api.RegisterContainersServer(server, s) @@ -65,129 +63,21 @@ func (s *service) Register(server *grpc.Server) error { } func (s *service) Get(ctx context.Context, req *api.GetContainerRequest) (*api.GetContainerResponse, error) { - var resp api.GetContainerResponse - - return &resp, errdefs.ToGRPC(s.withStoreView(ctx, func(ctx context.Context, store containers.Store) error { - container, err := store.Get(ctx, req.ID) - if err != nil { - return err - } - containerpb := containerToProto(&container) - resp.Container = containerpb - - return nil - })) + return s.local.Get(ctx, req) } func (s *service) List(ctx context.Context, req *api.ListContainersRequest) (*api.ListContainersResponse, error) { - var resp api.ListContainersResponse - - return &resp, errdefs.ToGRPC(s.withStoreView(ctx, func(ctx context.Context, store containers.Store) error { - containers, err := store.List(ctx, req.Filters...) 
- if err != nil { - return err - } - - resp.Containers = containersToProto(containers) - return nil - })) + return s.local.List(ctx, req) } func (s *service) Create(ctx context.Context, req *api.CreateContainerRequest) (*api.CreateContainerResponse, error) { - var resp api.CreateContainerResponse - - if err := s.withStoreUpdate(ctx, func(ctx context.Context, store containers.Store) error { - container := containerFromProto(&req.Container) - - created, err := store.Create(ctx, container) - if err != nil { - return err - } - - resp.Container = containerToProto(&created) - - return nil - }); err != nil { - return &resp, errdefs.ToGRPC(err) - } - if err := s.publisher.Publish(ctx, "/containers/create", &eventstypes.ContainerCreate{ - ID: resp.Container.ID, - Image: resp.Container.Image, - Runtime: &eventstypes.ContainerCreate_Runtime{ - Name: resp.Container.Runtime.Name, - Options: resp.Container.Runtime.Options, - }, - }); err != nil { - return &resp, err - } - - return &resp, nil + return s.local.Create(ctx, req) } func (s *service) Update(ctx context.Context, req *api.UpdateContainerRequest) (*api.UpdateContainerResponse, error) { - if req.Container.ID == "" { - return nil, status.Errorf(codes.InvalidArgument, "Container.ID required") - } - var ( - resp api.UpdateContainerResponse - container = containerFromProto(&req.Container) - ) - - if err := s.withStoreUpdate(ctx, func(ctx context.Context, store containers.Store) error { - var fieldpaths []string - if req.UpdateMask != nil && len(req.UpdateMask.Paths) > 0 { - for _, path := range req.UpdateMask.Paths { - fieldpaths = append(fieldpaths, path) - } - } - - updated, err := store.Update(ctx, container, fieldpaths...) - if err != nil { - return err - } - - resp.Container = containerToProto(&updated) - return nil - }); err != nil { - return &resp, errdefs.ToGRPC(err) - } - - if err := s.publisher.Publish(ctx, "/containers/update", &eventstypes.ContainerUpdate{ - ID: resp.Container.ID, - Image: resp.Container.Image, - Labels: resp.Container.Labels, - SnapshotKey: resp.Container.SnapshotKey, - }); err != nil { - return &resp, err - } - - return &resp, nil + return s.local.Update(ctx, req) } func (s *service) Delete(ctx context.Context, req *api.DeleteContainerRequest) (*ptypes.Empty, error) { - if err := s.withStoreUpdate(ctx, func(ctx context.Context, store containers.Store) error { - return store.Delete(ctx, req.ID) - }); err != nil { - return &ptypes.Empty{}, errdefs.ToGRPC(err) - } - - if err := s.publisher.Publish(ctx, "/containers/delete", &eventstypes.ContainerDelete{ - ID: req.ID, - }); err != nil { - return &ptypes.Empty{}, err - } - - return &ptypes.Empty{}, nil -} - -func (s *service) withStore(ctx context.Context, fn func(ctx context.Context, store containers.Store) error) func(tx *bolt.Tx) error { - return func(tx *bolt.Tx) error { return fn(ctx, metadata.NewContainerStore(tx)) } -} - -func (s *service) withStoreView(ctx context.Context, fn func(ctx context.Context, store containers.Store) error) error { - return s.db.View(s.withStore(ctx, fn)) -} - -func (s *service) withStoreUpdate(ctx context.Context, fn func(ctx context.Context, store containers.Store) error) error { - return s.db.Update(s.withStore(ctx, fn)) + return s.local.Delete(ctx, req) } diff --git a/vendor/github.com/containerd/containerd/services/content/service.go b/vendor/github.com/containerd/containerd/services/content/service.go index d24fb6eba..a2c87a4d5 100644 --- a/vendor/github.com/containerd/containerd/services/content/service.go +++ 
b/vendor/github.com/containerd/containerd/services/content/service.go @@ -20,14 +20,12 @@ import ( "io" "sync" - eventstypes "github.com/containerd/containerd/api/events" api "github.com/containerd/containerd/api/services/content/v1" "github.com/containerd/containerd/content" "github.com/containerd/containerd/errdefs" - "github.com/containerd/containerd/events" "github.com/containerd/containerd/log" - "github.com/containerd/containerd/metadata" "github.com/containerd/containerd/plugin" + "github.com/containerd/containerd/services" ptypes "github.com/gogo/protobuf/types" digest "github.com/opencontainers/go-digest" "github.com/pkg/errors" @@ -39,8 +37,7 @@ import ( ) type service struct { - store content.Store - publisher events.Publisher + store content.Store } var bufPool = sync.Pool{ @@ -57,26 +54,29 @@ func init() { Type: plugin.GRPCPlugin, ID: "content", Requires: []plugin.Type{ - plugin.MetadataPlugin, + plugin.ServicePlugin, }, InitFn: func(ic *plugin.InitContext) (interface{}, error) { - m, err := ic.Get(plugin.MetadataPlugin) + plugins, err := ic.GetByType(plugin.ServicePlugin) if err != nil { return nil, err } - - s, err := NewService(m.(*metadata.DB).ContentStore(), ic.Events) - return s, err + p, ok := plugins[services.ContentService] + if !ok { + return nil, errors.New("content store service not found") + } + cs, err := p.Instance() + if err != nil { + return nil, err + } + return newService(cs.(content.Store)), nil }, }) } -// NewService returns the content GRPC server -func NewService(cs content.Store, publisher events.Publisher) (api.ContentServer, error) { - return &service{ - store: cs, - publisher: publisher, - }, nil +// newService returns the content GRPC server +func newService(cs content.Store) api.ContentServer { + return &service{store: cs} } func (s *service) Register(server *grpc.Server) error { @@ -166,12 +166,6 @@ func (s *service) Delete(ctx context.Context, req *api.DeleteContentRequest) (*p return nil, errdefs.ToGRPC(err) } - if err := s.publisher.Publish(ctx, "/content/delete", &eventstypes.ContentDelete{ - Digest: req.Digest, - }); err != nil { - return nil, err - } - return &ptypes.Empty{}, nil } diff --git a/vendor/github.com/containerd/containerd/services/content/store.go b/vendor/github.com/containerd/containerd/services/content/store.go new file mode 100644 index 000000000..3de91d37c --- /dev/null +++ b/vendor/github.com/containerd/containerd/services/content/store.go @@ -0,0 +1,71 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package content + +import ( + "context" + + eventstypes "github.com/containerd/containerd/api/events" + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/events" + "github.com/containerd/containerd/metadata" + "github.com/containerd/containerd/plugin" + "github.com/containerd/containerd/services" + digest "github.com/opencontainers/go-digest" +) + +// store wraps content.Store with proper event published. 
+type store struct { + content.Store + publisher events.Publisher +} + +func init() { + plugin.Register(&plugin.Registration{ + Type: plugin.ServicePlugin, + ID: services.ContentService, + Requires: []plugin.Type{ + plugin.MetadataPlugin, + }, + InitFn: func(ic *plugin.InitContext) (interface{}, error) { + m, err := ic.Get(plugin.MetadataPlugin) + if err != nil { + return nil, err + } + + s, err := newContentStore(m.(*metadata.DB).ContentStore(), ic.Events) + return s, err + }, + }) +} + +func newContentStore(cs content.Store, publisher events.Publisher) (content.Store, error) { + return &store{ + Store: cs, + publisher: publisher, + }, nil +} + +func (s *store) Delete(ctx context.Context, dgst digest.Digest) error { + if err := s.Store.Delete(ctx, dgst); err != nil { + return err + } + // TODO: Consider whether we should return error here. + return s.publisher.Publish(ctx, "/content/delete", &eventstypes.ContentDelete{ + Digest: dgst, + }) +} diff --git a/vendor/github.com/containerd/containerd/services/diff/local.go b/vendor/github.com/containerd/containerd/services/diff/local.go new file mode 100644 index 000000000..0335b1409 --- /dev/null +++ b/vendor/github.com/containerd/containerd/services/diff/local.go @@ -0,0 +1,178 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package diff + +import ( + diffapi "github.com/containerd/containerd/api/services/diff/v1" + "github.com/containerd/containerd/api/types" + "github.com/containerd/containerd/diff" + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/mount" + "github.com/containerd/containerd/plugin" + "github.com/containerd/containerd/services" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + "golang.org/x/net/context" + "google.golang.org/grpc" +) + +type config struct { + // Order is the order of preference in which to try diff algorithms, the + // first differ which is supported is used. + // Note when multiple differs may be supported, this order will be + // respected for which is choosen. Each differ should return the same + // correct output, allowing any ordering to be used to prefer + // more optimimal implementations. 
+ Order []string `toml:"default"` +} + +type differ interface { + diff.Comparer + diff.Applier +} + +func init() { + plugin.Register(&plugin.Registration{ + Type: plugin.ServicePlugin, + ID: services.DiffService, + Requires: []plugin.Type{ + plugin.DiffPlugin, + }, + Config: defaultDifferConfig, + InitFn: func(ic *plugin.InitContext) (interface{}, error) { + differs, err := ic.GetByType(plugin.DiffPlugin) + if err != nil { + return nil, err + } + + orderedNames := ic.Config.(*config).Order + ordered := make([]differ, len(orderedNames)) + for i, n := range orderedNames { + differp, ok := differs[n] + if !ok { + return nil, errors.Errorf("needed differ not loaded: %s", n) + } + d, err := differp.Instance() + if err != nil { + return nil, errors.Wrapf(err, "could not load required differ due plugin init error: %s", n) + } + + ordered[i], ok = d.(differ) + if !ok { + return nil, errors.Errorf("differ does not implement Comparer and Applier interface: %s", n) + } + } + + return &local{ + differs: ordered, + }, nil + }, + }) +} + +type local struct { + differs []differ +} + +var _ diffapi.DiffClient = &local{} + +func (l *local) Apply(ctx context.Context, er *diffapi.ApplyRequest, _ ...grpc.CallOption) (*diffapi.ApplyResponse, error) { + var ( + ocidesc ocispec.Descriptor + err error + desc = toDescriptor(er.Diff) + mounts = toMounts(er.Mounts) + ) + + for _, differ := range l.differs { + ocidesc, err = differ.Apply(ctx, desc, mounts) + if !errdefs.IsNotImplemented(err) { + break + } + } + + if err != nil { + return nil, errdefs.ToGRPC(err) + } + + return &diffapi.ApplyResponse{ + Applied: fromDescriptor(ocidesc), + }, nil + +} + +func (l *local) Diff(ctx context.Context, dr *diffapi.DiffRequest, _ ...grpc.CallOption) (*diffapi.DiffResponse, error) { + var ( + ocidesc ocispec.Descriptor + err error + aMounts = toMounts(dr.Left) + bMounts = toMounts(dr.Right) + ) + + var opts []diff.Opt + if dr.MediaType != "" { + opts = append(opts, diff.WithMediaType(dr.MediaType)) + } + if dr.Ref != "" { + opts = append(opts, diff.WithReference(dr.Ref)) + } + if dr.Labels != nil { + opts = append(opts, diff.WithLabels(dr.Labels)) + } + + for _, d := range l.differs { + ocidesc, err = d.Compare(ctx, aMounts, bMounts, opts...) 
+ if !errdefs.IsNotImplemented(err) { + break + } + } + if err != nil { + return nil, errdefs.ToGRPC(err) + } + + return &diffapi.DiffResponse{ + Diff: fromDescriptor(ocidesc), + }, nil +} + +func toMounts(apim []*types.Mount) []mount.Mount { + mounts := make([]mount.Mount, len(apim)) + for i, m := range apim { + mounts[i] = mount.Mount{ + Type: m.Type, + Source: m.Source, + Options: m.Options, + } + } + return mounts +} + +func toDescriptor(d *types.Descriptor) ocispec.Descriptor { + return ocispec.Descriptor{ + MediaType: d.MediaType, + Digest: d.Digest, + Size: d.Size_, + } +} + +func fromDescriptor(d ocispec.Descriptor) *types.Descriptor { + return &types.Descriptor{ + MediaType: d.MediaType, + Digest: d.Digest, + Size_: d.Size, + } +} diff --git a/vendor/github.com/containerd/containerd/services/diff/service.go b/vendor/github.com/containerd/containerd/services/diff/service.go index 7053dace1..e12c4a062 100644 --- a/vendor/github.com/containerd/containerd/services/diff/service.go +++ b/vendor/github.com/containerd/containerd/services/diff/service.go @@ -18,163 +18,53 @@ package diff import ( diffapi "github.com/containerd/containerd/api/services/diff/v1" - "github.com/containerd/containerd/api/types" - "github.com/containerd/containerd/diff" - "github.com/containerd/containerd/errdefs" - "github.com/containerd/containerd/mount" "github.com/containerd/containerd/plugin" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/containerd/containerd/services" "github.com/pkg/errors" "golang.org/x/net/context" "google.golang.org/grpc" ) -type config struct { - // Order is the order of preference in which to try diff algorithms, the - // first differ which is supported is used. - // Note when multiple differs may be supported, this order will be - // respected for which is choosen. Each differ should return the same - // correct output, allowing any ordering to be used to prefer - // more optimimal implementations. 
- Order []string `toml:"default"` -} - -type differ interface { - diff.Comparer - diff.Applier -} - func init() { plugin.Register(&plugin.Registration{ Type: plugin.GRPCPlugin, ID: "diff", Requires: []plugin.Type{ - plugin.DiffPlugin, + plugin.ServicePlugin, }, - Config: defaultDifferConfig, InitFn: func(ic *plugin.InitContext) (interface{}, error) { - differs, err := ic.GetByType(plugin.DiffPlugin) + plugins, err := ic.GetByType(plugin.ServicePlugin) if err != nil { return nil, err } - - orderedNames := ic.Config.(*config).Order - ordered := make([]differ, len(orderedNames)) - for i, n := range orderedNames { - differp, ok := differs[n] - if !ok { - return nil, errors.Errorf("needed differ not loaded: %s", n) - } - d, err := differp.Instance() - if err != nil { - return nil, errors.Wrapf(err, "could not load required differ due plugin init error: %s", n) - } - - ordered[i], ok = d.(differ) - if !ok { - return nil, errors.Errorf("differ does not implement Comparer and Applier interface: %s", n) - } + p, ok := plugins[services.DiffService] + if !ok { + return nil, errors.New("diff service not found") } - - return &service{ - differs: ordered, - }, nil + i, err := p.Instance() + if err != nil { + return nil, err + } + return &service{local: i.(diffapi.DiffClient)}, nil }, }) } type service struct { - differs []differ + local diffapi.DiffClient } +var _ diffapi.DiffServer = &service{} + func (s *service) Register(gs *grpc.Server) error { diffapi.RegisterDiffServer(gs, s) return nil } func (s *service) Apply(ctx context.Context, er *diffapi.ApplyRequest) (*diffapi.ApplyResponse, error) { - var ( - ocidesc ocispec.Descriptor - err error - desc = toDescriptor(er.Diff) - mounts = toMounts(er.Mounts) - ) - - for _, differ := range s.differs { - ocidesc, err = differ.Apply(ctx, desc, mounts) - if !errdefs.IsNotImplemented(err) { - break - } - } - - if err != nil { - return nil, errdefs.ToGRPC(err) - } - - return &diffapi.ApplyResponse{ - Applied: fromDescriptor(ocidesc), - }, nil - + return s.local.Apply(ctx, er) } func (s *service) Diff(ctx context.Context, dr *diffapi.DiffRequest) (*diffapi.DiffResponse, error) { - var ( - ocidesc ocispec.Descriptor - err error - aMounts = toMounts(dr.Left) - bMounts = toMounts(dr.Right) - ) - - var opts []diff.Opt - if dr.MediaType != "" { - opts = append(opts, diff.WithMediaType(dr.MediaType)) - } - if dr.Ref != "" { - opts = append(opts, diff.WithReference(dr.Ref)) - } - if dr.Labels != nil { - opts = append(opts, diff.WithLabels(dr.Labels)) - } - - for _, d := range s.differs { - ocidesc, err = d.Compare(ctx, aMounts, bMounts, opts...) 
- if !errdefs.IsNotImplemented(err) { - break - } - } - if err != nil { - return nil, errdefs.ToGRPC(err) - } - - return &diffapi.DiffResponse{ - Diff: fromDescriptor(ocidesc), - }, nil -} - -func toMounts(apim []*types.Mount) []mount.Mount { - mounts := make([]mount.Mount, len(apim)) - for i, m := range apim { - mounts[i] = mount.Mount{ - Type: m.Type, - Source: m.Source, - Options: m.Options, - } - } - return mounts -} - -func toDescriptor(d *types.Descriptor) ocispec.Descriptor { - return ocispec.Descriptor{ - MediaType: d.MediaType, - Digest: d.Digest, - Size: d.Size_, - } -} - -func fromDescriptor(d ocispec.Descriptor) *types.Descriptor { - return &types.Descriptor{ - MediaType: d.MediaType, - Digest: d.Digest, - Size_: d.Size, - } + return s.local.Diff(ctx, dr) } diff --git a/vendor/github.com/containerd/containerd/services/images/local.go b/vendor/github.com/containerd/containerd/services/images/local.go new file mode 100644 index 000000000..1cca1a42a --- /dev/null +++ b/vendor/github.com/containerd/containerd/services/images/local.go @@ -0,0 +1,183 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package images + +import ( + gocontext "context" + + eventstypes "github.com/containerd/containerd/api/events" + imagesapi "github.com/containerd/containerd/api/services/images/v1" + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/events" + "github.com/containerd/containerd/gc" + "github.com/containerd/containerd/images" + "github.com/containerd/containerd/log" + "github.com/containerd/containerd/metadata" + "github.com/containerd/containerd/plugin" + "github.com/containerd/containerd/services" + ptypes "github.com/gogo/protobuf/types" + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +func init() { + plugin.Register(&plugin.Registration{ + Type: plugin.ServicePlugin, + ID: services.ImagesService, + Requires: []plugin.Type{ + plugin.MetadataPlugin, + plugin.GCPlugin, + }, + InitFn: func(ic *plugin.InitContext) (interface{}, error) { + m, err := ic.Get(plugin.MetadataPlugin) + if err != nil { + return nil, err + } + g, err := ic.Get(plugin.GCPlugin) + if err != nil { + return nil, err + } + + return &local{ + store: metadata.NewImageStore(m.(*metadata.DB)), + publisher: ic.Events, + gc: g.(gcScheduler), + }, nil + }, + }) +} + +type gcScheduler interface { + ScheduleAndWait(gocontext.Context) (gc.Stats, error) +} + +type local struct { + store images.Store + gc gcScheduler + publisher events.Publisher +} + +var _ imagesapi.ImagesClient = &local{} + +func (l *local) Get(ctx context.Context, req *imagesapi.GetImageRequest, _ ...grpc.CallOption) (*imagesapi.GetImageResponse, error) { + image, err := l.store.Get(ctx, req.Name) + if err != nil { + return nil, errdefs.ToGRPC(err) + } + + imagepb := imageToProto(&image) + return &imagesapi.GetImageResponse{ + Image: &imagepb, + }, nil +} + +func (l *local) List(ctx context.Context, req 
*imagesapi.ListImagesRequest, _ ...grpc.CallOption) (*imagesapi.ListImagesResponse, error) { + images, err := l.store.List(ctx, req.Filters...) + if err != nil { + return nil, errdefs.ToGRPC(err) + } + + return &imagesapi.ListImagesResponse{ + Images: imagesToProto(images), + }, nil +} + +func (l *local) Create(ctx context.Context, req *imagesapi.CreateImageRequest, _ ...grpc.CallOption) (*imagesapi.CreateImageResponse, error) { + log.G(ctx).WithField("name", req.Image.Name).WithField("target", req.Image.Target.Digest).Debugf("create image") + if req.Image.Name == "" { + return nil, status.Errorf(codes.InvalidArgument, "Image.Name required") + } + + var ( + image = imageFromProto(&req.Image) + resp imagesapi.CreateImageResponse + ) + created, err := l.store.Create(ctx, image) + if err != nil { + return nil, errdefs.ToGRPC(err) + } + + resp.Image = imageToProto(&created) + + if err := l.publisher.Publish(ctx, "/images/create", &eventstypes.ImageCreate{ + Name: resp.Image.Name, + Labels: resp.Image.Labels, + }); err != nil { + return nil, err + } + + return &resp, nil + +} + +func (l *local) Update(ctx context.Context, req *imagesapi.UpdateImageRequest, _ ...grpc.CallOption) (*imagesapi.UpdateImageResponse, error) { + if req.Image.Name == "" { + return nil, status.Errorf(codes.InvalidArgument, "Image.Name required") + } + + var ( + image = imageFromProto(&req.Image) + resp imagesapi.UpdateImageResponse + fieldpaths []string + ) + + if req.UpdateMask != nil && len(req.UpdateMask.Paths) > 0 { + for _, path := range req.UpdateMask.Paths { + fieldpaths = append(fieldpaths, path) + } + } + + updated, err := l.store.Update(ctx, image, fieldpaths...) + if err != nil { + return nil, errdefs.ToGRPC(err) + } + + resp.Image = imageToProto(&updated) + + if err := l.publisher.Publish(ctx, "/images/update", &eventstypes.ImageUpdate{ + Name: resp.Image.Name, + Labels: resp.Image.Labels, + }); err != nil { + return nil, err + } + + return &resp, nil +} + +func (l *local) Delete(ctx context.Context, req *imagesapi.DeleteImageRequest, _ ...grpc.CallOption) (*ptypes.Empty, error) { + log.G(ctx).WithField("name", req.Name).Debugf("delete image") + + if err := l.store.Delete(ctx, req.Name); err != nil { + return nil, errdefs.ToGRPC(err) + } + + if err := l.publisher.Publish(ctx, "/images/delete", &eventstypes.ImageDelete{ + Name: req.Name, + }); err != nil { + return nil, err + } + + if req.Sync { + if _, err := l.gc.ScheduleAndWait(ctx); err != nil { + return nil, err + } + } + + return &ptypes.Empty{}, nil +} diff --git a/vendor/github.com/containerd/containerd/services/images/service.go b/vendor/github.com/containerd/containerd/services/images/service.go index d17c684ae..a025c66c9 100644 --- a/vendor/github.com/containerd/containerd/services/images/service.go +++ b/vendor/github.com/containerd/containerd/services/images/service.go @@ -17,22 +17,13 @@ package images import ( - gocontext "context" - - eventstypes "github.com/containerd/containerd/api/events" imagesapi "github.com/containerd/containerd/api/services/images/v1" - "github.com/containerd/containerd/errdefs" - "github.com/containerd/containerd/events" - "github.com/containerd/containerd/gc" - "github.com/containerd/containerd/images" - "github.com/containerd/containerd/log" - "github.com/containerd/containerd/metadata" "github.com/containerd/containerd/plugin" + "github.com/containerd/containerd/services" ptypes "github.com/gogo/protobuf/types" + "github.com/pkg/errors" "golang.org/x/net/context" "google.golang.org/grpc" - 
"google.golang.org/grpc/codes" - "google.golang.org/grpc/status" ) func init() { @@ -40,42 +31,31 @@ func init() { Type: plugin.GRPCPlugin, ID: "images", Requires: []plugin.Type{ - plugin.MetadataPlugin, - plugin.GCPlugin, + plugin.ServicePlugin, }, InitFn: func(ic *plugin.InitContext) (interface{}, error) { - m, err := ic.Get(plugin.MetadataPlugin) + plugins, err := ic.GetByType(plugin.ServicePlugin) if err != nil { return nil, err } - g, err := ic.Get(plugin.GCPlugin) + p, ok := plugins[services.ImagesService] + if !ok { + return nil, errors.New("images service not found") + } + i, err := p.Instance() if err != nil { return nil, err } - - return NewService(metadata.NewImageStore(m.(*metadata.DB)), ic.Events, g.(gcScheduler)), nil + return &service{local: i.(imagesapi.ImagesClient)}, nil }, }) } -type gcScheduler interface { - ScheduleAndWait(gocontext.Context) (gc.Stats, error) -} - type service struct { - store images.Store - gc gcScheduler - publisher events.Publisher + local imagesapi.ImagesClient } -// NewService returns the GRPC image server -func NewService(is images.Store, publisher events.Publisher, gc gcScheduler) imagesapi.ImagesServer { - return &service{ - store: is, - gc: gc, - publisher: publisher, - } -} +var _ imagesapi.ImagesServer = &service{} func (s *service) Register(server *grpc.Server) error { imagesapi.RegisterImagesServer(server, s) @@ -83,108 +63,21 @@ func (s *service) Register(server *grpc.Server) error { } func (s *service) Get(ctx context.Context, req *imagesapi.GetImageRequest) (*imagesapi.GetImageResponse, error) { - image, err := s.store.Get(ctx, req.Name) - if err != nil { - return nil, errdefs.ToGRPC(err) - } - - imagepb := imageToProto(&image) - return &imagesapi.GetImageResponse{ - Image: &imagepb, - }, nil + return s.local.Get(ctx, req) } func (s *service) List(ctx context.Context, req *imagesapi.ListImagesRequest) (*imagesapi.ListImagesResponse, error) { - images, err := s.store.List(ctx, req.Filters...) - if err != nil { - return nil, errdefs.ToGRPC(err) - } - - return &imagesapi.ListImagesResponse{ - Images: imagesToProto(images), - }, nil + return s.local.List(ctx, req) } func (s *service) Create(ctx context.Context, req *imagesapi.CreateImageRequest) (*imagesapi.CreateImageResponse, error) { - log.G(ctx).WithField("name", req.Image.Name).WithField("target", req.Image.Target.Digest).Debugf("create image") - if req.Image.Name == "" { - return nil, status.Errorf(codes.InvalidArgument, "Image.Name required") - } - - var ( - image = imageFromProto(&req.Image) - resp imagesapi.CreateImageResponse - ) - created, err := s.store.Create(ctx, image) - if err != nil { - return nil, errdefs.ToGRPC(err) - } - - resp.Image = imageToProto(&created) - - if err := s.publisher.Publish(ctx, "/images/create", &eventstypes.ImageCreate{ - Name: resp.Image.Name, - Labels: resp.Image.Labels, - }); err != nil { - return nil, err - } - - return &resp, nil - + return s.local.Create(ctx, req) } func (s *service) Update(ctx context.Context, req *imagesapi.UpdateImageRequest) (*imagesapi.UpdateImageResponse, error) { - if req.Image.Name == "" { - return nil, status.Errorf(codes.InvalidArgument, "Image.Name required") - } - - var ( - image = imageFromProto(&req.Image) - resp imagesapi.UpdateImageResponse - fieldpaths []string - ) - - if req.UpdateMask != nil && len(req.UpdateMask.Paths) > 0 { - for _, path := range req.UpdateMask.Paths { - fieldpaths = append(fieldpaths, path) - } - } - - updated, err := s.store.Update(ctx, image, fieldpaths...) 
- if err != nil { - return nil, errdefs.ToGRPC(err) - } - - resp.Image = imageToProto(&updated) - - if err := s.publisher.Publish(ctx, "/images/update", &eventstypes.ImageUpdate{ - Name: resp.Image.Name, - Labels: resp.Image.Labels, - }); err != nil { - return nil, err - } - - return &resp, nil + return s.local.Update(ctx, req) } func (s *service) Delete(ctx context.Context, req *imagesapi.DeleteImageRequest) (*ptypes.Empty, error) { - log.G(ctx).WithField("name", req.Name).Debugf("delete image") - - if err := s.store.Delete(ctx, req.Name); err != nil { - return nil, errdefs.ToGRPC(err) - } - - if err := s.publisher.Publish(ctx, "/images/delete", &eventstypes.ImageDelete{ - Name: req.Name, - }); err != nil { - return nil, err - } - - if req.Sync { - if _, err := s.gc.ScheduleAndWait(ctx); err != nil { - return nil, err - } - } - - return &ptypes.Empty{}, nil + return s.local.Delete(ctx, req) } diff --git a/vendor/github.com/containerd/containerd/services/leases/local.go b/vendor/github.com/containerd/containerd/services/leases/local.go new file mode 100644 index 000000000..d3e0c2f2c --- /dev/null +++ b/vendor/github.com/containerd/containerd/services/leases/local.go @@ -0,0 +1,120 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package leases + +import ( + "crypto/rand" + "encoding/base64" + "fmt" + "time" + + "google.golang.org/grpc" + + "github.com/boltdb/bolt" + api "github.com/containerd/containerd/api/services/leases/v1" + "github.com/containerd/containerd/metadata" + "github.com/containerd/containerd/plugin" + "github.com/containerd/containerd/services" + ptypes "github.com/gogo/protobuf/types" + "golang.org/x/net/context" +) + +func init() { + plugin.Register(&plugin.Registration{ + Type: plugin.ServicePlugin, + ID: services.LeasesService, + Requires: []plugin.Type{ + plugin.MetadataPlugin, + }, + InitFn: func(ic *plugin.InitContext) (interface{}, error) { + m, err := ic.Get(plugin.MetadataPlugin) + if err != nil { + return nil, err + } + return &local{db: m.(*metadata.DB)}, nil + }, + }) +} + +type local struct { + db *metadata.DB +} + +func (l *local) Create(ctx context.Context, r *api.CreateRequest, _ ...grpc.CallOption) (*api.CreateResponse, error) { + lid := r.ID + if lid == "" { + lid = generateLeaseID() + } + var trans metadata.Lease + if err := l.db.Update(func(tx *bolt.Tx) error { + var err error + trans, err = metadata.NewLeaseManager(tx).Create(ctx, lid, r.Labels) + return err + }); err != nil { + return nil, err + } + return &api.CreateResponse{ + Lease: txToGRPC(trans), + }, nil +} + +func (l *local) Delete(ctx context.Context, r *api.DeleteRequest, _ ...grpc.CallOption) (*ptypes.Empty, error) { + if err := l.db.Update(func(tx *bolt.Tx) error { + return metadata.NewLeaseManager(tx).Delete(ctx, r.ID) + }); err != nil { + return nil, err + } + return &ptypes.Empty{}, nil +} + +func (l *local) List(ctx context.Context, r *api.ListRequest, _ ...grpc.CallOption) (*api.ListResponse, error) { + var leases []metadata.Lease + if err := l.db.View(func(tx *bolt.Tx) error { + var err error + leases, err = metadata.NewLeaseManager(tx).List(ctx, false, r.Filters...) 
+ return err + }); err != nil { + return nil, err + } + + apileases := make([]*api.Lease, len(leases)) + for i := range leases { + apileases[i] = txToGRPC(leases[i]) + } + + return &api.ListResponse{ + Leases: apileases, + }, nil +} + +func txToGRPC(tx metadata.Lease) *api.Lease { + return &api.Lease{ + ID: tx.ID, + Labels: tx.Labels, + CreatedAt: tx.CreatedAt, + // TODO: Snapshots + // TODO: Content + } +} + +func generateLeaseID() string { + t := time.Now() + var b [3]byte + // Ignore read failures, just decreases uniqueness + rand.Read(b[:]) + return fmt.Sprintf("%d-%s", t.Nanosecond(), base64.URLEncoding.EncodeToString(b[:])) +} diff --git a/vendor/github.com/containerd/containerd/services/leases/service.go b/vendor/github.com/containerd/containerd/services/leases/service.go index 100052f3a..e9cccf921 100644 --- a/vendor/github.com/containerd/containerd/services/leases/service.go +++ b/vendor/github.com/containerd/containerd/services/leases/service.go @@ -17,18 +17,13 @@ package leases import ( - "crypto/rand" - "encoding/base64" - "fmt" - "time" - "google.golang.org/grpc" - "github.com/boltdb/bolt" api "github.com/containerd/containerd/api/services/leases/v1" - "github.com/containerd/containerd/metadata" "github.com/containerd/containerd/plugin" + "github.com/containerd/containerd/services" ptypes "github.com/gogo/protobuf/types" + "github.com/pkg/errors" "golang.org/x/net/context" ) @@ -37,27 +32,28 @@ func init() { Type: plugin.GRPCPlugin, ID: "leases", Requires: []plugin.Type{ - plugin.MetadataPlugin, + plugin.ServicePlugin, }, InitFn: func(ic *plugin.InitContext) (interface{}, error) { - m, err := ic.Get(plugin.MetadataPlugin) + plugins, err := ic.GetByType(plugin.ServicePlugin) if err != nil { return nil, err } - return NewService(m.(*metadata.DB)), nil + p, ok := plugins[services.LeasesService] + if !ok { + return nil, errors.New("leases service not found") + } + i, err := p.Instance() + if err != nil { + return nil, err + } + return &service{local: i.(api.LeasesClient)}, nil }, }) } type service struct { - db *metadata.DB -} - -// NewService returns the GRPC metadata server -func NewService(db *metadata.DB) api.LeasesServer { - return &service{ - db: db, - } + local api.LeasesClient } func (s *service) Register(server *grpc.Server) error { @@ -66,66 +62,13 @@ func (s *service) Register(server *grpc.Server) error { } func (s *service) Create(ctx context.Context, r *api.CreateRequest) (*api.CreateResponse, error) { - lid := r.ID - if lid == "" { - lid = generateLeaseID() - } - var trans metadata.Lease - if err := s.db.Update(func(tx *bolt.Tx) error { - var err error - trans, err = metadata.NewLeaseManager(tx).Create(ctx, lid, r.Labels) - return err - }); err != nil { - return nil, err - } - return &api.CreateResponse{ - Lease: txToGRPC(trans), - }, nil + return s.local.Create(ctx, r) } func (s *service) Delete(ctx context.Context, r *api.DeleteRequest) (*ptypes.Empty, error) { - if err := s.db.Update(func(tx *bolt.Tx) error { - return metadata.NewLeaseManager(tx).Delete(ctx, r.ID) - }); err != nil { - return nil, err - } - return &ptypes.Empty{}, nil + return s.local.Delete(ctx, r) } func (s *service) List(ctx context.Context, r *api.ListRequest) (*api.ListResponse, error) { - var leases []metadata.Lease - if err := s.db.View(func(tx *bolt.Tx) error { - var err error - leases, err = metadata.NewLeaseManager(tx).List(ctx, false, r.Filters...) 
- return err - }); err != nil { - return nil, err - } - - apileases := make([]*api.Lease, len(leases)) - for i := range leases { - apileases[i] = txToGRPC(leases[i]) - } - - return &api.ListResponse{ - Leases: apileases, - }, nil -} - -func txToGRPC(tx metadata.Lease) *api.Lease { - return &api.Lease{ - ID: tx.ID, - Labels: tx.Labels, - CreatedAt: tx.CreatedAt, - // TODO: Snapshots - // TODO: Content - } -} - -func generateLeaseID() string { - t := time.Now() - var b [3]byte - // Ignore read failures, just decreases uniqueness - rand.Read(b[:]) - return fmt.Sprintf("%d-%s", t.Nanosecond(), base64.URLEncoding.EncodeToString(b[:])) + return s.local.List(ctx, r) } diff --git a/vendor/github.com/containerd/containerd/services/namespaces/local.go b/vendor/github.com/containerd/containerd/services/namespaces/local.go new file mode 100644 index 000000000..dfa86740b --- /dev/null +++ b/vendor/github.com/containerd/containerd/services/namespaces/local.go @@ -0,0 +1,223 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package namespaces + +import ( + "strings" + + "github.com/boltdb/bolt" + eventstypes "github.com/containerd/containerd/api/events" + api "github.com/containerd/containerd/api/services/namespaces/v1" + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/events" + "github.com/containerd/containerd/metadata" + "github.com/containerd/containerd/namespaces" + "github.com/containerd/containerd/plugin" + "github.com/containerd/containerd/services" + ptypes "github.com/gogo/protobuf/types" + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +func init() { + plugin.Register(&plugin.Registration{ + Type: plugin.ServicePlugin, + ID: services.NamespacesService, + Requires: []plugin.Type{ + plugin.MetadataPlugin, + }, + InitFn: func(ic *plugin.InitContext) (interface{}, error) { + m, err := ic.Get(plugin.MetadataPlugin) + if err != nil { + return nil, err + } + return &local{ + db: m.(*metadata.DB), + publisher: ic.Events, + }, nil + }, + }) +} + +// Provide local namespaces service instead of local namespace store, +// because namespace store interface doesn't provide enough functionality +// for namespaces service. 
+type local struct { + db *metadata.DB + publisher events.Publisher +} + +var _ api.NamespacesClient = &local{} + +func (l *local) Get(ctx context.Context, req *api.GetNamespaceRequest, _ ...grpc.CallOption) (*api.GetNamespaceResponse, error) { + var resp api.GetNamespaceResponse + + return &resp, l.withStoreView(ctx, func(ctx context.Context, store namespaces.Store) error { + labels, err := store.Labels(ctx, req.Name) + if err != nil { + return errdefs.ToGRPC(err) + } + + resp.Namespace = api.Namespace{ + Name: req.Name, + Labels: labels, + } + + return nil + }) +} + +func (l *local) List(ctx context.Context, req *api.ListNamespacesRequest, _ ...grpc.CallOption) (*api.ListNamespacesResponse, error) { + var resp api.ListNamespacesResponse + + return &resp, l.withStoreView(ctx, func(ctx context.Context, store namespaces.Store) error { + namespaces, err := store.List(ctx) + if err != nil { + return err + } + + for _, namespace := range namespaces { + labels, err := store.Labels(ctx, namespace) + if err != nil { + // In general, this should be unlikely, since we are holding a + // transaction to service this request. + return errdefs.ToGRPC(err) + } + + resp.Namespaces = append(resp.Namespaces, api.Namespace{ + Name: namespace, + Labels: labels, + }) + } + + return nil + }) +} + +func (l *local) Create(ctx context.Context, req *api.CreateNamespaceRequest, _ ...grpc.CallOption) (*api.CreateNamespaceResponse, error) { + var resp api.CreateNamespaceResponse + + if err := l.withStoreUpdate(ctx, func(ctx context.Context, store namespaces.Store) error { + if err := store.Create(ctx, req.Namespace.Name, req.Namespace.Labels); err != nil { + return errdefs.ToGRPC(err) + } + + for k, v := range req.Namespace.Labels { + if err := store.SetLabel(ctx, req.Namespace.Name, k, v); err != nil { + return err + } + } + + resp.Namespace = req.Namespace + return nil + }); err != nil { + return &resp, err + } + + if err := l.publisher.Publish(ctx, "/namespaces/create", &eventstypes.NamespaceCreate{ + Name: req.Namespace.Name, + Labels: req.Namespace.Labels, + }); err != nil { + return &resp, err + } + + return &resp, nil + +} + +func (l *local) Update(ctx context.Context, req *api.UpdateNamespaceRequest, _ ...grpc.CallOption) (*api.UpdateNamespaceResponse, error) { + var resp api.UpdateNamespaceResponse + if err := l.withStoreUpdate(ctx, func(ctx context.Context, store namespaces.Store) error { + if req.UpdateMask != nil && len(req.UpdateMask.Paths) > 0 { + for _, path := range req.UpdateMask.Paths { + switch { + case strings.HasPrefix(path, "labels."): + key := strings.TrimPrefix(path, "labels.") + if err := store.SetLabel(ctx, req.Namespace.Name, key, req.Namespace.Labels[key]); err != nil { + return err + } + default: + return status.Errorf(codes.InvalidArgument, "cannot update %q field", path) + } + } + } else { + // clear out the existing labels and then set them to the incoming request. 
+ // get current set of labels + labels, err := store.Labels(ctx, req.Namespace.Name) + if err != nil { + return errdefs.ToGRPC(err) + } + + for k := range labels { + if err := store.SetLabel(ctx, req.Namespace.Name, k, ""); err != nil { + return err + } + } + + for k, v := range req.Namespace.Labels { + if err := store.SetLabel(ctx, req.Namespace.Name, k, v); err != nil { + return err + } + + } + } + + return nil + }); err != nil { + return &resp, err + } + + if err := l.publisher.Publish(ctx, "/namespaces/update", &eventstypes.NamespaceUpdate{ + Name: req.Namespace.Name, + Labels: req.Namespace.Labels, + }); err != nil { + return &resp, err + } + + return &resp, nil +} + +func (l *local) Delete(ctx context.Context, req *api.DeleteNamespaceRequest, _ ...grpc.CallOption) (*ptypes.Empty, error) { + if err := l.withStoreUpdate(ctx, func(ctx context.Context, store namespaces.Store) error { + return errdefs.ToGRPC(store.Delete(ctx, req.Name)) + }); err != nil { + return &ptypes.Empty{}, err + } + // set the namespace in the context before publishing the event + ctx = namespaces.WithNamespace(ctx, req.Name) + if err := l.publisher.Publish(ctx, "/namespaces/delete", &eventstypes.NamespaceDelete{ + Name: req.Name, + }); err != nil { + return &ptypes.Empty{}, err + } + + return &ptypes.Empty{}, nil +} + +func (l *local) withStore(ctx context.Context, fn func(ctx context.Context, store namespaces.Store) error) func(tx *bolt.Tx) error { + return func(tx *bolt.Tx) error { return fn(ctx, metadata.NewNamespaceStore(tx)) } +} + +func (l *local) withStoreView(ctx context.Context, fn func(ctx context.Context, store namespaces.Store) error) error { + return l.db.View(l.withStore(ctx, fn)) +} + +func (l *local) withStoreUpdate(ctx context.Context, fn func(ctx context.Context, store namespaces.Store) error) error { + return l.db.Update(l.withStore(ctx, fn)) +} diff --git a/vendor/github.com/containerd/containerd/services/namespaces/service.go b/vendor/github.com/containerd/containerd/services/namespaces/service.go index 2c5694012..965590688 100644 --- a/vendor/github.com/containerd/containerd/services/namespaces/service.go +++ b/vendor/github.com/containerd/containerd/services/namespaces/service.go @@ -17,21 +17,13 @@ package namespaces import ( - "strings" - - "github.com/boltdb/bolt" - eventstypes "github.com/containerd/containerd/api/events" api "github.com/containerd/containerd/api/services/namespaces/v1" - "github.com/containerd/containerd/errdefs" - "github.com/containerd/containerd/events" - "github.com/containerd/containerd/metadata" - "github.com/containerd/containerd/namespaces" "github.com/containerd/containerd/plugin" + "github.com/containerd/containerd/services" ptypes "github.com/gogo/protobuf/types" + "github.com/pkg/errors" "golang.org/x/net/context" "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" ) func init() { @@ -39,191 +31,53 @@ func init() { Type: plugin.GRPCPlugin, ID: "namespaces", Requires: []plugin.Type{ - plugin.MetadataPlugin, + plugin.ServicePlugin, }, InitFn: func(ic *plugin.InitContext) (interface{}, error) { - m, err := ic.Get(plugin.MetadataPlugin) + plugins, err := ic.GetByType(plugin.ServicePlugin) if err != nil { return nil, err } - return NewService(m.(*metadata.DB), ic.Events), nil + p, ok := plugins[services.NamespacesService] + if !ok { + return nil, errors.New("namespaces service not found") + } + i, err := p.Instance() + if err != nil { + return nil, err + } + return &service{local: i.(api.NamespacesClient)}, nil }, }) } 
type service struct { - db *metadata.DB - publisher events.Publisher + local api.NamespacesClient } var _ api.NamespacesServer = &service{} -// NewService returns the GRPC namespaces server -func NewService(db *metadata.DB, publisher events.Publisher) api.NamespacesServer { - return &service{ - db: db, - publisher: publisher, - } -} - func (s *service) Register(server *grpc.Server) error { api.RegisterNamespacesServer(server, s) return nil } func (s *service) Get(ctx context.Context, req *api.GetNamespaceRequest) (*api.GetNamespaceResponse, error) { - var resp api.GetNamespaceResponse - - return &resp, s.withStoreView(ctx, func(ctx context.Context, store namespaces.Store) error { - labels, err := store.Labels(ctx, req.Name) - if err != nil { - return errdefs.ToGRPC(err) - } - - resp.Namespace = api.Namespace{ - Name: req.Name, - Labels: labels, - } - - return nil - }) + return s.local.Get(ctx, req) } func (s *service) List(ctx context.Context, req *api.ListNamespacesRequest) (*api.ListNamespacesResponse, error) { - var resp api.ListNamespacesResponse - - return &resp, s.withStoreView(ctx, func(ctx context.Context, store namespaces.Store) error { - namespaces, err := store.List(ctx) - if err != nil { - return err - } - - for _, namespace := range namespaces { - labels, err := store.Labels(ctx, namespace) - if err != nil { - // In general, this should be unlikely, since we are holding a - // transaction to service this request. - return errdefs.ToGRPC(err) - } - - resp.Namespaces = append(resp.Namespaces, api.Namespace{ - Name: namespace, - Labels: labels, - }) - } - - return nil - }) + return s.local.List(ctx, req) } func (s *service) Create(ctx context.Context, req *api.CreateNamespaceRequest) (*api.CreateNamespaceResponse, error) { - var resp api.CreateNamespaceResponse - - if err := s.withStoreUpdate(ctx, func(ctx context.Context, store namespaces.Store) error { - if err := store.Create(ctx, req.Namespace.Name, req.Namespace.Labels); err != nil { - return errdefs.ToGRPC(err) - } - - for k, v := range req.Namespace.Labels { - if err := store.SetLabel(ctx, req.Namespace.Name, k, v); err != nil { - return err - } - } - - resp.Namespace = req.Namespace - return nil - }); err != nil { - return &resp, err - } - - if err := s.publisher.Publish(ctx, "/namespaces/create", &eventstypes.NamespaceCreate{ - Name: req.Namespace.Name, - Labels: req.Namespace.Labels, - }); err != nil { - return &resp, err - } - - return &resp, nil - + return s.local.Create(ctx, req) } func (s *service) Update(ctx context.Context, req *api.UpdateNamespaceRequest) (*api.UpdateNamespaceResponse, error) { - var resp api.UpdateNamespaceResponse - if err := s.withStoreUpdate(ctx, func(ctx context.Context, store namespaces.Store) error { - if req.UpdateMask != nil && len(req.UpdateMask.Paths) > 0 { - for _, path := range req.UpdateMask.Paths { - switch { - case strings.HasPrefix(path, "labels."): - key := strings.TrimPrefix(path, "labels.") - if err := store.SetLabel(ctx, req.Namespace.Name, key, req.Namespace.Labels[key]); err != nil { - return err - } - default: - return status.Errorf(codes.InvalidArgument, "cannot update %q field", path) - } - } - } else { - // clear out the existing labels and then set them to the incoming request. 
- // get current set of labels - labels, err := store.Labels(ctx, req.Namespace.Name) - if err != nil { - return errdefs.ToGRPC(err) - } - - for k := range labels { - if err := store.SetLabel(ctx, req.Namespace.Name, k, ""); err != nil { - return err - } - } - - for k, v := range req.Namespace.Labels { - if err := store.SetLabel(ctx, req.Namespace.Name, k, v); err != nil { - return err - } - - } - } - - return nil - }); err != nil { - return &resp, err - } - - if err := s.publisher.Publish(ctx, "/namespaces/update", &eventstypes.NamespaceUpdate{ - Name: req.Namespace.Name, - Labels: req.Namespace.Labels, - }); err != nil { - return &resp, err - } - - return &resp, nil + return s.local.Update(ctx, req) } func (s *service) Delete(ctx context.Context, req *api.DeleteNamespaceRequest) (*ptypes.Empty, error) { - if err := s.withStoreUpdate(ctx, func(ctx context.Context, store namespaces.Store) error { - return errdefs.ToGRPC(store.Delete(ctx, req.Name)) - }); err != nil { - return &ptypes.Empty{}, err - } - // set the namespace in the context before publishing the event - ctx = namespaces.WithNamespace(ctx, req.Name) - if err := s.publisher.Publish(ctx, "/namespaces/delete", &eventstypes.NamespaceDelete{ - Name: req.Name, - }); err != nil { - return &ptypes.Empty{}, err - } - - return &ptypes.Empty{}, nil -} - -func (s *service) withStore(ctx context.Context, fn func(ctx context.Context, store namespaces.Store) error) func(tx *bolt.Tx) error { - return func(tx *bolt.Tx) error { return fn(ctx, metadata.NewNamespaceStore(tx)) } -} - -func (s *service) withStoreView(ctx context.Context, fn func(ctx context.Context, store namespaces.Store) error) error { - return s.db.View(s.withStore(ctx, fn)) -} - -func (s *service) withStoreUpdate(ctx context.Context, fn func(ctx context.Context, store namespaces.Store) error) error { - return s.db.Update(s.withStore(ctx, fn)) + return s.local.Delete(ctx, req) } diff --git a/vendor/github.com/containerd/containerd/services/services.go b/vendor/github.com/containerd/containerd/services/services.go new file mode 100644 index 000000000..efc920093 --- /dev/null +++ b/vendor/github.com/containerd/containerd/services/services.go @@ -0,0 +1,36 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package services + +const ( + // ContentService is id of content service. + ContentService = "content-service" + // SnapshotsService is id of snapshots service. + SnapshotsService = "snapshots-service" + // ImagesService is id of images service. + ImagesService = "images-service" + // ContainersService is id of containers service. + ContainersService = "containers-service" + // TasksService is id of tasks service. + TasksService = "tasks-service" + // NamespacesService is id of namespaces service. + NamespacesService = "namespaces-service" + // LeasesService is id of leases service. + LeasesService = "leases-service" + // DiffService is id of diff service. 
+ DiffService = "diff-service" +) diff --git a/vendor/github.com/containerd/containerd/services/snapshots/service.go b/vendor/github.com/containerd/containerd/services/snapshots/service.go index 2d997bdc9..1d10fae1e 100644 --- a/vendor/github.com/containerd/containerd/services/snapshots/service.go +++ b/vendor/github.com/containerd/containerd/services/snapshots/service.go @@ -19,17 +19,16 @@ package snapshots import ( gocontext "context" - eventstypes "github.com/containerd/containerd/api/events" snapshotsapi "github.com/containerd/containerd/api/services/snapshots/v1" "github.com/containerd/containerd/api/types" "github.com/containerd/containerd/errdefs" - "github.com/containerd/containerd/events" "github.com/containerd/containerd/log" - "github.com/containerd/containerd/metadata" "github.com/containerd/containerd/mount" "github.com/containerd/containerd/plugin" + "github.com/containerd/containerd/services" "github.com/containerd/containerd/snapshots" ptypes "github.com/gogo/protobuf/types" + "github.com/pkg/errors" "golang.org/x/net/context" "google.golang.org/grpc" ) @@ -39,7 +38,7 @@ func init() { Type: plugin.GRPCPlugin, ID: "snapshots", Requires: []plugin.Type{ - plugin.MetadataPlugin, + plugin.ServicePlugin, }, InitFn: newService, }) @@ -48,20 +47,24 @@ func init() { var empty = &ptypes.Empty{} type service struct { - db *metadata.DB - publisher events.Publisher + ss map[string]snapshots.Snapshotter } func newService(ic *plugin.InitContext) (interface{}, error) { - md, err := ic.Get(plugin.MetadataPlugin) + plugins, err := ic.GetByType(plugin.ServicePlugin) if err != nil { return nil, err } - - return &service{ - db: md.(*metadata.DB), - publisher: ic.Events, - }, nil + p, ok := plugins[services.SnapshotsService] + if !ok { + return nil, errors.New("snapshots service not found") + } + i, err := p.Instance() + if err != nil { + return nil, err + } + ss := i.(map[string]snapshots.Snapshotter) + return &service{ss: ss}, nil } func (s *service) getSnapshotter(name string) (snapshots.Snapshotter, error) { @@ -69,7 +72,7 @@ func (s *service) getSnapshotter(name string) (snapshots.Snapshotter, error) { return nil, errdefs.ToGRPCf(errdefs.ErrInvalidArgument, "snapshotter argument missing") } - sn := s.db.Snapshotter(name) + sn := s.ss[name] if sn == nil { return nil, errdefs.ToGRPCf(errdefs.ErrInvalidArgument, "snapshotter not loaded: %s", name) } @@ -97,12 +100,6 @@ func (s *service) Prepare(ctx context.Context, pr *snapshotsapi.PrepareSnapshotR return nil, errdefs.ToGRPC(err) } - if err := s.publisher.Publish(ctx, "/snapshot/prepare", &eventstypes.SnapshotPrepare{ - Key: pr.Key, - Parent: pr.Parent, - }); err != nil { - return nil, err - } return &snapshotsapi.PrepareSnapshotResponse{ Mounts: fromMounts(mounts), }, nil @@ -158,12 +155,6 @@ func (s *service) Commit(ctx context.Context, cr *snapshotsapi.CommitSnapshotReq return nil, errdefs.ToGRPC(err) } - if err := s.publisher.Publish(ctx, "/snapshot/commit", &eventstypes.SnapshotCommit{ - Key: cr.Key, - Name: cr.Name, - }); err != nil { - return nil, err - } return empty, nil } @@ -178,11 +169,6 @@ func (s *service) Remove(ctx context.Context, rr *snapshotsapi.RemoveSnapshotReq return nil, errdefs.ToGRPC(err) } - if err := s.publisher.Publish(ctx, "/snapshot/remove", &eventstypes.SnapshotRemove{ - Key: rr.Key, - }); err != nil { - return nil, err - } return empty, nil } diff --git a/vendor/github.com/containerd/containerd/services/snapshots/snapshotters.go b/vendor/github.com/containerd/containerd/services/snapshots/snapshotters.go new 
file mode 100644 index 000000000..5da365110 --- /dev/null +++ b/vendor/github.com/containerd/containerd/services/snapshots/snapshotters.go @@ -0,0 +1,98 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package snapshots + +import ( + "context" + + eventstypes "github.com/containerd/containerd/api/events" + "github.com/containerd/containerd/events" + "github.com/containerd/containerd/metadata" + "github.com/containerd/containerd/mount" + "github.com/containerd/containerd/plugin" + "github.com/containerd/containerd/services" + "github.com/containerd/containerd/snapshots" +) + +// snapshotter wraps snapshots.Snapshotter with proper events published. +type snapshotter struct { + snapshots.Snapshotter + publisher events.Publisher +} + +func init() { + plugin.Register(&plugin.Registration{ + Type: plugin.ServicePlugin, + ID: services.SnapshotsService, + Requires: []plugin.Type{ + plugin.MetadataPlugin, + }, + InitFn: func(ic *plugin.InitContext) (interface{}, error) { + m, err := ic.Get(plugin.MetadataPlugin) + if err != nil { + return nil, err + } + + db := m.(*metadata.DB) + ss := make(map[string]snapshots.Snapshotter) + for n, sn := range db.Snapshotters() { + ss[n] = newSnapshotter(sn, ic.Events) + } + return ss, nil + }, + }) +} + +func newSnapshotter(sn snapshots.Snapshotter, publisher events.Publisher) snapshots.Snapshotter { + return &snapshotter{ + Snapshotter: sn, + publisher: publisher, + } +} + +func (s *snapshotter) Prepare(ctx context.Context, key, parent string, opts ...snapshots.Opt) ([]mount.Mount, error) { + mounts, err := s.Snapshotter.Prepare(ctx, key, parent, opts...) + if err != nil { + return nil, err + } + if err := s.publisher.Publish(ctx, "/snapshot/prepare", &eventstypes.SnapshotPrepare{ + Key: key, + Parent: parent, + }); err != nil { + return nil, err + } + return mounts, nil +} + +func (s *snapshotter) Commit(ctx context.Context, name, key string, opts ...snapshots.Opt) error { + if err := s.Snapshotter.Commit(ctx, name, key, opts...); err != nil { + return err + } + return s.publisher.Publish(ctx, "/snapshot/commit", &eventstypes.SnapshotCommit{ + Key: key, + Name: name, + }) +} + +func (s *snapshotter) Remove(ctx context.Context, key string) error { + if err := s.Snapshotter.Remove(ctx, key); err != nil { + return err + } + return s.publisher.Publish(ctx, "/snapshot/remove", &eventstypes.SnapshotRemove{ + Key: key, + }) +} diff --git a/vendor/github.com/containerd/containerd/services/tasks/local.go b/vendor/github.com/containerd/containerd/services/tasks/local.go new file mode 100644 index 000000000..5bdadab62 --- /dev/null +++ b/vendor/github.com/containerd/containerd/services/tasks/local.go @@ -0,0 +1,634 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package tasks + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "time" + + "github.com/boltdb/bolt" + api "github.com/containerd/containerd/api/services/tasks/v1" + "github.com/containerd/containerd/api/types" + "github.com/containerd/containerd/api/types/task" + "github.com/containerd/containerd/archive" + "github.com/containerd/containerd/containers" + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/events" + "github.com/containerd/containerd/filters" + "github.com/containerd/containerd/images" + "github.com/containerd/containerd/log" + "github.com/containerd/containerd/metadata" + "github.com/containerd/containerd/mount" + "github.com/containerd/containerd/plugin" + "github.com/containerd/containerd/runtime" + "github.com/containerd/containerd/services" + "github.com/containerd/typeurl" + ptypes "github.com/gogo/protobuf/types" + "github.com/pkg/errors" + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +var ( + _ = (api.TasksClient)(&local{}) + empty = &ptypes.Empty{} +) + +func init() { + plugin.Register(&plugin.Registration{ + Type: plugin.ServicePlugin, + ID: services.TasksService, + Requires: []plugin.Type{ + plugin.RuntimePlugin, + plugin.MetadataPlugin, + }, + InitFn: initFunc, + }) +} + +func initFunc(ic *plugin.InitContext) (interface{}, error) { + rt, err := ic.GetByType(plugin.RuntimePlugin) + if err != nil { + return nil, err + } + + m, err := ic.Get(plugin.MetadataPlugin) + if err != nil { + return nil, err + } + cs := m.(*metadata.DB).ContentStore() + runtimes := make(map[string]runtime.Runtime) + for _, rr := range rt { + ri, err := rr.Instance() + if err != nil { + log.G(ic.Context).WithError(err).Warn("could not load runtime instance due to initialization error") + continue + } + r := ri.(runtime.Runtime) + runtimes[r.ID()] = r + } + + if len(runtimes) == 0 { + return nil, errors.New("no runtimes available to create task service") + } + return &local{ + runtimes: runtimes, + db: m.(*metadata.DB), + store: cs, + publisher: ic.Events, + }, nil +} + +type local struct { + runtimes map[string]runtime.Runtime + db *metadata.DB + store content.Store + publisher events.Publisher +} + +func (l *local) Create(ctx context.Context, r *api.CreateTaskRequest, _ ...grpc.CallOption) (*api.CreateTaskResponse, error) { + var ( + checkpointPath string + err error + ) + if r.Checkpoint != nil { + checkpointPath, err = ioutil.TempDir("", "ctrd-checkpoint") + if err != nil { + return nil, err + } + if r.Checkpoint.MediaType != images.MediaTypeContainerd1Checkpoint { + return nil, fmt.Errorf("unsupported checkpoint type %q", r.Checkpoint.MediaType) + } + reader, err := l.store.ReaderAt(ctx, r.Checkpoint.Digest) + if err != nil { + return nil, err + } + _, err = archive.Apply(ctx, checkpointPath, content.NewReader(reader)) + reader.Close() + if err != nil { + return nil, err + } + } + + container, err := l.getContainer(ctx, r.ContainerID) + if err != nil { + return nil, errdefs.ToGRPC(err) + } + opts := 
runtime.CreateOpts{ + Spec: container.Spec, + IO: runtime.IO{ + Stdin: r.Stdin, + Stdout: r.Stdout, + Stderr: r.Stderr, + Terminal: r.Terminal, + }, + Checkpoint: checkpointPath, + Options: r.Options, + } + for _, m := range r.Rootfs { + opts.Rootfs = append(opts.Rootfs, mount.Mount{ + Type: m.Type, + Source: m.Source, + Options: m.Options, + }) + } + runtime, err := l.getRuntime(container.Runtime.Name) + if err != nil { + return nil, err + } + c, err := runtime.Create(ctx, r.ContainerID, opts) + if err != nil { + return nil, errdefs.ToGRPC(err) + } + state, err := c.State(ctx) + if err != nil { + log.G(ctx).Error(err) + } + + return &api.CreateTaskResponse{ + ContainerID: r.ContainerID, + Pid: state.Pid, + }, nil +} + +func (l *local) Start(ctx context.Context, r *api.StartRequest, _ ...grpc.CallOption) (*api.StartResponse, error) { + t, err := l.getTask(ctx, r.ContainerID) + if err != nil { + return nil, err + } + p := runtime.Process(t) + if r.ExecID != "" { + if p, err = t.Process(ctx, r.ExecID); err != nil { + return nil, errdefs.ToGRPC(err) + } + } + if err := p.Start(ctx); err != nil { + return nil, errdefs.ToGRPC(err) + } + state, err := p.State(ctx) + if err != nil { + return nil, errdefs.ToGRPC(err) + } + return &api.StartResponse{ + Pid: state.Pid, + }, nil +} + +func (l *local) Delete(ctx context.Context, r *api.DeleteTaskRequest, _ ...grpc.CallOption) (*api.DeleteResponse, error) { + t, err := l.getTask(ctx, r.ContainerID) + if err != nil { + return nil, err + } + runtime, err := l.getRuntime(t.Info().Runtime) + if err != nil { + return nil, err + } + exit, err := runtime.Delete(ctx, t) + if err != nil { + return nil, errdefs.ToGRPC(err) + } + return &api.DeleteResponse{ + ExitStatus: exit.Status, + ExitedAt: exit.Timestamp, + Pid: exit.Pid, + }, nil +} + +func (l *local) DeleteProcess(ctx context.Context, r *api.DeleteProcessRequest, _ ...grpc.CallOption) (*api.DeleteResponse, error) { + t, err := l.getTask(ctx, r.ContainerID) + if err != nil { + return nil, err + } + exit, err := t.DeleteProcess(ctx, r.ExecID) + if err != nil { + return nil, errdefs.ToGRPC(err) + } + return &api.DeleteResponse{ + ID: r.ExecID, + ExitStatus: exit.Status, + ExitedAt: exit.Timestamp, + Pid: exit.Pid, + }, nil +} + +func processFromContainerd(ctx context.Context, p runtime.Process) (*task.Process, error) { + state, err := p.State(ctx) + if err != nil { + return nil, err + } + var status task.Status + switch state.Status { + case runtime.CreatedStatus: + status = task.StatusCreated + case runtime.RunningStatus: + status = task.StatusRunning + case runtime.StoppedStatus: + status = task.StatusStopped + case runtime.PausedStatus: + status = task.StatusPaused + case runtime.PausingStatus: + status = task.StatusPausing + default: + log.G(ctx).WithField("status", state.Status).Warn("unknown status") + } + return &task.Process{ + ID: p.ID(), + Pid: state.Pid, + Status: status, + Stdin: state.Stdin, + Stdout: state.Stdout, + Stderr: state.Stderr, + Terminal: state.Terminal, + ExitStatus: state.ExitStatus, + ExitedAt: state.ExitedAt, + }, nil +} + +func (l *local) Get(ctx context.Context, r *api.GetRequest, _ ...grpc.CallOption) (*api.GetResponse, error) { + task, err := l.getTask(ctx, r.ContainerID) + if err != nil { + return nil, err + } + p := runtime.Process(task) + if r.ExecID != "" { + if p, err = task.Process(ctx, r.ExecID); err != nil { + return nil, errdefs.ToGRPC(err) + } + } + t, err := processFromContainerd(ctx, p) + if err != nil { + return nil, errdefs.ToGRPC(err) + } + return 
&api.GetResponse{ + Process: t, + }, nil +} + +func (l *local) List(ctx context.Context, r *api.ListTasksRequest, _ ...grpc.CallOption) (*api.ListTasksResponse, error) { + resp := &api.ListTasksResponse{} + for _, r := range l.runtimes { + tasks, err := r.Tasks(ctx) + if err != nil { + return nil, errdefs.ToGRPC(err) + } + addTasks(ctx, resp, tasks) + } + return resp, nil +} + +func addTasks(ctx context.Context, r *api.ListTasksResponse, tasks []runtime.Task) { + for _, t := range tasks { + tt, err := processFromContainerd(ctx, t) + if err != nil { + if !errdefs.IsNotFound(err) { // handle race with deletion + log.G(ctx).WithError(err).WithField("id", t.ID()).Error("converting task to protobuf") + } + continue + } + r.Tasks = append(r.Tasks, tt) + } +} + +func (l *local) Pause(ctx context.Context, r *api.PauseTaskRequest, _ ...grpc.CallOption) (*ptypes.Empty, error) { + t, err := l.getTask(ctx, r.ContainerID) + if err != nil { + return nil, err + } + err = t.Pause(ctx) + if err != nil { + return nil, errdefs.ToGRPC(err) + } + return empty, nil +} + +func (l *local) Resume(ctx context.Context, r *api.ResumeTaskRequest, _ ...grpc.CallOption) (*ptypes.Empty, error) { + t, err := l.getTask(ctx, r.ContainerID) + if err != nil { + return nil, err + } + err = t.Resume(ctx) + if err != nil { + return nil, errdefs.ToGRPC(err) + } + return empty, nil +} + +func (l *local) Kill(ctx context.Context, r *api.KillRequest, _ ...grpc.CallOption) (*ptypes.Empty, error) { + t, err := l.getTask(ctx, r.ContainerID) + if err != nil { + return nil, err + } + p := runtime.Process(t) + if r.ExecID != "" { + if p, err = t.Process(ctx, r.ExecID); err != nil { + return nil, errdefs.ToGRPC(err) + } + } + if err := p.Kill(ctx, r.Signal, r.All); err != nil { + return nil, errdefs.ToGRPC(err) + } + return empty, nil +} + +func (l *local) ListPids(ctx context.Context, r *api.ListPidsRequest, _ ...grpc.CallOption) (*api.ListPidsResponse, error) { + t, err := l.getTask(ctx, r.ContainerID) + if err != nil { + return nil, err + } + processList, err := t.Pids(ctx) + if err != nil { + return nil, errdefs.ToGRPC(err) + } + var processes []*task.ProcessInfo + for _, p := range processList { + pInfo := task.ProcessInfo{ + Pid: p.Pid, + } + if p.Info != nil { + a, err := typeurl.MarshalAny(p.Info) + if err != nil { + return nil, errors.Wrapf(err, "failed to marshal process %d info", p.Pid) + } + pInfo.Info = a + } + processes = append(processes, &pInfo) + } + return &api.ListPidsResponse{ + Processes: processes, + }, nil +} + +func (l *local) Exec(ctx context.Context, r *api.ExecProcessRequest, _ ...grpc.CallOption) (*ptypes.Empty, error) { + if r.ExecID == "" { + return nil, status.Errorf(codes.InvalidArgument, "exec id cannot be empty") + } + t, err := l.getTask(ctx, r.ContainerID) + if err != nil { + return nil, err + } + if _, err := t.Exec(ctx, r.ExecID, runtime.ExecOpts{ + Spec: r.Spec, + IO: runtime.IO{ + Stdin: r.Stdin, + Stdout: r.Stdout, + Stderr: r.Stderr, + Terminal: r.Terminal, + }, + }); err != nil { + return nil, errdefs.ToGRPC(err) + } + return empty, nil +} + +func (l *local) ResizePty(ctx context.Context, r *api.ResizePtyRequest, _ ...grpc.CallOption) (*ptypes.Empty, error) { + t, err := l.getTask(ctx, r.ContainerID) + if err != nil { + return nil, err + } + p := runtime.Process(t) + if r.ExecID != "" { + if p, err = t.Process(ctx, r.ExecID); err != nil { + return nil, errdefs.ToGRPC(err) + } + } + if err := p.ResizePty(ctx, runtime.ConsoleSize{ + Width: r.Width, + Height: r.Height, + }); err != nil { + return 
nil, errdefs.ToGRPC(err) + } + return empty, nil +} + +func (l *local) CloseIO(ctx context.Context, r *api.CloseIORequest, _ ...grpc.CallOption) (*ptypes.Empty, error) { + t, err := l.getTask(ctx, r.ContainerID) + if err != nil { + return nil, err + } + p := runtime.Process(t) + if r.ExecID != "" { + if p, err = t.Process(ctx, r.ExecID); err != nil { + return nil, errdefs.ToGRPC(err) + } + } + if r.Stdin { + if err := p.CloseIO(ctx); err != nil { + return nil, err + } + } + return empty, nil +} + +func (l *local) Checkpoint(ctx context.Context, r *api.CheckpointTaskRequest, _ ...grpc.CallOption) (*api.CheckpointTaskResponse, error) { + container, err := l.getContainer(ctx, r.ContainerID) + if err != nil { + return nil, err + } + t, err := l.getTaskFromContainer(ctx, container) + if err != nil { + return nil, err + } + image, err := ioutil.TempDir("", "ctd-checkpoint") + if err != nil { + return nil, errdefs.ToGRPC(err) + } + defer os.RemoveAll(image) + if err := t.Checkpoint(ctx, image, r.Options); err != nil { + return nil, errdefs.ToGRPC(err) + } + // write checkpoint to the content store + tar := archive.Diff(ctx, "", image) + cp, err := l.writeContent(ctx, images.MediaTypeContainerd1Checkpoint, image, tar) + // close tar first after write + if err := tar.Close(); err != nil { + return nil, err + } + if err != nil { + return nil, err + } + // write the config to the content store + data, err := container.Spec.Marshal() + if err != nil { + return nil, err + } + spec := bytes.NewReader(data) + specD, err := l.writeContent(ctx, images.MediaTypeContainerd1CheckpointConfig, filepath.Join(image, "spec"), spec) + if err != nil { + return nil, errdefs.ToGRPC(err) + } + return &api.CheckpointTaskResponse{ + Descriptors: []*types.Descriptor{ + cp, + specD, + }, + }, nil +} + +func (l *local) Update(ctx context.Context, r *api.UpdateTaskRequest, _ ...grpc.CallOption) (*ptypes.Empty, error) { + t, err := l.getTask(ctx, r.ContainerID) + if err != nil { + return nil, err + } + if err := t.Update(ctx, r.Resources); err != nil { + return nil, errdefs.ToGRPC(err) + } + return empty, nil +} + +func (l *local) Metrics(ctx context.Context, r *api.MetricsRequest, _ ...grpc.CallOption) (*api.MetricsResponse, error) { + filter, err := filters.ParseAll(r.Filters...) 
+ if err != nil { + return nil, err + } + var resp api.MetricsResponse + for _, r := range l.runtimes { + tasks, err := r.Tasks(ctx) + if err != nil { + return nil, err + } + getTasksMetrics(ctx, filter, tasks, &resp) + } + return &resp, nil +} + +func (l *local) Wait(ctx context.Context, r *api.WaitRequest, _ ...grpc.CallOption) (*api.WaitResponse, error) { + t, err := l.getTask(ctx, r.ContainerID) + if err != nil { + return nil, err + } + p := runtime.Process(t) + if r.ExecID != "" { + if p, err = t.Process(ctx, r.ExecID); err != nil { + return nil, errdefs.ToGRPC(err) + } + } + exit, err := p.Wait(ctx) + if err != nil { + return nil, errdefs.ToGRPC(err) + } + return &api.WaitResponse{ + ExitStatus: exit.Status, + ExitedAt: exit.Timestamp, + }, nil +} + +func getTasksMetrics(ctx context.Context, filter filters.Filter, tasks []runtime.Task, r *api.MetricsResponse) { + for _, tk := range tasks { + if !filter.Match(filters.AdapterFunc(func(fieldpath []string) (string, bool) { + t := tk + switch fieldpath[0] { + case "id": + return t.ID(), true + case "namespace": + return t.Info().Namespace, true + case "runtime": + return t.Info().Runtime, true + } + return "", false + })) { + continue + } + + collected := time.Now() + metrics, err := tk.Metrics(ctx) + if err != nil { + if !errdefs.IsNotFound(err) { + log.G(ctx).WithError(err).Errorf("collecting metrics for %s", tk.ID()) + } + continue + } + data, err := typeurl.MarshalAny(metrics) + if err != nil { + log.G(ctx).WithError(err).Errorf("marshal metrics for %s", tk.ID()) + continue + } + r.Metrics = append(r.Metrics, &types.Metric{ + ID: tk.ID(), + Timestamp: collected, + Data: data, + }) + } +} + +func (l *local) writeContent(ctx context.Context, mediaType, ref string, r io.Reader) (*types.Descriptor, error) { + writer, err := l.store.Writer(ctx, ref, 0, "") + if err != nil { + return nil, err + } + defer writer.Close() + size, err := io.Copy(writer, r) + if err != nil { + return nil, err + } + if err := writer.Commit(ctx, 0, ""); err != nil { + return nil, err + } + return &types.Descriptor{ + MediaType: mediaType, + Digest: writer.Digest(), + Size_: size, + }, nil +} + +func (l *local) getContainer(ctx context.Context, id string) (*containers.Container, error) { + var container containers.Container + if err := l.db.View(func(tx *bolt.Tx) error { + store := metadata.NewContainerStore(tx) + var err error + container, err = store.Get(ctx, id) + return err + }); err != nil { + return nil, errdefs.ToGRPC(err) + } + return &container, nil +} + +func (l *local) getTask(ctx context.Context, id string) (runtime.Task, error) { + container, err := l.getContainer(ctx, id) + if err != nil { + return nil, err + } + return l.getTaskFromContainer(ctx, container) +} + +func (l *local) getTaskFromContainer(ctx context.Context, container *containers.Container) (runtime.Task, error) { + runtime, err := l.getRuntime(container.Runtime.Name) + if err != nil { + return nil, errdefs.ToGRPCf(err, "runtime for task %s", container.Runtime.Name) + } + t, err := runtime.Get(ctx, container.ID) + if err != nil { + return nil, status.Errorf(codes.NotFound, "task %v not found", container.ID) + } + return t, nil +} + +func (l *local) getRuntime(name string) (runtime.Runtime, error) { + runtime, ok := l.runtimes[name] + if !ok { + return nil, status.Errorf(codes.NotFound, "unknown runtime %q", name) + } + return runtime, nil +} diff --git a/vendor/github.com/containerd/containerd/services/tasks/service.go b/vendor/github.com/containerd/containerd/services/tasks/service.go 
index b1329335b..becf1508b 100644 --- a/vendor/github.com/containerd/containerd/services/tasks/service.go +++ b/vendor/github.com/containerd/containerd/services/tasks/service.go @@ -17,42 +17,17 @@ package tasks import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "time" - - "github.com/boltdb/bolt" api "github.com/containerd/containerd/api/services/tasks/v1" - "github.com/containerd/containerd/api/types" - "github.com/containerd/containerd/api/types/task" - "github.com/containerd/containerd/archive" - "github.com/containerd/containerd/containers" - "github.com/containerd/containerd/content" - "github.com/containerd/containerd/errdefs" - "github.com/containerd/containerd/events" - "github.com/containerd/containerd/filters" - "github.com/containerd/containerd/images" - "github.com/containerd/containerd/log" - "github.com/containerd/containerd/metadata" - "github.com/containerd/containerd/mount" "github.com/containerd/containerd/plugin" - "github.com/containerd/containerd/runtime" - "github.com/containerd/typeurl" + "github.com/containerd/containerd/services" ptypes "github.com/gogo/protobuf/types" "github.com/pkg/errors" "golang.org/x/net/context" "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" ) var ( - _ = (api.TasksServer)(&service{}) - empty = &ptypes.Empty{} + _ = (api.TasksServer)(&service{}) ) func init() { @@ -60,51 +35,28 @@ func init() { Type: plugin.GRPCPlugin, ID: "tasks", Requires: []plugin.Type{ - plugin.RuntimePlugin, - plugin.MetadataPlugin, + plugin.ServicePlugin, + }, + InitFn: func(ic *plugin.InitContext) (interface{}, error) { + plugins, err := ic.GetByType(plugin.ServicePlugin) + if err != nil { + return nil, err + } + p, ok := plugins[services.TasksService] + if !ok { + return nil, errors.New("tasks service not found") + } + i, err := p.Instance() + if err != nil { + return nil, err + } + return &service{local: i.(api.TasksClient)}, nil }, - InitFn: initFunc, }) } -func initFunc(ic *plugin.InitContext) (interface{}, error) { - rt, err := ic.GetByType(plugin.RuntimePlugin) - if err != nil { - return nil, err - } - - m, err := ic.Get(plugin.MetadataPlugin) - if err != nil { - return nil, err - } - cs := m.(*metadata.DB).ContentStore() - runtimes := make(map[string]runtime.Runtime) - for _, rr := range rt { - ri, err := rr.Instance() - if err != nil { - log.G(ic.Context).WithError(err).Warn("could not load runtime instance due to initialization error") - continue - } - r := ri.(runtime.Runtime) - runtimes[r.ID()] = r - } - - if len(runtimes) == 0 { - return nil, errors.New("no runtimes available to create task service") - } - return &service{ - runtimes: runtimes, - db: m.(*metadata.DB), - store: cs, - publisher: ic.Events, - }, nil -} - type service struct { - runtimes map[string]runtime.Runtime - db *metadata.DB - store content.Store - publisher events.Publisher + local api.TasksClient } func (s *service) Register(server *grpc.Server) error { @@ -113,526 +65,69 @@ func (s *service) Register(server *grpc.Server) error { } func (s *service) Create(ctx context.Context, r *api.CreateTaskRequest) (*api.CreateTaskResponse, error) { - var ( - checkpointPath string - err error - ) - if r.Checkpoint != nil { - checkpointPath, err = ioutil.TempDir("", "ctrd-checkpoint") - if err != nil { - return nil, err - } - if r.Checkpoint.MediaType != images.MediaTypeContainerd1Checkpoint { - return nil, fmt.Errorf("unsupported checkpoint type %q", r.Checkpoint.MediaType) - } - reader, err := s.store.ReaderAt(ctx, 
r.Checkpoint.Digest) - if err != nil { - return nil, err - } - _, err = archive.Apply(ctx, checkpointPath, content.NewReader(reader)) - reader.Close() - if err != nil { - return nil, err - } - } - - container, err := s.getContainer(ctx, r.ContainerID) - if err != nil { - return nil, errdefs.ToGRPC(err) - } - opts := runtime.CreateOpts{ - Spec: container.Spec, - IO: runtime.IO{ - Stdin: r.Stdin, - Stdout: r.Stdout, - Stderr: r.Stderr, - Terminal: r.Terminal, - }, - Checkpoint: checkpointPath, - Options: r.Options, - } - for _, m := range r.Rootfs { - opts.Rootfs = append(opts.Rootfs, mount.Mount{ - Type: m.Type, - Source: m.Source, - Options: m.Options, - }) - } - runtime, err := s.getRuntime(container.Runtime.Name) - if err != nil { - return nil, err - } - c, err := runtime.Create(ctx, r.ContainerID, opts) - if err != nil { - return nil, errdefs.ToGRPC(err) - } - state, err := c.State(ctx) - if err != nil { - log.G(ctx).Error(err) - } - - return &api.CreateTaskResponse{ - ContainerID: r.ContainerID, - Pid: state.Pid, - }, nil + return s.local.Create(ctx, r) } func (s *service) Start(ctx context.Context, r *api.StartRequest) (*api.StartResponse, error) { - t, err := s.getTask(ctx, r.ContainerID) - if err != nil { - return nil, err - } - p := runtime.Process(t) - if r.ExecID != "" { - if p, err = t.Process(ctx, r.ExecID); err != nil { - return nil, errdefs.ToGRPC(err) - } - } - if err := p.Start(ctx); err != nil { - return nil, errdefs.ToGRPC(err) - } - state, err := p.State(ctx) - if err != nil { - return nil, errdefs.ToGRPC(err) - } - return &api.StartResponse{ - Pid: state.Pid, - }, nil + return s.local.Start(ctx, r) } func (s *service) Delete(ctx context.Context, r *api.DeleteTaskRequest) (*api.DeleteResponse, error) { - t, err := s.getTask(ctx, r.ContainerID) - if err != nil { - return nil, err - } - runtime, err := s.getRuntime(t.Info().Runtime) - if err != nil { - return nil, err - } - exit, err := runtime.Delete(ctx, t) - if err != nil { - return nil, errdefs.ToGRPC(err) - } - return &api.DeleteResponse{ - ExitStatus: exit.Status, - ExitedAt: exit.Timestamp, - Pid: exit.Pid, - }, nil + return s.local.Delete(ctx, r) } func (s *service) DeleteProcess(ctx context.Context, r *api.DeleteProcessRequest) (*api.DeleteResponse, error) { - t, err := s.getTask(ctx, r.ContainerID) - if err != nil { - return nil, err - } - exit, err := t.DeleteProcess(ctx, r.ExecID) - if err != nil { - return nil, errdefs.ToGRPC(err) - } - return &api.DeleteResponse{ - ID: r.ExecID, - ExitStatus: exit.Status, - ExitedAt: exit.Timestamp, - Pid: exit.Pid, - }, nil -} - -func processFromContainerd(ctx context.Context, p runtime.Process) (*task.Process, error) { - state, err := p.State(ctx) - if err != nil { - return nil, err - } - var status task.Status - switch state.Status { - case runtime.CreatedStatus: - status = task.StatusCreated - case runtime.RunningStatus: - status = task.StatusRunning - case runtime.StoppedStatus: - status = task.StatusStopped - case runtime.PausedStatus: - status = task.StatusPaused - case runtime.PausingStatus: - status = task.StatusPausing - default: - log.G(ctx).WithField("status", state.Status).Warn("unknown status") - } - return &task.Process{ - ID: p.ID(), - Pid: state.Pid, - Status: status, - Stdin: state.Stdin, - Stdout: state.Stdout, - Stderr: state.Stderr, - Terminal: state.Terminal, - ExitStatus: state.ExitStatus, - ExitedAt: state.ExitedAt, - }, nil + return s.local.DeleteProcess(ctx, r) } func (s *service) Get(ctx context.Context, r *api.GetRequest) (*api.GetResponse, error) 
{ - task, err := s.getTask(ctx, r.ContainerID) - if err != nil { - return nil, err - } - p := runtime.Process(task) - if r.ExecID != "" { - if p, err = task.Process(ctx, r.ExecID); err != nil { - return nil, errdefs.ToGRPC(err) - } - } - t, err := processFromContainerd(ctx, p) - if err != nil { - return nil, errdefs.ToGRPC(err) - } - return &api.GetResponse{ - Process: t, - }, nil + return s.local.Get(ctx, r) } func (s *service) List(ctx context.Context, r *api.ListTasksRequest) (*api.ListTasksResponse, error) { - resp := &api.ListTasksResponse{} - for _, r := range s.runtimes { - tasks, err := r.Tasks(ctx) - if err != nil { - return nil, errdefs.ToGRPC(err) - } - addTasks(ctx, resp, tasks) - } - return resp, nil -} - -func addTasks(ctx context.Context, r *api.ListTasksResponse, tasks []runtime.Task) { - for _, t := range tasks { - tt, err := processFromContainerd(ctx, t) - if err != nil { - if !errdefs.IsNotFound(err) { // handle race with deletion - log.G(ctx).WithError(err).WithField("id", t.ID()).Error("converting task to protobuf") - } - continue - } - r.Tasks = append(r.Tasks, tt) - } + return s.local.List(ctx, r) } func (s *service) Pause(ctx context.Context, r *api.PauseTaskRequest) (*ptypes.Empty, error) { - t, err := s.getTask(ctx, r.ContainerID) - if err != nil { - return nil, err - } - err = t.Pause(ctx) - if err != nil { - return nil, errdefs.ToGRPC(err) - } - return empty, nil + return s.local.Pause(ctx, r) } func (s *service) Resume(ctx context.Context, r *api.ResumeTaskRequest) (*ptypes.Empty, error) { - t, err := s.getTask(ctx, r.ContainerID) - if err != nil { - return nil, err - } - err = t.Resume(ctx) - if err != nil { - return nil, errdefs.ToGRPC(err) - } - return empty, nil + return s.local.Resume(ctx, r) } func (s *service) Kill(ctx context.Context, r *api.KillRequest) (*ptypes.Empty, error) { - t, err := s.getTask(ctx, r.ContainerID) - if err != nil { - return nil, err - } - p := runtime.Process(t) - if r.ExecID != "" { - if p, err = t.Process(ctx, r.ExecID); err != nil { - return nil, errdefs.ToGRPC(err) - } - } - if err := p.Kill(ctx, r.Signal, r.All); err != nil { - return nil, errdefs.ToGRPC(err) - } - return empty, nil + return s.local.Kill(ctx, r) } func (s *service) ListPids(ctx context.Context, r *api.ListPidsRequest) (*api.ListPidsResponse, error) { - t, err := s.getTask(ctx, r.ContainerID) - if err != nil { - return nil, err - } - processList, err := t.Pids(ctx) - if err != nil { - return nil, errdefs.ToGRPC(err) - } - var processes []*task.ProcessInfo - for _, p := range processList { - pInfo := task.ProcessInfo{ - Pid: p.Pid, - } - if p.Info != nil { - a, err := typeurl.MarshalAny(p.Info) - if err != nil { - return nil, errors.Wrapf(err, "failed to marshal process %d info", p.Pid) - } - pInfo.Info = a - } - processes = append(processes, &pInfo) - } - return &api.ListPidsResponse{ - Processes: processes, - }, nil + return s.local.ListPids(ctx, r) } func (s *service) Exec(ctx context.Context, r *api.ExecProcessRequest) (*ptypes.Empty, error) { - if r.ExecID == "" { - return nil, status.Errorf(codes.InvalidArgument, "exec id cannot be empty") - } - t, err := s.getTask(ctx, r.ContainerID) - if err != nil { - return nil, err - } - if _, err := t.Exec(ctx, r.ExecID, runtime.ExecOpts{ - Spec: r.Spec, - IO: runtime.IO{ - Stdin: r.Stdin, - Stdout: r.Stdout, - Stderr: r.Stderr, - Terminal: r.Terminal, - }, - }); err != nil { - return nil, errdefs.ToGRPC(err) - } - return empty, nil + return s.local.Exec(ctx, r) } func (s *service) ResizePty(ctx context.Context, r 
*api.ResizePtyRequest) (*ptypes.Empty, error) { - t, err := s.getTask(ctx, r.ContainerID) - if err != nil { - return nil, err - } - p := runtime.Process(t) - if r.ExecID != "" { - if p, err = t.Process(ctx, r.ExecID); err != nil { - return nil, errdefs.ToGRPC(err) - } - } - if err := p.ResizePty(ctx, runtime.ConsoleSize{ - Width: r.Width, - Height: r.Height, - }); err != nil { - return nil, errdefs.ToGRPC(err) - } - return empty, nil + return s.local.ResizePty(ctx, r) } func (s *service) CloseIO(ctx context.Context, r *api.CloseIORequest) (*ptypes.Empty, error) { - t, err := s.getTask(ctx, r.ContainerID) - if err != nil { - return nil, err - } - p := runtime.Process(t) - if r.ExecID != "" { - if p, err = t.Process(ctx, r.ExecID); err != nil { - return nil, errdefs.ToGRPC(err) - } - } - if r.Stdin { - if err := p.CloseIO(ctx); err != nil { - return nil, err - } - } - return empty, nil + return s.local.CloseIO(ctx, r) } func (s *service) Checkpoint(ctx context.Context, r *api.CheckpointTaskRequest) (*api.CheckpointTaskResponse, error) { - container, err := s.getContainer(ctx, r.ContainerID) - if err != nil { - return nil, err - } - t, err := s.getTaskFromContainer(ctx, container) - if err != nil { - return nil, err - } - image, err := ioutil.TempDir("", "ctd-checkpoint") - if err != nil { - return nil, errdefs.ToGRPC(err) - } - defer os.RemoveAll(image) - if err := t.Checkpoint(ctx, image, r.Options); err != nil { - return nil, errdefs.ToGRPC(err) - } - // write checkpoint to the content store - tar := archive.Diff(ctx, "", image) - cp, err := s.writeContent(ctx, images.MediaTypeContainerd1Checkpoint, image, tar) - // close tar first after write - if err := tar.Close(); err != nil { - return nil, err - } - if err != nil { - return nil, err - } - // write the config to the content store - data, err := container.Spec.Marshal() - if err != nil { - return nil, err - } - spec := bytes.NewReader(data) - specD, err := s.writeContent(ctx, images.MediaTypeContainerd1CheckpointConfig, filepath.Join(image, "spec"), spec) - if err != nil { - return nil, errdefs.ToGRPC(err) - } - return &api.CheckpointTaskResponse{ - Descriptors: []*types.Descriptor{ - cp, - specD, - }, - }, nil + return s.local.Checkpoint(ctx, r) } func (s *service) Update(ctx context.Context, r *api.UpdateTaskRequest) (*ptypes.Empty, error) { - t, err := s.getTask(ctx, r.ContainerID) - if err != nil { - return nil, err - } - if err := t.Update(ctx, r.Resources); err != nil { - return nil, errdefs.ToGRPC(err) - } - return empty, nil + return s.local.Update(ctx, r) } func (s *service) Metrics(ctx context.Context, r *api.MetricsRequest) (*api.MetricsResponse, error) { - filter, err := filters.ParseAll(r.Filters...) 
- if err != nil { - return nil, err - } - var resp api.MetricsResponse - for _, r := range s.runtimes { - tasks, err := r.Tasks(ctx) - if err != nil { - return nil, err - } - getTasksMetrics(ctx, filter, tasks, &resp) - } - return &resp, nil + return s.local.Metrics(ctx, r) } func (s *service) Wait(ctx context.Context, r *api.WaitRequest) (*api.WaitResponse, error) { - t, err := s.getTask(ctx, r.ContainerID) - if err != nil { - return nil, err - } - p := runtime.Process(t) - if r.ExecID != "" { - if p, err = t.Process(ctx, r.ExecID); err != nil { - return nil, errdefs.ToGRPC(err) - } - } - exit, err := p.Wait(ctx) - if err != nil { - return nil, errdefs.ToGRPC(err) - } - return &api.WaitResponse{ - ExitStatus: exit.Status, - ExitedAt: exit.Timestamp, - }, nil -} - -func getTasksMetrics(ctx context.Context, filter filters.Filter, tasks []runtime.Task, r *api.MetricsResponse) { - for _, tk := range tasks { - if !filter.Match(filters.AdapterFunc(func(fieldpath []string) (string, bool) { - t := tk - switch fieldpath[0] { - case "id": - return t.ID(), true - case "namespace": - return t.Info().Namespace, true - case "runtime": - return t.Info().Runtime, true - } - return "", false - })) { - continue - } - - collected := time.Now() - metrics, err := tk.Metrics(ctx) - if err != nil { - if !errdefs.IsNotFound(err) { - log.G(ctx).WithError(err).Errorf("collecting metrics for %s", tk.ID()) - } - continue - } - data, err := typeurl.MarshalAny(metrics) - if err != nil { - log.G(ctx).WithError(err).Errorf("marshal metrics for %s", tk.ID()) - continue - } - r.Metrics = append(r.Metrics, &types.Metric{ - ID: tk.ID(), - Timestamp: collected, - Data: data, - }) - } -} - -func (s *service) writeContent(ctx context.Context, mediaType, ref string, r io.Reader) (*types.Descriptor, error) { - writer, err := s.store.Writer(ctx, ref, 0, "") - if err != nil { - return nil, err - } - defer writer.Close() - size, err := io.Copy(writer, r) - if err != nil { - return nil, err - } - if err := writer.Commit(ctx, 0, ""); err != nil { - return nil, err - } - return &types.Descriptor{ - MediaType: mediaType, - Digest: writer.Digest(), - Size_: size, - }, nil -} - -func (s *service) getContainer(ctx context.Context, id string) (*containers.Container, error) { - var container containers.Container - if err := s.db.View(func(tx *bolt.Tx) error { - store := metadata.NewContainerStore(tx) - var err error - container, err = store.Get(ctx, id) - return err - }); err != nil { - return nil, errdefs.ToGRPC(err) - } - return &container, nil -} - -func (s *service) getTask(ctx context.Context, id string) (runtime.Task, error) { - container, err := s.getContainer(ctx, id) - if err != nil { - return nil, err - } - return s.getTaskFromContainer(ctx, container) -} - -func (s *service) getTaskFromContainer(ctx context.Context, container *containers.Container) (runtime.Task, error) { - runtime, err := s.getRuntime(container.Runtime.Name) - if err != nil { - return nil, errdefs.ToGRPCf(err, "runtime for task %s", container.Runtime.Name) - } - t, err := runtime.Get(ctx, container.ID) - if err != nil { - return nil, status.Errorf(codes.NotFound, "task %v not found", container.ID) - } - return t, nil -} - -func (s *service) getRuntime(name string) (runtime.Runtime, error) { - runtime, ok := s.runtimes[name] - if !ok { - return nil, status.Errorf(codes.NotFound, "unknown runtime %q", name) - } - return runtime, nil + return s.local.Wait(ctx, r) } diff --git a/vendor/github.com/containerd/containerd/snapshots/overlay/check.go 
b/vendor/github.com/containerd/containerd/snapshots/overlay/check.go
new file mode 100644
index 000000000..6ec59b7ed
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/snapshots/overlay/check.go
@@ -0,0 +1,88 @@
+// +build linux
+
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package overlay
+
+import (
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+
+	"github.com/containerd/containerd/log"
+	"github.com/containerd/containerd/mount"
+	"github.com/containerd/continuity/fs"
+	"github.com/pkg/errors"
+)
+
+// supportsMultipleLowerDir checks if the system supports multiple lowerdirs,
+// which is required for the overlay snapshotter. On 4.x kernels, multiple lowerdirs
+// are always available (so this check isn't needed), and backported to RHEL and
+// CentOS 3.x kernels (3.10.0-693.el7.x86_64 and up). This function is to detect
+// support on those kernels, without doing a kernel version compare.
+//
+// Ported from moby overlay2.
+func supportsMultipleLowerDir(d string) error {
+	td, err := ioutil.TempDir(d, "multiple-lowerdir-check")
+	if err != nil {
+		return err
+	}
+	defer func() {
+		if err := os.RemoveAll(td); err != nil {
+			log.L.WithError(err).Warnf("Failed to remove check directory %v", td)
+		}
+	}()
+
+	for _, dir := range []string{"lower1", "lower2", "upper", "work", "merged"} {
+		if err := os.Mkdir(filepath.Join(td, dir), 0755); err != nil {
+			return err
+		}
+	}
+
+	opts := fmt.Sprintf("lowerdir=%s:%s,upperdir=%s,workdir=%s", filepath.Join(td, "lower2"), filepath.Join(td, "lower1"), filepath.Join(td, "upper"), filepath.Join(td, "work"))
+	m := mount.Mount{
+		Type:    "overlay",
+		Source:  "overlay",
+		Options: []string{opts},
+	}
+	dest := filepath.Join(td, "merged")
+	if err := m.Mount(dest); err != nil {
+		return errors.Wrap(err, "failed to mount overlay")
+	}
+	if err := mount.UnmountAll(dest, 0); err != nil {
+		log.L.WithError(err).Warnf("Failed to unmount check directory %v", dest)
+	}
+	return nil
+}
+
+// Supported returns nil when the overlayfs is functional on the system with the root directory.
+// Supported is not called during plugin initialization, but exposed for downstream projects which use
+// this snapshotter as a library.
+func Supported(root string) error {
+	if err := os.MkdirAll(root, 0700); err != nil {
+		return err
+	}
+	supportsDType, err := fs.SupportsDType(root)
+	if err != nil {
+		return err
+	}
+	if !supportsDType {
+		return fmt.Errorf("%s does not support d_type.
If the backing filesystem is xfs, please reformat with ftype=1 to enable d_type support", root) + } + return supportsMultipleLowerDir(root) +} diff --git a/vendor/github.com/containerd/containerd/vendor.conf b/vendor/github.com/containerd/containerd/vendor.conf index 0639c6399..55165f278 100644 --- a/vendor/github.com/containerd/containerd/vendor.conf +++ b/vendor/github.com/containerd/containerd/vendor.conf @@ -1,10 +1,14 @@ -github.com/coreos/go-systemd 48702e0da86bd25e76cfef347e2adeb434a0d0a6 -github.com/containerd/go-runc 4f6e87ae043f859a38255247b49c9abc262d002f -github.com/containerd/console 84eeaae905fa414d03e07bcd6c8d3f19e7cf180e -github.com/containerd/cgroups c0710c92e8b3a44681d1321dcfd1360fc5c6c089 +github.com/containerd/go-runc bcb223a061a3dd7de1a89c0b402a60f4dd9bd307 +github.com/containerd/console cb7008ab3d8359b78c5f464cb7cf160107ad5925 +github.com/containerd/cgroups fe281dd265766145e943a034aa41086474ea6130 github.com/containerd/typeurl f6943554a7e7e88b3c14aad190bf05932da84788 +github.com/containerd/fifo 3d5202aec260678c48179c56f40e6f38a095738c +github.com/containerd/btrfs 2e1aa0ddf94f91fa282b6ed87c23bf0d64911244 +github.com/containerd/continuity d8fb8589b0e8e85b8c8bbaa8840226d0dfeb7371 +github.com/coreos/go-systemd 48702e0da86bd25e76cfef347e2adeb434a0d0a6 github.com/docker/go-metrics 4ea375f7759c82740c893fc030bc37088d2ec098 github.com/docker/go-events 9461782956ad83b30282bf90e31fa6a70c255ba9 +github.com/docker/go-units v0.3.1 github.com/godbus/dbus c7fdd8b5cd55e87b4e1f4e372cdb1db61dd6c66f github.com/prometheus/client_golang f4fb1b73fb099f396a7f0036bf86aa8def4ed823 github.com/prometheus/client_model 99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c @@ -12,15 +16,12 @@ github.com/prometheus/common 89604d197083d4781071d3c65855d24ecfb0a563 github.com/prometheus/procfs cb4147076ac75738c9a7d279075a253c0cc5acbd github.com/beorn7/perks 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9 github.com/matttproud/golang_protobuf_extensions v1.0.0 -github.com/docker/go-units v0.3.1 github.com/gogo/protobuf v0.5 github.com/golang/protobuf 1643683e1b54a9e88ad26d98f81400c8c9d9f4f9 github.com/opencontainers/runtime-spec v1.0.1 github.com/opencontainers/runc a618ab5a0186905949ee463dbb762c3d23e12a80 github.com/sirupsen/logrus v1.0.0 -github.com/containerd/btrfs cc52c4dea2ce11a44e6639e561bb5c2af9ada9e3 github.com/pmezard/go-difflib v1.0.0 -github.com/containerd/fifo fbfb6a11ec671efbe94ad1c12c2e98773f19e1e6 github.com/urfave/cli 7bc6a0acffa589f415f88aca16cc1de5ffd66f9c golang.org/x/net 7dcfb8076726a3fdd9353b6b8a1f1b6be6811bd6 google.golang.org/grpc v1.7.4 @@ -28,7 +29,6 @@ github.com/pkg/errors v0.8.0 github.com/opencontainers/go-digest 21dfd564fd89c944783d00d069f33e3e7123c448 golang.org/x/sys 314a259e304ff91bd6985da2a7149bbf91237993 https://github.com/golang/sys github.com/opencontainers/image-spec v1.0.1 -github.com/containerd/continuity d8fb8589b0e8e85b8c8bbaa8840226d0dfeb7371 golang.org/x/sync 450f422ab23cf9881c94e2db30cac0eb1b7cf80c github.com/BurntSushi/toml a368813c5e648fee92e5f6c30e3944ff9d5e8895 github.com/grpc-ecosystem/go-grpc-prometheus 6b7015e65d366bf3f19b2b2a000a831940f0f7e0 @@ -37,13 +37,12 @@ github.com/Microsoft/hcsshim v0.6.7 github.com/boltdb/bolt e9cf4fae01b5a8ff89d0ec6b32f0d9c9f79aefdd google.golang.org/genproto d80a6e20e776b0b17a324d0ba1ab50a39c8e8944 golang.org/x/text 19e51611da83d6be54ddafce4a4af510cb3e9ea4 -github.com/dmcgowan/go-tar go1.10 github.com/stevvooe/ttrpc d4528379866b0ce7e9d71f3eb96f0582fc374577 github.com/syndtr/gocapability db04d3cc01c8b54962a58ec7e491717d06cfcc16 
github.com/gotestyourself/gotestyourself 44dbf532bbf5767611f6f2a61bded572e337010a github.com/google/go-cmp v0.1.0 # cri dependencies -github.com/containerd/cri-containerd c9081b2ec0eefc799f0f1caabbea29d516c72c44 +github.com/containerd/cri-containerd dcc278739fb31c5369f927c69716275c084c3d53 https://github.com/Random-Liu/cri-containerd.git github.com/blang/semver v3.1.0 github.com/containernetworking/cni v0.6.0 github.com/containernetworking/plugins v0.6.0 diff --git a/vendor/github.com/dmcgowan/go-tar/LICENSE b/vendor/github.com/dmcgowan/go-tar/LICENSE deleted file mode 100644 index 6a66aea5e..000000000 --- a/vendor/github.com/dmcgowan/go-tar/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/dmcgowan/go-tar/common.go b/vendor/github.com/dmcgowan/go-tar/common.go deleted file mode 100644 index e3609536c..000000000 --- a/vendor/github.com/dmcgowan/go-tar/common.go +++ /dev/null @@ -1,796 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package tar implements access to tar archives. -// -// Tape archives (tar) are a file format for storing a sequence of files that -// can be read and written in a streaming manner. -// This package aims to cover most variations of the format, -// including those produced by GNU and BSD tar tools. -package tar - -import ( - "errors" - "fmt" - "io" - "math" - "os" - "path" - "reflect" - "strconv" - "strings" - "time" -) - -// BUG: Use of the Uid and Gid fields in Header could overflow on 32-bit -// architectures. If a large value is encountered when decoding, the result -// stored in Header will be the truncated version. 
- -var ( - ErrHeader = errors.New("tar: invalid tar header") - ErrWriteTooLong = errors.New("tar: write too long") - ErrFieldTooLong = errors.New("tar: header field too long") - ErrWriteAfterClose = errors.New("tar: write after close") - errMissData = errors.New("tar: sparse file references non-existent data") - errUnrefData = errors.New("tar: sparse file contains unreferenced data") - errWriteHole = errors.New("tar: write non-NUL byte in sparse hole") -) - -type headerError []string - -func (he headerError) Error() string { - const prefix = "tar: cannot encode header" - var ss []string - for _, s := range he { - if s != "" { - ss = append(ss, s) - } - } - if len(ss) == 0 { - return prefix - } - return fmt.Sprintf("%s: %v", prefix, strings.Join(ss, "; and ")) -} - -// Type flags for Header.Typeflag. -const ( - // Type '0' indicates a regular file. - TypeReg = '0' - TypeRegA = '\x00' // For legacy support; use TypeReg instead - - // Type '1' to '6' are header-only flags and may not have a data body. - TypeLink = '1' // Hard link - TypeSymlink = '2' // Symbolic link - TypeChar = '3' // Character device node - TypeBlock = '4' // Block device node - TypeDir = '5' // Directory - TypeFifo = '6' // FIFO node - - // Type '7' is reserved. - TypeCont = '7' - - // Type 'x' is used by the PAX format to store key-value records that - // are only relevant to the next file. - // This package transparently handles these types. - TypeXHeader = 'x' - - // Type 'g' is used by the PAX format to store key-value records that - // are relevant to all subsequent files. - // This package only supports parsing and composing such headers, - // but does not currently support persisting the global state across files. - TypeXGlobalHeader = 'g' - - // Type 'S' indicates a sparse file in the GNU format. - // Header.SparseHoles should be populated when using this type. - TypeGNUSparse = 'S' - - // Types 'L' and 'K' are used by the GNU format for a meta file - // used to store the path or link name for the next file. - // This package transparently handles these types. - TypeGNULongName = 'L' - TypeGNULongLink = 'K' -) - -// Keywords for PAX extended header records. -const ( - paxNone = "" // Indicates that no PAX key is suitable - paxPath = "path" - paxLinkpath = "linkpath" - paxSize = "size" - paxUid = "uid" - paxGid = "gid" - paxUname = "uname" - paxGname = "gname" - paxMtime = "mtime" - paxAtime = "atime" - paxCtime = "ctime" // Removed from later revision of PAX spec, but was valid - paxCharset = "charset" // Currently unused - paxComment = "comment" // Currently unused - - paxSchilyXattr = "SCHILY.xattr." - - // Keywords for GNU sparse files in a PAX extended header. - paxGNUSparse = "GNU.sparse." - paxGNUSparseNumBlocks = "GNU.sparse.numblocks" - paxGNUSparseOffset = "GNU.sparse.offset" - paxGNUSparseNumBytes = "GNU.sparse.numbytes" - paxGNUSparseMap = "GNU.sparse.map" - paxGNUSparseName = "GNU.sparse.name" - paxGNUSparseMajor = "GNU.sparse.major" - paxGNUSparseMinor = "GNU.sparse.minor" - paxGNUSparseSize = "GNU.sparse.size" - paxGNUSparseRealSize = "GNU.sparse.realsize" -) - -// basicKeys is a set of the PAX keys for which we have built-in support. -// This does not contain "charset" or "comment", which are both PAX-specific, -// so adding them as first-class features of Header is unlikely. -// Users can use the PAXRecords field to set it themselves. 
-var basicKeys = map[string]bool{ - paxPath: true, paxLinkpath: true, paxSize: true, paxUid: true, paxGid: true, - paxUname: true, paxGname: true, paxMtime: true, paxAtime: true, paxCtime: true, -} - -// A Header represents a single header in a tar archive. -// Some fields may not be populated. -// -// For forward compatibility, users that retrieve a Header from Reader.Next, -// mutate it in some ways, and then pass it back to Writer.WriteHeader -// should do so by creating a new Header and copying the fields -// that they are interested in preserving. -type Header struct { - Typeflag byte // Type of header entry (should be TypeReg for most files) - - Name string // Name of file entry - Linkname string // Target name of link (valid for TypeLink or TypeSymlink) - - Size int64 // Logical file size in bytes - Mode int64 // Permission and mode bits - Uid int // User ID of owner - Gid int // Group ID of owner - Uname string // User name of owner - Gname string // Group name of owner - - // If the Format is unspecified, then Writer.WriteHeader rounds ModTime - // to the nearest second and ignores the AccessTime and ChangeTime fields. - // - // To use AccessTime or ChangeTime, specify the Format as PAX or GNU. - // To use sub-second resolution, specify the Format as PAX. - ModTime time.Time // Modification time - AccessTime time.Time // Access time (requires either PAX or GNU support) - ChangeTime time.Time // Change time (requires either PAX or GNU support) - - Devmajor int64 // Major device number (valid for TypeChar or TypeBlock) - Devminor int64 // Minor device number (valid for TypeChar or TypeBlock) - - // SparseHoles represents a sequence of holes in a sparse file. - // - // A file is sparse if len(SparseHoles) > 0 or Typeflag is TypeGNUSparse. - // If TypeGNUSparse is set, then the format is GNU, otherwise - // the format is PAX (by using GNU-specific PAX records). - // - // A sparse file consists of fragments of data, intermixed with holes - // (described by this field). A hole is semantically a block of NUL-bytes, - // but does not actually exist within the tar file. - // The holes must be sorted in ascending order, - // not overlap with each other, and not extend past the specified Size. - SparseHoles []SparseEntry - - // Xattrs stores extended attributes as PAX records under the - // "SCHILY.xattr." namespace. - // - // The following are semantically equivalent: - // h.Xattrs[key] = value - // h.PAXRecords["SCHILY.xattr."+key] = value - // - // When Writer.WriteHeader is called, the contents of Xattrs will take - // precedence over those in PAXRecords. - // - // Deprecated: Use PAXRecords instead. - Xattrs map[string]string - - // PAXRecords is a map of PAX extended header records. - // - // User-defined records should have keys of the following form: - // VENDOR.keyword - // Where VENDOR is some namespace in all uppercase, and keyword may - // not contain the '=' character (e.g., "GOLANG.pkg.version"). - // The key and value should be non-empty UTF-8 strings. - // - // When Writer.WriteHeader is called, PAX records derived from the - // the other fields in Header take precedence over PAXRecords. - PAXRecords map[string]string - - // Format specifies the format of the tar header. - // - // This is set by Reader.Next as a best-effort guess at the format. - // Since the Reader liberally reads some non-compliant files, - // it is possible for this to be FormatUnknown. 
- // - // If the format is unspecified when Writer.WriteHeader is called, - // then it uses the first format (in the order of USTAR, PAX, GNU) - // capable of encoding this Header (see Format). - Format Format -} - -// SparseEntry represents a Length-sized fragment at Offset in the file. -type SparseEntry struct{ Offset, Length int64 } - -func (s SparseEntry) endOffset() int64 { return s.Offset + s.Length } - -// A sparse file can be represented as either a sparseDatas or a sparseHoles. -// As long as the total size is known, they are equivalent and one can be -// converted to the other form and back. The various tar formats with sparse -// file support represent sparse files in the sparseDatas form. That is, they -// specify the fragments in the file that has data, and treat everything else as -// having zero bytes. As such, the encoding and decoding logic in this package -// deals with sparseDatas. -// -// However, the external API uses sparseHoles instead of sparseDatas because the -// zero value of sparseHoles logically represents a normal file (i.e., there are -// no holes in it). On the other hand, the zero value of sparseDatas implies -// that the file has no data in it, which is rather odd. -// -// As an example, if the underlying raw file contains the 10-byte data: -// var compactFile = "abcdefgh" -// -// And the sparse map has the following entries: -// var spd sparseDatas = []sparseEntry{ -// {Offset: 2, Length: 5}, // Data fragment for 2..6 -// {Offset: 18, Length: 3}, // Data fragment for 18..20 -// } -// var sph sparseHoles = []SparseEntry{ -// {Offset: 0, Length: 2}, // Hole fragment for 0..1 -// {Offset: 7, Length: 11}, // Hole fragment for 7..17 -// {Offset: 21, Length: 4}, // Hole fragment for 21..24 -// } -// -// Then the content of the resulting sparse file with a Header.Size of 25 is: -// var sparseFile = "\x00"*2 + "abcde" + "\x00"*11 + "fgh" + "\x00"*4 -type ( - sparseDatas []SparseEntry - sparseHoles []SparseEntry -) - -// validateSparseEntries reports whether sp is a valid sparse map. -// It does not matter whether sp represents data fragments or hole fragments. -func validateSparseEntries(sp []SparseEntry, size int64) bool { - // Validate all sparse entries. These are the same checks as performed by - // the BSD tar utility. - if size < 0 { - return false - } - var pre SparseEntry - for _, cur := range sp { - switch { - case cur.Offset < 0 || cur.Length < 0: - return false // Negative values are never okay - case cur.Offset > math.MaxInt64-cur.Length: - return false // Integer overflow with large length - case cur.endOffset() > size: - return false // Region extends beyond the actual size - case pre.endOffset() > cur.Offset: - return false // Regions cannot overlap and must be in order - } - pre = cur - } - return true -} - -// alignSparseEntries mutates src and returns dst where each fragment's -// starting offset is aligned up to the nearest block edge, and each -// ending offset is aligned down to the nearest block edge. -// -// Even though the Go tar Reader and the BSD tar utility can handle entries -// with arbitrary offsets and lengths, the GNU tar utility can only handle -// offsets and lengths that are multiples of blockSize. 
-func alignSparseEntries(src []SparseEntry, size int64) []SparseEntry { - dst := src[:0] - for _, s := range src { - pos, end := s.Offset, s.endOffset() - pos += blockPadding(+pos) // Round-up to nearest blockSize - if end != size { - end -= blockPadding(-end) // Round-down to nearest blockSize - } - if pos < end { - dst = append(dst, SparseEntry{Offset: pos, Length: end - pos}) - } - } - return dst -} - -// invertSparseEntries converts a sparse map from one form to the other. -// If the input is sparseHoles, then it will output sparseDatas and vice-versa. -// The input must have been already validated. -// -// This function mutates src and returns a normalized map where: -// * adjacent fragments are coalesced together -// * only the last fragment may be empty -// * the endOffset of the last fragment is the total size -func invertSparseEntries(src []SparseEntry, size int64) []SparseEntry { - dst := src[:0] - var pre SparseEntry - for _, cur := range src { - if cur.Length == 0 { - continue // Skip empty fragments - } - pre.Length = cur.Offset - pre.Offset - if pre.Length > 0 { - dst = append(dst, pre) // Only add non-empty fragments - } - pre.Offset = cur.endOffset() - } - pre.Length = size - pre.Offset // Possibly the only empty fragment - return append(dst, pre) -} - -// fileState tracks the number of logical (includes sparse holes) and physical -// (actual in tar archive) bytes remaining for the current file. -// -// Invariant: LogicalRemaining >= PhysicalRemaining -type fileState interface { - LogicalRemaining() int64 - PhysicalRemaining() int64 -} - -// allowedFormats determines which formats can be used. -// The value returned is the logical OR of multiple possible formats. -// If the value is FormatUnknown, then the input Header cannot be encoded -// and an error is returned explaining why. -// -// As a by-product of checking the fields, this function returns paxHdrs, which -// contain all fields that could not be directly encoded. -// A value receiver ensures that this method does not mutate the source Header. -func (h Header) allowedFormats() (format Format, paxHdrs map[string]string, err error) { - format = FormatUSTAR | FormatPAX | FormatGNU - paxHdrs = make(map[string]string) - - var whyNoUSTAR, whyNoPAX, whyNoGNU string - var preferPAX bool // Prefer PAX over USTAR - verifyString := func(s string, size int, name, paxKey string) { - // NUL-terminator is optional for path and linkpath. - // Technically, it is required for uname and gname, - // but neither GNU nor BSD tar checks for it. 
- tooLong := len(s) > size - allowLongGNU := paxKey == paxPath || paxKey == paxLinkpath - if hasNUL(s) || (tooLong && !allowLongGNU) { - whyNoGNU = fmt.Sprintf("GNU cannot encode %s=%q", name, s) - format.mustNotBe(FormatGNU) - } - if !isASCII(s) || tooLong { - canSplitUSTAR := paxKey == paxPath - if _, _, ok := splitUSTARPath(s); !canSplitUSTAR || !ok { - whyNoUSTAR = fmt.Sprintf("USTAR cannot encode %s=%q", name, s) - format.mustNotBe(FormatUSTAR) - } - if paxKey == paxNone { - whyNoPAX = fmt.Sprintf("PAX cannot encode %s=%q", name, s) - format.mustNotBe(FormatPAX) - } else { - paxHdrs[paxKey] = s - } - } - if v, ok := h.PAXRecords[paxKey]; ok && v == s { - paxHdrs[paxKey] = v - } - } - verifyNumeric := func(n int64, size int, name, paxKey string) { - if !fitsInBase256(size, n) { - whyNoGNU = fmt.Sprintf("GNU cannot encode %s=%d", name, n) - format.mustNotBe(FormatGNU) - } - if !fitsInOctal(size, n) { - whyNoUSTAR = fmt.Sprintf("USTAR cannot encode %s=%d", name, n) - format.mustNotBe(FormatUSTAR) - if paxKey == paxNone { - whyNoPAX = fmt.Sprintf("PAX cannot encode %s=%d", name, n) - format.mustNotBe(FormatPAX) - } else { - paxHdrs[paxKey] = strconv.FormatInt(n, 10) - } - } - if v, ok := h.PAXRecords[paxKey]; ok && v == strconv.FormatInt(n, 10) { - paxHdrs[paxKey] = v - } - } - verifyTime := func(ts time.Time, size int, name, paxKey string) { - if ts.IsZero() { - return // Always okay - } - if !fitsInBase256(size, ts.Unix()) { - whyNoGNU = fmt.Sprintf("GNU cannot encode %s=%v", name, ts) - format.mustNotBe(FormatGNU) - } - isMtime := paxKey == paxMtime - fitsOctal := fitsInOctal(size, ts.Unix()) - if (isMtime && !fitsOctal) || !isMtime { - whyNoUSTAR = fmt.Sprintf("USTAR cannot encode %s=%v", name, ts) - format.mustNotBe(FormatUSTAR) - } - needsNano := ts.Nanosecond() != 0 - if !isMtime || !fitsOctal || needsNano { - preferPAX = true // USTAR may truncate sub-second measurements - if paxKey == paxNone { - whyNoPAX = fmt.Sprintf("PAX cannot encode %s=%v", name, ts) - format.mustNotBe(FormatPAX) - } else { - paxHdrs[paxKey] = formatPAXTime(ts) - } - } - if v, ok := h.PAXRecords[paxKey]; ok && v == formatPAXTime(ts) { - paxHdrs[paxKey] = v - } - } - - // Check basic fields. - var blk block - v7 := blk.V7() - ustar := blk.USTAR() - gnu := blk.GNU() - verifyString(h.Name, len(v7.Name()), "Name", paxPath) - verifyString(h.Linkname, len(v7.LinkName()), "Linkname", paxLinkpath) - verifyString(h.Uname, len(ustar.UserName()), "Uname", paxUname) - verifyString(h.Gname, len(ustar.GroupName()), "Gname", paxGname) - verifyNumeric(h.Mode, len(v7.Mode()), "Mode", paxNone) - verifyNumeric(int64(h.Uid), len(v7.UID()), "Uid", paxUid) - verifyNumeric(int64(h.Gid), len(v7.GID()), "Gid", paxGid) - verifyNumeric(h.Size, len(v7.Size()), "Size", paxSize) - verifyNumeric(h.Devmajor, len(ustar.DevMajor()), "Devmajor", paxNone) - verifyNumeric(h.Devminor, len(ustar.DevMinor()), "Devminor", paxNone) - verifyTime(h.ModTime, len(v7.ModTime()), "ModTime", paxMtime) - verifyTime(h.AccessTime, len(gnu.AccessTime()), "AccessTime", paxAtime) - verifyTime(h.ChangeTime, len(gnu.ChangeTime()), "ChangeTime", paxCtime) - - // Check for header-only types. - var whyOnlyPAX, whyOnlyGNU string - switch h.Typeflag { - case TypeReg, TypeChar, TypeBlock, TypeFifo, TypeGNUSparse: - // Exclude TypeLink and TypeSymlink, since they may reference directories. 
- if strings.HasSuffix(h.Name, "/") { - return FormatUnknown, nil, headerError{"filename may not have trailing slash"} - } - case TypeXHeader, TypeGNULongName, TypeGNULongLink: - return FormatUnknown, nil, headerError{"cannot manually encode TypeXHeader, TypeGNULongName, or TypeGNULongLink headers"} - case TypeXGlobalHeader: - if !reflect.DeepEqual(h, Header{Typeflag: h.Typeflag, Xattrs: h.Xattrs, PAXRecords: h.PAXRecords, Format: h.Format}) { - return FormatUnknown, nil, headerError{"only PAXRecords may be set for TypeXGlobalHeader"} - } - whyOnlyPAX = "only PAX supports TypeXGlobalHeader" - format.mayOnlyBe(FormatPAX) - } - if !isHeaderOnlyType(h.Typeflag) && h.Size < 0 { - return FormatUnknown, nil, headerError{"negative size on header-only type"} - } - - // Check PAX records. - if len(h.Xattrs) > 0 { - for k, v := range h.Xattrs { - paxHdrs[paxSchilyXattr+k] = v - } - whyOnlyPAX = "only PAX supports Xattrs" - format.mayOnlyBe(FormatPAX) - } - if len(h.PAXRecords) > 0 { - for k, v := range h.PAXRecords { - switch _, exists := paxHdrs[k]; { - case exists: - continue // Do not overwrite existing records - case h.Typeflag == TypeXGlobalHeader: - paxHdrs[k] = v // Copy all records - case !basicKeys[k] && !strings.HasPrefix(k, paxGNUSparse): - paxHdrs[k] = v // Ignore local records that may conflict - } - } - whyOnlyPAX = "only PAX supports PAXRecords" - format.mayOnlyBe(FormatPAX) - } - for k, v := range paxHdrs { - if !validPAXRecord(k, v) { - return FormatUnknown, nil, headerError{fmt.Sprintf("invalid PAX record: %q", k+" = "+v)} - } - } - - // Check sparse files. - if len(h.SparseHoles) > 0 || h.Typeflag == TypeGNUSparse { - if isHeaderOnlyType(h.Typeflag) { - return FormatUnknown, nil, headerError{"header-only type cannot be sparse"} - } - if !validateSparseEntries(h.SparseHoles, h.Size) { - return FormatUnknown, nil, headerError{"invalid sparse holes"} - } - if h.Typeflag == TypeGNUSparse { - whyOnlyGNU = "only GNU supports TypeGNUSparse" - format.mayOnlyBe(FormatGNU) - } else { - whyNoGNU = "GNU supports sparse files only with TypeGNUSparse" - format.mustNotBe(FormatGNU) - } - whyNoUSTAR = "USTAR does not support sparse files" - format.mustNotBe(FormatUSTAR) - } - - // Check desired format. - if wantFormat := h.Format; wantFormat != FormatUnknown { - if wantFormat.has(FormatPAX) && !preferPAX { - wantFormat.mayBe(FormatUSTAR) // PAX implies USTAR allowed too - } - format.mayOnlyBe(wantFormat) // Set union of formats allowed and format wanted - } - if format == FormatUnknown { - switch h.Format { - case FormatUSTAR: - err = headerError{"Format specifies USTAR", whyNoUSTAR, whyOnlyPAX, whyOnlyGNU} - case FormatPAX: - err = headerError{"Format specifies PAX", whyNoPAX, whyOnlyGNU} - case FormatGNU: - err = headerError{"Format specifies GNU", whyNoGNU, whyOnlyPAX} - default: - err = headerError{whyNoUSTAR, whyNoPAX, whyNoGNU, whyOnlyPAX, whyOnlyGNU} - } - } - return format, paxHdrs, err -} - -var sysSparseDetect func(f *os.File) (sparseHoles, error) -var sysSparsePunch func(f *os.File, sph sparseHoles) error - -// DetectSparseHoles searches for holes within f to populate SparseHoles -// on supported operating systems and filesystems. -// The file offset is cleared to zero. -// -// When packing a sparse file, DetectSparseHoles should be called prior to -// serializing the header to the archive with Writer.WriteHeader. 
-func (h *Header) DetectSparseHoles(f *os.File) (err error) { - defer func() { - if _, serr := f.Seek(0, io.SeekStart); err == nil { - err = serr - } - }() - - h.SparseHoles = nil - if sysSparseDetect != nil { - sph, err := sysSparseDetect(f) - h.SparseHoles = sph - return err - } - return nil -} - -// PunchSparseHoles destroys the contents of f, and prepares a sparse file -// (on supported operating systems and filesystems) -// with holes punched according to SparseHoles. -// The file offset is cleared to zero. -// -// When extracting a sparse file, PunchSparseHoles should be called prior to -// populating the content of a file with Reader.WriteTo. -func (h *Header) PunchSparseHoles(f *os.File) (err error) { - defer func() { - if _, serr := f.Seek(0, io.SeekStart); err == nil { - err = serr - } - }() - - if err := f.Truncate(0); err != nil { - return err - } - - var size int64 - if len(h.SparseHoles) > 0 { - size = h.SparseHoles[len(h.SparseHoles)-1].endOffset() - } - if !validateSparseEntries(h.SparseHoles, size) { - return errors.New("tar: invalid sparse holes") - } - - if size == 0 { - return nil // For non-sparse files, do nothing (other than Truncate) - } - if sysSparsePunch != nil { - return sysSparsePunch(f, h.SparseHoles) - } - return f.Truncate(size) -} - -// FileInfo returns an os.FileInfo for the Header. -func (h *Header) FileInfo() os.FileInfo { - return headerFileInfo{h} -} - -// headerFileInfo implements os.FileInfo. -type headerFileInfo struct { - h *Header -} - -func (fi headerFileInfo) Size() int64 { return fi.h.Size } -func (fi headerFileInfo) IsDir() bool { return fi.Mode().IsDir() } -func (fi headerFileInfo) ModTime() time.Time { return fi.h.ModTime } -func (fi headerFileInfo) Sys() interface{} { return fi.h } - -// Name returns the base name of the file. -func (fi headerFileInfo) Name() string { - if fi.IsDir() { - return path.Base(path.Clean(fi.h.Name)) - } - return path.Base(fi.h.Name) -} - -// Mode returns the permission and mode bits for the headerFileInfo. -func (fi headerFileInfo) Mode() (mode os.FileMode) { - // Set file permission bits. - mode = os.FileMode(fi.h.Mode).Perm() - - // Set setuid, setgid and sticky bits. - if fi.h.Mode&c_ISUID != 0 { - mode |= os.ModeSetuid - } - if fi.h.Mode&c_ISGID != 0 { - mode |= os.ModeSetgid - } - if fi.h.Mode&c_ISVTX != 0 { - mode |= os.ModeSticky - } - - // Set file mode bits; clear perm, setuid, setgid, and sticky bits. - switch m := os.FileMode(fi.h.Mode) &^ 07777; m { - case c_ISDIR: - mode |= os.ModeDir - case c_ISFIFO: - mode |= os.ModeNamedPipe - case c_ISLNK: - mode |= os.ModeSymlink - case c_ISBLK: - mode |= os.ModeDevice - case c_ISCHR: - mode |= os.ModeDevice - mode |= os.ModeCharDevice - case c_ISSOCK: - mode |= os.ModeSocket - } - - switch fi.h.Typeflag { - case TypeSymlink: - mode |= os.ModeSymlink - case TypeChar: - mode |= os.ModeDevice - mode |= os.ModeCharDevice - case TypeBlock: - mode |= os.ModeDevice - case TypeDir: - mode |= os.ModeDir - case TypeFifo: - mode |= os.ModeNamedPipe - } - - return mode -} - -// sysStat, if non-nil, populates h from system-dependent fields of fi. -var sysStat func(fi os.FileInfo, h *Header) error - -const ( - // Mode constants from the USTAR spec: - // See http://pubs.opengroup.org/onlinepubs/9699919799/utilities/pax.html#tag_20_92_13_06 - c_ISUID = 04000 // Set uid - c_ISGID = 02000 // Set gid - c_ISVTX = 01000 // Save text (sticky bit) - - // Common Unix mode constants; these are not defined in any common tar standard. 
- // Header.FileInfo understands these, but FileInfoHeader will never produce these. - c_ISDIR = 040000 // Directory - c_ISFIFO = 010000 // FIFO - c_ISREG = 0100000 // Regular file - c_ISLNK = 0120000 // Symbolic link - c_ISBLK = 060000 // Block special file - c_ISCHR = 020000 // Character special file - c_ISSOCK = 0140000 // Socket -) - -// FileInfoHeader creates a partially-populated Header from fi. -// If fi describes a symlink, FileInfoHeader records link as the link target. -// If fi describes a directory, a slash is appended to the name. -// -// Since os.FileInfo's Name method only returns the base name of -// the file it describes, it may be necessary to modify Header.Name -// to provide the full path name of the file. -// -// This function does not populate Header.SparseHoles; -// for sparse file support, additionally call Header.DetectSparseHoles. -func FileInfoHeader(fi os.FileInfo, link string) (*Header, error) { - if fi == nil { - return nil, errors.New("tar: FileInfo is nil") - } - fm := fi.Mode() - h := &Header{ - Name: fi.Name(), - ModTime: fi.ModTime(), - Mode: int64(fm.Perm()), // or'd with c_IS* constants later - } - switch { - case fm.IsRegular(): - h.Typeflag = TypeReg - h.Size = fi.Size() - case fi.IsDir(): - h.Typeflag = TypeDir - h.Name += "/" - case fm&os.ModeSymlink != 0: - h.Typeflag = TypeSymlink - h.Linkname = link - case fm&os.ModeDevice != 0: - if fm&os.ModeCharDevice != 0 { - h.Typeflag = TypeChar - } else { - h.Typeflag = TypeBlock - } - case fm&os.ModeNamedPipe != 0: - h.Typeflag = TypeFifo - case fm&os.ModeSocket != 0: - return nil, fmt.Errorf("tar: sockets not supported") - default: - return nil, fmt.Errorf("tar: unknown file mode %v", fm) - } - if fm&os.ModeSetuid != 0 { - h.Mode |= c_ISUID - } - if fm&os.ModeSetgid != 0 { - h.Mode |= c_ISGID - } - if fm&os.ModeSticky != 0 { - h.Mode |= c_ISVTX - } - // If possible, populate additional fields from OS-specific - // FileInfo fields. - if sys, ok := fi.Sys().(*Header); ok { - // This FileInfo came from a Header (not the OS). Use the - // original Header to populate all remaining fields. - h.Uid = sys.Uid - h.Gid = sys.Gid - h.Uname = sys.Uname - h.Gname = sys.Gname - h.AccessTime = sys.AccessTime - h.ChangeTime = sys.ChangeTime - if sys.Xattrs != nil { - h.Xattrs = make(map[string]string) - for k, v := range sys.Xattrs { - h.Xattrs[k] = v - } - } - if sys.Typeflag == TypeLink { - // hard link - h.Typeflag = TypeLink - h.Size = 0 - h.Linkname = sys.Linkname - } - if sys.SparseHoles != nil { - h.SparseHoles = append([]SparseEntry{}, sys.SparseHoles...) - } - if sys.PAXRecords != nil { - h.PAXRecords = make(map[string]string) - for k, v := range sys.PAXRecords { - h.PAXRecords[k] = v - } - } - } - if sysStat != nil { - return h, sysStat(fi, h) - } - return h, nil -} - -// isHeaderOnlyType checks if the given type flag is of the type that has no -// data section even if a size is specified. -func isHeaderOnlyType(flag byte) bool { - switch flag { - case TypeLink, TypeSymlink, TypeChar, TypeBlock, TypeDir, TypeFifo: - return true - default: - return false - } -} - -func min(a, b int64) int64 { - if a < b { - return a - } - return b -} diff --git a/vendor/github.com/dmcgowan/go-tar/format.go b/vendor/github.com/dmcgowan/go-tar/format.go deleted file mode 100644 index cf1289534..000000000 --- a/vendor/github.com/dmcgowan/go-tar/format.go +++ /dev/null @@ -1,301 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package tar - -import "strings" - -// Format represents the tar archive format. -// -// The original tar format was introduced in Unix V7. -// Since then, there have been multiple competing formats attempting to -// standardize or extend the V7 format to overcome its limitations. -// The most common formats are the USTAR, PAX, and GNU formats, -// each with their own advantages and limitations. -// -// The following table captures the capabilities of each format: -// -// | USTAR | PAX | GNU -// ------------------+--------+-----------+---------- -// Name | 256B | unlimited | unlimited -// Linkname | 100B | unlimited | unlimited -// Size | uint33 | unlimited | uint89 -// Mode | uint21 | uint21 | uint57 -// Uid/Gid | uint21 | unlimited | uint57 -// Uname/Gname | 32B | unlimited | 32B -// ModTime | uint33 | unlimited | int89 -// AccessTime | n/a | unlimited | int89 -// ChangeTime | n/a | unlimited | int89 -// Devmajor/Devminor | uint21 | uint21 | uint57 -// ------------------+--------+-----------+---------- -// string encoding | ASCII | UTF-8 | binary -// sub-second times | no | yes | no -// sparse files | no | yes | yes -// -// The table's upper portion shows the Header fields, where each format reports -// the maximum number of bytes allowed for each string field and -// the integer type used to store each numeric field -// (where timestamps are stored as the number of seconds since the Unix epoch). -// -// The table's lower portion shows specialized features of each format, -// such as supported string encodings, support for sub-second timestamps, -// or support for sparse files. -type Format int - -// Constants to identify various tar formats. -const ( - // Deliberately hide the meaning of constants from public API. - _ Format = (1 << iota) / 4 // Sequence of 0, 0, 1, 2, 4, 8, etc... - - // FormatUnknown indicates that the format is unknown. - FormatUnknown - - // The format of the original Unix V7 tar tool prior to standardization. - formatV7 - - // FormatUSTAR represents the USTAR header format defined in POSIX.1-1988. - // - // While this format is compatible with most tar readers, - // the format has several limitations making it unsuitable for some usages. - // Most notably, it cannot support sparse files, files larger than 8GiB, - // filenames larger than 256 characters, and non-ASCII filenames. - // - // Reference: - // http://pubs.opengroup.org/onlinepubs/9699919799/utilities/pax.html#tag_20_92_13_06 - FormatUSTAR - - // FormatPAX represents the PAX header format defined in POSIX.1-2001. - // - // PAX extends USTAR by writing a special file with Typeflag TypeXHeader - // preceding the original header. This file contains a set of key-value - // records, which are used to overcome USTAR's shortcomings, in addition to - // providing the ability to have sub-second resolution for timestamps. - // - // Some newer formats add their own extensions to PAX by defining their - // own keys and assigning certain semantic meaning to the associated values. - // For example, sparse file support in PAX is implemented using keys - // defined by the GNU manual (e.g., "GNU.sparse.map"). - // - // Reference: - // http://pubs.opengroup.org/onlinepubs/009695399/utilities/pax.html - FormatPAX - - // FormatGNU represents the GNU header format. - // - // The GNU header format is older than the USTAR and PAX standards and - // is not compatible with them. 
The GNU format supports - // arbitrary file sizes, filenames of arbitrary encoding and length, - // sparse files, and other features. - // - // It is recommended that PAX be chosen over GNU unless the target - // application can only parse GNU formatted archives. - // - // Reference: - // http://www.gnu.org/software/tar/manual/html_node/Standard.html - FormatGNU - - // Schily's tar format, which is incompatible with USTAR. - // This does not cover STAR extensions to the PAX format; these fall under - // the PAX format. - formatSTAR - - formatMax -) - -func (f Format) has(f2 Format) bool { return f&f2 != 0 } -func (f *Format) mayBe(f2 Format) { *f |= f2 } -func (f *Format) mayOnlyBe(f2 Format) { *f &= f2 } -func (f *Format) mustNotBe(f2 Format) { *f &^= f2 } - -var formatNames = map[Format]string{ - formatV7: "V7", FormatUSTAR: "USTAR", FormatPAX: "PAX", FormatGNU: "GNU", formatSTAR: "STAR", -} - -func (f Format) String() string { - var ss []string - for f2 := Format(1); f2 < formatMax; f2 <<= 1 { - if f.has(f2) { - ss = append(ss, formatNames[f2]) - } - } - switch len(ss) { - case 0: - return "" - case 1: - return ss[0] - default: - return "(" + strings.Join(ss, " | ") + ")" - } -} - -// Magics used to identify various formats. -const ( - magicGNU, versionGNU = "ustar ", " \x00" - magicUSTAR, versionUSTAR = "ustar\x00", "00" - trailerSTAR = "tar\x00" -) - -// Size constants from various tar specifications. -const ( - blockSize = 512 // Size of each block in a tar stream - nameSize = 100 // Max length of the name field in USTAR format - prefixSize = 155 // Max length of the prefix field in USTAR format -) - -// blockPadding computes the number of bytes needed to pad offset up to the -// nearest block edge where 0 <= n < blockSize. -func blockPadding(offset int64) (n int64) { - return -offset & (blockSize - 1) -} - -var zeroBlock block - -type block [blockSize]byte - -// Convert block to any number of formats. -func (b *block) V7() *headerV7 { return (*headerV7)(b) } -func (b *block) GNU() *headerGNU { return (*headerGNU)(b) } -func (b *block) STAR() *headerSTAR { return (*headerSTAR)(b) } -func (b *block) USTAR() *headerUSTAR { return (*headerUSTAR)(b) } -func (b *block) Sparse() sparseArray { return (sparseArray)(b[:]) } - -// GetFormat checks that the block is a valid tar header based on the checksum. -// It then attempts to guess the specific format based on magic values. -// If the checksum fails, then FormatUnknown is returned. -func (b *block) GetFormat() Format { - // Verify checksum. - var p parser - value := p.parseOctal(b.V7().Chksum()) - chksum1, chksum2 := b.ComputeChecksum() - if p.err != nil || (value != chksum1 && value != chksum2) { - return FormatUnknown - } - - // Guess the magic values. - magic := string(b.USTAR().Magic()) - version := string(b.USTAR().Version()) - trailer := string(b.STAR().Trailer()) - switch { - case magic == magicUSTAR && trailer == trailerSTAR: - return formatSTAR - case magic == magicUSTAR: - return FormatUSTAR | FormatPAX - case magic == magicGNU && version == versionGNU: - return FormatGNU - default: - return formatV7 - } -} - -// SetFormat writes the magic values necessary for specified format -// and then updates the checksum accordingly. -func (b *block) SetFormat(format Format) { - // Set the magic values. - switch { - case format.has(formatV7): - // Do nothing. 
- case format.has(FormatGNU): - copy(b.GNU().Magic(), magicGNU) - copy(b.GNU().Version(), versionGNU) - case format.has(formatSTAR): - copy(b.STAR().Magic(), magicUSTAR) - copy(b.STAR().Version(), versionUSTAR) - copy(b.STAR().Trailer(), trailerSTAR) - case format.has(FormatUSTAR | FormatPAX): - copy(b.USTAR().Magic(), magicUSTAR) - copy(b.USTAR().Version(), versionUSTAR) - default: - panic("invalid format") - } - - // Update checksum. - // This field is special in that it is terminated by a NULL then space. - var f formatter - field := b.V7().Chksum() - chksum, _ := b.ComputeChecksum() // Possible values are 256..128776 - f.formatOctal(field[:7], chksum) // Never fails since 128776 < 262143 - field[7] = ' ' -} - -// ComputeChecksum computes the checksum for the header block. -// POSIX specifies a sum of the unsigned byte values, but the Sun tar used -// signed byte values. -// We compute and return both. -func (b *block) ComputeChecksum() (unsigned, signed int64) { - for i, c := range b { - if 148 <= i && i < 156 { - c = ' ' // Treat the checksum field itself as all spaces. - } - unsigned += int64(uint8(c)) - signed += int64(int8(c)) - } - return unsigned, signed -} - -// Reset clears the block with all zeros. -func (b *block) Reset() { - *b = block{} -} - -type headerV7 [blockSize]byte - -func (h *headerV7) Name() []byte { return h[000:][:100] } -func (h *headerV7) Mode() []byte { return h[100:][:8] } -func (h *headerV7) UID() []byte { return h[108:][:8] } -func (h *headerV7) GID() []byte { return h[116:][:8] } -func (h *headerV7) Size() []byte { return h[124:][:12] } -func (h *headerV7) ModTime() []byte { return h[136:][:12] } -func (h *headerV7) Chksum() []byte { return h[148:][:8] } -func (h *headerV7) TypeFlag() []byte { return h[156:][:1] } -func (h *headerV7) LinkName() []byte { return h[157:][:100] } - -type headerGNU [blockSize]byte - -func (h *headerGNU) V7() *headerV7 { return (*headerV7)(h) } -func (h *headerGNU) Magic() []byte { return h[257:][:6] } -func (h *headerGNU) Version() []byte { return h[263:][:2] } -func (h *headerGNU) UserName() []byte { return h[265:][:32] } -func (h *headerGNU) GroupName() []byte { return h[297:][:32] } -func (h *headerGNU) DevMajor() []byte { return h[329:][:8] } -func (h *headerGNU) DevMinor() []byte { return h[337:][:8] } -func (h *headerGNU) AccessTime() []byte { return h[345:][:12] } -func (h *headerGNU) ChangeTime() []byte { return h[357:][:12] } -func (h *headerGNU) Sparse() sparseArray { return (sparseArray)(h[386:][:24*4+1]) } -func (h *headerGNU) RealSize() []byte { return h[483:][:12] } - -type headerSTAR [blockSize]byte - -func (h *headerSTAR) V7() *headerV7 { return (*headerV7)(h) } -func (h *headerSTAR) Magic() []byte { return h[257:][:6] } -func (h *headerSTAR) Version() []byte { return h[263:][:2] } -func (h *headerSTAR) UserName() []byte { return h[265:][:32] } -func (h *headerSTAR) GroupName() []byte { return h[297:][:32] } -func (h *headerSTAR) DevMajor() []byte { return h[329:][:8] } -func (h *headerSTAR) DevMinor() []byte { return h[337:][:8] } -func (h *headerSTAR) Prefix() []byte { return h[345:][:131] } -func (h *headerSTAR) AccessTime() []byte { return h[476:][:12] } -func (h *headerSTAR) ChangeTime() []byte { return h[488:][:12] } -func (h *headerSTAR) Trailer() []byte { return h[508:][:4] } - -type headerUSTAR [blockSize]byte - -func (h *headerUSTAR) V7() *headerV7 { return (*headerV7)(h) } -func (h *headerUSTAR) Magic() []byte { return h[257:][:6] } -func (h *headerUSTAR) Version() []byte { return h[263:][:2] } -func 
(h *headerUSTAR) UserName() []byte { return h[265:][:32] } -func (h *headerUSTAR) GroupName() []byte { return h[297:][:32] } -func (h *headerUSTAR) DevMajor() []byte { return h[329:][:8] } -func (h *headerUSTAR) DevMinor() []byte { return h[337:][:8] } -func (h *headerUSTAR) Prefix() []byte { return h[345:][:155] } - -type sparseArray []byte - -func (s sparseArray) Entry(i int) sparseElem { return (sparseElem)(s[i*24:]) } -func (s sparseArray) IsExtended() []byte { return s[24*s.MaxEntries():][:1] } -func (s sparseArray) MaxEntries() int { return len(s) / 24 } - -type sparseElem []byte - -func (s sparseElem) Offset() []byte { return s[00:][:12] } -func (s sparseElem) Length() []byte { return s[12:][:12] } diff --git a/vendor/github.com/dmcgowan/go-tar/reader.go b/vendor/github.com/dmcgowan/go-tar/reader.go deleted file mode 100644 index 1d0673020..000000000 --- a/vendor/github.com/dmcgowan/go-tar/reader.go +++ /dev/null @@ -1,852 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package tar - -import ( - "bytes" - "io" - "io/ioutil" - "strconv" - "strings" - "time" -) - -// Reader provides sequential access to the contents of a tar archive. -// Reader.Next advances to the next file in the archive (including the first), -// and then Reader can be treated as an io.Reader to access the file's data. -type Reader struct { - r io.Reader - pad int64 // Amount of padding (ignored) after current file entry - curr fileReader // Reader for current file entry - blk block // Buffer to use as temporary local storage - - // err is a persistent error. - // It is only the responsibility of every exported method of Reader to - // ensure that this error is sticky. - err error -} - -type fileReader interface { - io.Reader - fileState - - WriteTo(io.Writer) (int64, error) -} - -// NewReader creates a new Reader reading from r. -func NewReader(r io.Reader) *Reader { - return &Reader{r: r, curr: ®FileReader{r, 0}} -} - -// Next advances to the next entry in the tar archive. -// The Header.Size determines how many bytes can be read for the next file. -// Any remaining data in the current file is automatically discarded. -// -// io.EOF is returned at the end of the input. -func (tr *Reader) Next() (*Header, error) { - if tr.err != nil { - return nil, tr.err - } - hdr, err := tr.next() - tr.err = err - return hdr, err -} - -func (tr *Reader) next() (*Header, error) { - var paxHdrs map[string]string - var gnuLongName, gnuLongLink string - - // Externally, Next iterates through the tar archive as if it is a series of - // files. Internally, the tar format often uses fake "files" to add meta - // data that describes the next file. These meta data "files" should not - // normally be visible to the outside. As such, this loop iterates through - // one or more "header files" until it finds a "normal file". - format := FormatUSTAR | FormatPAX | FormatGNU -loop: - for { - // Discard the remainder of the file and any padding. - if err := discard(tr.r, tr.curr.PhysicalRemaining()); err != nil { - return nil, err - } - if _, err := tryReadFull(tr.r, tr.blk[:tr.pad]); err != nil { - return nil, err - } - tr.pad = 0 - - hdr, rawHdr, err := tr.readHeader() - if err != nil { - return nil, err - } - if err := tr.handleRegularFile(hdr); err != nil { - return nil, err - } - format.mayOnlyBe(hdr.Format) - - // Check for PAX/GNU special headers and files. 
- switch hdr.Typeflag { - case TypeXHeader, TypeXGlobalHeader: - format.mayOnlyBe(FormatPAX) - paxHdrs, err = parsePAX(tr) - if err != nil { - return nil, err - } - if hdr.Typeflag == TypeXGlobalHeader { - mergePAX(hdr, paxHdrs) - return &Header{ - Typeflag: hdr.Typeflag, - Xattrs: hdr.Xattrs, - PAXRecords: hdr.PAXRecords, - Format: format, - }, nil - } - continue loop // This is a meta header affecting the next header - case TypeGNULongName, TypeGNULongLink: - format.mayOnlyBe(FormatGNU) - realname, err := ioutil.ReadAll(tr) - if err != nil { - return nil, err - } - - var p parser - switch hdr.Typeflag { - case TypeGNULongName: - gnuLongName = p.parseString(realname) - case TypeGNULongLink: - gnuLongLink = p.parseString(realname) - } - continue loop // This is a meta header affecting the next header - default: - // The old GNU sparse format is handled here since it is technically - // just a regular file with additional attributes. - - if err := mergePAX(hdr, paxHdrs); err != nil { - return nil, err - } - if gnuLongName != "" { - hdr.Name = gnuLongName - } - if gnuLongLink != "" { - hdr.Linkname = gnuLongLink - } - if hdr.Typeflag == TypeRegA && strings.HasSuffix(hdr.Name, "/") { - hdr.Typeflag = TypeDir // Legacy archives use trailing slash for directories - } - - // The extended headers may have updated the size. - // Thus, setup the regFileReader again after merging PAX headers. - if err := tr.handleRegularFile(hdr); err != nil { - return nil, err - } - - // Sparse formats rely on being able to read from the logical data - // section; there must be a preceding call to handleRegularFile. - if err := tr.handleSparseFile(hdr, rawHdr); err != nil { - return nil, err - } - - // Set the final guess at the format. - if format.has(FormatUSTAR) && format.has(FormatPAX) { - format.mayOnlyBe(FormatUSTAR) - } - hdr.Format = format - return hdr, nil // This is a file, so stop - } - } -} - -// handleRegularFile sets up the current file reader and padding such that it -// can only read the following logical data section. It will properly handle -// special headers that contain no data section. -func (tr *Reader) handleRegularFile(hdr *Header) error { - nb := hdr.Size - if isHeaderOnlyType(hdr.Typeflag) { - nb = 0 - } - if nb < 0 { - return ErrHeader - } - - tr.pad = blockPadding(nb) - tr.curr = ®FileReader{r: tr.r, nb: nb} - return nil -} - -// handleSparseFile checks if the current file is a sparse format of any type -// and sets the curr reader appropriately. -func (tr *Reader) handleSparseFile(hdr *Header, rawHdr *block) error { - var spd sparseDatas - var err error - if hdr.Typeflag == TypeGNUSparse { - spd, err = tr.readOldGNUSparseMap(hdr, rawHdr) - } else { - spd, err = tr.readGNUSparsePAXHeaders(hdr) - } - - // If sp is non-nil, then this is a sparse file. - // Note that it is possible for len(sp) == 0. - if err == nil && spd != nil { - if isHeaderOnlyType(hdr.Typeflag) || !validateSparseEntries(spd, hdr.Size) { - return ErrHeader - } - sph := invertSparseEntries(spd, hdr.Size) - tr.curr = &sparseFileReader{tr.curr, sph, 0} - hdr.SparseHoles = append([]SparseEntry{}, sph...) - } - return err -} - -// readGNUSparsePAXHeaders checks the PAX headers for GNU sparse headers. -// If they are found, then this function reads the sparse map and returns it. -// This assumes that 0.0 headers have already been converted to 0.1 headers -// by the the PAX header parsing logic. -func (tr *Reader) readGNUSparsePAXHeaders(hdr *Header) (sparseDatas, error) { - // Identify the version of GNU headers. 
- var is1x0 bool - major, minor := hdr.PAXRecords[paxGNUSparseMajor], hdr.PAXRecords[paxGNUSparseMinor] - switch { - case major == "0" && (minor == "0" || minor == "1"): - is1x0 = false - case major == "1" && minor == "0": - is1x0 = true - case major != "" || minor != "": - return nil, nil // Unknown GNU sparse PAX version - case hdr.PAXRecords[paxGNUSparseMap] != "": - is1x0 = false // 0.0 and 0.1 did not have explicit version records, so guess - default: - return nil, nil // Not a PAX format GNU sparse file. - } - hdr.Format.mayOnlyBe(FormatPAX) - - // Update hdr from GNU sparse PAX headers. - if name := hdr.PAXRecords[paxGNUSparseName]; name != "" { - hdr.Name = name - } - size := hdr.PAXRecords[paxGNUSparseSize] - if size == "" { - size = hdr.PAXRecords[paxGNUSparseRealSize] - } - if size != "" { - n, err := strconv.ParseInt(size, 10, 64) - if err != nil { - return nil, ErrHeader - } - hdr.Size = n - } - - // Read the sparse map according to the appropriate format. - if is1x0 { - return readGNUSparseMap1x0(tr.curr) - } - return readGNUSparseMap0x1(hdr.PAXRecords) -} - -// mergePAX merges paxHdrs into hdr for all relevant fields of Header. -func mergePAX(hdr *Header, paxHdrs map[string]string) (err error) { - for k, v := range paxHdrs { - if v == "" { - continue // Keep the original USTAR value - } - var id64 int64 - switch k { - case paxPath: - hdr.Name = v - case paxLinkpath: - hdr.Linkname = v - case paxUname: - hdr.Uname = v - case paxGname: - hdr.Gname = v - case paxUid: - id64, err = strconv.ParseInt(v, 10, 64) - hdr.Uid = int(id64) // Integer overflow possible - case paxGid: - id64, err = strconv.ParseInt(v, 10, 64) - hdr.Gid = int(id64) // Integer overflow possible - case paxAtime: - hdr.AccessTime, err = parsePAXTime(v) - case paxMtime: - hdr.ModTime, err = parsePAXTime(v) - case paxCtime: - hdr.ChangeTime, err = parsePAXTime(v) - case paxSize: - hdr.Size, err = strconv.ParseInt(v, 10, 64) - default: - if strings.HasPrefix(k, paxSchilyXattr) { - if hdr.Xattrs == nil { - hdr.Xattrs = make(map[string]string) - } - hdr.Xattrs[k[len(paxSchilyXattr):]] = v - } - } - if err != nil { - return ErrHeader - } - } - hdr.PAXRecords = paxHdrs - return nil -} - -// parsePAX parses PAX headers. -// If an extended header (type 'x') is invalid, ErrHeader is returned -func parsePAX(r io.Reader) (map[string]string, error) { - buf, err := ioutil.ReadAll(r) - if err != nil { - return nil, err - } - sbuf := string(buf) - - // For GNU PAX sparse format 0.0 support. - // This function transforms the sparse format 0.0 headers into format 0.1 - // headers since 0.0 headers were not PAX compliant. - var sparseMap []string - - paxHdrs := make(map[string]string) - for len(sbuf) > 0 { - key, value, residual, err := parsePAXRecord(sbuf) - if err != nil { - return nil, ErrHeader - } - sbuf = residual - - switch key { - case paxGNUSparseOffset, paxGNUSparseNumBytes: - // Validate sparse header order and value. - if (len(sparseMap)%2 == 0 && key != paxGNUSparseOffset) || - (len(sparseMap)%2 == 1 && key != paxGNUSparseNumBytes) || - strings.Contains(value, ",") { - return nil, ErrHeader - } - sparseMap = append(sparseMap, value) - default: - paxHdrs[key] = value - } - } - if len(sparseMap) > 0 { - paxHdrs[paxGNUSparseMap] = strings.Join(sparseMap, ",") - } - return paxHdrs, nil -} - -// readHeader reads the next block header and assumes that the underlying reader -// is already aligned to a block boundary. It returns the raw block of the -// header in case further processing is required. 
-// -// The err will be set to io.EOF only when one of the following occurs: -// * Exactly 0 bytes are read and EOF is hit. -// * Exactly 1 block of zeros is read and EOF is hit. -// * At least 2 blocks of zeros are read. -func (tr *Reader) readHeader() (*Header, *block, error) { - // Two blocks of zero bytes marks the end of the archive. - if _, err := io.ReadFull(tr.r, tr.blk[:]); err != nil { - return nil, nil, err // EOF is okay here; exactly 0 bytes read - } - if bytes.Equal(tr.blk[:], zeroBlock[:]) { - if _, err := io.ReadFull(tr.r, tr.blk[:]); err != nil { - return nil, nil, err // EOF is okay here; exactly 1 block of zeros read - } - if bytes.Equal(tr.blk[:], zeroBlock[:]) { - return nil, nil, io.EOF // normal EOF; exactly 2 block of zeros read - } - return nil, nil, ErrHeader // Zero block and then non-zero block - } - - // Verify the header matches a known format. - format := tr.blk.GetFormat() - if format == FormatUnknown { - return nil, nil, ErrHeader - } - - var p parser - hdr := new(Header) - - // Unpack the V7 header. - v7 := tr.blk.V7() - hdr.Typeflag = v7.TypeFlag()[0] - hdr.Name = p.parseString(v7.Name()) - hdr.Linkname = p.parseString(v7.LinkName()) - hdr.Size = p.parseNumeric(v7.Size()) - hdr.Mode = p.parseNumeric(v7.Mode()) - hdr.Uid = int(p.parseNumeric(v7.UID())) - hdr.Gid = int(p.parseNumeric(v7.GID())) - hdr.ModTime = time.Unix(p.parseNumeric(v7.ModTime()), 0) - - // Unpack format specific fields. - if format > formatV7 { - ustar := tr.blk.USTAR() - hdr.Uname = p.parseString(ustar.UserName()) - hdr.Gname = p.parseString(ustar.GroupName()) - hdr.Devmajor = p.parseNumeric(ustar.DevMajor()) - hdr.Devminor = p.parseNumeric(ustar.DevMinor()) - - var prefix string - switch { - case format.has(FormatUSTAR | FormatPAX): - hdr.Format = format - ustar := tr.blk.USTAR() - prefix = p.parseString(ustar.Prefix()) - - // For Format detection, check if block is properly formatted since - // the parser is more liberal than what USTAR actually permits. - notASCII := func(r rune) bool { return r >= 0x80 } - if bytes.IndexFunc(tr.blk[:], notASCII) >= 0 { - hdr.Format = FormatUnknown // Non-ASCII characters in block. - } - nul := func(b []byte) bool { return int(b[len(b)-1]) == 0 } - if !(nul(v7.Size()) && nul(v7.Mode()) && nul(v7.UID()) && nul(v7.GID()) && - nul(v7.ModTime()) && nul(ustar.DevMajor()) && nul(ustar.DevMinor())) { - hdr.Format = FormatUnknown // Numeric fields must end in NUL - } - case format.has(formatSTAR): - star := tr.blk.STAR() - prefix = p.parseString(star.Prefix()) - hdr.AccessTime = time.Unix(p.parseNumeric(star.AccessTime()), 0) - hdr.ChangeTime = time.Unix(p.parseNumeric(star.ChangeTime()), 0) - case format.has(FormatGNU): - hdr.Format = format - var p2 parser - gnu := tr.blk.GNU() - if b := gnu.AccessTime(); b[0] != 0 { - hdr.AccessTime = time.Unix(p2.parseNumeric(b), 0) - } - if b := gnu.ChangeTime(); b[0] != 0 { - hdr.ChangeTime = time.Unix(p2.parseNumeric(b), 0) - } - - // Prior to Go1.8, the Writer had a bug where it would output - // an invalid tar file in certain rare situations because the logic - // incorrectly believed that the old GNU format had a prefix field. - // This is wrong and leads to an output file that mangles the - // atime and ctime fields, which are often left unused. - // - // In order to continue reading tar files created by former, buggy - // versions of Go, we skeptically parse the atime and ctime fields. 
- // If we are unable to parse them and the prefix field looks like - // an ASCII string, then we fallback on the pre-Go1.8 behavior - // of treating these fields as the USTAR prefix field. - // - // Note that this will not use the fallback logic for all possible - // files generated by a pre-Go1.8 toolchain. If the generated file - // happened to have a prefix field that parses as valid - // atime and ctime fields (e.g., when they are valid octal strings), - // then it is impossible to distinguish between an valid GNU file - // and an invalid pre-Go1.8 file. - // - // See https://golang.org/issues/12594 - // See https://golang.org/issues/21005 - if p2.err != nil { - hdr.AccessTime, hdr.ChangeTime = time.Time{}, time.Time{} - ustar := tr.blk.USTAR() - if s := p.parseString(ustar.Prefix()); isASCII(s) { - prefix = s - } - hdr.Format = FormatUnknown // Buggy file is not GNU - } - } - if len(prefix) > 0 { - hdr.Name = prefix + "/" + hdr.Name - } - } - return hdr, &tr.blk, p.err -} - -// readOldGNUSparseMap reads the sparse map from the old GNU sparse format. -// The sparse map is stored in the tar header if it's small enough. -// If it's larger than four entries, then one or more extension headers are used -// to store the rest of the sparse map. -// -// The Header.Size does not reflect the size of any extended headers used. -// Thus, this function will read from the raw io.Reader to fetch extra headers. -// This method mutates blk in the process. -func (tr *Reader) readOldGNUSparseMap(hdr *Header, blk *block) (sparseDatas, error) { - // Make sure that the input format is GNU. - // Unfortunately, the STAR format also has a sparse header format that uses - // the same type flag but has a completely different layout. - if blk.GetFormat() != FormatGNU { - return nil, ErrHeader - } - hdr.Format.mayOnlyBe(FormatGNU) - - var p parser - hdr.Size = p.parseNumeric(blk.GNU().RealSize()) - if p.err != nil { - return nil, p.err - } - s := blk.GNU().Sparse() - spd := make(sparseDatas, 0, s.MaxEntries()) - for { - for i := 0; i < s.MaxEntries(); i++ { - // This termination condition is identical to GNU and BSD tar. - if s.Entry(i).Offset()[0] == 0x00 { - break // Don't return, need to process extended headers (even if empty) - } - offset := p.parseNumeric(s.Entry(i).Offset()) - length := p.parseNumeric(s.Entry(i).Length()) - if p.err != nil { - return nil, p.err - } - spd = append(spd, SparseEntry{Offset: offset, Length: length}) - } - - if s.IsExtended()[0] > 0 { - // There are more entries. Read an extension header and parse its entries. - if _, err := mustReadFull(tr.r, blk[:]); err != nil { - return nil, err - } - s = blk.Sparse() - continue - } - return spd, nil // Done - } -} - -// readGNUSparseMap1x0 reads the sparse map as stored in GNU's PAX sparse format -// version 1.0. The format of the sparse map consists of a series of -// newline-terminated numeric fields. The first field is the number of entries -// and is always present. Following this are the entries, consisting of two -// fields (offset, length). This function must stop reading at the end -// boundary of the block containing the last newline. -// -// Note that the GNU manual says that numeric values should be encoded in octal -// format. However, the GNU tar utility itself outputs these values in decimal. -// As such, this library treats values as being encoded in decimal. 
-func readGNUSparseMap1x0(r io.Reader) (sparseDatas, error) { - var ( - cntNewline int64 - buf bytes.Buffer - blk block - ) - - // feedTokens copies data in blocks from r into buf until there are - // at least cnt newlines in buf. It will not read more blocks than needed. - feedTokens := func(n int64) error { - for cntNewline < n { - if _, err := mustReadFull(r, blk[:]); err != nil { - return err - } - buf.Write(blk[:]) - for _, c := range blk { - if c == '\n' { - cntNewline++ - } - } - } - return nil - } - - // nextToken gets the next token delimited by a newline. This assumes that - // at least one newline exists in the buffer. - nextToken := func() string { - cntNewline-- - tok, _ := buf.ReadString('\n') - return strings.TrimRight(tok, "\n") - } - - // Parse for the number of entries. - // Use integer overflow resistant math to check this. - if err := feedTokens(1); err != nil { - return nil, err - } - numEntries, err := strconv.ParseInt(nextToken(), 10, 0) // Intentionally parse as native int - if err != nil || numEntries < 0 || int(2*numEntries) < int(numEntries) { - return nil, ErrHeader - } - - // Parse for all member entries. - // numEntries is trusted after this since a potential attacker must have - // committed resources proportional to what this library used. - if err := feedTokens(2 * numEntries); err != nil { - return nil, err - } - spd := make(sparseDatas, 0, numEntries) - for i := int64(0); i < numEntries; i++ { - offset, err1 := strconv.ParseInt(nextToken(), 10, 64) - length, err2 := strconv.ParseInt(nextToken(), 10, 64) - if err1 != nil || err2 != nil { - return nil, ErrHeader - } - spd = append(spd, SparseEntry{Offset: offset, Length: length}) - } - return spd, nil -} - -// readGNUSparseMap0x1 reads the sparse map as stored in GNU's PAX sparse format -// version 0.1. The sparse map is stored in the PAX headers. -func readGNUSparseMap0x1(paxHdrs map[string]string) (sparseDatas, error) { - // Get number of entries. - // Use integer overflow resistant math to check this. - numEntriesStr := paxHdrs[paxGNUSparseNumBlocks] - numEntries, err := strconv.ParseInt(numEntriesStr, 10, 0) // Intentionally parse as native int - if err != nil || numEntries < 0 || int(2*numEntries) < int(numEntries) { - return nil, ErrHeader - } - - // There should be two numbers in sparseMap for each entry. - sparseMap := strings.Split(paxHdrs[paxGNUSparseMap], ",") - if len(sparseMap) == 1 && sparseMap[0] == "" { - sparseMap = sparseMap[:0] - } - if int64(len(sparseMap)) != 2*numEntries { - return nil, ErrHeader - } - - // Loop through the entries in the sparse map. - // numEntries is trusted now. - spd := make(sparseDatas, 0, numEntries) - for len(sparseMap) >= 2 { - offset, err1 := strconv.ParseInt(sparseMap[0], 10, 64) - length, err2 := strconv.ParseInt(sparseMap[1], 10, 64) - if err1 != nil || err2 != nil { - return nil, ErrHeader - } - spd = append(spd, SparseEntry{Offset: offset, Length: length}) - sparseMap = sparseMap[2:] - } - return spd, nil -} - -// Read reads from the current file in the tar archive. -// It returns (0, io.EOF) when it reaches the end of that file, -// until Next is called to advance to the next file. -// -// If the current file is sparse, then the regions marked as a hole -// are read back as NUL-bytes. -// -// Calling Read on special types like TypeLink, TypeSymlink, TypeChar, -// TypeBlock, TypeDir, and TypeFifo returns (0, io.EOF) regardless of what -// the Header.Size claims. 
-func (tr *Reader) Read(b []byte) (int, error) { - if tr.err != nil { - return 0, tr.err - } - n, err := tr.curr.Read(b) - if err != nil && err != io.EOF { - tr.err = err - } - return n, err -} - -// WriteTo writes the content of the current file to w. -// The bytes written matches the number of remaining bytes in the current file. -// -// If the current file is sparse and w is an io.WriteSeeker, -// then WriteTo uses Seek to skip past holes defined in Header.SparseHoles, -// assuming that skipped regions are filled with NULs. -// This always writes the last byte to ensure w is the right size. -func (tr *Reader) WriteTo(w io.Writer) (int64, error) { - if tr.err != nil { - return 0, tr.err - } - n, err := tr.curr.WriteTo(w) - if err != nil { - tr.err = err - } - return n, err -} - -// regFileReader is a fileReader for reading data from a regular file entry. -type regFileReader struct { - r io.Reader // Underlying Reader - nb int64 // Number of remaining bytes to read -} - -func (fr *regFileReader) Read(b []byte) (n int, err error) { - if int64(len(b)) > fr.nb { - b = b[:fr.nb] - } - if len(b) > 0 { - n, err = fr.r.Read(b) - fr.nb -= int64(n) - } - switch { - case err == io.EOF && fr.nb > 0: - return n, io.ErrUnexpectedEOF - case err == nil && fr.nb == 0: - return n, io.EOF - default: - return n, err - } -} - -func (fr *regFileReader) WriteTo(w io.Writer) (int64, error) { - return io.Copy(w, struct{ io.Reader }{fr}) -} - -func (fr regFileReader) LogicalRemaining() int64 { - return fr.nb -} - -func (fr regFileReader) PhysicalRemaining() int64 { - return fr.nb -} - -// sparseFileReader is a fileReader for reading data from a sparse file entry. -type sparseFileReader struct { - fr fileReader // Underlying fileReader - sp sparseHoles // Normalized list of sparse holes - pos int64 // Current position in sparse file -} - -func (sr *sparseFileReader) Read(b []byte) (n int, err error) { - finished := int64(len(b)) >= sr.LogicalRemaining() - if finished { - b = b[:sr.LogicalRemaining()] - } - - b0 := b - endPos := sr.pos + int64(len(b)) - for endPos > sr.pos && err == nil { - var nf int // Bytes read in fragment - holeStart, holeEnd := sr.sp[0].Offset, sr.sp[0].endOffset() - if sr.pos < holeStart { // In a data fragment - bf := b[:min(int64(len(b)), holeStart-sr.pos)] - nf, err = tryReadFull(sr.fr, bf) - } else { // In a hole fragment - bf := b[:min(int64(len(b)), holeEnd-sr.pos)] - nf, err = tryReadFull(zeroReader{}, bf) - } - b = b[nf:] - sr.pos += int64(nf) - if sr.pos >= holeEnd && len(sr.sp) > 1 { - sr.sp = sr.sp[1:] // Ensure last fragment always remains - } - } - - n = len(b0) - len(b) - switch { - case err == io.EOF: - return n, errMissData // Less data in dense file than sparse file - case err != nil: - return n, err - case sr.LogicalRemaining() == 0 && sr.PhysicalRemaining() > 0: - return n, errUnrefData // More data in dense file than sparse file - case finished: - return n, io.EOF - default: - return n, nil - } -} - -func (sr *sparseFileReader) WriteTo(w io.Writer) (n int64, err error) { - ws, ok := w.(io.WriteSeeker) - if ok { - if _, err := ws.Seek(0, io.SeekCurrent); err != nil { - ok = false // Not all io.Seeker can really seek - } - } - if !ok { - return io.Copy(w, struct{ io.Reader }{sr}) - } - - var writeLastByte bool - pos0 := sr.pos - for sr.LogicalRemaining() > 0 && !writeLastByte && err == nil { - var nf int64 // Size of fragment - holeStart, holeEnd := sr.sp[0].Offset, sr.sp[0].endOffset() - if sr.pos < holeStart { // In a data fragment - nf = holeStart - sr.pos - nf, err = 
io.CopyN(ws, sr.fr, nf) - } else { // In a hole fragment - nf = holeEnd - sr.pos - if sr.PhysicalRemaining() == 0 { - writeLastByte = true - nf-- - } - _, err = ws.Seek(nf, io.SeekCurrent) - } - sr.pos += nf - if sr.pos >= holeEnd && len(sr.sp) > 1 { - sr.sp = sr.sp[1:] // Ensure last fragment always remains - } - } - - // If the last fragment is a hole, then seek to 1-byte before EOF, and - // write a single byte to ensure the file is the right size. - if writeLastByte && err == nil { - _, err = ws.Write([]byte{0}) - sr.pos++ - } - - n = sr.pos - pos0 - switch { - case err == io.EOF: - return n, errMissData // Less data in dense file than sparse file - case err != nil: - return n, err - case sr.LogicalRemaining() == 0 && sr.PhysicalRemaining() > 0: - return n, errUnrefData // More data in dense file than sparse file - default: - return n, nil - } -} - -func (sr sparseFileReader) LogicalRemaining() int64 { - return sr.sp[len(sr.sp)-1].endOffset() - sr.pos -} -func (sr sparseFileReader) PhysicalRemaining() int64 { - return sr.fr.PhysicalRemaining() -} - -type zeroReader struct{} - -func (zeroReader) Read(b []byte) (int, error) { - for i := range b { - b[i] = 0 - } - return len(b), nil -} - -// mustReadFull is like io.ReadFull except it returns -// io.ErrUnexpectedEOF when io.EOF is hit before len(b) bytes are read. -func mustReadFull(r io.Reader, b []byte) (int, error) { - n, err := tryReadFull(r, b) - if err == io.EOF { - err = io.ErrUnexpectedEOF - } - return n, err -} - -// tryReadFull is like io.ReadFull except it returns -// io.EOF when it is hit before len(b) bytes are read. -func tryReadFull(r io.Reader, b []byte) (n int, err error) { - for len(b) > n && err == nil { - var nn int - nn, err = r.Read(b[n:]) - n += nn - } - if len(b) == n && err == io.EOF { - err = nil - } - return n, err -} - -// discard skips n bytes in r, reporting an error if unable to do so. -func discard(r io.Reader, n int64) error { - // If possible, Seek to the last byte before the end of the data section. - // Do this because Seek is often lazy about reporting errors; this will mask - // the fact that the stream may be truncated. We can rely on the - // io.CopyN done shortly afterwards to trigger any IO errors. - var seekSkipped int64 // Number of bytes skipped via Seek - if sr, ok := r.(io.Seeker); ok && n > 1 { - // Not all io.Seeker can actually Seek. For example, os.Stdin implements - // io.Seeker, but calling Seek always returns an error and performs - // no action. Thus, we try an innocent seek to the current position - // to see if Seek is really supported. - pos1, err := sr.Seek(0, io.SeekCurrent) - if pos1 >= 0 && err == nil { - // Seek seems supported, so perform the real Seek. - pos2, err := sr.Seek(n-1, io.SeekCurrent) - if pos2 < 0 || err != nil { - return err - } - seekSkipped = pos2 - pos1 - } - } - - copySkipped, err := io.CopyN(ioutil.Discard, r, n-seekSkipped) - if err == io.EOF && seekSkipped+copySkipped < n { - err = io.ErrUnexpectedEOF - } - return err -} diff --git a/vendor/github.com/dmcgowan/go-tar/sparse_unix.go b/vendor/github.com/dmcgowan/go-tar/sparse_unix.go deleted file mode 100644 index c623c1ee4..000000000 --- a/vendor/github.com/dmcgowan/go-tar/sparse_unix.go +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build linux darwin dragonfly freebsd openbsd netbsd solaris - -package tar - -import ( - "io" - "os" - "runtime" - "syscall" -) - -func init() { - sysSparseDetect = sparseDetectUnix -} - -func sparseDetectUnix(f *os.File) (sph sparseHoles, err error) { - // SEEK_DATA and SEEK_HOLE originated from Solaris and support for it - // has been added to most of the other major Unix systems. - var seekData, seekHole = 3, 4 // SEEK_DATA/SEEK_HOLE from unistd.h - - if runtime.GOOS == "darwin" { - // Darwin has the constants swapped, compared to all other UNIX. - seekData, seekHole = 4, 3 - } - - // Check for seekData/seekHole support. - // Different OS and FS may differ in the exact errno that is returned when - // there is no support. Rather than special-casing every possible errno - // representing "not supported", just assume that a non-nil error means - // that seekData/seekHole is not supported. - if _, err := f.Seek(0, seekHole); err != nil { - return nil, nil - } - - // Populate the SparseHoles. - var last, pos int64 = -1, 0 - for { - // Get the location of the next hole section. - if pos, err = fseek(f, pos, seekHole); pos == last || err != nil { - return sph, err - } - offset := pos - last = pos - - // Get the location of the next data section. - if pos, err = fseek(f, pos, seekData); pos == last || err != nil { - return sph, err - } - length := pos - offset - last = pos - - if length > 0 { - sph = append(sph, SparseEntry{offset, length}) - } - } -} - -func fseek(f *os.File, pos int64, whence int) (int64, error) { - pos, err := f.Seek(pos, whence) - if errno(err) == syscall.ENXIO { - // SEEK_DATA returns ENXIO when past the last data fragment, - // which makes determining the size of the last hole difficult. - pos, err = f.Seek(0, io.SeekEnd) - } - return pos, err -} - -func errno(err error) error { - if perr, ok := err.(*os.PathError); ok { - return perr.Err - } - return err -} diff --git a/vendor/github.com/dmcgowan/go-tar/sparse_windows.go b/vendor/github.com/dmcgowan/go-tar/sparse_windows.go deleted file mode 100644 index 05bf1a90b..000000000 --- a/vendor/github.com/dmcgowan/go-tar/sparse_windows.go +++ /dev/null @@ -1,129 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build windows - -package tar - -import ( - "os" - "syscall" - "unsafe" -) - -var errInvalidFunc = syscall.Errno(1) // ERROR_INVALID_FUNCTION from WinError.h - -func init() { - sysSparseDetect = sparseDetectWindows - sysSparsePunch = sparsePunchWindows -} - -func sparseDetectWindows(f *os.File) (sph sparseHoles, err error) { - const queryAllocRanges = 0x000940CF // FSCTL_QUERY_ALLOCATED_RANGES from WinIoCtl.h - type allocRangeBuffer struct{ offset, length int64 } // FILE_ALLOCATED_RANGE_BUFFER from WinIoCtl.h - - s, err := f.Stat() - if err != nil { - return nil, err - } - - queryRange := allocRangeBuffer{0, s.Size()} - allocRanges := make([]allocRangeBuffer, 64) - - // Repeatedly query for ranges until the input buffer is large enough. 
- var bytesReturned uint32 - for { - err := syscall.DeviceIoControl( - syscall.Handle(f.Fd()), queryAllocRanges, - (*byte)(unsafe.Pointer(&queryRange)), uint32(unsafe.Sizeof(queryRange)), - (*byte)(unsafe.Pointer(&allocRanges[0])), uint32(len(allocRanges)*int(unsafe.Sizeof(allocRanges[0]))), - &bytesReturned, nil, - ) - if err == syscall.ERROR_MORE_DATA { - allocRanges = make([]allocRangeBuffer, 2*len(allocRanges)) - continue - } - if err == errInvalidFunc { - return nil, nil // Sparse file not supported on this FS - } - if err != nil { - return nil, err - } - break - } - n := bytesReturned / uint32(unsafe.Sizeof(allocRanges[0])) - allocRanges = append(allocRanges[:n], allocRangeBuffer{s.Size(), 0}) - - // Invert the data fragments into hole fragments. - var pos int64 - for _, r := range allocRanges { - if r.offset > pos { - sph = append(sph, SparseEntry{pos, r.offset - pos}) - } - pos = r.offset + r.length - } - return sph, nil -} - -func sparsePunchWindows(f *os.File, sph sparseHoles) error { - const setSparse = 0x000900C4 // FSCTL_SET_SPARSE from WinIoCtl.h - const setZeroData = 0x000980C8 // FSCTL_SET_ZERO_DATA from WinIoCtl.h - type zeroDataInfo struct{ start, end int64 } // FILE_ZERO_DATA_INFORMATION from WinIoCtl.h - - // Set the file as being sparse. - var bytesReturned uint32 - devErr := syscall.DeviceIoControl( - syscall.Handle(f.Fd()), setSparse, - nil, 0, nil, 0, - &bytesReturned, nil, - ) - if devErr != nil && devErr != errInvalidFunc { - return devErr - } - - // Set the file to the right size. - var size int64 - if len(sph) > 0 { - size = sph[len(sph)-1].endOffset() - } - if err := f.Truncate(size); err != nil { - return err - } - if devErr == errInvalidFunc { - // Sparse file not supported on this FS. - // Call sparsePunchManual since SetEndOfFile does not guarantee that - // the extended space is filled with zeros. - return sparsePunchManual(f, sph) - } - - // Punch holes for all relevant fragments. - for _, s := range sph { - zdi := zeroDataInfo{s.Offset, s.endOffset()} - err := syscall.DeviceIoControl( - syscall.Handle(f.Fd()), setZeroData, - (*byte)(unsafe.Pointer(&zdi)), uint32(unsafe.Sizeof(zdi)), - nil, 0, - &bytesReturned, nil, - ) - if err != nil { - return err - } - } - return nil -} - -// sparsePunchManual writes zeros into each hole. -func sparsePunchManual(f *os.File, sph sparseHoles) error { - const chunkSize = 32 << 10 - zbuf := make([]byte, chunkSize) - for _, s := range sph { - for pos := s.Offset; pos < s.endOffset(); pos += chunkSize { - n := min(chunkSize, s.endOffset()-pos) - if _, err := f.WriteAt(zbuf[:n], pos); err != nil { - return err - } - } - } - return nil -} diff --git a/vendor/github.com/dmcgowan/go-tar/stat_actime1.go b/vendor/github.com/dmcgowan/go-tar/stat_actime1.go deleted file mode 100644 index cf9cc79c5..000000000 --- a/vendor/github.com/dmcgowan/go-tar/stat_actime1.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build linux dragonfly openbsd solaris - -package tar - -import ( - "syscall" - "time" -) - -func statAtime(st *syscall.Stat_t) time.Time { - return time.Unix(st.Atim.Unix()) -} - -func statCtime(st *syscall.Stat_t) time.Time { - return time.Unix(st.Ctim.Unix()) -} diff --git a/vendor/github.com/dmcgowan/go-tar/stat_actime2.go b/vendor/github.com/dmcgowan/go-tar/stat_actime2.go deleted file mode 100644 index 6f17dbe30..000000000 --- a/vendor/github.com/dmcgowan/go-tar/stat_actime2.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build darwin freebsd netbsd - -package tar - -import ( - "syscall" - "time" -) - -func statAtime(st *syscall.Stat_t) time.Time { - return time.Unix(st.Atimespec.Unix()) -} - -func statCtime(st *syscall.Stat_t) time.Time { - return time.Unix(st.Ctimespec.Unix()) -} diff --git a/vendor/github.com/dmcgowan/go-tar/stat_unix.go b/vendor/github.com/dmcgowan/go-tar/stat_unix.go deleted file mode 100644 index 868105f33..000000000 --- a/vendor/github.com/dmcgowan/go-tar/stat_unix.go +++ /dev/null @@ -1,96 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build linux darwin dragonfly freebsd openbsd netbsd solaris - -package tar - -import ( - "os" - "os/user" - "runtime" - "strconv" - "sync" - "syscall" -) - -func init() { - sysStat = statUnix -} - -// userMap and groupMap caches UID and GID lookups for performance reasons. -// The downside is that renaming uname or gname by the OS never takes effect. -var userMap, groupMap sync.Map // map[int]string - -func statUnix(fi os.FileInfo, h *Header) error { - sys, ok := fi.Sys().(*syscall.Stat_t) - if !ok { - return nil - } - h.Uid = int(sys.Uid) - h.Gid = int(sys.Gid) - - // Best effort at populating Uname and Gname. - // The os/user functions may fail for any number of reasons - // (not implemented on that platform, cgo not enabled, etc). - if u, ok := userMap.Load(h.Uid); ok { - h.Uname = u.(string) - } else if u, err := user.LookupId(strconv.Itoa(h.Uid)); err == nil { - h.Uname = u.Username - userMap.Store(h.Uid, h.Uname) - } - if g, ok := groupMap.Load(h.Gid); ok { - h.Gname = g.(string) - } else if g, err := user.LookupGroupId(strconv.Itoa(h.Gid)); err == nil { - h.Gname = g.Name - groupMap.Store(h.Gid, h.Gname) - } - - h.AccessTime = statAtime(sys) - h.ChangeTime = statCtime(sys) - - // Best effort at populating Devmajor and Devminor. - if h.Typeflag == TypeChar || h.Typeflag == TypeBlock { - dev := uint64(sys.Rdev) // May be int32 or uint32 - switch runtime.GOOS { - case "linux": - // Copied from golang.org/x/sys/unix/dev_linux.go. - major := uint32((dev & 0x00000000000fff00) >> 8) - major |= uint32((dev & 0xfffff00000000000) >> 32) - minor := uint32((dev & 0x00000000000000ff) >> 0) - minor |= uint32((dev & 0x00000ffffff00000) >> 12) - h.Devmajor, h.Devminor = int64(major), int64(minor) - case "darwin": - // Copied from golang.org/x/sys/unix/dev_darwin.go. - major := uint32((dev >> 24) & 0xff) - minor := uint32(dev & 0xffffff) - h.Devmajor, h.Devminor = int64(major), int64(minor) - case "dragonfly": - // Copied from golang.org/x/sys/unix/dev_dragonfly.go. 
- major := uint32((dev >> 8) & 0xff) - minor := uint32(dev & 0xffff00ff) - h.Devmajor, h.Devminor = int64(major), int64(minor) - case "freebsd": - // Copied from golang.org/x/sys/unix/dev_freebsd.go. - major := uint32((dev >> 8) & 0xff) - minor := uint32(dev & 0xffff00ff) - h.Devmajor, h.Devminor = int64(major), int64(minor) - case "netbsd": - // Copied from golang.org/x/sys/unix/dev_netbsd.go. - major := uint32((dev & 0x000fff00) >> 8) - minor := uint32((dev & 0x000000ff) >> 0) - minor |= uint32((dev & 0xfff00000) >> 12) - h.Devmajor, h.Devminor = int64(major), int64(minor) - case "openbsd": - // Copied from golang.org/x/sys/unix/dev_openbsd.go. - major := uint32((dev & 0x0000ff00) >> 8) - minor := uint32((dev & 0x000000ff) >> 0) - minor |= uint32((dev & 0xffff0000) >> 8) - h.Devmajor, h.Devminor = int64(major), int64(minor) - default: - // TODO: Implement solaris (see https://golang.org/issue/8106) - } - } - return nil -} diff --git a/vendor/github.com/dmcgowan/go-tar/strconv.go b/vendor/github.com/dmcgowan/go-tar/strconv.go deleted file mode 100644 index 8bbd65cd1..000000000 --- a/vendor/github.com/dmcgowan/go-tar/strconv.go +++ /dev/null @@ -1,326 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package tar - -import ( - "bytes" - "fmt" - "strconv" - "strings" - "time" -) - -// hasNUL reports whether the NUL character exists within s. -func hasNUL(s string) bool { - return strings.IndexByte(s, 0) >= 0 -} - -// isASCII reports whether the input is an ASCII C-style string. -func isASCII(s string) bool { - for _, c := range s { - if c >= 0x80 || c == 0x00 { - return false - } - } - return true -} - -// toASCII converts the input to an ASCII C-style string. -// This a best effort conversion, so invalid characters are dropped. -func toASCII(s string) string { - if isASCII(s) { - return s - } - b := make([]byte, 0, len(s)) - for _, c := range s { - if c < 0x80 && c != 0x00 { - b = append(b, byte(c)) - } - } - return string(b) -} - -type parser struct { - err error // Last error seen -} - -type formatter struct { - err error // Last error seen -} - -// parseString parses bytes as a NUL-terminated C-style string. -// If a NUL byte is not found then the whole slice is returned as a string. -func (*parser) parseString(b []byte) string { - if i := bytes.IndexByte(b, 0); i >= 0 { - return string(b[:i]) - } - return string(b) -} - -// formatString copies s into b, NUL-terminating if possible. -func (f *formatter) formatString(b []byte, s string) { - if len(s) > len(b) { - f.err = ErrFieldTooLong - } - copy(b, s) - if len(s) < len(b) { - b[len(s)] = 0 - } - - // Some buggy readers treat regular files with a trailing slash - // in the V7 path field as a directory even though the full path - // recorded elsewhere (e.g., via PAX record) contains no trailing slash. - if len(s) > len(b) && b[len(b)-1] == '/' { - n := len(strings.TrimRight(s[:len(b)], "/")) - b[n] = 0 // Replace trailing slash with NUL terminator - } -} - -// fitsInBase256 reports whether x can be encoded into n bytes using base-256 -// encoding. Unlike octal encoding, base-256 encoding does not require that the -// string ends with a NUL character. Thus, all n bytes are available for output. -// -// If operating in binary mode, this assumes strict GNU binary mode; which means -// that the first byte can only be either 0x80 or 0xff. Thus, the first byte is -// equivalent to the sign bit in two's complement form. 
-func fitsInBase256(n int, x int64) bool {
-	binBits := uint(n-1) * 8
-	return n >= 9 || (x >= -1<<binBits && x < 1<<binBits)
-}
-
-// parseNumeric parses the input as being encoded in either base-256 or octal.
-// This function may return negative numbers.
-// If parsing fails or an integer overflow occurs, err will be set.
-func (p *parser) parseNumeric(b []byte) int64 {
-	// Check for base-256 (binary) format first.
-	// If the first bit of the first byte is set, then
-	// all following bits constitute a two's complement encoded number in big-endian byte order.
-	if len(b) > 0 && b[0]&0x80 != 0 {
-		// Handling negative numbers relies on the following identity:
-		//	-a-1 == ^a
-		//
-		// If the number is negative, we use an inversion mask to invert the
-		// data bytes and treat the value as an unsigned number.
-		var inv byte // 0x00 if positive or zero, 0xff if negative
-		if b[0]&0x40 != 0 {
-			inv = 0xff
-		}
-
-		var x uint64
-		for i, c := range b {
-			c ^= inv // Inverts c only if inv is 0xff, otherwise does nothing
-			if i == 0 {
-				c &= 0x7f // Ignore signal bit in first byte
-			}
-			if (x >> 56) > 0 {
-				p.err = ErrHeader // Integer overflow
-				return 0
-			}
-			x = x<<8 | uint64(c)
-		}
-		if (x >> 63) > 0 {
-			p.err = ErrHeader // Integer overflow
-			return 0
-		}
-		if inv == 0xff {
-			return ^int64(x)
-		}
-		return int64(x)
-	}
-
-	// Normal case is base-8 (octal) format.
-	return p.parseOctal(b)
-}
-
-// formatNumeric encodes x into b using base-8 (octal) encoding if possible.
-// Otherwise it will attempt to use base-256 (binary) encoding.
-func (f *formatter) formatNumeric(b []byte, x int64) {
-	if fitsInOctal(len(b), x) {
-		f.formatOctal(b, x)
-		return
-	}
-
-	if fitsInBase256(len(b), x) {
-		for i := len(b) - 1; i >= 0; i-- {
-			b[i] = byte(x)
-			x >>= 8
-		}
-		b[0] |= 0x80 // Highest bit indicates binary format
-		return
-	}
-
-	f.formatOctal(b, 0) // Last resort, just write zero
-	f.err = ErrFieldTooLong
-}
-
-func (p *parser) parseOctal(b []byte) int64 {
-	// Because unused fields are filled with NULs, we need
-	// to skip leading NULs. Fields may also be padded with
-	// spaces or NULs.
-	// So we remove leading and trailing NULs and spaces to
-	// be sure.
-	b = bytes.Trim(b, " \x00")
-
-	if len(b) == 0 {
-		return 0
-	}
-	x, perr := strconv.ParseUint(p.parseString(b), 8, 64)
-	if perr != nil {
-		p.err = ErrHeader
-	}
-	return int64(x)
-}
-
-func (f *formatter) formatOctal(b []byte, x int64) {
-	if !fitsInOctal(len(b), x) {
-		x = 0 // Last resort, just write zero
-		f.err = ErrFieldTooLong
-	}
-
-	s := strconv.FormatInt(x, 8)
-	// Add leading zeros, but leave room for a NUL.
-	if n := len(b) - len(s) - 1; n > 0 {
-		s = strings.Repeat("0", n) + s
-	}
-	f.formatString(b, s)
-}
-
-// fitsInOctal reports whether the integer x fits in a field n-bytes long
-// using octal encoding with the appropriate NUL terminator.
-func fitsInOctal(n int, x int64) bool {
-	octBits := uint(n-1) * 3
-	return x >= 0 && (n >= 22 || x < 1<<octBits)
-}
-
-// parsePAXTime takes a string of the form %d.%d as described in the PAX
-// specification. Note that this implementation allows for negative timestamps,
-// which is allowed for by the PAX specification, but not always portable.
-func parsePAXTime(s string) (time.Time, error) {
-	const maxNanoSecondDigits = 9
-
-	// Split string into seconds and sub-seconds parts.
-	ss, sn := s, ""
-	if pos := strings.IndexByte(s, '.'); pos >= 0 {
-		ss, sn = s[:pos], s[pos+1:]
-	}
-
-	// Parse the seconds.
-	secs, err := strconv.ParseInt(ss, 10, 64)
-	if err != nil {
-		return time.Time{}, ErrHeader
-	}
-	if len(sn) == 0 {
-		return time.Unix(secs, 0), nil // No sub-second values
-	}
-
-	// Parse the nanoseconds.
-	if strings.Trim(sn, "0123456789") != "" {
-		return time.Time{}, ErrHeader
-	}
-	if len(sn) < maxNanoSecondDigits {
-		sn += strings.Repeat("0", maxNanoSecondDigits-len(sn)) // Right pad
-	} else {
-		sn = sn[:maxNanoSecondDigits] // Right truncate
-	}
-	nsecs, _ := strconv.ParseInt(sn, 10, 64) // Must succeed
-	if len(ss) > 0 && ss[0] == '-' {
-		return time.Unix(secs, -1*int64(nsecs)), nil // Negative correction
-	}
-	return time.Unix(secs, int64(nsecs)), nil
-}
-
-// formatPAXTime converts ts into a time of the form %d.%d as described in the
-// PAX specification. This function is capable of negative timestamps.
-func formatPAXTime(ts time.Time) (s string) { - secs, nsecs := ts.Unix(), ts.Nanosecond() - if nsecs == 0 { - return strconv.FormatInt(secs, 10) - } - - // If seconds is negative, then perform correction. - sign := "" - if secs < 0 { - sign = "-" // Remember sign - secs = -(secs + 1) // Add a second to secs - nsecs = -(nsecs - 1E9) // Take that second away from nsecs - } - return strings.TrimRight(fmt.Sprintf("%s%d.%09d", sign, secs, nsecs), "0") -} - -// parsePAXRecord parses the input PAX record string into a key-value pair. -// If parsing is successful, it will slice off the currently read record and -// return the remainder as r. -func parsePAXRecord(s string) (k, v, r string, err error) { - // The size field ends at the first space. - sp := strings.IndexByte(s, ' ') - if sp == -1 { - return "", "", s, ErrHeader - } - - // Parse the first token as a decimal integer. - n, perr := strconv.ParseInt(s[:sp], 10, 0) // Intentionally parse as native int - if perr != nil || n < 5 || int64(len(s)) < n { - return "", "", s, ErrHeader - } - - // Extract everything between the space and the final newline. - rec, nl, rem := s[sp+1:n-1], s[n-1:n], s[n:] - if nl != "\n" { - return "", "", s, ErrHeader - } - - // The first equals separates the key from the value. - eq := strings.IndexByte(rec, '=') - if eq == -1 { - return "", "", s, ErrHeader - } - k, v = rec[:eq], rec[eq+1:] - - if !validPAXRecord(k, v) { - return "", "", s, ErrHeader - } - return k, v, rem, nil -} - -// formatPAXRecord formats a single PAX record, prefixing it with the -// appropriate length. -func formatPAXRecord(k, v string) (string, error) { - if !validPAXRecord(k, v) { - return "", ErrHeader - } - - const padding = 3 // Extra padding for ' ', '=', and '\n' - size := len(k) + len(v) + padding - size += len(strconv.Itoa(size)) - record := strconv.Itoa(size) + " " + k + "=" + v + "\n" - - // Final adjustment if adding size field increased the record size. - if len(record) != size { - size = len(record) - record = strconv.Itoa(size) + " " + k + "=" + v + "\n" - } - return record, nil -} - -// validPAXRecord reports whether the key-value pair is valid where each -// record is formatted as: -// "%d %s=%s\n" % (size, key, value) -// -// Keys and values should be UTF-8, but the number of bad writers out there -// forces us to be a more liberal. -// Thus, we only reject all keys with NUL, and only reject NULs in values -// for the PAX version of the USTAR string fields. -// The key must not contain an '=' character. -func validPAXRecord(k, v string) bool { - if k == "" || strings.IndexByte(k, '=') >= 0 { - return false - } - switch k { - case paxPath, paxLinkpath, paxUname, paxGname: - return !hasNUL(v) - default: - return !hasNUL(k) - } -} diff --git a/vendor/github.com/dmcgowan/go-tar/writer.go b/vendor/github.com/dmcgowan/go-tar/writer.go deleted file mode 100644 index 2eed61934..000000000 --- a/vendor/github.com/dmcgowan/go-tar/writer.go +++ /dev/null @@ -1,629 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package tar - -import ( - "bytes" - "fmt" - "io" - "path" - "sort" - "strconv" - "strings" - "time" -) - -// Writer provides sequential writing of a tar archive. -// Write.WriteHeader begins a new file with the provided Header, -// and then Writer can be treated as an io.Writer to supply that file's data. 
-type Writer struct {
-	w io.Writer
-	pad int64 // Amount of padding to write after current file entry
-	curr fileWriter // Writer for current file entry
-	hdr Header // Shallow copy of Header that is safe for mutations
-	blk block // Buffer to use as temporary local storage
-
-	// err is a persistent error.
-	// It is only the responsibility of every exported method of Writer to
-	// ensure that this error is sticky.
-	err error
-}
-
-// NewWriter creates a new Writer writing to w.
-func NewWriter(w io.Writer) *Writer {
-	return &Writer{w: w, curr: &regFileWriter{w, 0}}
-}
-
-type fileWriter interface {
-	io.Writer
-	fileState
-
-	ReadFrom(io.Reader) (int64, error)
-}
-
-// Flush finishes writing the current file's block padding.
-// The current file must be fully written before Flush can be called.
-//
-// Deprecated: This is unnecessary as the next call to WriteHeader or Close
-// will implicitly flush out the file's padding.
-func (tw *Writer) Flush() error {
-	if tw.err != nil {
-		return tw.err
-	}
-	if nb := tw.curr.LogicalRemaining(); nb > 0 {
-		return fmt.Errorf("tar: missed writing %d bytes", nb)
-	}
-	if _, tw.err = tw.w.Write(zeroBlock[:tw.pad]); tw.err != nil {
-		return tw.err
-	}
-	tw.pad = 0
-	return nil
-}
-
-// WriteHeader writes hdr and prepares to accept the file's contents.
-// The Header.Size determines how many bytes can be written for the next file.
-// If the current file is not fully written, then this returns an error.
-// This implicitly flushes any padding necessary before writing the header.
-func (tw *Writer) WriteHeader(hdr *Header) error {
-	if err := tw.Flush(); err != nil {
-		return err
-	}
-	tw.hdr = *hdr // Shallow copy of Header
-
-	// Round ModTime and ignore AccessTime and ChangeTime unless
-	// the format is explicitly chosen.
-	// This ensures nominal usage of WriteHeader (without specifying the format)
-	// does not always result in the PAX format being chosen, which
-	// causes a 1KiB increase to every header.
-	if tw.hdr.Format == FormatUnknown {
-		tw.hdr.ModTime = tw.hdr.ModTime.Round(time.Second)
-		tw.hdr.AccessTime = time.Time{}
-		tw.hdr.ChangeTime = time.Time{}
-	}
-
-	allowedFormats, paxHdrs, err := tw.hdr.allowedFormats()
-	switch {
-	case allowedFormats.has(FormatUSTAR):
-		tw.err = tw.writeUSTARHeader(&tw.hdr)
-		return tw.err
-	case allowedFormats.has(FormatPAX):
-		tw.err = tw.writePAXHeader(&tw.hdr, paxHdrs)
-		return tw.err
-	case allowedFormats.has(FormatGNU):
-		tw.err = tw.writeGNUHeader(&tw.hdr)
-		return tw.err
-	default:
-		return err // Non-fatal error
-	}
-}
-
-func (tw *Writer) writeUSTARHeader(hdr *Header) error {
-	// Check if we can use USTAR prefix/suffix splitting.
-	var namePrefix string
-	if prefix, suffix, ok := splitUSTARPath(hdr.Name); ok {
-		namePrefix, hdr.Name = prefix, suffix
-	}
-
-	// Pack the main header.
-	var f formatter
-	blk := tw.templateV7Plus(hdr, f.formatString, f.formatOctal)
-	f.formatString(blk.USTAR().Prefix(), namePrefix)
-	blk.SetFormat(FormatUSTAR)
-	if f.err != nil {
-		return f.err // Should never happen since header is validated
-	}
-	return tw.writeRawHeader(blk, hdr.Size, hdr.Typeflag)
-}
-
-func (tw *Writer) writePAXHeader(hdr *Header, paxHdrs map[string]string) error {
-	realName, realSize := hdr.Name, hdr.Size
-
-	// Handle sparse files.
-	var spd sparseDatas
-	var spb []byte
-	if len(hdr.SparseHoles) > 0 {
-		sph := append([]SparseEntry{}, hdr.SparseHoles...) // Copy sparse map
-		sph = alignSparseEntries(sph, hdr.Size)
-		spd = invertSparseEntries(sph, hdr.Size)
-
-		// Format the sparse map.
- hdr.Size = 0 // Replace with encoded size - spb = append(strconv.AppendInt(spb, int64(len(spd)), 10), '\n') - for _, s := range spd { - hdr.Size += s.Length - spb = append(strconv.AppendInt(spb, s.Offset, 10), '\n') - spb = append(strconv.AppendInt(spb, s.Length, 10), '\n') - } - pad := blockPadding(int64(len(spb))) - spb = append(spb, zeroBlock[:pad]...) - hdr.Size += int64(len(spb)) // Accounts for encoded sparse map - - // Add and modify appropriate PAX records. - dir, file := path.Split(realName) - hdr.Name = path.Join(dir, "GNUSparseFile.0", file) - paxHdrs[paxGNUSparseMajor] = "1" - paxHdrs[paxGNUSparseMinor] = "0" - paxHdrs[paxGNUSparseName] = realName - paxHdrs[paxGNUSparseRealSize] = strconv.FormatInt(realSize, 10) - paxHdrs[paxSize] = strconv.FormatInt(hdr.Size, 10) - delete(paxHdrs, paxPath) // Recorded by paxGNUSparseName - } - - // Write PAX records to the output. - isGlobal := hdr.Typeflag == TypeXGlobalHeader - if len(paxHdrs) > 0 || isGlobal { - // Sort keys for deterministic ordering. - var keys []string - for k := range paxHdrs { - keys = append(keys, k) - } - sort.Strings(keys) - - // Write each record to a buffer. - var buf bytes.Buffer - for _, k := range keys { - rec, err := formatPAXRecord(k, paxHdrs[k]) - if err != nil { - return err - } - buf.WriteString(rec) - } - - // Write the extended header file. - var name string - var flag byte - if isGlobal { - name = "GlobalHead.0.0" - flag = TypeXGlobalHeader - } else { - dir, file := path.Split(realName) - name = path.Join(dir, "PaxHeaders.0", file) - flag = TypeXHeader - } - data := buf.String() - if err := tw.writeRawFile(name, data, flag, FormatPAX); err != nil || isGlobal { - return err // Global headers return here - } - } - - // Pack the main header. - var f formatter // Ignore errors since they are expected - fmtStr := func(b []byte, s string) { f.formatString(b, toASCII(s)) } - blk := tw.templateV7Plus(hdr, fmtStr, f.formatOctal) - blk.SetFormat(FormatPAX) - if err := tw.writeRawHeader(blk, hdr.Size, hdr.Typeflag); err != nil { - return err - } - - // Write the sparse map and setup the sparse writer if necessary. - if len(spd) > 0 { - // Use tw.curr since the sparse map is accounted for in hdr.Size. - if _, err := tw.curr.Write(spb); err != nil { - return err - } - tw.curr = &sparseFileWriter{tw.curr, spd, 0} - } - return nil -} - -func (tw *Writer) writeGNUHeader(hdr *Header) error { - // Use long-link files if Name or Linkname exceeds the field size. - const longName = "././@LongLink" - if len(hdr.Name) > nameSize { - data := hdr.Name + "\x00" - if err := tw.writeRawFile(longName, data, TypeGNULongName, FormatGNU); err != nil { - return err - } - } - if len(hdr.Linkname) > nameSize { - data := hdr.Linkname + "\x00" - if err := tw.writeRawFile(longName, data, TypeGNULongLink, FormatGNU); err != nil { - return err - } - } - - // Pack the main header. - var f formatter // Ignore errors since they are expected - var spd sparseDatas - var spb []byte - blk := tw.templateV7Plus(hdr, f.formatString, f.formatNumeric) - if !hdr.AccessTime.IsZero() { - f.formatNumeric(blk.GNU().AccessTime(), hdr.AccessTime.Unix()) - } - if !hdr.ChangeTime.IsZero() { - f.formatNumeric(blk.GNU().ChangeTime(), hdr.ChangeTime.Unix()) - } - if hdr.Typeflag == TypeGNUSparse { - sph := append([]SparseEntry{}, hdr.SparseHoles...) // Copy sparse map - sph = alignSparseEntries(sph, hdr.Size) - spd = invertSparseEntries(sph, hdr.Size) - - // Format the sparse map. 
- formatSPD := func(sp sparseDatas, sa sparseArray) sparseDatas { - for i := 0; len(sp) > 0 && i < sa.MaxEntries(); i++ { - f.formatNumeric(sa.Entry(i).Offset(), sp[0].Offset) - f.formatNumeric(sa.Entry(i).Length(), sp[0].Length) - sp = sp[1:] - } - if len(sp) > 0 { - sa.IsExtended()[0] = 1 - } - return sp - } - sp2 := formatSPD(spd, blk.GNU().Sparse()) - for len(sp2) > 0 { - var spHdr block - sp2 = formatSPD(sp2, spHdr.Sparse()) - spb = append(spb, spHdr[:]...) - } - - // Update size fields in the header block. - realSize := hdr.Size - hdr.Size = 0 // Encoded size; does not account for encoded sparse map - for _, s := range spd { - hdr.Size += s.Length - } - copy(blk.V7().Size(), zeroBlock[:]) // Reset field - f.formatNumeric(blk.V7().Size(), hdr.Size) - f.formatNumeric(blk.GNU().RealSize(), realSize) - } - blk.SetFormat(FormatGNU) - if err := tw.writeRawHeader(blk, hdr.Size, hdr.Typeflag); err != nil { - return err - } - - // Write the extended sparse map and setup the sparse writer if necessary. - if len(spd) > 0 { - // Use tw.w since the sparse map is not accounted for in hdr.Size. - if _, err := tw.w.Write(spb); err != nil { - return err - } - tw.curr = &sparseFileWriter{tw.curr, spd, 0} - } - return nil -} - -type ( - stringFormatter func([]byte, string) - numberFormatter func([]byte, int64) -) - -// templateV7Plus fills out the V7 fields of a block using values from hdr. -// It also fills out fields (uname, gname, devmajor, devminor) that are -// shared in the USTAR, PAX, and GNU formats using the provided formatters. -// -// The block returned is only valid until the next call to -// templateV7Plus or writeRawFile. -func (tw *Writer) templateV7Plus(hdr *Header, fmtStr stringFormatter, fmtNum numberFormatter) *block { - tw.blk.Reset() - - modTime := hdr.ModTime - if modTime.IsZero() { - modTime = time.Unix(0, 0) - } - - v7 := tw.blk.V7() - v7.TypeFlag()[0] = hdr.Typeflag - fmtStr(v7.Name(), hdr.Name) - fmtStr(v7.LinkName(), hdr.Linkname) - fmtNum(v7.Mode(), hdr.Mode) - fmtNum(v7.UID(), int64(hdr.Uid)) - fmtNum(v7.GID(), int64(hdr.Gid)) - fmtNum(v7.Size(), hdr.Size) - fmtNum(v7.ModTime(), modTime.Unix()) - - ustar := tw.blk.USTAR() - fmtStr(ustar.UserName(), hdr.Uname) - fmtStr(ustar.GroupName(), hdr.Gname) - fmtNum(ustar.DevMajor(), hdr.Devmajor) - fmtNum(ustar.DevMinor(), hdr.Devminor) - - return &tw.blk -} - -// writeRawFile writes a minimal file with the given name and flag type. -// It uses format to encode the header format and will write data as the body. -// It uses default values for all of the other fields (as BSD and GNU tar does). -func (tw *Writer) writeRawFile(name, data string, flag byte, format Format) error { - tw.blk.Reset() - - // Best effort for the filename. - name = toASCII(name) - if len(name) > nameSize { - name = name[:nameSize] - } - name = strings.TrimRight(name, "/") - - var f formatter - v7 := tw.blk.V7() - v7.TypeFlag()[0] = flag - f.formatString(v7.Name(), name) - f.formatOctal(v7.Mode(), 0) - f.formatOctal(v7.UID(), 0) - f.formatOctal(v7.GID(), 0) - f.formatOctal(v7.Size(), int64(len(data))) // Must be < 8GiB - f.formatOctal(v7.ModTime(), 0) - tw.blk.SetFormat(format) - if f.err != nil { - return f.err // Only occurs if size condition is violated - } - - // Write the header and data. - if err := tw.writeRawHeader(&tw.blk, int64(len(data)), flag); err != nil { - return err - } - _, err := io.WriteString(tw, data) - return err -} - -// writeRawHeader writes the value of blk, regardless of its value. 
-// It sets up the Writer such that it can accept a file of the given size.
-// If the flag is a special header-only flag, then the size is treated as zero.
-func (tw *Writer) writeRawHeader(blk *block, size int64, flag byte) error {
-	if err := tw.Flush(); err != nil {
-		return err
-	}
-	if _, err := tw.w.Write(blk[:]); err != nil {
-		return err
-	}
-	if isHeaderOnlyType(flag) {
-		size = 0
-	}
-	tw.curr = &regFileWriter{tw.w, size}
-	tw.pad = blockPadding(size)
-	return nil
-}
-
-// splitUSTARPath splits a path according to USTAR prefix and suffix rules.
-// If the path is not splittable, then it will return ("", "", false).
-func splitUSTARPath(name string) (prefix, suffix string, ok bool) {
-	length := len(name)
-	if length <= nameSize || !isASCII(name) {
-		return "", "", false
-	} else if length > prefixSize+1 {
-		length = prefixSize + 1
-	} else if name[length-1] == '/' {
-		length--
-	}
-
-	i := strings.LastIndex(name[:length], "/")
-	nlen := len(name) - i - 1 // nlen is length of suffix
-	plen := i // plen is length of prefix
-	if i <= 0 || nlen > nameSize || nlen == 0 || plen > prefixSize {
-		return "", "", false
-	}
-	return name[:i], name[i+1:], true
-}
-
-// Write writes to the current file in the tar archive.
-// Write returns the error ErrWriteTooLong if more than
-// Header.Size bytes are written after WriteHeader.
-//
-// If the current file is sparse, then the regions marked as a hole
-// must be written as NUL-bytes.
-//
-// Calling Write on special types like TypeLink, TypeSymlink, TypeChar,
-// TypeBlock, TypeDir, and TypeFifo returns (0, ErrWriteTooLong) regardless
-// of what the Header.Size claims.
-func (tw *Writer) Write(b []byte) (int, error) {
-	if tw.err != nil {
-		return 0, tw.err
-	}
-	n, err := tw.curr.Write(b)
-	if err != nil && err != ErrWriteTooLong {
-		tw.err = err
-	}
-	return n, err
-}
-
-// ReadFrom populates the content of the current file by reading from r.
-// The bytes read must match the number of remaining bytes in the current file.
-//
-// If the current file is sparse and r is an io.ReadSeeker,
-// then ReadFrom uses Seek to skip past holes defined in Header.SparseHoles,
-// assuming that skipped regions are all NULs.
-// This always reads the last byte to ensure r is the right size.
-func (tw *Writer) ReadFrom(r io.Reader) (int64, error) {
-	if tw.err != nil {
-		return 0, tw.err
-	}
-	n, err := tw.curr.ReadFrom(r)
-	if err != nil && err != ErrWriteTooLong {
-		tw.err = err
-	}
-	return n, err
-}
-
-// Close closes the tar archive by flushing the padding, and writing the footer.
-// If the current file (from a prior call to WriteHeader) is not fully written,
-// then this returns an error.
-func (tw *Writer) Close() error {
-	if tw.err == ErrWriteAfterClose {
-		return nil
-	}
-	if tw.err != nil {
-		return tw.err
-	}
-
-	// Trailer: two zero blocks.
-	err := tw.Flush()
-	for i := 0; i < 2 && err == nil; i++ {
-		_, err = tw.w.Write(zeroBlock[:])
-	}
-
-	// Ensure all future actions are invalid.
-	tw.err = ErrWriteAfterClose
-	return err // Report IO errors
-}
-
-// regFileWriter is a fileWriter for writing data to a regular file entry.
-type regFileWriter struct { - w io.Writer // Underlying Writer - nb int64 // Number of remaining bytes to write -} - -func (fw *regFileWriter) Write(b []byte) (n int, err error) { - overwrite := int64(len(b)) > fw.nb - if overwrite { - b = b[:fw.nb] - } - if len(b) > 0 { - n, err = fw.w.Write(b) - fw.nb -= int64(n) - } - switch { - case err != nil: - return n, err - case overwrite: - return n, ErrWriteTooLong - default: - return n, nil - } -} - -func (fw *regFileWriter) ReadFrom(r io.Reader) (int64, error) { - return io.Copy(struct{ io.Writer }{fw}, r) -} - -func (fw regFileWriter) LogicalRemaining() int64 { - return fw.nb -} -func (fw regFileWriter) PhysicalRemaining() int64 { - return fw.nb -} - -// sparseFileWriter is a fileWriter for writing data to a sparse file entry. -type sparseFileWriter struct { - fw fileWriter // Underlying fileWriter - sp sparseDatas // Normalized list of data fragments - pos int64 // Current position in sparse file -} - -func (sw *sparseFileWriter) Write(b []byte) (n int, err error) { - overwrite := int64(len(b)) > sw.LogicalRemaining() - if overwrite { - b = b[:sw.LogicalRemaining()] - } - - b0 := b - endPos := sw.pos + int64(len(b)) - for endPos > sw.pos && err == nil { - var nf int // Bytes written in fragment - dataStart, dataEnd := sw.sp[0].Offset, sw.sp[0].endOffset() - if sw.pos < dataStart { // In a hole fragment - bf := b[:min(int64(len(b)), dataStart-sw.pos)] - nf, err = zeroWriter{}.Write(bf) - } else { // In a data fragment - bf := b[:min(int64(len(b)), dataEnd-sw.pos)] - nf, err = sw.fw.Write(bf) - } - b = b[nf:] - sw.pos += int64(nf) - if sw.pos >= dataEnd && len(sw.sp) > 1 { - sw.sp = sw.sp[1:] // Ensure last fragment always remains - } - } - - n = len(b0) - len(b) - switch { - case err == ErrWriteTooLong: - return n, errMissData // Not possible; implies bug in validation logic - case err != nil: - return n, err - case sw.LogicalRemaining() == 0 && sw.PhysicalRemaining() > 0: - return n, errUnrefData // Not possible; implies bug in validation logic - case overwrite: - return n, ErrWriteTooLong - default: - return n, nil - } -} - -func (sw *sparseFileWriter) ReadFrom(r io.Reader) (n int64, err error) { - rs, ok := r.(io.ReadSeeker) - if ok { - if _, err := rs.Seek(0, io.SeekCurrent); err != nil { - ok = false // Not all io.Seeker can really seek - } - } - if !ok { - return io.Copy(struct{ io.Writer }{sw}, r) - } - - var readLastByte bool - pos0 := sw.pos - for sw.LogicalRemaining() > 0 && !readLastByte && err == nil { - var nf int64 // Size of fragment - dataStart, dataEnd := sw.sp[0].Offset, sw.sp[0].endOffset() - if sw.pos < dataStart { // In a hole fragment - nf = dataStart - sw.pos - if sw.PhysicalRemaining() == 0 { - readLastByte = true - nf-- - } - _, err = rs.Seek(nf, io.SeekCurrent) - } else { // In a data fragment - nf = dataEnd - sw.pos - nf, err = io.CopyN(sw.fw, rs, nf) - } - sw.pos += nf - if sw.pos >= dataEnd && len(sw.sp) > 1 { - sw.sp = sw.sp[1:] // Ensure last fragment always remains - } - } - - // If the last fragment is a hole, then seek to 1-byte before EOF, and - // read a single byte to ensure the file is the right size. 
- if readLastByte && err == nil { - _, err = mustReadFull(rs, []byte{0}) - sw.pos++ - } - - n = sw.pos - pos0 - switch { - case err == io.EOF: - return n, io.ErrUnexpectedEOF - case err == ErrWriteTooLong: - return n, errMissData // Not possible; implies bug in validation logic - case err != nil: - return n, err - case sw.LogicalRemaining() == 0 && sw.PhysicalRemaining() > 0: - return n, errUnrefData // Not possible; implies bug in validation logic - default: - return n, ensureEOF(rs) - } -} - -func (sw sparseFileWriter) LogicalRemaining() int64 { - return sw.sp[len(sw.sp)-1].endOffset() - sw.pos -} -func (sw sparseFileWriter) PhysicalRemaining() int64 { - return sw.fw.PhysicalRemaining() -} - -// zeroWriter may only be written with NULs, otherwise it returns errWriteHole. -type zeroWriter struct{} - -func (zeroWriter) Write(b []byte) (int, error) { - for i, c := range b { - if c != 0 { - return i, errWriteHole - } - } - return len(b), nil -} - -// ensureEOF checks whether r is at EOF, reporting ErrWriteTooLong if not so. -func ensureEOF(r io.Reader) error { - n, err := tryReadFull(r, []byte{0}) - switch { - case n > 0: - return ErrWriteTooLong - case err == io.EOF: - return nil - default: - return err - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/util/interrupt/interrupt.go b/vendor/k8s.io/kubernetes/pkg/util/interrupt/interrupt.go deleted file mode 100644 index 0265b9fb1..000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/interrupt/interrupt.go +++ /dev/null @@ -1,104 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package interrupt - -import ( - "os" - "os/signal" - "sync" - "syscall" -) - -// terminationSignals are signals that cause the program to exit in the -// supported platforms (linux, darwin, windows). -var terminationSignals = []os.Signal{syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT} - -// Handler guarantees execution of notifications after a critical section (the function passed -// to a Run method), even in the presence of process termination. It guarantees exactly once -// invocation of the provided notify functions. -type Handler struct { - notify []func() - final func(os.Signal) - once sync.Once -} - -// Chain creates a new handler that invokes all notify functions when the critical section exits -// and then invokes the optional handler's notifications. This allows critical sections to be -// nested without losing exactly once invocations. Notify functions can invoke any cleanup needed -// but should not exit (which is the responsibility of the parent handler). -func Chain(handler *Handler, notify ...func()) *Handler { - if handler == nil { - return New(nil, notify...) - } - return New(handler.Signal, append(notify, handler.Close)...) -} - -// New creates a new handler that guarantees all notify functions are run after the critical -// section exits (or is interrupted by the OS), then invokes the final handler. If no final -// handler is specified, the default final is `os.Exit(1)`. 
A handler can only be used for -// one critical section. -func New(final func(os.Signal), notify ...func()) *Handler { - return &Handler{ - final: final, - notify: notify, - } -} - -// Close executes all the notification handlers if they have not yet been executed. -func (h *Handler) Close() { - h.once.Do(func() { - for _, fn := range h.notify { - fn() - } - }) -} - -// Signal is called when an os.Signal is received, and guarantees that all notifications -// are executed, then the final handler is executed. This function should only be called once -// per Handler instance. -func (h *Handler) Signal(s os.Signal) { - h.once.Do(func() { - for _, fn := range h.notify { - fn() - } - if h.final == nil { - os.Exit(1) - } - h.final(s) - }) -} - -// Run ensures that any notifications are invoked after the provided fn exits (even if the -// process is interrupted by an OS termination signal). Notifications are only invoked once -// per Handler instance, so calling Run more than once will not behave as the user expects. -func (h *Handler) Run(fn func() error) error { - ch := make(chan os.Signal, 1) - signal.Notify(ch, terminationSignals...) - defer func() { - signal.Stop(ch) - close(ch) - }() - go func() { - sig, ok := <-ch - if !ok { - return - } - h.Signal(sig) - }() - defer h.Close() - return fn() -}