Prepare cri for merge to containerd

Signed-off-by: Derek McGowan <derek@mcg.dev>

parent a0b3b4e4da
commit 0820015314
@@ -1,30 +0,0 @@
version: "{build}"

image: Visual Studio 2019

clone_folder: c:\gopath\src\github.com\containerd\cri

environment:
  GOPATH: C:\gopath
  PATH: C:\go\bin;C:\tools\mingw64\bin;$(PATH)
  CGO_ENABLED: 1
  GO111MODULE: off
  matrix:
    - GO_VERSION: 1.13.15

install:
  # Install Mingw
  - choco install -y mingw --version 5.3.0
  # Install Go
  - rd C:\Go /s /q
  - appveyor DownloadFile https://storage.googleapis.com/golang/go%GO_VERSION%.windows-amd64.zip
  - 7z x go%GO_VERSION%.windows-amd64.zip -oC:\ >nul
  - go version
  # Print powershell version
  - ps: $psversiontable

build_script:
  - bash.exe -elc "mingw32-make.exe"

test_script:
  - bash.exe -elc "mingw32-make.exe test"
177 .github/workflows/ci.yml
@@ -1,177 +0,0 @@
name: CI
on: [push, pull_request]

jobs:
  #
  # Project checks
  #
  project:
    name: Project Checks (DCO, Headers, Vendor)
    runs-on: ubuntu-18.04
    timeout-minutes: 5

    steps:
      - name: Install Go
        uses: actions/setup-go@v1
        with:
          go-version: '1.13.15'

      - name: Set env
        shell: bash
        run: |
          echo "::set-env name=GOPATH::${{ github.workspace }}"
          echo "::add-path::${{ github.workspace }}/bin"

      - name: Checkout cri repo
        uses: actions/checkout@v2
        with:
          path: ${{github.workspace}}/src/github.com/containerd/cri
          fetch-depth: 150

      - name: Checkout project repo
        uses: actions/checkout@v2
        with:
          repository: containerd/project
          path: src/github.com/containerd/project

      #
      # Go get dependencies
      #
      - name: Install go get dependencies
        env:
          GO111MODULE: off
        run: |
          go get -u github.com/vbatts/git-validation
          go get -u github.com/kunalkushwaha/ltag
          go get -u github.com/LK4D4/vndr

      #
      # DCO / File headers / Vendor directory validation
      #
      - name: DCO
        env:
          GITHUB_COMMIT_URL: ${{ github.event.pull_request.commits_url }}
          DCO_VERBOSITY: "-v"
          DCO_RANGE: ""
        working-directory: src/github.com/containerd/cri
        run: |
          set -x
          if [ -z "${GITHUB_COMMIT_URL}" ]; then
            DCO_RANGE=$(jq -r '.after + "..HEAD"' ${GITHUB_EVENT_PATH})
          else
            DCO_RANGE=$(curl ${GITHUB_COMMIT_URL} | jq -r '.[0].parents[0].sha +".."+ .[-1].sha')
          fi
          ../project/script/validate/dco

      - name: Headers
        working-directory: src/github.com/containerd/cri
        run: |
          ltag -t "../project/script/validate/template" --check -v

      - name: Vendor
        working-directory: src/github.com/containerd/cri
        run: ../project/script/validate/vendor

  #
  # build, unit, integration, and CRI tests
  #
  linux-build-and-test:
    name: Build, Unit, Integration, and CRI (linux amd64)
    runs-on: ubuntu-18.04
    steps:
      - name: Install Go
        uses: actions/setup-go@v1
        with:
          go-version: '1.13.15'

      - name: Set env
        shell: bash
        run: |
          echo "::set-env name=GOPATH::${{ github.workspace }}"
          echo "::add-path::${{ github.workspace }}/bin"

      - name: Checkout cri repo
        uses: actions/checkout@v2
        with:
          path: ${{github.workspace}}/src/github.com/containerd/cri
          fetch-depth: 150

      - name: Before install
        run: |
          sudo sysctl net.ipv6.conf.all.disable_ipv6=0

      - name: Install dependencies
        run: |
          sudo apt-get update
          sudo apt-get install -y \
            btrfs-tools \
            libseccomp2 \
            libseccomp-dev
          make install.deps
        working-directory: ${{github.workspace}}/src/github.com/containerd/cri

      - name: Install containerd
        run: |
          make containerd
          sudo PATH=$PATH GOPATH=$GOPATH make install-containerd
        working-directory: ${{github.workspace}}/src/github.com/containerd/cri

      - name: Unit Test
        run: |
          make test
        working-directory: ${{github.workspace}}/src/github.com/containerd/cri

      - name: Integration Test
        run: |
          make test-integration
        working-directory: ${{github.workspace}}/src/github.com/containerd/cri

      - name: Upload Integration Log File
        uses: actions/upload-artifact@v1
        with:
          name: integration-test-${{github.sha}}.log
          path: /tmp/test-integration/containerd.log

      - name: CRI Test
        run: |
          make test-cri
        working-directory: ${{github.workspace}}/src/github.com/containerd/cri

      - name: Upload CRI Test log file
        uses: actions/upload-artifact@v1
        with:
          name: cri-test-${{github.sha}}.log
          path: /tmp/test-cri/containerd.log

  test-windows:
    name: Build and CRI Test (Windows amd64)
    runs-on: windows-2016
    steps:
      - name: Set up Go 1.13.15
        uses: actions/setup-go@v1
        with:
          go-version: 1.13.15

      - name: Checkout cri repo
        uses: actions/checkout@v2
        with:
          path: ${{github.workspace}}\\src\\github.com\\containerd\\cri

      - name: Clone containerd repo
        run: |
          bash.exe -c "GO111MODULE=off go get github.com/containerd/containerd"

      - name: Configure Windows environment variables
        run: |
          echo "::set-env name=GOPATH::$env:GITHUB_WORKSPACE"

      - name: Build
        run: |
          bash.exe -c "pwd && ./test/windows/test.sh"
        working-directory: ${{github.workspace}}\\src\\github.com\\containerd\\cri

      - name: Upload containerd log file
        uses: actions/upload-artifact@v1
        with:
          name: cadvisor-${{github.sha}}.log
          path: c:\\_artifacts\\containerd.log
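The DCO step above switches how it computes the commit range: a push reads the `after` commit from the event payload, while a pull request walks the commits API. A minimal standalone sketch of that selection logic, assuming the same `GITHUB_EVENT_PATH`/`GITHUB_COMMIT_URL` inputs and a checkout of `containerd/project` next to the repo (running it outside Actions is the assumption here):

```bash
#!/usr/bin/env bash
# Sketch: reproduce the DCO range selection used in the workflow above.
# GITHUB_COMMIT_URL is only set for pull_request events, as in the workflow.
set -eux

if [ -z "${GITHUB_COMMIT_URL:-}" ]; then
  # Push event: validate everything after the pushed base commit.
  DCO_RANGE=$(jq -r '.after + "..HEAD"' "${GITHUB_EVENT_PATH}")
else
  # Pull request: range from the first commit's parent to the last commit.
  DCO_RANGE=$(curl -s "${GITHUB_COMMIT_URL}" | jq -r '.[0].parents[0].sha + ".." + .[-1].sha')
fi

DCO_VERBOSITY="-v" DCO_RANGE="${DCO_RANGE}" ../project/script/validate/dco
```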
1 .gitignore
@@ -1 +0,0 @@
_output/*
@@ -1,22 +0,0 @@
linters:
  enable:
    - structcheck
    - varcheck
    - staticcheck
    - unconvert
    - gofmt
    - goimports
    - golint
    - ineffassign
    - vet
    - unused
    - misspell
  disable:
    - errcheck

run:
  timeout: 3m
  skip-dirs:
    - docs
  skip-files:
    - ".*\\.pb.go"
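To reproduce the lint check locally with this configuration, something like the following should work; the golangci-lint v1.18.0 pin is taken from the Makefile later in this diff, and the timeout and skip rules come from the config file itself, which is picked up automatically from the working directory:

```bash
# Run the same linters CI uses, from the repository root.
cd "${GOPATH}/src/github.com/containerd/cri"
golangci-lint run
```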
201 LICENSE
@@ -1,201 +0,0 @@
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "{}"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright {yyyy} {name of copyright owner}
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
222 Makefile
@@ -1,222 +0,0 @@
# Copyright The containerd Authors.

# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at

#     http://www.apache.org/licenses/LICENSE-2.0

# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

GO := go
GOOS := $(shell $(GO) env GOOS)
GOARCH := $(shell $(GO) env GOARCH)
WHALE := "🇩"
ONI := "👹"
ifeq ($(GOOS),windows)
	WHALE = "+"
	ONI = "-"
endif
EPOCH_TEST_COMMIT := 67de3e4ccf2b2a69b8398798af7cfca01abf7a7e
PROJECT := github.com/containerd/cri
BINDIR := ${DESTDIR}/usr/local/bin
BUILD_DIR := _output
# VERSION is derived from the current commit for HEAD. Version is used
# to set/overide the containerd version in vendor/github.com/containerd/containerd/version.
VERSION := $(shell git rev-parse --short HEAD)
TARBALL_PREFIX := cri-containerd
TARBALL := $(TARBALL_PREFIX)-$(VERSION).$(GOOS)-$(GOARCH).tar.gz
ifneq ($(GOOS),windows)
	BUILD_TAGS := seccomp apparmor selinux
endif
export BUILDTAGS := $(BUILD_TAGS)
# Add `-TEST` suffix to indicate that all binaries built from this repo are for test.
GO_LDFLAGS := -X $(PROJECT)/vendor/github.com/containerd/containerd/version.Version=$(VERSION)-TEST
SOURCES := $(shell find cmd/ pkg/ vendor/ -name '*.go')
PLUGIN_SOURCES := $(shell ls *.go)
INTEGRATION_SOURCES := $(shell find integration/ -name '*.go')

CONTAINERD_BIN := containerd
ifeq ($(GOOS),windows)
	CONTAINERD_BIN := $(CONTAINERD_BIN).exe
endif

all: binaries

help: ## this help
	@awk 'BEGIN {FS = ":.*?## "} /^[a-zA-Z0-9._-]+:.*?## / {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST) | sort

verify: lint gofmt check-vendor ## execute the source code verification tools

version: ## print current cri plugin release version
	@echo $(VERSION)

lint:
	@echo "$(WHALE) $@"
	golangci-lint run

gofmt:
	@echo "$(WHALE) $@"
	@./hack/verify-gofmt.sh

check-vendor:
	@echo "$(WHALE) $@"
	@./hack/verify-vendor.sh

.PHONY: sort-vendor sync-vendor update-vendor

sort-vendor:
	@echo "$(WHALE) $@"
	@./hack/sort-vendor.sh

sync-vendor:
	@echo "$(WHALE) $@ from containerd"
	@./hack/sync-vendor.sh

update-vendor: sync-vendor sort-vendor ## Syncs containerd/vendor.conf -> vendor.conf and sorts vendor.conf
	@echo "$(WHALE) $@"

$(BUILD_DIR)/$(CONTAINERD_BIN): $(SOURCES) $(PLUGIN_SOURCES)
	@echo "$(WHALE) $@"
	$(GO) build -o $@ \
		-tags '$(BUILD_TAGS)' \
		-ldflags '$(GO_LDFLAGS)' \
		-gcflags '$(GO_GCFLAGS)' \
		$(PROJECT)/cmd/containerd

test: ## unit test
	@echo "$(WHALE) $@"
	$(GO) test -timeout=10m -race ./pkg/... \
		-tags '$(BUILD_TAGS)' \
		-ldflags '$(GO_LDFLAGS)' \
		-gcflags '$(GO_GCFLAGS)'

$(BUILD_DIR)/integration.test: $(INTEGRATION_SOURCES)
	@echo "$(WHALE) $@"
	$(GO) test -c $(PROJECT)/integration -o $(BUILD_DIR)/integration.test

test-integration: $(BUILD_DIR)/integration.test binaries ## integration test
	@echo "$(WHALE) $@"
	@./hack/test-integration.sh

test-cri: binaries ## critools CRI validation test
	@echo "$(WHALE) $@"
	@./hack/test-cri.sh

test-e2e-node: binaries ## e2e node test
	@echo "$(WHALE) $@"
	@VERSION=$(VERSION) ./hack/test-e2e-node.sh

clean: ## cleanup binaries
	@echo "$(WHALE) $@"
	@rm -rf $(BUILD_DIR)/*

binaries: $(BUILD_DIR)/$(CONTAINERD_BIN) ## build a customized containerd (same result as make containerd)
	@echo "$(WHALE) $@"

static-binaries: GO_LDFLAGS += -extldflags "-fno-PIC -static"
static-binaries: $(BUILD_DIR)/$(CONTAINERD_BIN) ## build static containerd
	@echo "$(WHALE) $@"

containerd: $(BUILD_DIR)/$(CONTAINERD_BIN) ## build a customized containerd with CRI plugin for testing
	@echo "$(WHALE) $@"

install-containerd: containerd ## installs customized containerd to system location
	@echo "$(WHALE) $@"
	@install -D -m 755 $(BUILD_DIR)/$(CONTAINERD_BIN) "$(BINDIR)/$(CONTAINERD_BIN)"

install: install-containerd ## installs customized containerd to system location
	@echo "$(WHALE) $@"

uninstall: ## remove containerd from system location
	@echo "$(WHALE) $@"
	@rm -f "$(BINDIR)/$(CONTAINERD_BIN)"

ifeq ($(GOOS),windows)
$(BUILD_DIR)/$(TARBALL): static-binaries vendor.conf
	@BUILD_DIR=$(BUILD_DIR) TARBALL=$(TARBALL) VERSION=$(VERSION) ./hack/release-windows.sh
else
$(BUILD_DIR)/$(TARBALL): static-binaries vendor.conf
	@BUILD_DIR=$(BUILD_DIR) TARBALL=$(TARBALL) VERSION=$(VERSION) ./hack/release.sh
endif

release: $(BUILD_DIR)/$(TARBALL) ## build release tarball

push: $(BUILD_DIR)/$(TARBALL) ## push release tarball to GCS
	@echo "$(WHALE) $@"
	@BUILD_DIR=$(BUILD_DIR) TARBALL=$(TARBALL) VERSION=$(VERSION) ./hack/push.sh

proto: ## update protobuf of the cri plugin api
	@echo "$(WHALE) $@"
	@API_PATH=pkg/api/v1 hack/update-proto.sh
	@API_PATH=pkg/api/runtimeoptions/v1 hack/update-proto.sh

.PHONY: install.deps .install.deps.linux .install.deps.windows

ifeq ($(GOOS),windows)
install.deps: .install.deps.windows ## install windows deps on windows
else
install.deps: .install.deps.linux ## install windows deps on linux
endif

.install.deps.linux: ## install dependencies of cri
	@echo "$(WHALE) $@"
	@./hack/install/install-deps.sh

.install.deps.windows: ## install dependencies of cri on windows
	@echo "$(WHALE) $@"
	@./hack/install/windows/install-deps.sh

.PHONY: .gitvalidation
# make .gitvalidation is only used localy for manual testing
# requires a clone of github.com/containerd/project
# containerd/project DCO validation runs automatically with github actions in ci.yml for each pull
.gitvalidation:
	@echo "$(WHALE) $@"
	DCO_VERBOSITY=-v DCO_RANGE=$(EPOCH_TEST_COMMIT)..HEAD ../project/script/validate/dco

.PHONY: install.tools .install.gitvalidation .install.golangci-lint .install.vndr

install.tools: .install.gitvalidation .install.golangci-lint .install.vndr ## install tools used by verify
	@echo "$(WHALE) $@"

.install.gitvalidation:
	@echo "$(WHALE) $@"
	$(GO) get -u github.com/vbatts/git-validation

.install.golangci-lint:
	@echo "$(WHALE) $@"
	$(GO) get -d github.com/golangci/golangci-lint/cmd/golangci-lint
	@cd $(GOPATH)/src/github.com/golangci/golangci-lint/cmd/golangci-lint; \
		git checkout v1.18.0; \
		go install

.install.vndr:
	@echo "$(WHALE) $@"
	$(GO) get -u github.com/LK4D4/vndr

.PHONY: \
	binaries \
	static-binaries \
	containerd \
	install-containerd \
	release \
	push \
	clean \
	default \
	gofmt \
	help \
	install \
	lint \
	test \
	test-integration \
	test-cri \
	test-e2e-node \
	uninstall \
	version \
	proto \
	check-vendor
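For context, the local developer flow implied by these targets mirrors what ci.yml above runs; root is needed because install-containerd copies the test binary into /usr/local/bin:

```bash
# Build the test containerd (with the cri plugin) into _output/, install it,
# then run the unit and integration suites via the Makefile targets above.
make containerd
sudo PATH=$PATH GOPATH=$GOPATH make install-containerd
make test              # go test ./pkg/... with the configured build tags
make test-integration  # hack/test-integration.sh against the installed binary
```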
15 OWNERS
@@ -1,15 +0,0 @@
# cri approvers and reviewers
#
# As a containerd sub-project, containerd maintainers are also included from https://github.com/containerd/project/blob/master/MAINTAINERS.
# See https://github.com/containerd/project/blob/master/GOVERNANCE.md for description of maintainer role
#
approvers:
- Random-Liu
- mikebrow
- abhi
reviewers:
- Random-Liu
- mikebrow
- abhi
- yanxuean
- miaoyq
189 README.md
@@ -1,189 +0,0 @@
# cri
<p align="center">
<img src="https://kubernetes.io/images/favicon.png" width="50" height="50">
<img src="https://containerd.io/img/logos/icon/black/containerd-icon-black.png" width="50" >
</p>

*Note: The standalone `cri-containerd` binary is end-of-life. `cri-containerd` is
transitioning from a standalone binary that talks to containerd to a plugin within
containerd. This github branch is for the `cri` plugin. See
[standalone-cri-containerd branch](https://github.com/containerd/cri/tree/standalone-cri-containerd)
for information about the standalone version of `cri-containerd`.*

*Note: You need to [drain your node](https://kubernetes.io/docs/tasks/administer-cluster/safely-drain-node/) before upgrading from standalone `cri-containerd` to containerd with `cri` plugin.*

[](https://travis-ci.org/containerd/cri)
[](https://goreportcard.com/report/github.com/containerd/cri)

`cri` is a [containerd](https://containerd.io/) plugin implementation of Kubernetes [container runtime interface (CRI)](https://github.com/kubernetes/cri-api/blob/master/pkg/apis/runtime/v1alpha2/api.proto).

With it, you could run Kubernetes using containerd as the container runtime.

## Current Status
`cri` is a native plugin of containerd 1.1 and above. It is built into containerd and enabled by default.

`cri` is in GA:
* It is feature complete.
* It (the GA version) works with Kubernetes 1.10 and above.
* It has passed all [CRI validation tests](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-node/cri-validation.md).
* It has passed all [node e2e tests](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-node/e2e-node-tests.md).
* It has passed all [e2e tests](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-testing/e2e-tests.md).

See [test dashboard](https://k8s-testgrid.appspot.com/sig-node-containerd)
## Support Metrics
| CRI-Containerd Version | Containerd Version | Kubernetes Version | CRI Version |
|:----------------------:|:------------------:|:------------------:|:-----------:|
| v1.0.0-alpha.x         |                    | 1.7, 1.8           | v1alpha1    |
| v1.0.0-beta.x          |                    | 1.9                | v1alpha1    |
| End-Of-Life            | v1.1 (End-Of-Life) | 1.10+              | v1alpha2    |
|                        | v1.2 (Extended)    | 1.10+              | v1alpha2    |
|                        | v1.3               | 1.12+              | v1alpha2    |
|                        | v1.4               | 1.19+ (rc)         | v1alpha2    |

**Note:** The support table above specifies the Kubernetes Version that was supported at time of release of the containerd - cri integration.

The following is the current support table for containerd CRI integration taking into account that Kubernetes only supports n-3 minor release versions.

| Containerd Version | Kubernetes Version | CRI Version |
|:------------------:|:------------------:|:-----------:|
| v1.2               | 1.15+              | v1alpha2    |
| v1.3               | 1.15+              | v1alpha2    |
| v1.4               | 1.19+ (rc)         | v1alpha2    |

## Production Quality Cluster on GCE
For a production quality cluster on GCE brought up with `kube-up.sh` refer [here](docs/kube-up.md).
## Installing with Ansible and Kubeadm
For a multi node cluster installer and bring up steps using ansible and kubeadm refer [here](contrib/ansible/README.md).
## Custom Installation
For non ansible users, you can download the `cri-containerd` release tarball and deploy
kubernetes cluster using kubeadm as described [here](docs/installation.md).
## Getting Started for Developers
### Binary Dependencies and Specifications
The current release of the `cri` plugin has the following dependencies:
* [containerd](https://github.com/containerd/containerd)
* [runc](https://github.com/opencontainers/runc)
* [CNI](https://github.com/containernetworking/cni)

See [versions](./vendor.conf) of these dependencies `cri` is tested with.

As containerd and runc move to their respective general availability releases,
we will do our best to rebase/retest `cri` with these releases on a
weekly/monthly basis. Similarly, given that `cri` uses the Open
Container Initiative (OCI) [image](https://github.com/opencontainers/image-spec)
and [runtime](https://github.com/opencontainers/runtime-spec) specifications, we
will also do our best to update `cri` to the latest releases of these
specifications as appropriate.
### Install Dependencies
1. Install development libraries:
   * **libseccomp development library.** Required by `cri` and runc seccomp support. `libseccomp-dev` (Ubuntu, Debian) / `libseccomp-devel`
     (Fedora, CentOS, RHEL). On releases of Ubuntu <=Trusty and Debian <=jessie a
     backport version of `libseccomp-dev` is required. See [travis.yml](.travis.yml) for an example on trusty.
   * **btrfs development library.** Required by containerd btrfs support. `btrfs-tools`(Ubuntu, Debian) / `btrfs-progs-devel`(Fedora, CentOS, RHEL)
2. Install **`pkg-config`** (required for linking with `libseccomp`).
3. Install and setup a Go 1.13.15 development environment.
4. Make a local clone of this repository.
5. Install binary dependencies by running the following command from your cloned `cri/` project directory:
   ```bash
   # Note: install.deps installs the above mentioned runc, containerd, and CNI
   # binary dependencies. install.deps is only provided for general use and ease of
   # testing. To customize `runc` and `containerd` build tags and/or to configure
   # `cni`, please follow instructions in their documents.
   make install.deps
   ```
### Build and Install `cri`
To build and install a version of containerd with the `cri` plugin, enter the
following commands from your `cri` project directory:
```bash
make
sudo make install
```
*NOTE: The version of containerd built and installed from the `Makefile` is only for
testing purposes. The version tag carries the suffix "-TEST".*
#### Build Tags
`cri` supports optional build tags for compiling support of various features.
To add build tags to the make option the `BUILD_TAGS` variable must be set.

```bash
make BUILD_TAGS='seccomp apparmor selinux'
```

| Build Tag | Feature                            | Dependency                      |
|-----------|------------------------------------|---------------------------------|
| seccomp   | syscall filtering                  | libseccomp development library  |
| selinux   | selinux process and mount labeling | <none>                          |
| apparmor  | apparmor profile support           | <none>                          |
### Validate Your `cri` Setup
A Kubernetes incubator project called [cri-tools](https://github.com/kubernetes-sigs/cri-tools)
includes programs for exercising CRI implementations such as the `cri` plugin.
More importantly, cri-tools includes the program `critest` which is used for running
[CRI Validation Testing](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-node/cri-validation.md).

Run the CRI Validation test to validate your installation of `containerd` with `cri` built in:
```bash
make test-cri
```
### Running a Kubernetes local cluster
If you already have a working development environment for supported Kubernetes
version, you can try `cri` in a local cluster:

1. Start the version of `containerd` with `cri` plugin that you built and installed
   above as root in a first terminal:
   ```bash
   sudo containerd
   ```
2. From the Kubernetes project directory startup a local cluster using `containerd`:
   ```bash
   CONTAINER_RUNTIME=remote CONTAINER_RUNTIME_ENDPOINT='unix:///run/containerd/containerd.sock' ./hack/local-up-cluster.sh
   ```
### Test
See [here](./docs/testing.md) for information about test.
## Using crictl
See [here](./docs/crictl.md) for information about using `crictl` to debug
pods, containers, and images.
## Configurations
See [here](./docs/config.md) for information about how to configure cri plugins
and [here](https://github.com/containerd/containerd/blob/master/docs/man/containerd-config.8.md)
for information about how to configure containerd
## Documentation
See [here](./docs) for additional documentation.
## Communication
For async communication and long running discussions please use issues and pull
requests on this github repo. This will be the best place to discuss design and
implementation.

For sync communication catch us in the `#containerd` and `#containerd-dev` slack
channels on Cloud Native Computing Foundation's (CNCF) slack -
`cloud-native.slack.com`. Everyone is welcome to join and chat.
[Get Invite to CNCF slack.](https://slack.cncf.io)

## Other Communications
As this project is tightly coupled to CRI and CRI-Tools and they are Kubernetes
projects, some of our project communications take place in the Kubernetes' SIG:
`sig-node.`

For more information about `sig-node`, `CRI`, and the `CRI-Tools` projects:
* [sig-node community site](https://github.com/kubernetes/community/tree/master/sig-node)
* Slack: `#sig-node` channel in Kubernetes (kubernetes.slack.com)
* Mailing List: https://groups.google.com/forum/#!forum/kubernetes-sig-node

### Reporting Security Issues

__If you are reporting a security issue, please reach out discreetly at security@containerd.io__.

## Licenses
The containerd codebase is released under the [Apache 2.0 license](https://github.com/containerd/containerd/blob/master/LICENSE.code).
The README.md file, and files in the "docs" folder are licensed under the
Creative Commons Attribution 4.0 International License under the terms and
conditions set forth in the file "[LICENSE.docs](https://github.com/containerd/containerd/blob/master/LICENSE.docs)". You may obtain a duplicate
copy of the same license, titled CC-BY-4.0, at http://creativecommons.org/licenses/by/4.0/.

## Project details
cri is a containerd sub-project. This project was originally established in
April of 2017 in the Kubernetes Incubator program. After reaching the Beta
stage, In January of 2018, the project was merged into [containerd](https://github.com/containerd/containerd).
As a containerd sub-project, you will find the:
* [Project governance](https://github.com/containerd/project/blob/master/GOVERNANCE.md),
* [Maintainers](https://github.com/containerd/project/blob/master/MAINTAINERS),
* and [Contributing guidelines](https://github.com/containerd/project/blob/master/CONTRIBUTING.md)

information in our [`containerd/project`](https://github.com/containerd/project) repository.
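The crictl document referenced above is not part of this diff; as a hedged example, basic debugging against the `cri` plugin usually looks like the following, where the socket path matches the endpoint used in the local-cluster example and the subcommands are standard cri-tools ones:

```bash
# Point crictl at containerd's CRI socket and inspect the node.
sudo crictl --runtime-endpoint unix:///run/containerd/containerd.sock info
sudo crictl --runtime-endpoint unix:///run/containerd/containerd.sock pods
sudo crictl --runtime-endpoint unix:///run/containerd/containerd.sock ps -a
sudo crictl --runtime-endpoint unix:///run/containerd/containerd.sock images
```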
@@ -1,199 +0,0 @@
|
||||
#cloud-config
|
||||
|
||||
users:
|
||||
- name: etcd
|
||||
homedir: /var/etcd
|
||||
lock_passwd: true
|
||||
ssh_redirect_user: true
|
||||
|
||||
write_files:
|
||||
# Setup containerd.
|
||||
- path: /etc/systemd/system/containerd-installation.service
|
||||
permissions: 0644
|
||||
owner: root
|
||||
content: |
|
||||
# installed by cloud-init
|
||||
[Unit]
|
||||
Description=Download and install containerd binaries and configurations.
|
||||
After=network-online.target
|
||||
|
||||
[Service]
|
||||
Type=oneshot
|
||||
RemainAfterExit=yes
|
||||
ExecStartPre=/bin/mkdir -p /home/containerd
|
||||
ExecStartPre=/bin/mount --bind /home/containerd /home/containerd
|
||||
ExecStartPre=/bin/mount -o remount,exec /home/containerd
|
||||
ExecStartPre=/usr/bin/curl --fail --retry 5 --retry-delay 3 --silent --show-error -H "X-Google-Metadata-Request: True" -o /home/containerd/configure.sh http://metadata.google.internal/computeMetadata/v1/instance/attributes/containerd-configure-sh
|
||||
ExecStartPre=/bin/chmod 544 /home/containerd/configure.sh
|
||||
ExecStart=/home/containerd/configure.sh
|
||||
|
||||
[Install]
|
||||
WantedBy=containerd.target
|
||||
|
||||
- path: /etc/systemd/system/containerd.service
|
||||
permissions: 0644
|
||||
owner: root
|
||||
content: |
|
||||
# installed by cloud-init
|
||||
[Unit]
|
||||
Description=containerd container runtime
|
||||
Documentation=https://containerd.io
|
||||
After=containerd-installation.service
|
||||
|
||||
[Service]
|
||||
Restart=always
|
||||
RestartSec=5
|
||||
Delegate=yes
|
||||
KillMode=process
|
||||
OOMScoreAdjust=-999
|
||||
LimitNOFILE=1048576
|
||||
# Having non-zero Limit*s causes performance problems due to accounting overhead
|
||||
# in the kernel. We recommend using cgroups to do container-local accounting.
|
||||
LimitNPROC=infinity
|
||||
LimitCORE=infinity
|
||||
TasksMax=infinity
|
||||
ExecStartPre=/sbin/modprobe overlay
|
||||
ExecStart=/home/containerd/usr/local/bin/containerd
|
||||
|
||||
[Install]
|
||||
WantedBy=containerd.target
|
||||
|
||||
- path: /etc/systemd/system/containerd.target
|
||||
permissions: 0644
|
||||
owner: root
|
||||
content: |
|
||||
[Unit]
|
||||
Description=Containerd
|
||||
|
||||
[Install]
|
||||
WantedBy=kubernetes.target
|
||||
|
||||
# Setup kubernetes.
|
||||
- path: /etc/systemd/system/kube-master-installation.service
|
||||
permissions: 0644
|
||||
owner: root
|
||||
content: |
|
||||
[Unit]
|
||||
Description=Download and install k8s binaries and configurations
|
||||
After=network-online.target
|
||||
|
||||
[Service]
|
||||
Type=oneshot
|
||||
RemainAfterExit=yes
|
||||
ExecStartPre=/bin/mkdir -p /home/kubernetes/bin
|
||||
ExecStartPre=/bin/mount --bind /home/kubernetes/bin /home/kubernetes/bin
|
||||
ExecStartPre=/bin/mount -o remount,exec /home/kubernetes/bin
|
||||
ExecStartPre=/usr/bin/curl --fail --retry 5 --retry-delay 3 --silent --show-error -H "X-Google-Metadata-Request: True" -o /home/kubernetes/bin/configure.sh http://metadata.google.internal/computeMetadata/v1/instance/attributes/configure-sh
|
||||
ExecStartPre=/bin/chmod 544 /home/kubernetes/bin/configure.sh
|
||||
ExecStart=/home/kubernetes/bin/configure.sh
|
||||
|
||||
[Install]
|
||||
WantedBy=kubernetes.target
|
||||
|
||||
- path: /etc/systemd/system/kube-master-configuration.service
|
||||
permissions: 0644
|
||||
owner: root
|
||||
content: |
|
||||
[Unit]
|
||||
Description=Configure kubernetes master
|
||||
After=kube-master-installation.service
|
||||
|
||||
[Service]
|
||||
Type=oneshot
|
||||
RemainAfterExit=yes
|
||||
ExecStartPre=/bin/chmod 544 /home/kubernetes/bin/configure-helper.sh
|
||||
ExecStart=/home/kubernetes/bin/configure-helper.sh
|
||||
|
||||
[Install]
|
||||
WantedBy=kubernetes.target
|
||||
|
||||
- path: /etc/systemd/system/kube-container-runtime-monitor.service
|
||||
permissions: 0644
|
||||
owner: root
|
||||
content: |
|
||||
[Unit]
|
||||
Description=Kubernetes health monitoring for container runtime
|
||||
After=kube-master-configuration.service
|
||||
[Service]
|
||||
Restart=always
|
||||
RestartSec=10
|
||||
RemainAfterExit=yes
|
||||
RemainAfterExit=yes
|
||||
ExecStartPre=/bin/chmod 544 /home/kubernetes/bin/health-monitor.sh
|
||||
ExecStart=/home/kubernetes/bin/health-monitor.sh container-runtime
|
||||
[Install]
|
||||
WantedBy=kubernetes.target
|
||||
|
||||
- path: /etc/systemd/system/kubelet-monitor.service
|
||||
permissions: 0644
|
||||
owner: root
|
||||
content: |
|
||||
[Unit]
|
||||
Description=Kubernetes health monitoring for kubelet
|
||||
After=kube-master-configuration.service
|
||||
|
||||
[Service]
|
||||
Restart=always
|
||||
RestartSec=10
|
||||
RemainAfterExit=yes
|
||||
ExecStartPre=/bin/chmod 544 /home/kubernetes/bin/health-monitor.sh
|
||||
ExecStart=/home/kubernetes/bin/health-monitor.sh kubelet
|
||||
|
||||
[Install]
|
||||
WantedBy=kubernetes.target
|
||||
|
||||
- path: /etc/systemd/system/kube-logrotate.timer
|
||||
permissions: 0644
|
||||
owner: root
|
||||
content: |
|
||||
[Unit]
|
||||
Description=Hourly kube-logrotate invocation
|
||||
|
||||
[Timer]
|
||||
OnCalendar=hourly
|
||||
|
||||
[Install]
|
||||
WantedBy=kubernetes.target
|
||||
|
||||
- path: /etc/systemd/system/kube-logrotate.service
|
||||
permissions: 0644
|
||||
owner: root
|
||||
content: |
|
||||
[Unit]
|
||||
Description=Kubernetes log rotation
|
||||
After=kube-master-configuration.service
|
||||
|
||||
[Service]
|
||||
Type=oneshot
|
||||
ExecStart=-/usr/sbin/logrotate /etc/logrotate.conf
|
||||
|
||||
[Install]
|
||||
WantedBy=kubernetes.target
|
||||
|
||||
- path: /etc/systemd/system/kubernetes.target
|
||||
permissions: 0644
|
||||
owner: root
|
||||
content: |
|
||||
[Unit]
|
||||
Description=Kubernetes
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
|
||||
runcmd:
|
||||
# Stop the existing containerd service if there is one. (for Docker 18.09+)
|
||||
- systemctl is-active containerd && systemctl stop containerd
|
||||
- systemctl daemon-reload
|
||||
- systemctl enable containerd-installation.service
|
||||
- systemctl enable containerd.service
|
||||
- systemctl enable containerd.target
|
||||
- systemctl enable kube-master-installation.service
|
||||
- systemctl enable kube-master-configuration.service
|
||||
- systemctl enable kubelet-monitor.service
|
||||
- systemctl enable kube-container-runtime-monitor.service
|
||||
- systemctl enable kube-logrotate.timer
|
||||
- systemctl enable kube-logrotate.service
|
||||
- systemctl enable kubernetes.target
|
||||
- systemctl start kubernetes.target
|
||||
# Start docker after containerd is running. (for Docker 18.09+)
|
||||
- systemctl is-enabled docker && (systemctl is-active docker || systemctl start docker)
|
@@ -1,193 +0,0 @@
|
||||
#cloud-config
|
||||
|
||||
write_files:
|
||||
# Setup containerd.
|
||||
- path: /etc/systemd/system/containerd-installation.service
|
||||
permissions: 0644
|
||||
owner: root
|
||||
content: |
|
||||
# installed by cloud-init
|
||||
[Unit]
|
||||
Description=Download and install containerd binaries and configurations.
|
||||
After=network-online.target
|
||||
|
||||
[Service]
|
||||
Type=oneshot
|
||||
RemainAfterExit=yes
|
||||
ExecStartPre=/bin/mkdir -p /home/containerd
|
||||
ExecStartPre=/bin/mount --bind /home/containerd /home/containerd
|
||||
ExecStartPre=/bin/mount -o remount,exec /home/containerd
|
||||
ExecStartPre=/usr/bin/curl --fail --retry 5 --retry-delay 3 --silent --show-error -H "X-Google-Metadata-Request: True" -o /home/containerd/configure.sh http://metadata.google.internal/computeMetadata/v1/instance/attributes/containerd-configure-sh
|
||||
ExecStartPre=/bin/chmod 544 /home/containerd/configure.sh
|
||||
ExecStart=/home/containerd/configure.sh
|
||||
|
||||
[Install]
|
||||
WantedBy=containerd.target
|
||||
|
||||
- path: /etc/systemd/system/containerd.service
|
||||
permissions: 0644
|
||||
owner: root
|
||||
content: |
|
||||
# installed by cloud-init
|
||||
[Unit]
|
||||
Description=containerd container runtime
|
||||
Documentation=https://containerd.io
|
||||
After=containerd-installation.service
|
||||
|
||||
[Service]
|
||||
Restart=always
|
||||
RestartSec=5
|
||||
Delegate=yes
|
||||
KillMode=process
|
||||
OOMScoreAdjust=-999
|
||||
LimitNOFILE=1048576
|
||||
# Having non-zero Limit*s causes performance problems due to accounting overhead
|
||||
# in the kernel. We recommend using cgroups to do container-local accounting.
|
||||
LimitNPROC=infinity
|
||||
LimitCORE=infinity
|
||||
TasksMax=infinity
|
||||
ExecStartPre=/sbin/modprobe overlay
|
||||
ExecStart=/home/containerd/usr/local/bin/containerd
|
||||
|
||||
[Install]
|
||||
WantedBy=containerd.target
|
||||
|
||||
- path: /etc/systemd/system/containerd.target
|
||||
permissions: 0644
|
||||
owner: root
|
||||
content: |
|
||||
[Unit]
|
||||
Description=Containerd
|
||||
|
||||
[Install]
|
||||
WantedBy=kubernetes.target
|
||||
|
||||
# Setup kubernetes.
|
||||
- path: /etc/systemd/system/kube-node-installation.service
|
||||
permissions: 0644
|
||||
owner: root
|
||||
content: |
|
||||
[Unit]
|
||||
Description=Download and install k8s binaries and configurations
|
||||
After=network-online.target
|
||||
|
||||
[Service]
|
||||
Type=oneshot
|
||||
RemainAfterExit=yes
|
||||
ExecStartPre=/bin/mkdir -p /home/kubernetes/bin
|
||||
ExecStartPre=/bin/mount --bind /home/kubernetes/bin /home/kubernetes/bin
|
||||
ExecStartPre=/bin/mount -o remount,exec /home/kubernetes/bin
|
||||
ExecStartPre=/usr/bin/curl --fail --retry 5 --retry-delay 3 --silent --show-error -H "X-Google-Metadata-Request: True" -o /home/kubernetes/bin/configure.sh http://metadata.google.internal/computeMetadata/v1/instance/attributes/configure-sh
|
||||
ExecStartPre=/bin/chmod 544 /home/kubernetes/bin/configure.sh
|
||||
ExecStart=/home/kubernetes/bin/configure.sh
|
||||
|
||||
[Install]
|
||||
WantedBy=kubernetes.target
|
||||
|
||||
- path: /etc/systemd/system/kube-node-configuration.service
|
||||
permissions: 0644
|
||||
owner: root
|
||||
content: |
|
||||
[Unit]
|
||||
Description=Configure kubernetes node
|
||||
After=kube-node-installation.service
|
||||
|
||||
[Service]
|
||||
Type=oneshot
|
||||
RemainAfterExit=yes
|
||||
ExecStartPre=/bin/chmod 544 /home/kubernetes/bin/configure-helper.sh
|
||||
ExecStart=/home/kubernetes/bin/configure-helper.sh
|
||||
|
||||
[Install]
|
||||
WantedBy=kubernetes.target
|
||||
|
||||
- path: /etc/systemd/system/kube-container-runtime-monitor.service
|
||||
permissions: 0644
|
||||
owner: root
|
||||
content: |
|
||||
[Unit]
|
||||
Description=Kubernetes health monitoring for container runtime
|
||||
After=kube-node-configuration.service
|
||||
[Service]
|
||||
Restart=always
|
||||
RestartSec=10
|
||||
RemainAfterExit=yes
|
||||
RemainAfterExit=yes
|
||||
ExecStartPre=/bin/chmod 544 /home/kubernetes/bin/health-monitor.sh
|
||||
ExecStart=/home/kubernetes/bin/health-monitor.sh container-runtime
|
||||
[Install]
|
||||
WantedBy=kubernetes.target
|
||||
|
||||
- path: /etc/systemd/system/kubelet-monitor.service
|
||||
permissions: 0644
|
||||
owner: root
|
||||
content: |
|
||||
[Unit]
|
||||
Description=Kubernetes health monitoring for kubelet
|
||||
After=kube-node-configuration.service
|
||||
|
||||
[Service]
|
||||
Restart=always
|
||||
RestartSec=10
|
||||
RemainAfterExit=yes
|
||||
ExecStartPre=/bin/chmod 544 /home/kubernetes/bin/health-monitor.sh
|
||||
ExecStart=/home/kubernetes/bin/health-monitor.sh kubelet
|
||||
|
||||
[Install]
|
||||
WantedBy=kubernetes.target
|
||||
|
||||
- path: /etc/systemd/system/kube-logrotate.timer
|
||||
permissions: 0644
|
||||
owner: root
|
||||
content: |
|
||||
[Unit]
|
||||
Description=Hourly kube-logrotate invocation
|
||||
|
||||
[Timer]
|
||||
OnCalendar=hourly
|
||||
|
||||
[Install]
|
||||
WantedBy=kubernetes.target
|
||||
|
||||
- path: /etc/systemd/system/kube-logrotate.service
|
||||
permissions: 0644
|
||||
owner: root
|
||||
content: |
|
||||
[Unit]
|
||||
Description=Kubernetes log rotation
|
||||
After=kube-node-configuration.service
|
||||
|
||||
[Service]
|
||||
Type=oneshot
|
||||
ExecStart=-/usr/sbin/logrotate /etc/logrotate.conf
|
||||
|
||||
[Install]
|
||||
WantedBy=kubernetes.target
|
||||
|
||||
- path: /etc/systemd/system/kubernetes.target
|
||||
permissions: 0644
|
||||
owner: root
|
||||
content: |
|
||||
[Unit]
|
||||
Description=Kubernetes
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
|
||||
runcmd:
|
||||
# Stop the existing containerd service if there is one. (for Docker 18.09+)
|
||||
- systemctl is-active containerd && systemctl stop containerd
|
||||
- systemctl daemon-reload
|
||||
- systemctl enable containerd-installation.service
|
||||
- systemctl enable containerd.service
|
||||
- systemctl enable containerd.target
|
||||
- systemctl enable kube-node-installation.service
|
||||
- systemctl enable kube-node-configuration.service
|
||||
- systemctl enable kubelet-monitor.service
|
||||
- systemctl enable kube-container-runtime-monitor.service
|
||||
- systemctl enable kube-logrotate.timer
|
||||
- systemctl enable kube-logrotate.service
|
||||
- systemctl enable kubernetes.target
|
||||
- systemctl start kubernetes.target
|
||||
# Start docker after containerd is running. (for Docker 18.09+)
|
||||
- systemctl is-enabled docker && (systemctl is-active docker || systemctl start docker)
|
@@ -1,21 +0,0 @@
{
  "name": "k8s-pod-network",
  "cniVersion": "0.3.1",
  "plugins": [
    {
      "type": "ptp",
      "mtu": 1460,
      "ipam": {
        "type": "host-local",
        "ranges": [{{range $i, $range := .PodCIDRRanges}}{{if $i}}, {{end}}[{"subnet": "{{$range}}"}]{{end}}],
        "routes": [{{range $i, $route := .Routes}}{{if $i}}, {{end}}{"dst": "{{$route}}"}{{end}}]
      }
    },
    {
      "type": "portmap",
      "capabilities": {
        "portMappings": true
      }
    }
  ]
}
@@ -1,235 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright The containerd Authors.
|
||||
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
set -o xtrace
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
if [[ "$(python -V 2>&1)" =~ "Python 2" ]]; then
|
||||
# found python2, just use that
|
||||
PYTHON="python"
|
||||
elif [[ -f "/usr/bin/python2.7" ]]; then
|
||||
# System python not defaulted to python 2 but using 2.7 during migration
|
||||
PYTHON="/usr/bin/python2.7"
|
||||
else
|
||||
# No python2 either by default, let's see if we can find python3
|
||||
PYTHON="python3"
|
||||
if ! command -v ${PYTHON} >/dev/null 2>&1; then
|
||||
echo "ERROR Python not found. Aborting."
|
||||
exit 2
|
||||
fi
|
||||
fi
|
||||
echo "Version : " $(${PYTHON} -V 2>&1)
|
||||
|
||||
# CONTAINERD_HOME is the directory for containerd.
|
||||
CONTAINERD_HOME="/home/containerd"
|
||||
cd "${CONTAINERD_HOME}"
|
||||
# KUBE_HOME is the directory for kubernetes.
|
||||
KUBE_HOME="/home/kubernetes"
|
||||
|
||||
# fetch_metadata fetches metadata from GCE metadata server.
|
||||
# Var set:
|
||||
# 1. Metadata key: key of the metadata.
|
||||
fetch_metadata() {
|
||||
local -r key=$1
|
||||
local -r attributes="http://metadata.google.internal/computeMetadata/v1/instance/attributes"
|
||||
if curl --fail --retry 5 --retry-delay 3 --silent --show-error -H "X-Google-Metadata-Request: True" "${attributes}/" | \
|
||||
grep -q "^${key}$"; then
|
||||
curl --fail --retry 5 --retry-delay 3 --silent --show-error -H "X-Google-Metadata-Request: True" \
|
||||
"${attributes}/${key}"
|
||||
fi
|
||||
}
|
||||
|
||||
# fetch_env fetches environment variables from GCE metadata server
|
||||
# and generate a env file under ${CONTAINERD_HOME}. It assumes that
|
||||
# the environment variables in metadata are in yaml format.
|
||||
fetch_env() {
|
||||
local -r env_file_name=$1
|
||||
(
|
||||
umask 077;
|
||||
local -r tmp_env_file="/tmp/${env_file_name}.yaml"
|
||||
tmp_env_content=$(fetch_metadata "${env_file_name}")
|
||||
if [ -z "${tmp_env_content}" ]; then
|
||||
echo "No environment variable is specified in ${env_file_name}"
|
||||
return
|
||||
fi
|
||||
echo "${tmp_env_content}" > "${tmp_env_file}"
|
||||
# Convert the yaml format file into a shell-style file.
|
||||
eval $(${PYTHON} -c '''
|
||||
import pipes,sys,yaml
|
||||
if sys.version_info[0] < 3:
|
||||
items = yaml.load(sys.stdin).iteritems()
|
||||
else:
|
||||
items = yaml.load(sys.stdin, Loader=yaml.BaseLoader).items()
|
||||
for k,v in items:
|
||||
print("readonly {var}={value}".format(var = k, value = pipes.quote(str(v))))
|
||||
''' < "${tmp_env_file}" > "${CONTAINERD_HOME}/${env_file_name}")
|
||||
rm -f "${tmp_env_file}"
|
||||
)
|
||||
}
|
||||
|
||||
# is_preloaded checks whether a package has been preloaded in the image.
|
||||
is_preloaded() {
|
||||
local -r tar=$1
|
||||
local -r sha1=$2
|
||||
grep -qs "${tar},${sha1}" "${KUBE_HOME}/preload_info"
|
||||
}
|
||||
|
||||
# KUBE_ENV_METADATA is the metadata key for kubernetes envs.
|
||||
KUBE_ENV_METADATA="kube-env"
|
||||
fetch_env ${KUBE_ENV_METADATA}
|
||||
if [ -f "${CONTAINERD_HOME}/${KUBE_ENV_METADATA}" ]; then
|
||||
source "${CONTAINERD_HOME}/${KUBE_ENV_METADATA}"
|
||||
fi
|
||||
|
||||
# CONTAINERD_ENV_METADATA is the metadata key for containerd envs.
|
||||
CONTAINERD_ENV_METADATA="containerd-env"
|
||||
fetch_env ${CONTAINERD_ENV_METADATA}
|
||||
if [ -f "${CONTAINERD_HOME}/${CONTAINERD_ENV_METADATA}" ]; then
|
||||
source "${CONTAINERD_HOME}/${CONTAINERD_ENV_METADATA}"
|
||||
fi
|
||||
|
||||
# CONTAINERD_PKG_PREFIX is the prefix of the cri-containerd tarball name.
|
||||
# By default use the release tarball with cni built in.
|
||||
pkg_prefix=${CONTAINERD_PKG_PREFIX:-"cri-containerd-cni"}
|
||||
# Behave differently for test and production.
|
||||
if [ "${CONTAINERD_TEST:-"false"}" != "true" ]; then
|
||||
# CONTAINERD_DEPLOY_PATH is the gcs path where cri-containerd tarball is stored.
|
||||
deploy_path=${CONTAINERD_DEPLOY_PATH:-"cri-containerd-release"}
|
||||
# CONTAINERD_VERSION is the cri-containerd version to use.
|
||||
version=${CONTAINERD_VERSION:-""}
|
||||
else
|
||||
deploy_path=${CONTAINERD_DEPLOY_PATH:-"cri-containerd-staging"}
|
||||
|
||||
# PULL_REFS_METADATA is the metadata key of PULL_REFS from prow.
|
||||
PULL_REFS_METADATA="PULL_REFS"
|
||||
pull_refs=$(fetch_metadata "${PULL_REFS_METADATA}")
|
||||
if [ ! -z "${pull_refs}" ]; then
|
||||
deploy_dir=$(echo "${pull_refs}" | sha1sum | awk '{print $1}')
|
||||
deploy_path="${deploy_path}/${deploy_dir}"
|
||||
fi
|
||||
|
||||
# TODO(random-liu): Put version into the metadata instead of
|
||||
# deciding it in cloud init. This may cause issue to reboot test.
|
||||
version=$(curl -f --ipv4 --retry 6 --retry-delay 3 --silent --show-error \
|
||||
https://storage.googleapis.com/${deploy_path}/latest)
|
||||
fi
|
||||
|
||||
TARBALL_GCS_NAME="${pkg_prefix}-${version}.linux-amd64.tar.gz"
|
||||
# TARBALL_GCS_PATH is the path to download cri-containerd tarball for node e2e.
|
||||
TARBALL_GCS_PATH="https://storage.googleapis.com/${deploy_path}/${TARBALL_GCS_NAME}"
|
||||
# TARBALL is the name of the tarball after being downloaded.
|
||||
TARBALL="cri-containerd.tar.gz"
|
||||
# CONTAINERD_TAR_SHA1 is the sha1sum of containerd tarball.
|
||||
tar_sha1="${CONTAINERD_TAR_SHA1:-""}"
|
||||
|
||||
if [ -z "${version}" ]; then
|
||||
# Try using preloaded containerd if version is not specified.
|
||||
tarball_gcs_pattern="${pkg_prefix}-.*.linux-amd64.tar.gz"
|
||||
if is_preloaded "${tarball_gcs_pattern}" "${tar_sha1}"; then
|
||||
echo "CONTAINERD_VERSION is not set, use preloaded containerd"
|
||||
else
|
||||
echo "CONTAINERD_VERSION is not set, and containerd is not preloaded"
|
||||
exit 1
|
||||
fi
|
||||
else
|
||||
if is_preloaded "${TARBALL_GCS_NAME}" "${tar_sha1}"; then
|
||||
echo "${TARBALL_GCS_NAME} is preloaded"
|
||||
else
|
||||
# Download and untar the release tar ball.
|
||||
curl -f --ipv4 -Lo "${TARBALL}" --connect-timeout 20 --max-time 300 --retry 6 --retry-delay 10 "${TARBALL_GCS_PATH}"
|
||||
tar xvf "${TARBALL}"
|
||||
rm -f "${TARBALL}"
|
||||
fi
|
||||
fi
|
||||
|
||||
# Remove crictl shipped with containerd, use crictl installed
|
||||
# by kube-up.sh.
|
||||
rm -f "${CONTAINERD_HOME}/usr/local/bin/crictl"
|
||||
rm -f "${CONTAINERD_HOME}/etc/crictl.yaml"
|
||||
|
||||
# Generate containerd config
|
||||
config_path="${CONTAINERD_CONFIG_PATH:-"/etc/containerd/config.toml"}"
|
||||
mkdir -p $(dirname ${config_path})
|
||||
cni_bin_dir="${CONTAINERD_HOME}/opt/cni/bin"
|
||||
cni_template_path="${CONTAINERD_HOME}/opt/containerd/cluster/gce/cni.template"
|
||||
if [ "${KUBERNETES_MASTER:-}" != "true" ]; then
|
||||
if [ "${NETWORK_POLICY_PROVIDER:-"none"}" != "none" ] || [ "${ENABLE_NETD:-}" == "true" ]; then
|
||||
# Use Kubernetes cni daemonset on node if network policy provider is specified
|
||||
# or netd is enabled.
|
||||
cni_bin_dir="${KUBE_HOME}/bin"
|
||||
cni_template_path=""
|
||||
fi
|
||||
fi
|
||||
log_level="${CONTAINERD_LOG_LEVEL:-"info"}"
|
||||
max_container_log_line="${CONTAINERD_MAX_CONTAINER_LOG_LINE:-16384}"
|
||||
cat > ${config_path} <<EOF
|
||||
version = 2
|
||||
# Kubernetes requires the cri plugin.
|
||||
required_plugins = ["io.containerd.grpc.v1.cri"]
|
||||
# Kubernetes doesn't use containerd restart manager.
|
||||
disabled_plugins = ["io.containerd.internal.v1.restart"]
|
||||
[debug]
|
||||
level = "${log_level}"
|
||||
[plugins."io.containerd.grpc.v1.cri"]
|
||||
stream_server_address = "127.0.0.1"
|
||||
stream_server_port = "0"
|
||||
max_container_log_line_size = ${max_container_log_line}
|
||||
[plugins."io.containerd.grpc.v1.cri".cni]
|
||||
bin_dir = "${cni_bin_dir}"
|
||||
conf_dir = "/etc/cni/net.d"
|
||||
conf_template = "${cni_template_path}"
|
||||
[plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"]
|
||||
endpoint = ["https://mirror.gcr.io","https://registry-1.docker.io"]
|
||||
[plugins."io.containerd.grpc.v1.cri".containerd]
|
||||
default_runtime_name = "${CONTAINERD_DEFAULT_RUNTIME:-"runc"}"
|
||||
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
|
||||
runtime_type = "io.containerd.runc.v2"
|
||||
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
|
||||
BinaryName = "${CONTAINERD_HOME}/usr/local/sbin/runc"
|
||||
EOF
|
||||
chmod 644 "${config_path}"
|
||||
|
||||
# containerd_extra_runtime_handler is the extra runtime handler to install.
|
||||
containerd_extra_runtime_handler=${CONTAINERD_EXTRA_RUNTIME_HANDLER:-""}
|
||||
if [[ -n "${containerd_extra_runtime_handler}" ]]; then
|
||||
cat >> ${config_path} <<EOF
|
||||
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.${containerd_extra_runtime_handler}]
|
||||
runtime_type = "${CONTAINERD_EXTRA_RUNTIME_TYPE:-io.containerd.runc.v1}"
|
||||
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.${containerd_extra_runtime_handler}.options]
|
||||
${CONTAINERD_EXTRA_RUNTIME_OPTIONS:-}
|
||||
EOF
|
||||
fi
|
||||
|
||||
echo "export PATH=${CONTAINERD_HOME}/usr/local/bin/:${CONTAINERD_HOME}/usr/local/sbin/:\$PATH" > \
|
||||
/etc/profile.d/containerd_env.sh
|
||||
|
||||
# Run extra init script for test.
|
||||
if [ "${CONTAINERD_TEST:-"false"}" == "true" ]; then
|
||||
# EXTRA_INIT_SCRIPT is the name of the extra init script after being downloaded.
|
||||
EXTRA_INIT_SCRIPT="containerd-extra-init.sh"
|
||||
# EXTRA_INIT_SCRIPT_METADATA is the metadata key of init script.
|
||||
EXTRA_INIT_SCRIPT_METADATA="containerd-extra-init-sh"
|
||||
extra_init=$(fetch_metadata "${EXTRA_INIT_SCRIPT_METADATA}")
|
||||
# Return if containerd-extra-init-sh is not set.
|
||||
if [ -z "${extra_init}" ]; then
|
||||
exit 0
|
||||
fi
|
||||
echo "${extra_init}" > "${EXTRA_INIT_SCRIPT}"
|
||||
chmod 544 "${EXTRA_INIT_SCRIPT}"
|
||||
./${EXTRA_INIT_SCRIPT}
|
||||
fi
|
@ -1,20 +0,0 @@
|
||||
#!/bin/bash
|
||||
GCE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
|
||||
|
||||
# TODO(random-liu): Upload release tarball to user's own GCS, and use it. We should
|
||||
# not let all nodes of all users download tarball from cri-containerd-release.
|
||||
version_file=${GCE_DIR}/../version
|
||||
if [ ! -f "${version_file}" ]; then
|
||||
echo "version file does not exist"
|
||||
exit 1
|
||||
fi
|
||||
export KUBE_MASTER_EXTRA_METADATA="user-data=${GCE_DIR}/cloud-init/master.yaml,containerd-configure-sh=${GCE_DIR}/configure.sh,containerd-env=${version_file}"
|
||||
export KUBE_NODE_EXTRA_METADATA="user-data=${GCE_DIR}/cloud-init/node.yaml,containerd-configure-sh=${GCE_DIR}/configure.sh,containerd-env=${version_file}"
|
||||
export KUBE_CONTAINER_RUNTIME="remote"
|
||||
export KUBE_CONTAINER_RUNTIME_ENDPOINT="unix:///run/containerd/containerd.sock"
|
||||
export KUBE_CONTAINER_RUNTIME_NAME=containerd
|
||||
export KUBE_LOAD_IMAGE_COMMAND="/home/containerd/usr/local/bin/ctr -n=k8s.io images import"
|
||||
export NETWORK_PROVIDER=""
|
||||
export NON_MASQUERADE_CIDR="0.0.0.0/0"
|
||||
export KUBE_KUBELET_EXTRA_ARGS="--runtime-cgroups=/system.slice/containerd.service"
|
||||
export KUBE_FEATURE_GATES="ExperimentalCriticalPodAnnotation=true,CRIContainerLogRotation=true"
|
@ -1,23 +0,0 @@
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
_ "github.com/containerd/containerd/metrics/cgroups"
|
||||
_ "github.com/containerd/containerd/runtime/v1/linux"
|
||||
_ "github.com/containerd/containerd/snapshots/overlay"
|
||||
)
|
@ -1,25 +0,0 @@
|
||||
// +build windows
|
||||
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
_ "github.com/containerd/containerd/diff/lcow"
|
||||
_ "github.com/containerd/containerd/diff/windows"
|
||||
_ "github.com/containerd/containerd/snapshots/windows"
|
||||
)
|
@ -1,57 +0,0 @@
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
_ "github.com/containerd/containerd/diff/walking/plugin"
|
||||
_ "github.com/containerd/containerd/gc/scheduler"
|
||||
_ "github.com/containerd/containerd/runtime/v2"
|
||||
_ "github.com/containerd/containerd/services/containers"
|
||||
_ "github.com/containerd/containerd/services/content"
|
||||
_ "github.com/containerd/containerd/services/diff"
|
||||
_ "github.com/containerd/containerd/services/events"
|
||||
_ "github.com/containerd/containerd/services/healthcheck"
|
||||
_ "github.com/containerd/containerd/services/images"
|
||||
_ "github.com/containerd/containerd/services/introspection"
|
||||
_ "github.com/containerd/containerd/services/leases"
|
||||
_ "github.com/containerd/containerd/services/namespaces"
|
||||
_ "github.com/containerd/containerd/services/snapshots"
|
||||
_ "github.com/containerd/containerd/services/tasks"
|
||||
_ "github.com/containerd/containerd/services/version"
|
||||
_ "github.com/containerd/cri"
|
||||
|
||||
"github.com/containerd/containerd/cmd/containerd/command"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
func init() {
|
||||
rand.Seed(time.Now().UnixNano())
|
||||
}
|
||||
|
||||
func main() {
|
||||
app := command.App()
|
||||
logrus.Warn("This customized containerd is only for CI test, DO NOT use it for distribution.")
|
||||
if err := app.Run(os.Args); err != nil {
|
||||
fmt.Fprintf(os.Stderr, "containerd: %s\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
@ -1,3 +0,0 @@
|
||||
# Code of Conduct
|
||||
|
||||
This project follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md).
|
@ -1,53 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright The containerd Authors.
|
||||
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"/..
|
||||
|
||||
# PROJECT is the gce project to upload tarball.
|
||||
PROJECT=${PROJECT:-"k8s-cri-containerd"}
|
||||
|
||||
# GOOGLE_APPLICATION_CREDENTIALS is the path of service account file.
|
||||
if [ -z ${GOOGLE_APPLICATION_CREDENTIALS} ]; then
|
||||
echo "GOOGLE_APPLICATION_CREDENTIALS is not set"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Activate gcloud service account.
|
||||
gcloud auth activate-service-account --key-file "${GOOGLE_APPLICATION_CREDENTIALS}" --project="${PROJECT}"
|
||||
|
||||
# Install dependent libraries.
|
||||
apt-get update
|
||||
if apt-cache show libbtrfs-dev > /dev/null; then
|
||||
apt-get install -y libbtrfs-dev
|
||||
else
|
||||
apt-get install -y btrfs-tools
|
||||
fi
|
||||
|
||||
# Kubernetes test infra uses jessie and stretch.
|
||||
if cat /etc/os-release | grep jessie; then
|
||||
sh -c "echo 'deb http://ftp.debian.org/debian jessie-backports main' > /etc/apt/sources.list.d/backports.list"
|
||||
apt-get update
|
||||
apt-get install -y libseccomp2/jessie-backports
|
||||
apt-get install -y libseccomp-dev/jessie-backports
|
||||
else
|
||||
apt-get install -y libseccomp2
|
||||
apt-get install -y libseccomp-dev
|
||||
fi
|
||||
|
||||
# PULL_REFS is from prow.
|
||||
if [ ! -z "${PULL_REFS:-""}" ]; then
|
||||
DEPLOY_DIR=$(echo "${PULL_REFS}" | sha1sum | awk '{print $1}')
|
||||
fi
|
@ -1,32 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright The containerd Authors.
|
||||
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# This script is used to build and upload containerd with latest CRI plugin
|
||||
# from containerd/cri in gcr.io/k8s-testimages/kubekins-e2e.
|
||||
|
||||
set -o xtrace
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
source $(dirname "${BASH_SOURCE[0]}")/build-utils.sh
|
||||
cd "${ROOT}"
|
||||
|
||||
# Make sure output directory is clean.
|
||||
make clean
|
||||
# Build and push test tarball.
|
||||
PUSH_VERSION=true DEPLOY_DIR=${DEPLOY_DIR:-""} \
|
||||
make push TARBALL_PREFIX=cri-containerd-cni INCLUDE_CNI=true CUSTOM_CONTAINERD=true
|
@ -1,46 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright The containerd Authors.
|
||||
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# This script is used to build and upload latest containerd from
|
||||
# containerd/containerd in gcr.io/k8s-testimages/kubekins-e2e.
|
||||
|
||||
set -o xtrace
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
source $(dirname "${BASH_SOURCE[0]}")/../build-utils.sh
|
||||
cd "${ROOT}"
|
||||
|
||||
if [ -z "${GOPATH}" ]; then
|
||||
echo "GOPATH is not set"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
CONTAINERD_PATH=${GOPATH}/src/github.com/containerd/containerd
|
||||
if [ ! -d "${CONTAINERD_PATH}" ]; then
|
||||
echo "containerd repo does not exist"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Make sure output directory is clean.
|
||||
make clean
|
||||
# Build and push test tarball.
|
||||
PUSH_VERSION=true DEPLOY_DIR=${DEPLOY_DIR:-"containerd"} \
|
||||
make push TARBALL_PREFIX=containerd-cni \
|
||||
INCLUDE_CNI=true \
|
||||
CHECKOUT_CONTAINERD=false \
|
||||
VENDOR=${CONTAINERD_PATH}/vendor.conf
|
@ -1,35 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright The containerd Authors.
|
||||
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# This script is used to do extra initialization on GCI.
|
||||
|
||||
mount /tmp /tmp -o remount,exec,suid
|
||||
#TODO(random-liu): Stop docker and remove this docker thing.
|
||||
usermod -a -G docker jenkins
|
||||
#TODO(random-liu): Change current node e2e to use init script,
|
||||
# so that we don't need to copy this code everywhere.
|
||||
mkdir -p /var/lib/kubelet
|
||||
mkdir -p /home/kubernetes/containerized_mounter/rootfs
|
||||
mount --bind /home/kubernetes/containerized_mounter/ /home/kubernetes/containerized_mounter/
|
||||
mount -o remount, exec /home/kubernetes/containerized_mounter/
|
||||
wget https://storage.googleapis.com/kubernetes-release/gci-mounter/mounter.tar -O /tmp/mounter.tar
|
||||
tar xvf /tmp/mounter.tar -C /home/kubernetes/containerized_mounter/rootfs
|
||||
mkdir -p /home/kubernetes/containerized_mounter/rootfs/var/lib/kubelet
|
||||
mount --rbind /var/lib/kubelet /home/kubernetes/containerized_mounter/rootfs/var/lib/kubelet
|
||||
mount --make-rshared /home/kubernetes/containerized_mounter/rootfs/var/lib/kubelet
|
||||
mount --bind /proc /home/kubernetes/containerized_mounter/rootfs/proc
|
||||
mount --bind /dev /home/kubernetes/containerized_mounter/rootfs/dev
|
||||
rm /tmp/mounter.tar
|
@ -1,73 +0,0 @@
|
||||
#cloud-config
|
||||
|
||||
write_files:
|
||||
- path: /etc/systemd/system/containerd-installation.service
|
||||
permissions: 0644
|
||||
owner: root
|
||||
content: |
|
||||
# installed by cloud-init
|
||||
[Unit]
|
||||
Description=Download and install containerd binaries and configurations.
|
||||
After=network-online.target
|
||||
|
||||
[Service]
|
||||
Type=oneshot
|
||||
RemainAfterExit=yes
|
||||
ExecStartPre=/bin/mkdir -p /home/containerd
|
||||
ExecStartPre=/bin/mount --bind /home/containerd /home/containerd
|
||||
ExecStartPre=/bin/mount -o remount,exec /home/containerd
|
||||
ExecStartPre=/usr/bin/curl --fail --retry 5 --retry-delay 3 --silent --show-error -H "X-Google-Metadata-Request: True" -o /home/containerd/configure.sh http://metadata.google.internal/computeMetadata/v1/instance/attributes/containerd-configure-sh
|
||||
ExecStartPre=/bin/chmod 544 /home/containerd/configure.sh
|
||||
ExecStart=/home/containerd/configure.sh
|
||||
|
||||
[Install]
|
||||
WantedBy=containerd.target
|
||||
|
||||
- path: /etc/systemd/system/containerd.service
|
||||
permissions: 0644
|
||||
owner: root
|
||||
content: |
|
||||
# installed by cloud-init
|
||||
[Unit]
|
||||
Description=containerd container runtime
|
||||
Documentation=https://containerd.io
|
||||
After=containerd-installation.service
|
||||
|
||||
[Service]
|
||||
Restart=always
|
||||
RestartSec=5
|
||||
Delegate=yes
|
||||
KillMode=process
|
||||
OOMScoreAdjust=-999
|
||||
LimitNOFILE=1048576
|
||||
# Having non-zero Limit*s causes performance problems due to accounting overhead
|
||||
# in the kernel. We recommend using cgroups to do container-local accounting.
|
||||
LimitNPROC=infinity
|
||||
LimitCORE=infinity
|
||||
TasksMax=infinity
|
||||
ExecStartPre=/sbin/modprobe overlay
|
||||
ExecStart=/home/containerd/usr/local/bin/containerd
|
||||
|
||||
[Install]
|
||||
WantedBy=containerd.target
|
||||
|
||||
- path: /etc/systemd/system/containerd.target
|
||||
permissions: 0644
|
||||
owner: root
|
||||
content: |
|
||||
[Unit]
|
||||
Description=Containerd
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
|
||||
runcmd:
|
||||
# Stop the existing containerd service if there is one. (for Docker 18.09+)
|
||||
- systemctl is-active containerd && systemctl stop containerd
|
||||
- systemctl daemon-reload
|
||||
- systemctl enable containerd-installation.service
|
||||
- systemctl enable containerd.service
|
||||
- systemctl enable containerd.target
|
||||
- systemctl start containerd.target
|
||||
# Start docker after containerd is running. (for Docker 18.09+)
|
||||
- systemctl is-active docker || systemctl start docker
|
@ -1,30 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright The containerd Authors.
|
||||
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# This script is used to build and upload cri-containerd in gcr.io/k8s-testimages/kubekins-e2e.
|
||||
|
||||
set -o xtrace
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"/..
|
||||
cd "${ROOT}"
|
||||
|
||||
make install.tools
|
||||
make verify
|
||||
GOOS=macos make verify
|
||||
GOOS=windows make verify
|
@ -1,32 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright The containerd Authors.
|
||||
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# This script is used to build and upload containerd with latest CRI plugin
|
||||
# from containerd/cri in gcr.io/k8s-testimages/kubekins-e2e.
|
||||
|
||||
set -o xtrace
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
source $(dirname "${BASH_SOURCE[0]}")/../build-utils.sh
|
||||
cd "${ROOT}"
|
||||
|
||||
# Make sure output directory is clean.
|
||||
GOOS=windows make clean
|
||||
# Build and push test tarball.
|
||||
PUSH_VERSION=true DEPLOY_DIR=${DEPLOY_DIR:-"windows"} GOOS=windows \
|
||||
make push TARBALL_PREFIX=cri-containerd-cni INCLUDE_CNI=true CUSTOM_CONTAINERD=true
|
@ -1,128 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright The containerd Authors.
|
||||
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
set -o xtrace
|
||||
|
||||
GCE_PROJECT="${GCE_PROJECT:-"cri-containerd-node-e2e"}"
|
||||
GCE_IMAGE="${GCE_IMAGE:-"windows-server-1809-dc-core-for-containers-v20190827"}"
|
||||
GCE_IMAGE_PROJECT="${GCE_IMAGE_PROJECT:-"windows-cloud"}"
|
||||
ZONE="${ZONE:-"us-west1-b"}"
|
||||
ARTIFACTS="${ARTIFACTS:-"/tmp/test-cri-windows/_artifacts"}"
|
||||
CLEANUP="${CLEANUP:-"true"}"
|
||||
|
||||
root="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"/../..
|
||||
script_path="${root}/test/windows"
|
||||
node_name="windows-cri-$(cat /proc/sys/kernel/random/uuid)"
|
||||
tempdir="$(mktemp -d)"
|
||||
|
||||
# log logs the test logs.
|
||||
function log() {
|
||||
echo "$(date) $1"
|
||||
}
|
||||
|
||||
function cleanup() {
|
||||
rm -rf "$tempdir"
|
||||
if [[ "$CLEANUP" == "true" ]]; then
|
||||
log "Delete the test instance"
|
||||
gcloud compute instances delete -q "${node_name}"
|
||||
fi
|
||||
}
|
||||
|
||||
function retry_anyway() {
|
||||
retry_on_error 36 5 "" "$@"
|
||||
}
|
||||
|
||||
function retry_on_permission_error() {
|
||||
retry_on_error 36 5 "Permission denied" "$@"
|
||||
}
|
||||
|
||||
function retry_on_error() {
|
||||
local -r MAX_ATTEMPTS="$1"
|
||||
local -r SLEEP_PERIOD="$2"
|
||||
local -r ON_ERROR="$3"
|
||||
shift 3
|
||||
local attempts=1
|
||||
local -r stderr="$(mktemp -p "$tempdir")"
|
||||
until "$@" 2>"$stderr"; do
|
||||
cat "$stderr"
|
||||
(( attempts++ ))
|
||||
if [[ -n "$ON_ERROR" ]] && (! grep "$ON_ERROR" "$stderr" > /dev/null); then
|
||||
log "$* failed with unexpected error!"
|
||||
exit 1
|
||||
elif (( attempts > MAX_ATTEMPTS )); then
|
||||
log "$* failed, $MAX_ATTEMPTS retry exceeded!"
|
||||
exit 1
|
||||
else
|
||||
log "$* failed, retry in ${SLEEP_PERIOD} second..."
|
||||
fi
|
||||
sleep "${SLEEP_PERIOD}"
|
||||
done
|
||||
rm "$stderr"
|
||||
}
|
||||
|
||||
gcloud config set compute/zone "${ZONE}"
|
||||
gcloud config set project "${GCE_PROJECT}"
|
||||
|
||||
log "Create the test instance"
|
||||
gcloud compute instances create "${node_name}" --machine-type=n1-standard-2 \
|
||||
--image="${GCE_IMAGE}" --image-project="${GCE_IMAGE_PROJECT}" \
|
||||
--metadata-from-file=windows-startup-script-ps1="${script_path}/setup-ssh.ps1"
|
||||
trap cleanup EXIT
|
||||
|
||||
ssh=(gcloud compute ssh --ssh-flag="-ServerAliveInterval=30")
|
||||
scp=(gcloud compute scp)
|
||||
log "Wait for ssh to be ready"
|
||||
retry_anyway "${ssh[@]}" "${node_name}" --command="echo ssh ready"
|
||||
|
||||
log "Setup test environment in the test instance"
|
||||
retry_on_permission_error "${scp[@]}" "${script_path}/setup-vm.ps1" "${node_name}":"C:/setup-vm.ps1"
|
||||
retry_on_permission_error "${ssh[@]}" "${node_name}" --command="powershell /c C:/setup-vm.ps1"
|
||||
|
||||
log "Reboot the test instance to refresh environment variables"
|
||||
retry_on_permission_error "${ssh[@]}" "${node_name}" --command="powershell /c Restart-Computer"
|
||||
|
||||
log "Wait for ssh to be ready"
|
||||
retry_anyway "${ssh[@]}" "${node_name}" --command="echo ssh ready"
|
||||
|
||||
log "Run test on the test instance"
|
||||
cri_tar="/tmp/cri.tar.gz"
|
||||
tar -zcf "${cri_tar}" -C "${root}" . --owner=0 --group=0
|
||||
retry_on_permission_error "${scp[@]}" "${script_path}/test.sh" "${node_name}":"C:/test.sh"
|
||||
retry_on_permission_error "${scp[@]}" "${cri_tar}" "${node_name}":"C:/cri.tar.gz"
|
||||
rm "${cri_tar}"
|
||||
# git-bash doesn't return test exit code, the command should
|
||||
# succeed. We'll collect test exit code from _artifacts/.
|
||||
retry_on_permission_error "${ssh[@]}" "${node_name}" --command='powershell /c "Start-Process -FilePath \"C:\Program Files\Git\git-bash.exe\" -ArgumentList \"-elc\",\"`\"/c/test.sh &> /c/test.log`\"\" -Wait"'
|
||||
|
||||
log "Collect test logs"
|
||||
mkdir -p "${ARTIFACTS}"
|
||||
retry_on_permission_error "${scp[@]}" "${node_name}":"C:/test.log" "${ARTIFACTS}"
|
||||
retry_on_permission_error "${scp[@]}" --recurse "${node_name}":"C:/_artifacts/*" "${ARTIFACTS}"
|
||||
|
||||
log "Test output:"
|
||||
|
||||
# Make sure stdout is not in O_NONBLOCK mode.
|
||||
# See https://github.com/kubernetes/test-infra/issues/14938 for more details.
|
||||
python -c 'import os,sys,fcntl; flags = fcntl.fcntl(sys.stdout, fcntl.F_GETFL); print(flags&os.O_NONBLOCK);'
|
||||
python -c 'import os,sys,fcntl; flags = fcntl.fcntl(sys.stdout, fcntl.F_GETFL); fcntl.fcntl(sys.stdout, fcntl.F_SETFL, flags&~os.O_NONBLOCK);'
|
||||
|
||||
cat "${ARTIFACTS}/test.log"
|
||||
|
||||
exit_code="$(cat "${ARTIFACTS}/exitcode")"
|
||||
exit "${exit_code}"
|
@ -1,32 +0,0 @@
|
||||
# Copyright The containerd Authors.
|
||||
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12
|
||||
$ProgressPreference = 'SilentlyContinue'
|
||||
$k8sversion = ("v1.16.0-beta.2")
|
||||
|
||||
$url = ("https://raw.githubusercontent.com/kubernetes/kubernetes/$k8sversion/cluster/gce/windows/testonly/user-profile.psm1")
|
||||
Invoke-WebRequest $url -OutFile C:\user-profile.psm1
|
||||
|
||||
$url = ("https://raw.githubusercontent.com/kubernetes/kubernetes/$k8sversion/cluster/gce/windows/common.psm1")
|
||||
Invoke-WebRequest $url -OutFile C:\common.psm1
|
||||
|
||||
$url = ("https://raw.githubusercontent.com/kubernetes/kubernetes/$k8sversion/cluster/gce/windows/testonly/install-ssh.psm1")
|
||||
Invoke-WebRequest $url -OutFile C:\install-ssh.psm1
|
||||
|
||||
Import-Module -Force C:\install-ssh.psm1
|
||||
|
||||
InstallAndStart-OpenSsh
|
||||
|
||||
StartProcess-WriteSshKeys
|
@ -1,23 +0,0 @@
|
||||
# Copyright The containerd Authors.
|
||||
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
Set-ExecutionPolicy Bypass -Scope Process -Force; iex ((New-Object System.Net.WebClient).DownloadString('https://chocolatey.org/install.ps1'))
|
||||
|
||||
choco install -y --no-progress git
|
||||
|
||||
choco install -y --no-progress golang
|
||||
|
||||
choco install -y --no-progress make
|
||||
|
||||
choco install -y --no-progress mingw
|
@ -1,39 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright The containerd Authors.
|
||||
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
export PATH="/c/Program Files/Containerd:$PATH"
|
||||
FOCUS="${FOCUS:-"Conformance"}"
|
||||
SKIP="${SKIP:-""}"
|
||||
REPORT_DIR="${REPORT_DIR:-"/c/_artifacts"}"
|
||||
|
||||
make install.deps
|
||||
make install -e BINDIR="/c/Program Files/Containerd"
|
||||
|
||||
mkdir -p "${REPORT_DIR}"
|
||||
containerd -log-level debug &> "${REPORT_DIR}/containerd.log" &
|
||||
pid=$!
|
||||
ctr version
|
||||
|
||||
set +o errexit
|
||||
critest --runtime-endpoint=npipe:////./pipe/containerd-containerd --ginkgo.focus="${FOCUS}" --ginkgo.skip="${SKIP}" --report-dir="${REPORT_DIR}" --report-prefix="windows"
|
||||
TEST_RC=$?
|
||||
set -o errexit
|
||||
kill -9 $pid
|
||||
echo -n "${TEST_RC}" > "${REPORT_DIR}/exitcode"
|
102
vendor.conf
102
vendor.conf
@ -1,102 +0,0 @@
|
||||
# cri dependencies
|
||||
github.com/docker/docker 4634ce647cf2ce2c6031129ccd109e557244986f
|
||||
github.com/opencontainers/selinux v1.6.0
|
||||
github.com/tchap/go-patricia v2.2.6
|
||||
github.com/willf/bitset v1.1.11
|
||||
|
||||
# containerd dependencies
|
||||
github.com/beorn7/perks v1.0.1
|
||||
github.com/BurntSushi/toml v0.3.1
|
||||
github.com/cespare/xxhash/v2 v2.1.1
|
||||
github.com/containerd/cgroups 318312a373405e5e91134d8063d04d59768a1bff
|
||||
github.com/containerd/console v1.0.0
|
||||
github.com/containerd/containerd v1.4.1
|
||||
github.com/containerd/continuity efbc4488d8fe1bdc16bde3b2d2990d9b3a899165
|
||||
github.com/containerd/fifo f15a3290365b9d2627d189e619ab4008e0069caf
|
||||
github.com/containerd/go-runc 7016d3ce2328dd2cb1192b2076ebd565c4e8df0c
|
||||
github.com/containerd/nri 0afc7f031eaf9c7d9c1a381b7ab5462e89c998fc
|
||||
github.com/containerd/ttrpc v1.0.1
|
||||
github.com/containerd/typeurl v1.0.1
|
||||
github.com/coreos/go-systemd/v22 v22.1.0
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.0
|
||||
github.com/docker/go-events e31b211e4f1cd09aa76fe4ac244571fab96ae47f
|
||||
github.com/docker/go-metrics v0.0.1
|
||||
github.com/docker/go-units v0.4.0
|
||||
github.com/godbus/dbus/v5 v5.0.3
|
||||
github.com/gogo/googleapis v1.3.2
|
||||
github.com/gogo/protobuf v1.3.1
|
||||
github.com/golang/protobuf v1.3.5
|
||||
github.com/google/uuid v1.1.1
|
||||
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0
|
||||
github.com/hashicorp/errwrap v1.0.0
|
||||
github.com/hashicorp/go-multierror v1.0.0
|
||||
github.com/hashicorp/golang-lru v0.5.3
|
||||
github.com/imdario/mergo v0.3.7
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.3
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1
|
||||
github.com/Microsoft/go-winio v0.4.14
|
||||
github.com/Microsoft/hcsshim v0.8.9
|
||||
github.com/opencontainers/go-digest v1.0.0
|
||||
github.com/opencontainers/image-spec v1.0.1
|
||||
github.com/opencontainers/runc v1.0.0-rc92
|
||||
github.com/opencontainers/runtime-spec 4d89ac9fbff6c455f46a5bb59c6b1bb7184a5e43 # v1.0.3-0.20200728170252-4d89ac9fbff6
|
||||
github.com/pkg/errors v0.9.1
|
||||
github.com/prometheus/client_golang v1.6.0
|
||||
github.com/prometheus/client_model v0.2.0
|
||||
github.com/prometheus/common v0.9.1
|
||||
github.com/prometheus/procfs v0.0.11
|
||||
github.com/russross/blackfriday/v2 v2.0.1
|
||||
github.com/shurcooL/sanitized_anchor_name v1.0.0
|
||||
github.com/sirupsen/logrus v1.6.0
|
||||
github.com/syndtr/gocapability d98352740cb2c55f81556b63d4a1ec64c5a319c2
|
||||
github.com/urfave/cli v1.22.1 # NOTE: urfave/cli must be <= v1.22.1 due to a regression: https://github.com/urfave/cli/issues/1092
|
||||
go.etcd.io/bbolt v1.3.5
|
||||
go.opencensus.io v0.22.0
|
||||
golang.org/x/net ab34263943818b32f575efc978a3d24e80b04bd7
|
||||
golang.org/x/sync 42b317875d0fa942474b76e1b46a6060d720ae6e
|
||||
golang.org/x/sys ed371f2e16b4b305ee99df548828de367527b76b
|
||||
golang.org/x/text v0.3.3
|
||||
google.golang.org/genproto e50cd9704f63023d62cd06a1994b98227fc4d21a
|
||||
google.golang.org/grpc v1.27.1
|
||||
|
||||
# cgroups dependencies
|
||||
github.com/cilium/ebpf 1c8d4c9ef7759622653a1d319284a44652333b28
|
||||
|
||||
# kubernetes dependencies
|
||||
github.com/davecgh/go-spew v1.1.1
|
||||
github.com/docker/spdystream 449fdfce4d962303d702fec724ef0ad181c92528
|
||||
github.com/emicklei/go-restful v2.9.5
|
||||
github.com/go-logr/logr v0.2.0
|
||||
github.com/google/gofuzz v1.1.0
|
||||
github.com/json-iterator/go v1.1.10
|
||||
github.com/modern-go/concurrent 1.0.3
|
||||
github.com/modern-go/reflect2 v1.0.1
|
||||
github.com/pmezard/go-difflib v1.0.0
|
||||
github.com/stretchr/testify v1.4.0
|
||||
golang.org/x/crypto 75b288015ac94e66e3d6715fb68a9b41bf046ec2
|
||||
golang.org/x/oauth2 858c2ad4c8b6c5d10852cb89079f6ca1c7309787
|
||||
golang.org/x/time 555d28b269f0569763d25dbe1a237ae74c6bcc82
|
||||
gopkg.in/inf.v0 v0.9.1
|
||||
gopkg.in/yaml.v2 v2.2.8
|
||||
k8s.io/api v0.19.2
|
||||
k8s.io/apiserver v0.19.2
|
||||
k8s.io/apimachinery v0.19.2
|
||||
k8s.io/client-go v0.19.2
|
||||
k8s.io/component-base v0.19.2
|
||||
k8s.io/cri-api v0.19.2
|
||||
k8s.io/klog/v2 v2.2.0
|
||||
k8s.io/utils d5654de09c73da55eb19ae4ab4f734f7a61747a6
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.0.1
|
||||
sigs.k8s.io/yaml v1.2.0
|
||||
|
||||
# cni dependencies
|
||||
github.com/containerd/go-cni v1.0.1
|
||||
github.com/containernetworking/cni v0.8.0
|
||||
github.com/containernetworking/plugins v0.8.6
|
||||
github.com/fsnotify/fsnotify v1.4.9
|
||||
|
||||
# image decrypt depedencies
|
||||
github.com/containerd/imgcrypt v1.0.1
|
||||
github.com/containers/ocicrypt v1.0.1
|
||||
github.com/fullsailor/pkcs7 8306686428a5fe132eac8cb7c4848af725098bd4
|
||||
gopkg.in/square/go-jose.v2 v2.3.1
|
21
vendor/github.com/BurntSushi/toml/COPYING
generated
vendored
21
vendor/github.com/BurntSushi/toml/COPYING
generated
vendored
@ -1,21 +0,0 @@
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2013 TOML authors
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
218
vendor/github.com/BurntSushi/toml/README.md
generated
vendored
218
vendor/github.com/BurntSushi/toml/README.md
generated
vendored
@ -1,218 +0,0 @@
|
||||
## TOML parser and encoder for Go with reflection
|
||||
|
||||
TOML stands for Tom's Obvious, Minimal Language. This Go package provides a
|
||||
reflection interface similar to Go's standard library `json` and `xml`
|
||||
packages. This package also supports the `encoding.TextUnmarshaler` and
|
||||
`encoding.TextMarshaler` interfaces so that you can define custom data
|
||||
representations. (There is an example of this below.)
|
||||
|
||||
Spec: https://github.com/toml-lang/toml
|
||||
|
||||
Compatible with TOML version
|
||||
[v0.4.0](https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.4.0.md)
|
||||
|
||||
Documentation: https://godoc.org/github.com/BurntSushi/toml
|
||||
|
||||
Installation:
|
||||
|
||||
```bash
|
||||
go get github.com/BurntSushi/toml
|
||||
```
|
||||
|
||||
Try the toml validator:
|
||||
|
||||
```bash
|
||||
go get github.com/BurntSushi/toml/cmd/tomlv
|
||||
tomlv some-toml-file.toml
|
||||
```
|
||||
|
||||
[](https://travis-ci.org/BurntSushi/toml) [](https://godoc.org/github.com/BurntSushi/toml)
|
||||
|
||||
### Testing
|
||||
|
||||
This package passes all tests in
|
||||
[toml-test](https://github.com/BurntSushi/toml-test) for both the decoder
|
||||
and the encoder.
|
||||
|
||||
### Examples
|
||||
|
||||
This package works similarly to how the Go standard library handles `XML`
|
||||
and `JSON`. Namely, data is loaded into Go values via reflection.
|
||||
|
||||
For the simplest example, consider some TOML file as just a list of keys
|
||||
and values:
|
||||
|
||||
```toml
|
||||
Age = 25
|
||||
Cats = [ "Cauchy", "Plato" ]
|
||||
Pi = 3.14
|
||||
Perfection = [ 6, 28, 496, 8128 ]
|
||||
DOB = 1987-07-05T05:45:00Z
|
||||
```
|
||||
|
||||
Which could be defined in Go as:
|
||||
|
||||
```go
|
||||
type Config struct {
|
||||
Age int
|
||||
Cats []string
|
||||
Pi float64
|
||||
Perfection []int
|
||||
DOB time.Time // requires `import time`
|
||||
}
|
||||
```
|
||||
|
||||
And then decoded with:
|
||||
|
||||
```go
|
||||
var conf Config
|
||||
if _, err := toml.Decode(tomlData, &conf); err != nil {
|
||||
// handle error
|
||||
}
|
||||
```
|
||||
|
||||
You can also use struct tags if your struct field name doesn't map to a TOML
|
||||
key value directly:
|
||||
|
||||
```toml
|
||||
some_key_NAME = "wat"
|
||||
```
|
||||
|
||||
```go
|
||||
type TOML struct {
|
||||
ObscureKey string `toml:"some_key_NAME"`
|
||||
}
|
||||
```
|
||||
|
||||
### Using the `encoding.TextUnmarshaler` interface
|
||||
|
||||
Here's an example that automatically parses duration strings into
|
||||
`time.Duration` values:
|
||||
|
||||
```toml
|
||||
[[song]]
|
||||
name = "Thunder Road"
|
||||
duration = "4m49s"
|
||||
|
||||
[[song]]
|
||||
name = "Stairway to Heaven"
|
||||
duration = "8m03s"
|
||||
```
|
||||
|
||||
Which can be decoded with:
|
||||
|
||||
```go
|
||||
type song struct {
|
||||
Name string
|
||||
Duration duration
|
||||
}
|
||||
type songs struct {
|
||||
Song []song
|
||||
}
|
||||
var favorites songs
|
||||
if _, err := toml.Decode(blob, &favorites); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
for _, s := range favorites.Song {
|
||||
fmt.Printf("%s (%s)\n", s.Name, s.Duration)
|
||||
}
|
||||
```
|
||||
|
||||
And you'll also need a `duration` type that satisfies the
|
||||
`encoding.TextUnmarshaler` interface:
|
||||
|
||||
```go
|
||||
type duration struct {
|
||||
time.Duration
|
||||
}
|
||||
|
||||
func (d *duration) UnmarshalText(text []byte) error {
|
||||
var err error
|
||||
d.Duration, err = time.ParseDuration(string(text))
|
||||
return err
|
||||
}
|
||||
```
|
||||
|
||||
### More complex usage
|
||||
|
||||
Here's an example of how to load the example from the official spec page:
|
||||
|
||||
```toml
|
||||
# This is a TOML document. Boom.
|
||||
|
||||
title = "TOML Example"
|
||||
|
||||
[owner]
|
||||
name = "Tom Preston-Werner"
|
||||
organization = "GitHub"
|
||||
bio = "GitHub Cofounder & CEO\nLikes tater tots and beer."
|
||||
dob = 1979-05-27T07:32:00Z # First class dates? Why not?
|
||||
|
||||
[database]
|
||||
server = "192.168.1.1"
|
||||
ports = [ 8001, 8001, 8002 ]
|
||||
connection_max = 5000
|
||||
enabled = true
|
||||
|
||||
[servers]
|
||||
|
||||
# You can indent as you please. Tabs or spaces. TOML don't care.
|
||||
[servers.alpha]
|
||||
ip = "10.0.0.1"
|
||||
dc = "eqdc10"
|
||||
|
||||
[servers.beta]
|
||||
ip = "10.0.0.2"
|
||||
dc = "eqdc10"
|
||||
|
||||
[clients]
|
||||
data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it
|
||||
|
||||
# Line breaks are OK when inside arrays
|
||||
hosts = [
|
||||
"alpha",
|
||||
"omega"
|
||||
]
|
||||
```
|
||||
|
||||
And the corresponding Go types are:
|
||||
|
||||
```go
|
||||
type tomlConfig struct {
|
||||
Title string
|
||||
Owner ownerInfo
|
||||
DB database `toml:"database"`
|
||||
Servers map[string]server
|
||||
Clients clients
|
||||
}
|
||||
|
||||
type ownerInfo struct {
|
||||
Name string
|
||||
Org string `toml:"organization"`
|
||||
Bio string
|
||||
DOB time.Time
|
||||
}
|
||||
|
||||
type database struct {
|
||||
Server string
|
||||
Ports []int
|
||||
ConnMax int `toml:"connection_max"`
|
||||
Enabled bool
|
||||
}
|
||||
|
||||
type server struct {
|
||||
IP string
|
||||
DC string
|
||||
}
|
||||
|
||||
type clients struct {
|
||||
Data [][]interface{}
|
||||
Hosts []string
|
||||
}
|
||||
```
|
||||
|
||||
Note that a case insensitive match will be tried if an exact match can't be
|
||||
found.
|
||||
|
||||
A working example of the above can be found in `_examples/example.{go,toml}`.
|
509
vendor/github.com/BurntSushi/toml/decode.go
generated
vendored
509
vendor/github.com/BurntSushi/toml/decode.go
generated
vendored
@ -1,509 +0,0 @@
|
||||
package toml
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"math"
|
||||
"reflect"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
func e(format string, args ...interface{}) error {
|
||||
return fmt.Errorf("toml: "+format, args...)
|
||||
}
|
||||
|
||||
// Unmarshaler is the interface implemented by objects that can unmarshal a
|
||||
// TOML description of themselves.
|
||||
type Unmarshaler interface {
|
||||
UnmarshalTOML(interface{}) error
|
||||
}
|
||||
|
||||
// Unmarshal decodes the contents of `p` in TOML format into a pointer `v`.
|
||||
func Unmarshal(p []byte, v interface{}) error {
|
||||
_, err := Decode(string(p), v)
|
||||
return err
|
||||
}
|
||||
|
||||
// Primitive is a TOML value that hasn't been decoded into a Go value.
|
||||
// When using the various `Decode*` functions, the type `Primitive` may
|
||||
// be given to any value, and its decoding will be delayed.
|
||||
//
|
||||
// A `Primitive` value can be decoded using the `PrimitiveDecode` function.
|
||||
//
|
||||
// The underlying representation of a `Primitive` value is subject to change.
|
||||
// Do not rely on it.
|
||||
//
|
||||
// N.B. Primitive values are still parsed, so using them will only avoid
|
||||
// the overhead of reflection. They can be useful when you don't know the
|
||||
// exact type of TOML data until run time.
|
||||
type Primitive struct {
|
||||
undecoded interface{}
|
||||
context Key
|
||||
}
|
||||
|
||||
// DEPRECATED!
|
||||
//
|
||||
// Use MetaData.PrimitiveDecode instead.
|
||||
func PrimitiveDecode(primValue Primitive, v interface{}) error {
|
||||
md := MetaData{decoded: make(map[string]bool)}
|
||||
return md.unify(primValue.undecoded, rvalue(v))
|
||||
}
|
||||
|
||||
// PrimitiveDecode is just like the other `Decode*` functions, except it
|
||||
// decodes a TOML value that has already been parsed. Valid primitive values
|
||||
// can *only* be obtained from values filled by the decoder functions,
|
||||
// including this method. (i.e., `v` may contain more `Primitive`
|
||||
// values.)
|
||||
//
|
||||
// Meta data for primitive values is included in the meta data returned by
|
||||
// the `Decode*` functions with one exception: keys returned by the Undecoded
|
||||
// method will only reflect keys that were decoded. Namely, any keys hidden
|
||||
// behind a Primitive will be considered undecoded. Executing this method will
|
||||
// update the undecoded keys in the meta data. (See the example.)
|
||||
func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error {
|
||||
md.context = primValue.context
|
||||
defer func() { md.context = nil }()
|
||||
return md.unify(primValue.undecoded, rvalue(v))
|
||||
}
|
||||
|
||||
// Decode will decode the contents of `data` in TOML format into a pointer
|
||||
// `v`.
|
||||
//
|
||||
// TOML hashes correspond to Go structs or maps. (Dealer's choice. They can be
|
||||
// used interchangeably.)
|
||||
//
|
||||
// TOML arrays of tables correspond to either a slice of structs or a slice
|
||||
// of maps.
|
||||
//
|
||||
// TOML datetimes correspond to Go `time.Time` values.
|
||||
//
|
||||
// All other TOML types (float, string, int, bool and array) correspond
|
||||
// to the obvious Go types.
|
||||
//
|
||||
// An exception to the above rules is if a type implements the
|
||||
// encoding.TextUnmarshaler interface. In this case, any primitive TOML value
|
||||
// (floats, strings, integers, booleans and datetimes) will be converted to
|
||||
// a byte string and given to the value's UnmarshalText method. See the
|
||||
// Unmarshaler example for a demonstration with time duration strings.
|
||||
//
|
||||
// Key mapping
|
||||
//
|
||||
// TOML keys can map to either keys in a Go map or field names in a Go
|
||||
// struct. The special `toml` struct tag may be used to map TOML keys to
|
||||
// struct fields that don't match the key name exactly. (See the example.)
|
||||
// A case insensitive match to struct names will be tried if an exact match
|
||||
// can't be found.
|
||||
//
|
||||
// The mapping between TOML values and Go values is loose. That is, there
|
||||
// may exist TOML values that cannot be placed into your representation, and
|
||||
// there may be parts of your representation that do not correspond to
|
||||
// TOML values. This loose mapping can be made stricter by using the IsDefined
|
||||
// and/or Undecoded methods on the MetaData returned.
|
||||
//
|
||||
// This decoder will not handle cyclic types. If a cyclic type is passed,
|
||||
// `Decode` will not terminate.
|
||||
func Decode(data string, v interface{}) (MetaData, error) {
|
||||
rv := reflect.ValueOf(v)
|
||||
if rv.Kind() != reflect.Ptr {
|
||||
return MetaData{}, e("Decode of non-pointer %s", reflect.TypeOf(v))
|
||||
}
|
||||
if rv.IsNil() {
|
||||
return MetaData{}, e("Decode of nil %s", reflect.TypeOf(v))
|
||||
}
|
||||
p, err := parse(data)
|
||||
if err != nil {
|
||||
return MetaData{}, err
|
||||
}
|
||||
md := MetaData{
|
||||
p.mapping, p.types, p.ordered,
|
||||
make(map[string]bool, len(p.ordered)), nil,
|
||||
}
|
||||
return md, md.unify(p.mapping, indirect(rv))
|
||||
}
|
||||
|
||||
// DecodeFile is just like Decode, except it will automatically read the
|
||||
// contents of the file at `fpath` and decode it for you.
|
||||
func DecodeFile(fpath string, v interface{}) (MetaData, error) {
|
||||
bs, err := ioutil.ReadFile(fpath)
|
||||
if err != nil {
|
||||
return MetaData{}, err
|
||||
}
|
||||
return Decode(string(bs), v)
|
||||
}
|
||||
|
||||
// DecodeReader is just like Decode, except it will consume all bytes
|
||||
// from the reader and decode it for you.
|
||||
func DecodeReader(r io.Reader, v interface{}) (MetaData, error) {
|
||||
bs, err := ioutil.ReadAll(r)
|
||||
if err != nil {
|
||||
return MetaData{}, err
|
||||
}
|
||||
return Decode(string(bs), v)
|
||||
}
|
||||
|
||||
// unify performs a sort of type unification based on the structure of `rv`,
|
||||
// which is the client representation.
|
||||
//
|
||||
// Any type mismatch produces an error. Finding a type that we don't know
|
||||
// how to handle produces an unsupported type error.
|
||||
func (md *MetaData) unify(data interface{}, rv reflect.Value) error {
|
||||
|
||||
// Special case. Look for a `Primitive` value.
|
||||
if rv.Type() == reflect.TypeOf((*Primitive)(nil)).Elem() {
|
||||
// Save the undecoded data and the key context into the primitive
|
||||
// value.
|
||||
context := make(Key, len(md.context))
|
||||
copy(context, md.context)
|
||||
rv.Set(reflect.ValueOf(Primitive{
|
||||
undecoded: data,
|
||||
context: context,
|
||||
}))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Special case. Unmarshaler Interface support.
|
||||
if rv.CanAddr() {
|
||||
if v, ok := rv.Addr().Interface().(Unmarshaler); ok {
|
||||
return v.UnmarshalTOML(data)
|
||||
}
|
||||
}
|
||||
|
||||
// Special case. Handle time.Time values specifically.
|
||||
// TODO: Remove this code when we decide to drop support for Go 1.1.
|
||||
// This isn't necessary in Go 1.2 because time.Time satisfies the encoding
|
||||
// interfaces.
|
||||
if rv.Type().AssignableTo(rvalue(time.Time{}).Type()) {
|
||||
return md.unifyDatetime(data, rv)
|
||||
}
|
||||
|
||||
// Special case. Look for a value satisfying the TextUnmarshaler interface.
|
||||
if v, ok := rv.Interface().(TextUnmarshaler); ok {
|
||||
return md.unifyText(data, v)
|
||||
}
|
||||
// BUG(burntsushi)
|
||||
// The behavior here is incorrect whenever a Go type satisfies the
|
||||
// encoding.TextUnmarshaler interface but also corresponds to a TOML
|
||||
// hash or array. In particular, the unmarshaler should only be applied
|
||||
// to primitive TOML values. But at this point, it will be applied to
|
||||
// all kinds of values and produce an incorrect error whenever those values
|
||||
// are hashes or arrays (including arrays of tables).
|
||||
|
||||
k := rv.Kind()
|
||||
|
||||
// laziness
|
||||
if k >= reflect.Int && k <= reflect.Uint64 {
|
||||
return md.unifyInt(data, rv)
|
||||
}
|
||||
switch k {
|
||||
case reflect.Ptr:
|
||||
elem := reflect.New(rv.Type().Elem())
|
||||
err := md.unify(data, reflect.Indirect(elem))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
rv.Set(elem)
|
||||
return nil
|
||||
case reflect.Struct:
|
||||
return md.unifyStruct(data, rv)
|
||||
case reflect.Map:
|
||||
return md.unifyMap(data, rv)
|
||||
case reflect.Array:
|
||||
return md.unifyArray(data, rv)
|
||||
case reflect.Slice:
|
||||
return md.unifySlice(data, rv)
|
||||
case reflect.String:
|
||||
return md.unifyString(data, rv)
|
||||
case reflect.Bool:
|
||||
return md.unifyBool(data, rv)
|
||||
case reflect.Interface:
|
||||
// we only support empty interfaces.
|
||||
if rv.NumMethod() > 0 {
|
||||
return e("unsupported type %s", rv.Type())
|
||||
}
|
||||
return md.unifyAnything(data, rv)
|
||||
case reflect.Float32:
|
||||
fallthrough
|
||||
case reflect.Float64:
|
||||
return md.unifyFloat64(data, rv)
|
||||
}
|
||||
return e("unsupported type %s", rv.Kind())
|
||||
}
|
||||
|
||||
func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error {
|
||||
tmap, ok := mapping.(map[string]interface{})
|
||||
if !ok {
|
||||
if mapping == nil {
|
||||
return nil
|
||||
}
|
||||
return e("type mismatch for %s: expected table but found %T",
|
||||
rv.Type().String(), mapping)
|
||||
}
|
||||
|
||||
for key, datum := range tmap {
|
||||
var f *field
|
||||
fields := cachedTypeFields(rv.Type())
|
||||
for i := range fields {
|
||||
ff := &fields[i]
|
||||
if ff.name == key {
|
||||
f = ff
|
||||
break
|
||||
}
|
||||
if f == nil && strings.EqualFold(ff.name, key) {
|
||||
f = ff
|
||||
}
|
||||
}
|
||||
if f != nil {
|
||||
subv := rv
|
||||
for _, i := range f.index {
|
||||
subv = indirect(subv.Field(i))
|
||||
}
|
||||
if isUnifiable(subv) {
|
||||
md.decoded[md.context.add(key).String()] = true
|
||||
md.context = append(md.context, key)
|
||||
if err := md.unify(datum, subv); err != nil {
|
||||
return err
|
||||
}
|
||||
md.context = md.context[0 : len(md.context)-1]
|
||||
} else if f.name != "" {
|
||||
// Bad user! No soup for you!
|
||||
return e("cannot write unexported field %s.%s",
|
||||
rv.Type().String(), f.name)
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error {
|
||||
tmap, ok := mapping.(map[string]interface{})
|
||||
if !ok {
|
||||
if tmap == nil {
|
||||
return nil
|
||||
}
|
||||
return badtype("map", mapping)
|
||||
}
|
||||
if rv.IsNil() {
|
||||
rv.Set(reflect.MakeMap(rv.Type()))
|
||||
}
|
||||
for k, v := range tmap {
|
||||
md.decoded[md.context.add(k).String()] = true
|
||||
md.context = append(md.context, k)
|
||||
|
||||
rvkey := indirect(reflect.New(rv.Type().Key()))
|
||||
rvval := reflect.Indirect(reflect.New(rv.Type().Elem()))
|
||||
if err := md.unify(v, rvval); err != nil {
|
||||
return err
|
||||
}
|
||||
md.context = md.context[0 : len(md.context)-1]
|
||||
|
||||
rvkey.SetString(k)
|
||||
rv.SetMapIndex(rvkey, rvval)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (md *MetaData) unifyArray(data interface{}, rv reflect.Value) error {
|
||||
datav := reflect.ValueOf(data)
|
||||
if datav.Kind() != reflect.Slice {
|
||||
if !datav.IsValid() {
|
||||
return nil
|
||||
}
|
||||
return badtype("slice", data)
|
||||
}
|
||||
sliceLen := datav.Len()
|
||||
if sliceLen != rv.Len() {
|
||||
return e("expected array length %d; got TOML array of length %d",
|
||||
rv.Len(), sliceLen)
|
||||
}
|
||||
return md.unifySliceArray(datav, rv)
|
||||
}
|
||||
|
||||
func (md *MetaData) unifySlice(data interface{}, rv reflect.Value) error {
|
||||
datav := reflect.ValueOf(data)
|
||||
if datav.Kind() != reflect.Slice {
|
||||
if !datav.IsValid() {
|
||||
return nil
|
||||
}
|
||||
return badtype("slice", data)
|
||||
}
|
||||
n := datav.Len()
|
||||
if rv.IsNil() || rv.Cap() < n {
|
||||
rv.Set(reflect.MakeSlice(rv.Type(), n, n))
|
||||
}
|
||||
rv.SetLen(n)
|
||||
return md.unifySliceArray(datav, rv)
|
||||
}
|
||||
|
||||
func (md *MetaData) unifySliceArray(data, rv reflect.Value) error {
|
||||
sliceLen := data.Len()
|
||||
for i := 0; i < sliceLen; i++ {
|
||||
v := data.Index(i).Interface()
|
||||
sliceval := indirect(rv.Index(i))
|
||||
if err := md.unify(v, sliceval); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (md *MetaData) unifyDatetime(data interface{}, rv reflect.Value) error {
|
||||
if _, ok := data.(time.Time); ok {
|
||||
rv.Set(reflect.ValueOf(data))
|
||||
return nil
|
||||
}
|
||||
return badtype("time.Time", data)
|
||||
}
|
||||
|
||||
func (md *MetaData) unifyString(data interface{}, rv reflect.Value) error {
|
||||
if s, ok := data.(string); ok {
|
||||
rv.SetString(s)
|
||||
return nil
|
||||
}
|
||||
return badtype("string", data)
|
||||
}
|
||||
|
||||
func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error {
|
||||
if num, ok := data.(float64); ok {
|
||||
switch rv.Kind() {
|
||||
case reflect.Float32:
|
||||
fallthrough
|
||||
case reflect.Float64:
|
||||
rv.SetFloat(num)
|
||||
default:
|
||||
panic("bug")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
return badtype("float", data)
|
||||
}
|
||||
|
||||
func (md *MetaData) unifyInt(data interface{}, rv reflect.Value) error {
|
||||
if num, ok := data.(int64); ok {
|
||||
if rv.Kind() >= reflect.Int && rv.Kind() <= reflect.Int64 {
|
||||
switch rv.Kind() {
|
||||
case reflect.Int, reflect.Int64:
|
||||
// No bounds checking necessary.
|
||||
case reflect.Int8:
|
||||
if num < math.MinInt8 || num > math.MaxInt8 {
|
||||
return e("value %d is out of range for int8", num)
|
||||
}
|
||||
case reflect.Int16:
|
||||
if num < math.MinInt16 || num > math.MaxInt16 {
|
||||
return e("value %d is out of range for int16", num)
|
||||
}
|
||||
case reflect.Int32:
|
||||
if num < math.MinInt32 || num > math.MaxInt32 {
|
||||
return e("value %d is out of range for int32", num)
|
||||
}
|
||||
}
|
||||
rv.SetInt(num)
|
||||
} else if rv.Kind() >= reflect.Uint && rv.Kind() <= reflect.Uint64 {
|
||||
unum := uint64(num)
|
||||
switch rv.Kind() {
|
||||
case reflect.Uint, reflect.Uint64:
|
||||
// No bounds checking necessary.
|
||||
case reflect.Uint8:
|
||||
if num < 0 || unum > math.MaxUint8 {
|
||||
return e("value %d is out of range for uint8", num)
|
||||
}
|
||||
case reflect.Uint16:
|
||||
if num < 0 || unum > math.MaxUint16 {
|
||||
return e("value %d is out of range for uint16", num)
|
||||
}
|
||||
case reflect.Uint32:
|
||||
if num < 0 || unum > math.MaxUint32 {
|
||||
return e("value %d is out of range for uint32", num)
|
||||
}
|
||||
}
|
||||
rv.SetUint(unum)
|
||||
} else {
|
||||
panic("unreachable")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
return badtype("integer", data)
|
||||
}
|
||||
|
||||
func (md *MetaData) unifyBool(data interface{}, rv reflect.Value) error {
|
||||
if b, ok := data.(bool); ok {
|
||||
rv.SetBool(b)
|
||||
return nil
|
||||
}
|
||||
return badtype("boolean", data)
|
||||
}
|
||||
|
||||
func (md *MetaData) unifyAnything(data interface{}, rv reflect.Value) error {
|
||||
rv.Set(reflect.ValueOf(data))
|
||||
return nil
|
||||
}
|
||||
|
||||
func (md *MetaData) unifyText(data interface{}, v TextUnmarshaler) error {
|
||||
var s string
|
||||
switch sdata := data.(type) {
|
||||
case TextMarshaler:
|
||||
text, err := sdata.MarshalText()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
s = string(text)
|
||||
case fmt.Stringer:
|
||||
s = sdata.String()
|
||||
case string:
|
||||
s = sdata
|
||||
case bool:
|
||||
s = fmt.Sprintf("%v", sdata)
|
||||
case int64:
|
||||
s = fmt.Sprintf("%d", sdata)
|
||||
case float64:
|
||||
s = fmt.Sprintf("%f", sdata)
|
||||
default:
|
||||
return badtype("primitive (string-like)", data)
|
||||
}
|
||||
if err := v.UnmarshalText([]byte(s)); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// rvalue returns a reflect.Value of `v`. All pointers are resolved.
|
||||
func rvalue(v interface{}) reflect.Value {
|
||||
return indirect(reflect.ValueOf(v))
|
||||
}
|
||||
|
||||
// indirect returns the value pointed to by a pointer.
|
||||
// Pointers are followed until the value is not a pointer.
|
||||
// New values are allocated for each nil pointer.
|
||||
//
|
||||
// An exception to this rule is if the value satisfies an interface of
|
||||
// interest to us (like encoding.TextUnmarshaler).
|
||||
func indirect(v reflect.Value) reflect.Value {
|
||||
if v.Kind() != reflect.Ptr {
|
||||
if v.CanSet() {
|
||||
pv := v.Addr()
|
||||
if _, ok := pv.Interface().(TextUnmarshaler); ok {
|
||||
return pv
|
||||
}
|
||||
}
|
||||
return v
|
||||
}
|
||||
if v.IsNil() {
|
||||
v.Set(reflect.New(v.Type().Elem()))
|
||||
}
|
||||
return indirect(reflect.Indirect(v))
|
||||
}
|
||||
|
||||
func isUnifiable(rv reflect.Value) bool {
|
||||
if rv.CanSet() {
|
||||
return true
|
||||
}
|
||||
if _, ok := rv.Interface().(TextUnmarshaler); ok {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func badtype(expected string, data interface{}) error {
|
||||
return e("cannot load TOML value of type %T into a Go %s", data, expected)
|
||||
}
121 vendor/github.com/BurntSushi/toml/decode_meta.go generated vendored
@ -1,121 +0,0 @@
package toml

import "strings"

// MetaData allows access to meta information about TOML data that may not
// be inferrable via reflection. In particular, whether a key has been defined
// and the TOML type of a key.
type MetaData struct {
	mapping map[string]interface{}
	types   map[string]tomlType
	keys    []Key
	decoded map[string]bool
	context Key // Used only during decoding.
}

// IsDefined returns true if the key given exists in the TOML data. The key
// should be specified hierarchically. e.g.,
//
//	// access the TOML key 'a.b.c'
//	IsDefined("a", "b", "c")
//
// IsDefined will return false if an empty key is given. Keys are case sensitive.
func (md *MetaData) IsDefined(key ...string) bool {
	if len(key) == 0 {
		return false
	}

	var hash map[string]interface{}
	var ok bool
	var hashOrVal interface{} = md.mapping
	for _, k := range key {
		if hash, ok = hashOrVal.(map[string]interface{}); !ok {
			return false
		}
		if hashOrVal, ok = hash[k]; !ok {
			return false
		}
	}
	return true
}

// Type returns a string representation of the type of the key specified.
//
// Type will return the empty string if given an empty key or a key that
// does not exist. Keys are case sensitive.
func (md *MetaData) Type(key ...string) string {
	fullkey := strings.Join(key, ".")
	if typ, ok := md.types[fullkey]; ok {
		return typ.typeString()
	}
	return ""
}

// Key is the type of any TOML key, including key groups. Use (MetaData).Keys
// to get values of this type.
type Key []string

func (k Key) String() string {
	return strings.Join(k, ".")
}

func (k Key) maybeQuotedAll() string {
	var ss []string
	for i := range k {
		ss = append(ss, k.maybeQuoted(i))
	}
	return strings.Join(ss, ".")
}

func (k Key) maybeQuoted(i int) string {
	quote := false
	for _, c := range k[i] {
		if !isBareKeyChar(c) {
			quote = true
			break
		}
	}
	if quote {
		return "\"" + strings.Replace(k[i], "\"", "\\\"", -1) + "\""
	}
	return k[i]
}

func (k Key) add(piece string) Key {
	newKey := make(Key, len(k)+1)
	copy(newKey, k)
	newKey[len(k)] = piece
	return newKey
}

// Keys returns a slice of every key in the TOML data, including key groups.
// Each key is itself a slice, where the first element is the top of the
// hierarchy and the last is the most specific.
//
// The list will have the same order as the keys appeared in the TOML data.
//
// All keys returned are non-empty.
func (md *MetaData) Keys() []Key {
	return md.keys
}

// Undecoded returns all keys that have not been decoded in the order in which
// they appear in the original TOML document.
//
// This includes keys that haven't been decoded because of a Primitive value.
// Once the Primitive value is decoded, the keys will be considered decoded.
//
// Also note that decoding into an empty interface will result in no decoding,
// and so no keys will be considered decoded.
//
// In this sense, the Undecoded keys correspond to keys in the TOML document
// that do not have a concrete type in your representation.
func (md *MetaData) Undecoded() []Key {
	undecoded := make([]Key, 0, len(md.keys))
	for _, key := range md.keys {
		if !md.decoded[key.String()] {
			undecoded = append(undecoded, key)
		}
	}
	return undecoded
}
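The MetaData accessors above are easiest to see in use. A minimal sketch (assuming the package is imported from its vendored path, github.com/BurntSushi/toml; the config struct and TOML snippet are illustrative only):

package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

type config struct {
	Name string
	Port int
}

func main() {
	const blob = `
name = "demo"
port = 8080
extra = true
`
	var c config
	md, err := toml.Decode(blob, &c) // Decode returns the MetaData described above.
	if err != nil {
		panic(err)
	}
	fmt.Println(md.IsDefined("port")) // true: the key appears in the document
	fmt.Println(md.Type("port"))      // the TOML type of the key, e.g. "Integer"
	fmt.Println(md.Undecoded())       // [extra]: present in the TOML, absent from config
}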
27 vendor/github.com/BurntSushi/toml/doc.go generated vendored
@ -1,27 +0,0 @@
/*
Package toml provides facilities for decoding and encoding TOML configuration
files via reflection. There is also support for delaying decoding with
the Primitive type, and querying the set of keys in a TOML document with the
MetaData type.

The specification implemented: https://github.com/toml-lang/toml

The sub-command github.com/BurntSushi/toml/cmd/tomlv can be used to verify
whether a file is a valid TOML document. It can also be used to print the
type of each key in a TOML document.

Testing

There are two important types of tests used for this package. The first is
contained inside '*_test.go' files and uses the standard Go unit testing
framework. These tests are primarily devoted to holistically testing the
decoder and encoder.

The second type of testing is used to verify the implementation's adherence
to the TOML specification. These tests have been factored into their own
project: https://github.com/BurntSushi/toml-test

The reason the tests are in a separate project is so that they can be used by
any implementation of TOML. Namely, it is language agnostic.
*/
package toml
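The two features this package comment highlights, delayed decoding with Primitive and key queries with MetaData, combine roughly as in the sketch below. It assumes the Primitive type and (MetaData).PrimitiveDecode from the rest of this vendored package, which sit outside this hunk:

package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

func main() {
	const blob = `
title = "demo"

[server]
host = "localhost"
port = 5050
`
	var doc struct {
		Title  string
		Server toml.Primitive // decoding of the [server] table is delayed
	}
	md, err := toml.Decode(blob, &doc)
	if err != nil {
		panic(err)
	}

	// Decide later what shape the [server] table should decode into.
	var server struct {
		Host string
		Port int
	}
	if err := md.PrimitiveDecode(doc.Server, &server); err != nil {
		panic(err)
	}
	fmt.Println(doc.Title, server.Host, server.Port)
}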
568 vendor/github.com/BurntSushi/toml/encode.go generated vendored
@ -1,568 +0,0 @@
|
||||
package toml
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
type tomlEncodeError struct{ error }
|
||||
|
||||
var (
|
||||
errArrayMixedElementTypes = errors.New(
|
||||
"toml: cannot encode array with mixed element types")
|
||||
errArrayNilElement = errors.New(
|
||||
"toml: cannot encode array with nil element")
|
||||
errNonString = errors.New(
|
||||
"toml: cannot encode a map with non-string key type")
|
||||
errAnonNonStruct = errors.New(
|
||||
"toml: cannot encode an anonymous field that is not a struct")
|
||||
errArrayNoTable = errors.New(
|
||||
"toml: TOML array element cannot contain a table")
|
||||
errNoKey = errors.New(
|
||||
"toml: top-level values must be Go maps or structs")
|
||||
errAnything = errors.New("") // used in testing
|
||||
)
|
||||
|
||||
var quotedReplacer = strings.NewReplacer(
|
||||
"\t", "\\t",
|
||||
"\n", "\\n",
|
||||
"\r", "\\r",
|
||||
"\"", "\\\"",
|
||||
"\\", "\\\\",
|
||||
)
|
||||
|
||||
// Encoder controls the encoding of Go values to a TOML document to some
|
||||
// io.Writer.
|
||||
//
|
||||
// The indentation level can be controlled with the Indent field.
|
||||
type Encoder struct {
|
||||
// A single indentation level. By default it is two spaces.
|
||||
Indent string
|
||||
|
||||
// hasWritten is whether we have written any output to w yet.
|
||||
hasWritten bool
|
||||
w *bufio.Writer
|
||||
}
|
||||
|
||||
// NewEncoder returns a TOML encoder that encodes Go values to the io.Writer
|
||||
// given. By default, a single indentation level is 2 spaces.
|
||||
func NewEncoder(w io.Writer) *Encoder {
|
||||
return &Encoder{
|
||||
w: bufio.NewWriter(w),
|
||||
Indent: " ",
|
||||
}
|
||||
}
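As a usage sketch for the Encoder defined above (the settings/database types are illustrative only), overriding the two-space Indent default looks like this:

package main

import (
	"os"

	"github.com/BurntSushi/toml"
)

type database struct {
	Server string
	Ports  []int
}

type settings struct {
	Title string
	Tags  []string
	DB    database `toml:"database"`
}

func main() {
	s := settings{
		Title: "example",
		Tags:  []string{"a", "b"},
		DB:    database{Server: "192.168.1.1", Ports: []int{8001, 8002}},
	}
	enc := toml.NewEncoder(os.Stdout)
	enc.Indent = "    " // four spaces instead of the default two
	if err := enc.Encode(s); err != nil {
		panic(err)
	}
}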
|
||||
|
||||
// Encode writes a TOML representation of the Go value to the underlying
|
||||
// io.Writer. If the value given cannot be encoded to a valid TOML document,
|
||||
// then an error is returned.
|
||||
//
|
||||
// The mapping between Go values and TOML values should be precisely the same
|
||||
// as for the Decode* functions. Similarly, the TextMarshaler interface is
|
||||
// supported by encoding the resulting bytes as strings. (If you want to write
|
||||
// arbitrary binary data then you will need to use something like base64 since
|
||||
// TOML does not have any binary types.)
|
||||
//
|
||||
// When encoding TOML hashes (i.e., Go maps or structs), keys without any
|
||||
// sub-hashes are encoded first.
|
||||
//
|
||||
// If a Go map is encoded, then its keys are sorted alphabetically for
|
||||
// deterministic output. More control over this behavior may be provided if
|
||||
// there is demand for it.
|
||||
//
|
||||
// Encoding Go values without a corresponding TOML representation---like map
|
||||
// types with non-string keys---will cause an error to be returned. Similarly
|
||||
// for mixed arrays/slices, arrays/slices with nil elements, embedded
|
||||
// non-struct types and nested slices containing maps or structs.
|
||||
// (e.g., [][]map[string]string is not allowed but []map[string]string is OK
|
||||
// and so is []map[string][]string.)
|
||||
func (enc *Encoder) Encode(v interface{}) error {
|
||||
rv := eindirect(reflect.ValueOf(v))
|
||||
if err := enc.safeEncode(Key([]string{}), rv); err != nil {
|
||||
return err
|
||||
}
|
||||
return enc.w.Flush()
|
||||
}
|
||||
|
||||
func (enc *Encoder) safeEncode(key Key, rv reflect.Value) (err error) {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
if terr, ok := r.(tomlEncodeError); ok {
|
||||
err = terr.error
|
||||
return
|
||||
}
|
||||
panic(r)
|
||||
}
|
||||
}()
|
||||
enc.encode(key, rv)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (enc *Encoder) encode(key Key, rv reflect.Value) {
|
||||
// Special case. Time needs to be in ISO8601 format.
|
||||
// Special case. If we can marshal the type to text, then we use that.
|
||||
// Basically, this prevents the encoder from handling these types as
|
||||
// generic structs (or whatever the underlying type of a TextMarshaler is).
|
||||
switch rv.Interface().(type) {
|
||||
case time.Time, TextMarshaler:
|
||||
enc.keyEqElement(key, rv)
|
||||
return
|
||||
}
|
||||
|
||||
k := rv.Kind()
|
||||
switch k {
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
|
||||
reflect.Int64,
|
||||
reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32,
|
||||
reflect.Uint64,
|
||||
reflect.Float32, reflect.Float64, reflect.String, reflect.Bool:
|
||||
enc.keyEqElement(key, rv)
|
||||
case reflect.Array, reflect.Slice:
|
||||
if typeEqual(tomlArrayHash, tomlTypeOfGo(rv)) {
|
||||
enc.eArrayOfTables(key, rv)
|
||||
} else {
|
||||
enc.keyEqElement(key, rv)
|
||||
}
|
||||
case reflect.Interface:
|
||||
if rv.IsNil() {
|
||||
return
|
||||
}
|
||||
enc.encode(key, rv.Elem())
|
||||
case reflect.Map:
|
||||
if rv.IsNil() {
|
||||
return
|
||||
}
|
||||
enc.eTable(key, rv)
|
||||
case reflect.Ptr:
|
||||
if rv.IsNil() {
|
||||
return
|
||||
}
|
||||
enc.encode(key, rv.Elem())
|
||||
case reflect.Struct:
|
||||
enc.eTable(key, rv)
|
||||
default:
|
||||
panic(e("unsupported type for key '%s': %s", key, k))
|
||||
}
|
||||
}
|
||||
|
||||
// eElement encodes any value that can be an array element (primitives and
|
||||
// arrays).
|
||||
func (enc *Encoder) eElement(rv reflect.Value) {
|
||||
switch v := rv.Interface().(type) {
|
||||
case time.Time:
|
||||
// Special case time.Time as a primitive. Has to come before
|
||||
// TextMarshaler below because time.Time implements
|
||||
// encoding.TextMarshaler, but we need to always use UTC.
|
||||
enc.wf(v.UTC().Format("2006-01-02T15:04:05Z"))
|
||||
return
|
||||
case TextMarshaler:
|
||||
// Special case. Use text marshaler if it's available for this value.
|
||||
if s, err := v.MarshalText(); err != nil {
|
||||
encPanic(err)
|
||||
} else {
|
||||
enc.writeQuoted(string(s))
|
||||
}
|
||||
return
|
||||
}
|
||||
switch rv.Kind() {
|
||||
case reflect.Bool:
|
||||
enc.wf(strconv.FormatBool(rv.Bool()))
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
|
||||
reflect.Int64:
|
||||
enc.wf(strconv.FormatInt(rv.Int(), 10))
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16,
|
||||
reflect.Uint32, reflect.Uint64:
|
||||
enc.wf(strconv.FormatUint(rv.Uint(), 10))
|
||||
case reflect.Float32:
|
||||
enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 32)))
|
||||
case reflect.Float64:
|
||||
enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 64)))
|
||||
case reflect.Array, reflect.Slice:
|
||||
enc.eArrayOrSliceElement(rv)
|
||||
case reflect.Interface:
|
||||
enc.eElement(rv.Elem())
|
||||
case reflect.String:
|
||||
enc.writeQuoted(rv.String())
|
||||
default:
|
||||
panic(e("unexpected primitive type: %s", rv.Kind()))
|
||||
}
|
||||
}
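To make the time.Time special case above concrete: the encoder always converts to UTC and writes the 2006-01-02T15:04:05Z layout, so a zoned value comes out as a Z-suffixed datetime. A small sketch (the struct and values are illustrative only):

package main

import (
	"os"
	"time"

	"github.com/BurntSushi/toml"
)

func main() {
	v := struct {
		CreatedAt time.Time `toml:"created_at"`
	}{
		CreatedAt: time.Date(2020, 8, 17, 10, 30, 0, 0, time.FixedZone("UTC-7", -7*60*60)),
	}
	// Prints: created_at = 2020-08-17T17:30:00Z
	if err := toml.NewEncoder(os.Stdout).Encode(v); err != nil {
		panic(err)
	}
}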
|
||||
|
||||
// By the TOML spec, all floats must have a decimal with at least one
|
||||
// number on either side.
|
||||
func floatAddDecimal(fstr string) string {
|
||||
if !strings.Contains(fstr, ".") {
|
||||
return fstr + ".0"
|
||||
}
|
||||
return fstr
|
||||
}
|
||||
|
||||
func (enc *Encoder) writeQuoted(s string) {
|
||||
enc.wf("\"%s\"", quotedReplacer.Replace(s))
|
||||
}
|
||||
|
||||
func (enc *Encoder) eArrayOrSliceElement(rv reflect.Value) {
|
||||
length := rv.Len()
|
||||
enc.wf("[")
|
||||
for i := 0; i < length; i++ {
|
||||
elem := rv.Index(i)
|
||||
enc.eElement(elem)
|
||||
if i != length-1 {
|
||||
enc.wf(", ")
|
||||
}
|
||||
}
|
||||
enc.wf("]")
|
||||
}
|
||||
|
||||
func (enc *Encoder) eArrayOfTables(key Key, rv reflect.Value) {
|
||||
if len(key) == 0 {
|
||||
encPanic(errNoKey)
|
||||
}
|
||||
for i := 0; i < rv.Len(); i++ {
|
||||
trv := rv.Index(i)
|
||||
if isNil(trv) {
|
||||
continue
|
||||
}
|
||||
panicIfInvalidKey(key)
|
||||
enc.newline()
|
||||
enc.wf("%s[[%s]]", enc.indentStr(key), key.maybeQuotedAll())
|
||||
enc.newline()
|
||||
enc.eMapOrStruct(key, trv)
|
||||
}
|
||||
}
|
||||
|
||||
func (enc *Encoder) eTable(key Key, rv reflect.Value) {
|
||||
panicIfInvalidKey(key)
|
||||
if len(key) == 1 {
|
||||
// Output an extra newline between top-level tables.
|
||||
// (The newline isn't written if nothing else has been written though.)
|
||||
enc.newline()
|
||||
}
|
||||
if len(key) > 0 {
|
||||
enc.wf("%s[%s]", enc.indentStr(key), key.maybeQuotedAll())
|
||||
enc.newline()
|
||||
}
|
||||
enc.eMapOrStruct(key, rv)
|
||||
}
|
||||
|
||||
func (enc *Encoder) eMapOrStruct(key Key, rv reflect.Value) {
|
||||
switch rv := eindirect(rv); rv.Kind() {
|
||||
case reflect.Map:
|
||||
enc.eMap(key, rv)
|
||||
case reflect.Struct:
|
||||
enc.eStruct(key, rv)
|
||||
default:
|
||||
panic("eTable: unhandled reflect.Value Kind: " + rv.Kind().String())
|
||||
}
|
||||
}
|
||||
|
||||
func (enc *Encoder) eMap(key Key, rv reflect.Value) {
|
||||
rt := rv.Type()
|
||||
if rt.Key().Kind() != reflect.String {
|
||||
encPanic(errNonString)
|
||||
}
|
||||
|
||||
// Sort keys so that we have deterministic output. And write keys directly
|
||||
// underneath this key first, before writing sub-structs or sub-maps.
|
||||
var mapKeysDirect, mapKeysSub []string
|
||||
for _, mapKey := range rv.MapKeys() {
|
||||
k := mapKey.String()
|
||||
if typeIsHash(tomlTypeOfGo(rv.MapIndex(mapKey))) {
|
||||
mapKeysSub = append(mapKeysSub, k)
|
||||
} else {
|
||||
mapKeysDirect = append(mapKeysDirect, k)
|
||||
}
|
||||
}
|
||||
|
||||
var writeMapKeys = func(mapKeys []string) {
|
||||
sort.Strings(mapKeys)
|
||||
for _, mapKey := range mapKeys {
|
||||
mrv := rv.MapIndex(reflect.ValueOf(mapKey))
|
||||
if isNil(mrv) {
|
||||
// Don't write anything for nil fields.
|
||||
continue
|
||||
}
|
||||
enc.encode(key.add(mapKey), mrv)
|
||||
}
|
||||
}
|
||||
writeMapKeys(mapKeysDirect)
|
||||
writeMapKeys(mapKeysSub)
|
||||
}
|
||||
|
||||
func (enc *Encoder) eStruct(key Key, rv reflect.Value) {
|
||||
// Write keys for fields directly under this key first, because if we write
|
||||
// a field that creates a new table, then all keys under it will be in that
|
||||
// table (not the one we're writing here).
|
||||
rt := rv.Type()
|
||||
var fieldsDirect, fieldsSub [][]int
|
||||
var addFields func(rt reflect.Type, rv reflect.Value, start []int)
|
||||
addFields = func(rt reflect.Type, rv reflect.Value, start []int) {
|
||||
for i := 0; i < rt.NumField(); i++ {
|
||||
f := rt.Field(i)
|
||||
// skip unexported fields
|
||||
if f.PkgPath != "" && !f.Anonymous {
|
||||
continue
|
||||
}
|
||||
frv := rv.Field(i)
|
||||
if f.Anonymous {
|
||||
t := f.Type
|
||||
switch t.Kind() {
|
||||
case reflect.Struct:
|
||||
// Treat anonymous struct fields with
|
||||
// tag names as though they are not
|
||||
// anonymous, like encoding/json does.
|
||||
if getOptions(f.Tag).name == "" {
|
||||
addFields(t, frv, f.Index)
|
||||
continue
|
||||
}
|
||||
case reflect.Ptr:
|
||||
if t.Elem().Kind() == reflect.Struct &&
|
||||
getOptions(f.Tag).name == "" {
|
||||
if !frv.IsNil() {
|
||||
addFields(t.Elem(), frv.Elem(), f.Index)
|
||||
}
|
||||
continue
|
||||
}
|
||||
// Fall through to the normal field encoding logic below
|
||||
// for non-struct anonymous fields.
|
||||
}
|
||||
}
|
||||
|
||||
if typeIsHash(tomlTypeOfGo(frv)) {
|
||||
fieldsSub = append(fieldsSub, append(start, f.Index...))
|
||||
} else {
|
||||
fieldsDirect = append(fieldsDirect, append(start, f.Index...))
|
||||
}
|
||||
}
|
||||
}
|
||||
addFields(rt, rv, nil)
|
||||
|
||||
var writeFields = func(fields [][]int) {
|
||||
for _, fieldIndex := range fields {
|
||||
sft := rt.FieldByIndex(fieldIndex)
|
||||
sf := rv.FieldByIndex(fieldIndex)
|
||||
if isNil(sf) {
|
||||
// Don't write anything for nil fields.
|
||||
continue
|
||||
}
|
||||
|
||||
opts := getOptions(sft.Tag)
|
||||
if opts.skip {
|
||||
continue
|
||||
}
|
||||
keyName := sft.Name
|
||||
if opts.name != "" {
|
||||
keyName = opts.name
|
||||
}
|
||||
if opts.omitempty && isEmpty(sf) {
|
||||
continue
|
||||
}
|
||||
if opts.omitzero && isZero(sf) {
|
||||
continue
|
||||
}
|
||||
|
||||
enc.encode(key.add(keyName), sf)
|
||||
}
|
||||
}
|
||||
writeFields(fieldsDirect)
|
||||
writeFields(fieldsSub)
|
||||
}
|
||||
|
||||
// tomlTypeName returns the TOML type name of the Go value's type. It is
|
||||
// used to determine whether the types of array elements are mixed (which is
|
||||
// forbidden). If the Go value is nil, then it is illegal for it to be an array
|
||||
// element, and valueIsNil is returned as true.
|
||||
|
||||
// Returns the TOML type of a Go value. The type may be `nil`, which means
|
||||
// no concrete TOML type could be found.
|
||||
func tomlTypeOfGo(rv reflect.Value) tomlType {
|
||||
if isNil(rv) || !rv.IsValid() {
|
||||
return nil
|
||||
}
|
||||
switch rv.Kind() {
|
||||
case reflect.Bool:
|
||||
return tomlBool
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
|
||||
reflect.Int64,
|
||||
reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32,
|
||||
reflect.Uint64:
|
||||
return tomlInteger
|
||||
case reflect.Float32, reflect.Float64:
|
||||
return tomlFloat
|
||||
case reflect.Array, reflect.Slice:
|
||||
if typeEqual(tomlHash, tomlArrayType(rv)) {
|
||||
return tomlArrayHash
|
||||
}
|
||||
return tomlArray
|
||||
case reflect.Ptr, reflect.Interface:
|
||||
return tomlTypeOfGo(rv.Elem())
|
||||
case reflect.String:
|
||||
return tomlString
|
||||
case reflect.Map:
|
||||
return tomlHash
|
||||
case reflect.Struct:
|
||||
switch rv.Interface().(type) {
|
||||
case time.Time:
|
||||
return tomlDatetime
|
||||
case TextMarshaler:
|
||||
return tomlString
|
||||
default:
|
||||
return tomlHash
|
||||
}
|
||||
default:
|
||||
panic("unexpected reflect.Kind: " + rv.Kind().String())
|
||||
}
|
||||
}
|
||||
|
||||
// tomlArrayType returns the element type of a TOML array. The type returned
|
||||
// may be nil if it cannot be determined (e.g., a nil slice or a zero length
|
||||
// slice). This function may also panic if it finds a type that cannot be
|
||||
// expressed in TOML (such as nil elements, heterogeneous arrays or directly
|
||||
// nested arrays of tables).
|
||||
func tomlArrayType(rv reflect.Value) tomlType {
|
||||
if isNil(rv) || !rv.IsValid() || rv.Len() == 0 {
|
||||
return nil
|
||||
}
|
||||
firstType := tomlTypeOfGo(rv.Index(0))
|
||||
if firstType == nil {
|
||||
encPanic(errArrayNilElement)
|
||||
}
|
||||
|
||||
rvlen := rv.Len()
|
||||
for i := 1; i < rvlen; i++ {
|
||||
elem := rv.Index(i)
|
||||
switch elemType := tomlTypeOfGo(elem); {
|
||||
case elemType == nil:
|
||||
encPanic(errArrayNilElement)
|
||||
case !typeEqual(firstType, elemType):
|
||||
encPanic(errArrayMixedElementTypes)
|
||||
}
|
||||
}
|
||||
// If we have a nested array, then we must make sure that the nested
|
||||
// array contains ONLY primitives.
|
||||
// This checks arbitrarily nested arrays.
|
||||
if typeEqual(firstType, tomlArray) || typeEqual(firstType, tomlArrayHash) {
|
||||
nest := tomlArrayType(eindirect(rv.Index(0)))
|
||||
if typeEqual(nest, tomlHash) || typeEqual(nest, tomlArrayHash) {
|
||||
encPanic(errArrayNoTable)
|
||||
}
|
||||
}
|
||||
return firstType
|
||||
}
|
||||
|
||||
type tagOptions struct {
|
||||
skip bool // "-"
|
||||
name string
|
||||
omitempty bool
|
||||
omitzero bool
|
||||
}
|
||||
|
||||
func getOptions(tag reflect.StructTag) tagOptions {
|
||||
t := tag.Get("toml")
|
||||
if t == "-" {
|
||||
return tagOptions{skip: true}
|
||||
}
|
||||
var opts tagOptions
|
||||
parts := strings.Split(t, ",")
|
||||
opts.name = parts[0]
|
||||
for _, s := range parts[1:] {
|
||||
switch s {
|
||||
case "omitempty":
|
||||
opts.omitempty = true
|
||||
case "omitzero":
|
||||
opts.omitzero = true
|
||||
}
|
||||
}
|
||||
return opts
|
||||
}
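The options parsed above correspond to struct tags in the familiar encoding-package style. A short sketch of what each one does during encoding (field names and values are illustrative only):

package main

import (
	"os"

	"github.com/BurntSushi/toml"
)

type record struct {
	Name    string   `toml:"name"`             // rename the key
	Labels  []string `toml:"labels,omitempty"` // dropped while the slice is empty
	Retries int      `toml:"retries,omitzero"` // dropped while the value is zero
	Ignored string   `toml:"-"`                // never encoded
}

func main() {
	r := record{Name: "job-1", Ignored: "not written"}
	// Only `name = "job-1"` is emitted: labels is empty, retries is zero,
	// and Ignored is tagged "-".
	if err := toml.NewEncoder(os.Stdout).Encode(r); err != nil {
		panic(err)
	}
}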
|
||||
|
||||
func isZero(rv reflect.Value) bool {
|
||||
switch rv.Kind() {
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
return rv.Int() == 0
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
|
||||
return rv.Uint() == 0
|
||||
case reflect.Float32, reflect.Float64:
|
||||
return rv.Float() == 0.0
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func isEmpty(rv reflect.Value) bool {
|
||||
switch rv.Kind() {
|
||||
case reflect.Array, reflect.Slice, reflect.Map, reflect.String:
|
||||
return rv.Len() == 0
|
||||
case reflect.Bool:
|
||||
return !rv.Bool()
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (enc *Encoder) newline() {
|
||||
if enc.hasWritten {
|
||||
enc.wf("\n")
|
||||
}
|
||||
}
|
||||
|
||||
func (enc *Encoder) keyEqElement(key Key, val reflect.Value) {
|
||||
if len(key) == 0 {
|
||||
encPanic(errNoKey)
|
||||
}
|
||||
panicIfInvalidKey(key)
|
||||
enc.wf("%s%s = ", enc.indentStr(key), key.maybeQuoted(len(key)-1))
|
||||
enc.eElement(val)
|
||||
enc.newline()
|
||||
}
|
||||
|
||||
func (enc *Encoder) wf(format string, v ...interface{}) {
|
||||
if _, err := fmt.Fprintf(enc.w, format, v...); err != nil {
|
||||
encPanic(err)
|
||||
}
|
||||
enc.hasWritten = true
|
||||
}
|
||||
|
||||
func (enc *Encoder) indentStr(key Key) string {
|
||||
return strings.Repeat(enc.Indent, len(key)-1)
|
||||
}
|
||||
|
||||
func encPanic(err error) {
|
||||
panic(tomlEncodeError{err})
|
||||
}
|
||||
|
||||
func eindirect(v reflect.Value) reflect.Value {
|
||||
switch v.Kind() {
|
||||
case reflect.Ptr, reflect.Interface:
|
||||
return eindirect(v.Elem())
|
||||
default:
|
||||
return v
|
||||
}
|
||||
}
|
||||
|
||||
func isNil(rv reflect.Value) bool {
|
||||
switch rv.Kind() {
|
||||
case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
|
||||
return rv.IsNil()
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
func panicIfInvalidKey(key Key) {
|
||||
for _, k := range key {
|
||||
if len(k) == 0 {
|
||||
encPanic(e("Key '%s' is not a valid table name. Key names "+
|
||||
"cannot be empty.", key.maybeQuotedAll()))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func isValidKeyName(s string) bool {
|
||||
return len(s) != 0
|
||||
}
19 vendor/github.com/BurntSushi/toml/encoding_types.go generated vendored
@ -1,19 +0,0 @@
// +build go1.2

package toml

// In order to support Go 1.1, we define our own TextMarshaler and
// TextUnmarshaler types. For Go 1.2+, we just alias them with the
// standard library interfaces.

import (
	"encoding"
)

// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here
// so that Go 1.1 can be supported.
type TextMarshaler encoding.TextMarshaler

// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined
// here so that Go 1.1 can be supported.
type TextUnmarshaler encoding.TextUnmarshaler
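Because this file merely aliases the standard encoding interfaces, any type implementing them takes part in decoding and encoding. A sketch with a hypothetical Level type (not part of this package):

package main

import (
	"fmt"
	"strings"

	"github.com/BurntSushi/toml"
)

// Level is a hypothetical enum-like type that round-trips as a TOML string.
type Level int

const (
	Info Level = iota
	Warn
)

func (l Level) MarshalText() ([]byte, error) {
	if l == Warn {
		return []byte("warn"), nil
	}
	return []byte("info"), nil
}

func (l *Level) UnmarshalText(text []byte) error {
	switch strings.ToLower(string(text)) {
	case "info":
		*l = Info
	case "warn":
		*l = Warn
	default:
		return fmt.Errorf("unknown level %q", text)
	}
	return nil
}

func main() {
	var cfg struct{ Level Level }
	if _, err := toml.Decode(`level = "warn"`, &cfg); err != nil {
		panic(err)
	}
	fmt.Println(cfg.Level == Warn) // true: UnmarshalText handled the string value
}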
18 vendor/github.com/BurntSushi/toml/encoding_types_1.1.go generated vendored
@ -1,18 +0,0 @@
// +build !go1.2

package toml

// These interfaces were introduced in Go 1.2, so we add them manually when
// compiling for Go 1.1.

// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here
// so that Go 1.1 can be supported.
type TextMarshaler interface {
	MarshalText() (text []byte, err error)
}

// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined
// here so that Go 1.1 can be supported.
type TextUnmarshaler interface {
	UnmarshalText(text []byte) error
}
953 vendor/github.com/BurntSushi/toml/lex.go generated vendored
@ -1,953 +0,0 @@
|
||||
package toml
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
type itemType int
|
||||
|
||||
const (
|
||||
itemError itemType = iota
|
||||
itemNIL // used in the parser to indicate no type
|
||||
itemEOF
|
||||
itemText
|
||||
itemString
|
||||
itemRawString
|
||||
itemMultilineString
|
||||
itemRawMultilineString
|
||||
itemBool
|
||||
itemInteger
|
||||
itemFloat
|
||||
itemDatetime
|
||||
itemArray // the start of an array
|
||||
itemArrayEnd
|
||||
itemTableStart
|
||||
itemTableEnd
|
||||
itemArrayTableStart
|
||||
itemArrayTableEnd
|
||||
itemKeyStart
|
||||
itemCommentStart
|
||||
itemInlineTableStart
|
||||
itemInlineTableEnd
|
||||
)
|
||||
|
||||
const (
|
||||
eof = 0
|
||||
comma = ','
|
||||
tableStart = '['
|
||||
tableEnd = ']'
|
||||
arrayTableStart = '['
|
||||
arrayTableEnd = ']'
|
||||
tableSep = '.'
|
||||
keySep = '='
|
||||
arrayStart = '['
|
||||
arrayEnd = ']'
|
||||
commentStart = '#'
|
||||
stringStart = '"'
|
||||
stringEnd = '"'
|
||||
rawStringStart = '\''
|
||||
rawStringEnd = '\''
|
||||
inlineTableStart = '{'
|
||||
inlineTableEnd = '}'
|
||||
)
|
||||
|
||||
type stateFn func(lx *lexer) stateFn
|
||||
|
||||
type lexer struct {
|
||||
input string
|
||||
start int
|
||||
pos int
|
||||
line int
|
||||
state stateFn
|
||||
items chan item
|
||||
|
||||
// Allow for backing up up to three runes.
|
||||
// This is necessary because TOML contains 3-rune tokens (""" and ''').
|
||||
prevWidths [3]int
|
||||
nprev int // how many of prevWidths are in use
|
||||
// If we emit an eof, we can still back up, but it is not OK to call
|
||||
// next again.
|
||||
atEOF bool
|
||||
|
||||
// A stack of state functions used to maintain context.
|
||||
// The idea is to reuse parts of the state machine in various places.
|
||||
// For example, values can appear at the top level or within arbitrarily
|
||||
// nested arrays. The last state on the stack is used after a value has
|
||||
// been lexed. Similarly for comments.
|
||||
stack []stateFn
|
||||
}
|
||||
|
||||
type item struct {
|
||||
typ itemType
|
||||
val string
|
||||
line int
|
||||
}
|
||||
|
||||
func (lx *lexer) nextItem() item {
|
||||
for {
|
||||
select {
|
||||
case item := <-lx.items:
|
||||
return item
|
||||
default:
|
||||
lx.state = lx.state(lx)
|
||||
}
|
||||
}
|
||||
}
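For orientation, a small in-package debugging helper (hypothetical, not part of the upstream file) shows how nextItem drives the state machine until it yields an EOF or error item:

// dumpItems is a hypothetical helper for this package: it runs the lexer over
// the given input and prints every item it emits, stopping on EOF or error.
func dumpItems(input string) {
	lx := lex(input)
	for {
		it := lx.nextItem()
		fmt.Printf("line %d: %s\n", it.line, it)
		if it.typ == itemEOF || it.typ == itemError {
			return
		}
	}
}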
|
||||
|
||||
func lex(input string) *lexer {
|
||||
lx := &lexer{
|
||||
input: input,
|
||||
state: lexTop,
|
||||
line: 1,
|
||||
items: make(chan item, 10),
|
||||
stack: make([]stateFn, 0, 10),
|
||||
}
|
||||
return lx
|
||||
}
|
||||
|
||||
func (lx *lexer) push(state stateFn) {
|
||||
lx.stack = append(lx.stack, state)
|
||||
}
|
||||
|
||||
func (lx *lexer) pop() stateFn {
|
||||
if len(lx.stack) == 0 {
|
||||
return lx.errorf("BUG in lexer: no states to pop")
|
||||
}
|
||||
last := lx.stack[len(lx.stack)-1]
|
||||
lx.stack = lx.stack[0 : len(lx.stack)-1]
|
||||
return last
|
||||
}
|
||||
|
||||
func (lx *lexer) current() string {
|
||||
return lx.input[lx.start:lx.pos]
|
||||
}
|
||||
|
||||
func (lx *lexer) emit(typ itemType) {
|
||||
lx.items <- item{typ, lx.current(), lx.line}
|
||||
lx.start = lx.pos
|
||||
}
|
||||
|
||||
func (lx *lexer) emitTrim(typ itemType) {
|
||||
lx.items <- item{typ, strings.TrimSpace(lx.current()), lx.line}
|
||||
lx.start = lx.pos
|
||||
}
|
||||
|
||||
func (lx *lexer) next() (r rune) {
|
||||
if lx.atEOF {
|
||||
panic("next called after EOF")
|
||||
}
|
||||
if lx.pos >= len(lx.input) {
|
||||
lx.atEOF = true
|
||||
return eof
|
||||
}
|
||||
|
||||
if lx.input[lx.pos] == '\n' {
|
||||
lx.line++
|
||||
}
|
||||
lx.prevWidths[2] = lx.prevWidths[1]
|
||||
lx.prevWidths[1] = lx.prevWidths[0]
|
||||
if lx.nprev < 3 {
|
||||
lx.nprev++
|
||||
}
|
||||
r, w := utf8.DecodeRuneInString(lx.input[lx.pos:])
|
||||
lx.prevWidths[0] = w
|
||||
lx.pos += w
|
||||
return r
|
||||
}
|
||||
|
||||
// ignore skips over the pending input before this point.
|
||||
func (lx *lexer) ignore() {
|
||||
lx.start = lx.pos
|
||||
}
|
||||
|
||||
// backup steps back one rune. Can be called only twice between calls to next.
|
||||
func (lx *lexer) backup() {
|
||||
if lx.atEOF {
|
||||
lx.atEOF = false
|
||||
return
|
||||
}
|
||||
if lx.nprev < 1 {
|
||||
panic("backed up too far")
|
||||
}
|
||||
w := lx.prevWidths[0]
|
||||
lx.prevWidths[0] = lx.prevWidths[1]
|
||||
lx.prevWidths[1] = lx.prevWidths[2]
|
||||
lx.nprev--
|
||||
lx.pos -= w
|
||||
if lx.pos < len(lx.input) && lx.input[lx.pos] == '\n' {
|
||||
lx.line--
|
||||
}
|
||||
}
|
||||
|
||||
// accept consumes the next rune if it's equal to `valid`.
|
||||
func (lx *lexer) accept(valid rune) bool {
|
||||
if lx.next() == valid {
|
||||
return true
|
||||
}
|
||||
lx.backup()
|
||||
return false
|
||||
}
|
||||
|
||||
// peek returns but does not consume the next rune in the input.
|
||||
func (lx *lexer) peek() rune {
|
||||
r := lx.next()
|
||||
lx.backup()
|
||||
return r
|
||||
}
|
||||
|
||||
// skip ignores all input that matches the given predicate.
|
||||
func (lx *lexer) skip(pred func(rune) bool) {
|
||||
for {
|
||||
r := lx.next()
|
||||
if pred(r) {
|
||||
continue
|
||||
}
|
||||
lx.backup()
|
||||
lx.ignore()
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// errorf stops all lexing by emitting an error and returning `nil`.
|
||||
// Note that any value that is a character is escaped if it's a special
|
||||
// character (newlines, tabs, etc.).
|
||||
func (lx *lexer) errorf(format string, values ...interface{}) stateFn {
|
||||
lx.items <- item{
|
||||
itemError,
|
||||
fmt.Sprintf(format, values...),
|
||||
lx.line,
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// lexTop consumes elements at the top level of TOML data.
|
||||
func lexTop(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
if isWhitespace(r) || isNL(r) {
|
||||
return lexSkip(lx, lexTop)
|
||||
}
|
||||
switch r {
|
||||
case commentStart:
|
||||
lx.push(lexTop)
|
||||
return lexCommentStart
|
||||
case tableStart:
|
||||
return lexTableStart
|
||||
case eof:
|
||||
if lx.pos > lx.start {
|
||||
return lx.errorf("unexpected EOF")
|
||||
}
|
||||
lx.emit(itemEOF)
|
||||
return nil
|
||||
}
|
||||
|
||||
// At this point, the only valid item can be a key, so we back up
|
||||
// and let the key lexer do the rest.
|
||||
lx.backup()
|
||||
lx.push(lexTopEnd)
|
||||
return lexKeyStart
|
||||
}
|
||||
|
||||
// lexTopEnd is entered whenever a top-level item has been consumed. (A value
|
||||
// or a table.) It must see only whitespace, and will turn back to lexTop
|
||||
// upon a newline. If it sees EOF, it will quit the lexer successfully.
|
||||
func lexTopEnd(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
switch {
|
||||
case r == commentStart:
|
||||
// a comment will read to a newline for us.
|
||||
lx.push(lexTop)
|
||||
return lexCommentStart
|
||||
case isWhitespace(r):
|
||||
return lexTopEnd
|
||||
case isNL(r):
|
||||
lx.ignore()
|
||||
return lexTop
|
||||
case r == eof:
|
||||
lx.emit(itemEOF)
|
||||
return nil
|
||||
}
|
||||
return lx.errorf("expected a top-level item to end with a newline, "+
|
||||
"comment, or EOF, but got %q instead", r)
|
||||
}
|
||||
|
||||
// lexTableStart lexes the beginning of a table. Namely, it makes sure that
|
||||
// it starts with a character other than '.' and ']'.
|
||||
// It assumes that '[' has already been consumed.
|
||||
// It also handles the case that this is an item in an array of tables.
|
||||
// e.g., '[[name]]'.
|
||||
func lexTableStart(lx *lexer) stateFn {
|
||||
if lx.peek() == arrayTableStart {
|
||||
lx.next()
|
||||
lx.emit(itemArrayTableStart)
|
||||
lx.push(lexArrayTableEnd)
|
||||
} else {
|
||||
lx.emit(itemTableStart)
|
||||
lx.push(lexTableEnd)
|
||||
}
|
||||
return lexTableNameStart
|
||||
}
|
||||
|
||||
func lexTableEnd(lx *lexer) stateFn {
|
||||
lx.emit(itemTableEnd)
|
||||
return lexTopEnd
|
||||
}
|
||||
|
||||
func lexArrayTableEnd(lx *lexer) stateFn {
|
||||
if r := lx.next(); r != arrayTableEnd {
|
||||
return lx.errorf("expected end of table array name delimiter %q, "+
|
||||
"but got %q instead", arrayTableEnd, r)
|
||||
}
|
||||
lx.emit(itemArrayTableEnd)
|
||||
return lexTopEnd
|
||||
}
|
||||
|
||||
func lexTableNameStart(lx *lexer) stateFn {
|
||||
lx.skip(isWhitespace)
|
||||
switch r := lx.peek(); {
|
||||
case r == tableEnd || r == eof:
|
||||
return lx.errorf("unexpected end of table name " +
|
||||
"(table names cannot be empty)")
|
||||
case r == tableSep:
|
||||
return lx.errorf("unexpected table separator " +
|
||||
"(table names cannot be empty)")
|
||||
case r == stringStart || r == rawStringStart:
|
||||
lx.ignore()
|
||||
lx.push(lexTableNameEnd)
|
||||
return lexValue // reuse string lexing
|
||||
default:
|
||||
return lexBareTableName
|
||||
}
|
||||
}
|
||||
|
||||
// lexBareTableName lexes the name of a table. It assumes that at least one
|
||||
// valid character for the table has already been read.
|
||||
func lexBareTableName(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
if isBareKeyChar(r) {
|
||||
return lexBareTableName
|
||||
}
|
||||
lx.backup()
|
||||
lx.emit(itemText)
|
||||
return lexTableNameEnd
|
||||
}
|
||||
|
||||
// lexTableNameEnd reads the end of a piece of a table name, optionally
|
||||
// consuming whitespace.
|
||||
func lexTableNameEnd(lx *lexer) stateFn {
|
||||
lx.skip(isWhitespace)
|
||||
switch r := lx.next(); {
|
||||
case isWhitespace(r):
|
||||
return lexTableNameEnd
|
||||
case r == tableSep:
|
||||
lx.ignore()
|
||||
return lexTableNameStart
|
||||
case r == tableEnd:
|
||||
return lx.pop()
|
||||
default:
|
||||
return lx.errorf("expected '.' or ']' to end table name, "+
|
||||
"but got %q instead", r)
|
||||
}
|
||||
}
|
||||
|
||||
// lexKeyStart consumes a key name up until the first non-whitespace character.
|
||||
// lexKeyStart will ignore whitespace.
|
||||
func lexKeyStart(lx *lexer) stateFn {
|
||||
r := lx.peek()
|
||||
switch {
|
||||
case r == keySep:
|
||||
return lx.errorf("unexpected key separator %q", keySep)
|
||||
case isWhitespace(r) || isNL(r):
|
||||
lx.next()
|
||||
return lexSkip(lx, lexKeyStart)
|
||||
case r == stringStart || r == rawStringStart:
|
||||
lx.ignore()
|
||||
lx.emit(itemKeyStart)
|
||||
lx.push(lexKeyEnd)
|
||||
return lexValue // reuse string lexing
|
||||
default:
|
||||
lx.ignore()
|
||||
lx.emit(itemKeyStart)
|
||||
return lexBareKey
|
||||
}
|
||||
}
|
||||
|
||||
// lexBareKey consumes the text of a bare key. Assumes that the first character
|
||||
// (which is not whitespace) has not yet been consumed.
|
||||
func lexBareKey(lx *lexer) stateFn {
|
||||
switch r := lx.next(); {
|
||||
case isBareKeyChar(r):
|
||||
return lexBareKey
|
||||
case isWhitespace(r):
|
||||
lx.backup()
|
||||
lx.emit(itemText)
|
||||
return lexKeyEnd
|
||||
case r == keySep:
|
||||
lx.backup()
|
||||
lx.emit(itemText)
|
||||
return lexKeyEnd
|
||||
default:
|
||||
return lx.errorf("bare keys cannot contain %q", r)
|
||||
}
|
||||
}
|
||||
|
||||
// lexKeyEnd consumes the end of a key and trims whitespace (up to the key
|
||||
// separator).
|
||||
func lexKeyEnd(lx *lexer) stateFn {
|
||||
switch r := lx.next(); {
|
||||
case r == keySep:
|
||||
return lexSkip(lx, lexValue)
|
||||
case isWhitespace(r):
|
||||
return lexSkip(lx, lexKeyEnd)
|
||||
default:
|
||||
return lx.errorf("expected key separator %q, but got %q instead",
|
||||
keySep, r)
|
||||
}
|
||||
}
|
||||
|
||||
// lexValue starts the consumption of a value anywhere a value is expected.
|
||||
// lexValue will ignore whitespace.
|
||||
// After a value is lexed, the last state on the stack is popped and returned.
|
||||
func lexValue(lx *lexer) stateFn {
|
||||
// We allow whitespace to precede a value, but NOT newlines.
|
||||
// In array syntax, the array states are responsible for ignoring newlines.
|
||||
r := lx.next()
|
||||
switch {
|
||||
case isWhitespace(r):
|
||||
return lexSkip(lx, lexValue)
|
||||
case isDigit(r):
|
||||
lx.backup() // avoid an extra state and use the same as above
|
||||
return lexNumberOrDateStart
|
||||
}
|
||||
switch r {
|
||||
case arrayStart:
|
||||
lx.ignore()
|
||||
lx.emit(itemArray)
|
||||
return lexArrayValue
|
||||
case inlineTableStart:
|
||||
lx.ignore()
|
||||
lx.emit(itemInlineTableStart)
|
||||
return lexInlineTableValue
|
||||
case stringStart:
|
||||
if lx.accept(stringStart) {
|
||||
if lx.accept(stringStart) {
|
||||
lx.ignore() // Ignore """
|
||||
return lexMultilineString
|
||||
}
|
||||
lx.backup()
|
||||
}
|
||||
lx.ignore() // ignore the '"'
|
||||
return lexString
|
||||
case rawStringStart:
|
||||
if lx.accept(rawStringStart) {
|
||||
if lx.accept(rawStringStart) {
|
||||
lx.ignore() // Ignore """
|
||||
return lexMultilineRawString
|
||||
}
|
||||
lx.backup()
|
||||
}
|
||||
lx.ignore() // ignore the "'"
|
||||
return lexRawString
|
||||
case '+', '-':
|
||||
return lexNumberStart
|
||||
case '.': // special error case, be kind to users
|
||||
return lx.errorf("floats must start with a digit, not '.'")
|
||||
}
|
||||
if unicode.IsLetter(r) {
|
||||
// Be permissive here; lexBool will give a nice error if the
|
||||
// user wrote something like
|
||||
// x = foo
|
||||
// (i.e. not 'true' or 'false' but is something else word-like.)
|
||||
lx.backup()
|
||||
return lexBool
|
||||
}
|
||||
return lx.errorf("expected value but found %q instead", r)
|
||||
}
|
||||
|
||||
// lexArrayValue consumes one value in an array. It assumes that '[' or ','
|
||||
// have already been consumed. All whitespace and newlines are ignored.
|
||||
func lexArrayValue(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
switch {
|
||||
case isWhitespace(r) || isNL(r):
|
||||
return lexSkip(lx, lexArrayValue)
|
||||
case r == commentStart:
|
||||
lx.push(lexArrayValue)
|
||||
return lexCommentStart
|
||||
case r == comma:
|
||||
return lx.errorf("unexpected comma")
|
||||
case r == arrayEnd:
|
||||
// NOTE(caleb): The spec isn't clear about whether you can have
|
||||
// a trailing comma or not, so we'll allow it.
|
||||
return lexArrayEnd
|
||||
}
|
||||
|
||||
lx.backup()
|
||||
lx.push(lexArrayValueEnd)
|
||||
return lexValue
|
||||
}
|
||||
|
||||
// lexArrayValueEnd consumes everything between the end of an array value and
|
||||
// the next value (or the end of the array): it ignores whitespace and newlines
|
||||
// and expects either a ',' or a ']'.
|
||||
func lexArrayValueEnd(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
switch {
|
||||
case isWhitespace(r) || isNL(r):
|
||||
return lexSkip(lx, lexArrayValueEnd)
|
||||
case r == commentStart:
|
||||
lx.push(lexArrayValueEnd)
|
||||
return lexCommentStart
|
||||
case r == comma:
|
||||
lx.ignore()
|
||||
return lexArrayValue // move on to the next value
|
||||
case r == arrayEnd:
|
||||
return lexArrayEnd
|
||||
}
|
||||
return lx.errorf(
|
||||
"expected a comma or array terminator %q, but got %q instead",
|
||||
arrayEnd, r,
|
||||
)
|
||||
}
|
||||
|
||||
// lexArrayEnd finishes the lexing of an array.
|
||||
// It assumes that a ']' has just been consumed.
|
||||
func lexArrayEnd(lx *lexer) stateFn {
|
||||
lx.ignore()
|
||||
lx.emit(itemArrayEnd)
|
||||
return lx.pop()
|
||||
}
|
||||
|
||||
// lexInlineTableValue consumes one key/value pair in an inline table.
|
||||
// It assumes that '{' or ',' have already been consumed. Whitespace is ignored.
|
||||
func lexInlineTableValue(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
switch {
|
||||
case isWhitespace(r):
|
||||
return lexSkip(lx, lexInlineTableValue)
|
||||
case isNL(r):
|
||||
return lx.errorf("newlines not allowed within inline tables")
|
||||
case r == commentStart:
|
||||
lx.push(lexInlineTableValue)
|
||||
return lexCommentStart
|
||||
case r == comma:
|
||||
return lx.errorf("unexpected comma")
|
||||
case r == inlineTableEnd:
|
||||
return lexInlineTableEnd
|
||||
}
|
||||
lx.backup()
|
||||
lx.push(lexInlineTableValueEnd)
|
||||
return lexKeyStart
|
||||
}
|
||||
|
||||
// lexInlineTableValueEnd consumes everything between the end of an inline table
|
||||
// key/value pair and the next pair (or the end of the table):
|
||||
// it ignores whitespace and expects either a ',' or a '}'.
|
||||
func lexInlineTableValueEnd(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
switch {
|
||||
case isWhitespace(r):
|
||||
return lexSkip(lx, lexInlineTableValueEnd)
|
||||
case isNL(r):
|
||||
return lx.errorf("newlines not allowed within inline tables")
|
||||
case r == commentStart:
|
||||
lx.push(lexInlineTableValueEnd)
|
||||
return lexCommentStart
|
||||
case r == comma:
|
||||
lx.ignore()
|
||||
return lexInlineTableValue
|
||||
case r == inlineTableEnd:
|
||||
return lexInlineTableEnd
|
||||
}
|
||||
return lx.errorf("expected a comma or an inline table terminator %q, "+
|
||||
"but got %q instead", inlineTableEnd, r)
|
||||
}
|
||||
|
||||
// lexInlineTableEnd finishes the lexing of an inline table.
|
||||
// It assumes that a '}' has just been consumed.
|
||||
func lexInlineTableEnd(lx *lexer) stateFn {
|
||||
lx.ignore()
|
||||
lx.emit(itemInlineTableEnd)
|
||||
return lx.pop()
|
||||
}
|
||||
|
||||
// lexString consumes the inner contents of a string. It assumes that the
|
||||
// beginning '"' has already been consumed and ignored.
|
||||
func lexString(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
switch {
|
||||
case r == eof:
|
||||
return lx.errorf("unexpected EOF")
|
||||
case isNL(r):
|
||||
return lx.errorf("strings cannot contain newlines")
|
||||
case r == '\\':
|
||||
lx.push(lexString)
|
||||
return lexStringEscape
|
||||
case r == stringEnd:
|
||||
lx.backup()
|
||||
lx.emit(itemString)
|
||||
lx.next()
|
||||
lx.ignore()
|
||||
return lx.pop()
|
||||
}
|
||||
return lexString
|
||||
}
|
||||
|
||||
// lexMultilineString consumes the inner contents of a string. It assumes that
|
||||
// the beginning '"""' has already been consumed and ignored.
|
||||
func lexMultilineString(lx *lexer) stateFn {
|
||||
switch lx.next() {
|
||||
case eof:
|
||||
return lx.errorf("unexpected EOF")
|
||||
case '\\':
|
||||
return lexMultilineStringEscape
|
||||
case stringEnd:
|
||||
if lx.accept(stringEnd) {
|
||||
if lx.accept(stringEnd) {
|
||||
lx.backup()
|
||||
lx.backup()
|
||||
lx.backup()
|
||||
lx.emit(itemMultilineString)
|
||||
lx.next()
|
||||
lx.next()
|
||||
lx.next()
|
||||
lx.ignore()
|
||||
return lx.pop()
|
||||
}
|
||||
lx.backup()
|
||||
}
|
||||
}
|
||||
return lexMultilineString
|
||||
}
|
||||
|
||||
// lexRawString consumes a raw string. Nothing can be escaped in such a string.
|
||||
// It assumes that the beginning "'" has already been consumed and ignored.
|
||||
func lexRawString(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
switch {
|
||||
case r == eof:
|
||||
return lx.errorf("unexpected EOF")
|
||||
case isNL(r):
|
||||
return lx.errorf("strings cannot contain newlines")
|
||||
case r == rawStringEnd:
|
||||
lx.backup()
|
||||
lx.emit(itemRawString)
|
||||
lx.next()
|
||||
lx.ignore()
|
||||
return lx.pop()
|
||||
}
|
||||
return lexRawString
|
||||
}
|
||||
|
||||
// lexMultilineRawString consumes a raw string. Nothing can be escaped in such
|
||||
// a string. It assumes that the beginning "'''" has already been consumed and
|
||||
// ignored.
|
||||
func lexMultilineRawString(lx *lexer) stateFn {
|
||||
switch lx.next() {
|
||||
case eof:
|
||||
return lx.errorf("unexpected EOF")
|
||||
case rawStringEnd:
|
||||
if lx.accept(rawStringEnd) {
|
||||
if lx.accept(rawStringEnd) {
|
||||
lx.backup()
|
||||
lx.backup()
|
||||
lx.backup()
|
||||
lx.emit(itemRawMultilineString)
|
||||
lx.next()
|
||||
lx.next()
|
||||
lx.next()
|
||||
lx.ignore()
|
||||
return lx.pop()
|
||||
}
|
||||
lx.backup()
|
||||
}
|
||||
}
|
||||
return lexMultilineRawString
|
||||
}
|
||||
|
||||
// lexMultilineStringEscape consumes an escaped character. It assumes that the
|
||||
// preceding '\\' has already been consumed.
|
||||
func lexMultilineStringEscape(lx *lexer) stateFn {
|
||||
// Handle the special case first:
|
||||
if isNL(lx.next()) {
|
||||
return lexMultilineString
|
||||
}
|
||||
lx.backup()
|
||||
lx.push(lexMultilineString)
|
||||
return lexStringEscape(lx)
|
||||
}
|
||||
|
||||
func lexStringEscape(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
switch r {
|
||||
case 'b':
|
||||
fallthrough
|
||||
case 't':
|
||||
fallthrough
|
||||
case 'n':
|
||||
fallthrough
|
||||
case 'f':
|
||||
fallthrough
|
||||
case 'r':
|
||||
fallthrough
|
||||
case '"':
|
||||
fallthrough
|
||||
case '\\':
|
||||
return lx.pop()
|
||||
case 'u':
|
||||
return lexShortUnicodeEscape
|
||||
case 'U':
|
||||
return lexLongUnicodeEscape
|
||||
}
|
||||
return lx.errorf("invalid escape character %q; only the following "+
|
||||
"escape characters are allowed: "+
|
||||
`\b, \t, \n, \f, \r, \", \\, \uXXXX, and \UXXXXXXXX`, r)
|
||||
}
|
||||
|
||||
func lexShortUnicodeEscape(lx *lexer) stateFn {
|
||||
var r rune
|
||||
for i := 0; i < 4; i++ {
|
||||
r = lx.next()
|
||||
if !isHexadecimal(r) {
|
||||
return lx.errorf(`expected four hexadecimal digits after '\u', `+
|
||||
"but got %q instead", lx.current())
|
||||
}
|
||||
}
|
||||
return lx.pop()
|
||||
}
|
||||
|
||||
func lexLongUnicodeEscape(lx *lexer) stateFn {
|
||||
var r rune
|
||||
for i := 0; i < 8; i++ {
|
||||
r = lx.next()
|
||||
if !isHexadecimal(r) {
|
||||
return lx.errorf(`expected eight hexadecimal digits after '\U', `+
|
||||
"but got %q instead", lx.current())
|
||||
}
|
||||
}
|
||||
return lx.pop()
|
||||
}
|
||||
|
||||
// lexNumberOrDateStart consumes either an integer, a float, or datetime.
|
||||
func lexNumberOrDateStart(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
if isDigit(r) {
|
||||
return lexNumberOrDate
|
||||
}
|
||||
switch r {
|
||||
case '_':
|
||||
return lexNumber
|
||||
case 'e', 'E':
|
||||
return lexFloat
|
||||
case '.':
|
||||
return lx.errorf("floats must start with a digit, not '.'")
|
||||
}
|
||||
return lx.errorf("expected a digit but got %q", r)
|
||||
}
|
||||
|
||||
// lexNumberOrDate consumes either an integer, float or datetime.
|
||||
func lexNumberOrDate(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
if isDigit(r) {
|
||||
return lexNumberOrDate
|
||||
}
|
||||
switch r {
|
||||
case '-':
|
||||
return lexDatetime
|
||||
case '_':
|
||||
return lexNumber
|
||||
case '.', 'e', 'E':
|
||||
return lexFloat
|
||||
}
|
||||
|
||||
lx.backup()
|
||||
lx.emit(itemInteger)
|
||||
return lx.pop()
|
||||
}
|
||||
|
||||
// lexDatetime consumes a Datetime, to a first approximation.
|
||||
// The parser validates that it matches one of the accepted formats.
|
||||
func lexDatetime(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
if isDigit(r) {
|
||||
return lexDatetime
|
||||
}
|
||||
switch r {
|
||||
case '-', 'T', ':', '.', 'Z', '+':
|
||||
return lexDatetime
|
||||
}
|
||||
|
||||
lx.backup()
|
||||
lx.emit(itemDatetime)
|
||||
return lx.pop()
|
||||
}
|
||||
|
||||
// lexNumberStart consumes either an integer or a float. It assumes that a sign
|
||||
// has already been read, but that *no* digits have been consumed.
|
||||
// lexNumberStart will move to the appropriate integer or float states.
|
||||
func lexNumberStart(lx *lexer) stateFn {
|
||||
// We MUST see a digit. Even floats have to start with a digit.
|
||||
r := lx.next()
|
||||
if !isDigit(r) {
|
||||
if r == '.' {
|
||||
return lx.errorf("floats must start with a digit, not '.'")
|
||||
}
|
||||
return lx.errorf("expected a digit but got %q", r)
|
||||
}
|
||||
return lexNumber
|
||||
}
|
||||
|
||||
// lexNumber consumes an integer or a float after seeing the first digit.
|
||||
func lexNumber(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
if isDigit(r) {
|
||||
return lexNumber
|
||||
}
|
||||
switch r {
|
||||
case '_':
|
||||
return lexNumber
|
||||
case '.', 'e', 'E':
|
||||
return lexFloat
|
||||
}
|
||||
|
||||
lx.backup()
|
||||
lx.emit(itemInteger)
|
||||
return lx.pop()
|
||||
}
|
||||
|
||||
// lexFloat consumes the elements of a float. It allows any sequence of
|
||||
// float-like characters, so floats emitted by the lexer are only a first
|
||||
// approximation and must be validated by the parser.
|
||||
func lexFloat(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
if isDigit(r) {
|
||||
return lexFloat
|
||||
}
|
||||
switch r {
|
||||
case '_', '.', '-', '+', 'e', 'E':
|
||||
return lexFloat
|
||||
}
|
||||
|
||||
lx.backup()
|
||||
lx.emit(itemFloat)
|
||||
return lx.pop()
|
||||
}
|
||||
|
||||
// lexBool consumes a bool string: 'true' or 'false'.
|
||||
func lexBool(lx *lexer) stateFn {
|
||||
var rs []rune
|
||||
for {
|
||||
r := lx.next()
|
||||
if !unicode.IsLetter(r) {
|
||||
lx.backup()
|
||||
break
|
||||
}
|
||||
rs = append(rs, r)
|
||||
}
|
||||
s := string(rs)
|
||||
switch s {
|
||||
case "true", "false":
|
||||
lx.emit(itemBool)
|
||||
return lx.pop()
|
||||
}
|
||||
return lx.errorf("expected value but found %q instead", s)
|
||||
}
|
||||
|
||||
// lexCommentStart begins the lexing of a comment. It will emit
|
||||
// itemCommentStart and consume no characters, passing control to lexComment.
|
||||
func lexCommentStart(lx *lexer) stateFn {
|
||||
lx.ignore()
|
||||
lx.emit(itemCommentStart)
|
||||
return lexComment
|
||||
}
|
||||
|
||||
// lexComment lexes an entire comment. It assumes that '#' has been consumed.
|
||||
// It will consume *up to* the first newline character, and pass control
|
||||
// back to the last state on the stack.
|
||||
func lexComment(lx *lexer) stateFn {
|
||||
r := lx.peek()
|
||||
if isNL(r) || r == eof {
|
||||
lx.emit(itemText)
|
||||
return lx.pop()
|
||||
}
|
||||
lx.next()
|
||||
return lexComment
|
||||
}
|
||||
|
||||
// lexSkip ignores all slurped input and moves on to the next state.
|
||||
func lexSkip(lx *lexer, nextState stateFn) stateFn {
|
||||
return func(lx *lexer) stateFn {
|
||||
lx.ignore()
|
||||
return nextState
|
||||
}
|
||||
}
|
||||
|
||||
// isWhitespace returns true if `r` is a whitespace character according
|
||||
// to the spec.
|
||||
func isWhitespace(r rune) bool {
|
||||
return r == '\t' || r == ' '
|
||||
}
|
||||
|
||||
func isNL(r rune) bool {
|
||||
return r == '\n' || r == '\r'
|
||||
}
|
||||
|
||||
func isDigit(r rune) bool {
|
||||
return r >= '0' && r <= '9'
|
||||
}
|
||||
|
||||
func isHexadecimal(r rune) bool {
|
||||
return (r >= '0' && r <= '9') ||
|
||||
(r >= 'a' && r <= 'f') ||
|
||||
(r >= 'A' && r <= 'F')
|
||||
}
|
||||
|
||||
func isBareKeyChar(r rune) bool {
|
||||
return (r >= 'A' && r <= 'Z') ||
|
||||
(r >= 'a' && r <= 'z') ||
|
||||
(r >= '0' && r <= '9') ||
|
||||
r == '_' ||
|
||||
r == '-'
|
||||
}
|
||||
|
||||
func (itype itemType) String() string {
|
||||
switch itype {
|
||||
case itemError:
|
||||
return "Error"
|
||||
case itemNIL:
|
||||
return "NIL"
|
||||
case itemEOF:
|
||||
return "EOF"
|
||||
case itemText:
|
||||
return "Text"
|
||||
case itemString, itemRawString, itemMultilineString, itemRawMultilineString:
|
||||
return "String"
|
||||
case itemBool:
|
||||
return "Bool"
|
||||
case itemInteger:
|
||||
return "Integer"
|
||||
case itemFloat:
|
||||
return "Float"
|
||||
case itemDatetime:
|
||||
return "DateTime"
|
||||
case itemTableStart:
|
||||
return "TableStart"
|
||||
case itemTableEnd:
|
||||
return "TableEnd"
|
||||
case itemKeyStart:
|
||||
return "KeyStart"
|
||||
case itemArray:
|
||||
return "Array"
|
||||
case itemArrayEnd:
|
||||
return "ArrayEnd"
|
||||
case itemCommentStart:
|
||||
return "CommentStart"
|
||||
}
|
||||
panic(fmt.Sprintf("BUG: Unknown type '%d'.", int(itype)))
|
||||
}
|
||||
|
||||
func (item item) String() string {
|
||||
return fmt.Sprintf("(%s, %s)", item.typ.String(), item.val)
|
||||
}
|
592
vendor/github.com/BurntSushi/toml/parse.go
generated
vendored
@ -1,592 +0,0 @@
|
||||
package toml
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
type parser struct {
|
||||
mapping map[string]interface{}
|
||||
types map[string]tomlType
|
||||
lx *lexer
|
||||
|
||||
// A list of keys in the order that they appear in the TOML data.
|
||||
ordered []Key
|
||||
|
||||
// the full key for the current hash in scope
|
||||
context Key
|
||||
|
||||
// the base key name for everything except hashes
|
||||
currentKey string
|
||||
|
||||
// rough approximation of line number
|
||||
approxLine int
|
||||
|
||||
// A map of 'key.group.names' to whether they were created implicitly.
|
||||
implicits map[string]bool
|
||||
}
|
||||
|
||||
type parseError string
|
||||
|
||||
func (pe parseError) Error() string {
|
||||
return string(pe)
|
||||
}
|
||||
|
||||
func parse(data string) (p *parser, err error) {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
var ok bool
|
||||
if err, ok = r.(parseError); ok {
|
||||
return
|
||||
}
|
||||
panic(r)
|
||||
}
|
||||
}()
|
||||
|
||||
p = &parser{
|
||||
mapping: make(map[string]interface{}),
|
||||
types: make(map[string]tomlType),
|
||||
lx: lex(data),
|
||||
ordered: make([]Key, 0),
|
||||
implicits: make(map[string]bool),
|
||||
}
|
||||
for {
|
||||
item := p.next()
|
||||
if item.typ == itemEOF {
|
||||
break
|
||||
}
|
||||
p.topLevel(item)
|
||||
}
|
||||
|
||||
return p, nil
|
||||
}
|
||||
|
||||
func (p *parser) panicf(format string, v ...interface{}) {
|
||||
msg := fmt.Sprintf("Near line %d (last key parsed '%s'): %s",
|
||||
p.approxLine, p.current(), fmt.Sprintf(format, v...))
|
||||
panic(parseError(msg))
|
||||
}
|
||||
|
||||
func (p *parser) next() item {
|
||||
it := p.lx.nextItem()
|
||||
if it.typ == itemError {
|
||||
p.panicf("%s", it.val)
|
||||
}
|
||||
return it
|
||||
}
|
||||
|
||||
func (p *parser) bug(format string, v ...interface{}) {
|
||||
panic(fmt.Sprintf("BUG: "+format+"\n\n", v...))
|
||||
}
|
||||
|
||||
func (p *parser) expect(typ itemType) item {
|
||||
it := p.next()
|
||||
p.assertEqual(typ, it.typ)
|
||||
return it
|
||||
}
|
||||
|
||||
func (p *parser) assertEqual(expected, got itemType) {
|
||||
if expected != got {
|
||||
p.bug("Expected '%s' but got '%s'.", expected, got)
|
||||
}
|
||||
}
|
||||
|
||||
func (p *parser) topLevel(item item) {
|
||||
switch item.typ {
|
||||
case itemCommentStart:
|
||||
p.approxLine = item.line
|
||||
p.expect(itemText)
|
||||
case itemTableStart:
|
||||
kg := p.next()
|
||||
p.approxLine = kg.line
|
||||
|
||||
var key Key
|
||||
for ; kg.typ != itemTableEnd && kg.typ != itemEOF; kg = p.next() {
|
||||
key = append(key, p.keyString(kg))
|
||||
}
|
||||
p.assertEqual(itemTableEnd, kg.typ)
|
||||
|
||||
p.establishContext(key, false)
|
||||
p.setType("", tomlHash)
|
||||
p.ordered = append(p.ordered, key)
|
||||
case itemArrayTableStart:
|
||||
kg := p.next()
|
||||
p.approxLine = kg.line
|
||||
|
||||
var key Key
|
||||
for ; kg.typ != itemArrayTableEnd && kg.typ != itemEOF; kg = p.next() {
|
||||
key = append(key, p.keyString(kg))
|
||||
}
|
||||
p.assertEqual(itemArrayTableEnd, kg.typ)
|
||||
|
||||
p.establishContext(key, true)
|
||||
p.setType("", tomlArrayHash)
|
||||
p.ordered = append(p.ordered, key)
|
||||
case itemKeyStart:
|
||||
kname := p.next()
|
||||
p.approxLine = kname.line
|
||||
p.currentKey = p.keyString(kname)
|
||||
|
||||
val, typ := p.value(p.next())
|
||||
p.setValue(p.currentKey, val)
|
||||
p.setType(p.currentKey, typ)
|
||||
p.ordered = append(p.ordered, p.context.add(p.currentKey))
|
||||
p.currentKey = ""
|
||||
default:
|
||||
p.bug("Unexpected type at top level: %s", item.typ)
|
||||
}
|
||||
}
|
||||
|
||||
// Gets a string for a key (or part of a key in a table name).
|
||||
func (p *parser) keyString(it item) string {
|
||||
switch it.typ {
|
||||
case itemText:
|
||||
return it.val
|
||||
case itemString, itemMultilineString,
|
||||
itemRawString, itemRawMultilineString:
|
||||
s, _ := p.value(it)
|
||||
return s.(string)
|
||||
default:
|
||||
p.bug("Unexpected key type: %s", it.typ)
|
||||
panic("unreachable")
|
||||
}
|
||||
}
|
||||
|
||||
// value translates an expected value from the lexer into a Go value wrapped
|
||||
// as an empty interface.
|
||||
func (p *parser) value(it item) (interface{}, tomlType) {
|
||||
switch it.typ {
|
||||
case itemString:
|
||||
return p.replaceEscapes(it.val), p.typeOfPrimitive(it)
|
||||
case itemMultilineString:
|
||||
trimmed := stripFirstNewline(stripEscapedWhitespace(it.val))
|
||||
return p.replaceEscapes(trimmed), p.typeOfPrimitive(it)
|
||||
case itemRawString:
|
||||
return it.val, p.typeOfPrimitive(it)
|
||||
case itemRawMultilineString:
|
||||
return stripFirstNewline(it.val), p.typeOfPrimitive(it)
|
||||
case itemBool:
|
||||
switch it.val {
|
||||
case "true":
|
||||
return true, p.typeOfPrimitive(it)
|
||||
case "false":
|
||||
return false, p.typeOfPrimitive(it)
|
||||
}
|
||||
p.bug("Expected boolean value, but got '%s'.", it.val)
|
||||
case itemInteger:
|
||||
if !numUnderscoresOK(it.val) {
|
||||
p.panicf("Invalid integer %q: underscores must be surrounded by digits",
|
||||
it.val)
|
||||
}
|
||||
val := strings.Replace(it.val, "_", "", -1)
|
||||
num, err := strconv.ParseInt(val, 10, 64)
|
||||
if err != nil {
|
||||
// Distinguish integer values. Normally, it'd be a bug if the lexer
|
||||
// provides an invalid integer, but it's possible that the number is
|
||||
// out of range of valid values (which the lexer cannot determine).
|
||||
// So mark the former as a bug but the latter as a legitimate user
|
||||
// error.
|
||||
if e, ok := err.(*strconv.NumError); ok &&
|
||||
e.Err == strconv.ErrRange {
|
||||
|
||||
p.panicf("Integer '%s' is out of the range of 64-bit "+
|
||||
"signed integers.", it.val)
|
||||
} else {
|
||||
p.bug("Expected integer value, but got '%s'.", it.val)
|
||||
}
|
||||
}
|
||||
return num, p.typeOfPrimitive(it)
|
||||
case itemFloat:
|
||||
parts := strings.FieldsFunc(it.val, func(r rune) bool {
|
||||
switch r {
|
||||
case '.', 'e', 'E':
|
||||
return true
|
||||
}
|
||||
return false
|
||||
})
|
||||
for _, part := range parts {
|
||||
if !numUnderscoresOK(part) {
|
||||
p.panicf("Invalid float %q: underscores must be "+
|
||||
"surrounded by digits", it.val)
|
||||
}
|
||||
}
|
||||
if !numPeriodsOK(it.val) {
|
||||
// As a special case, numbers like '123.' or '1.e2',
|
||||
// which are valid as far as Go/strconv are concerned,
|
||||
// must be rejected because TOML says that a fractional
|
||||
// part consists of '.' followed by 1+ digits.
|
||||
p.panicf("Invalid float %q: '.' must be followed "+
|
||||
"by one or more digits", it.val)
|
||||
}
|
||||
val := strings.Replace(it.val, "_", "", -1)
|
||||
num, err := strconv.ParseFloat(val, 64)
|
||||
if err != nil {
|
||||
if e, ok := err.(*strconv.NumError); ok &&
|
||||
e.Err == strconv.ErrRange {
|
||||
|
||||
p.panicf("Float '%s' is out of the range of 64-bit "+
|
||||
"IEEE-754 floating-point numbers.", it.val)
|
||||
} else {
|
||||
p.panicf("Invalid float value: %q", it.val)
|
||||
}
|
||||
}
|
||||
return num, p.typeOfPrimitive(it)
|
||||
case itemDatetime:
|
||||
var t time.Time
|
||||
var ok bool
|
||||
var err error
|
||||
for _, format := range []string{
|
||||
"2006-01-02T15:04:05Z07:00",
|
||||
"2006-01-02T15:04:05",
|
||||
"2006-01-02",
|
||||
} {
|
||||
t, err = time.ParseInLocation(format, it.val, time.Local)
|
||||
if err == nil {
|
||||
ok = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !ok {
|
||||
p.panicf("Invalid TOML Datetime: %q.", it.val)
|
||||
}
|
||||
return t, p.typeOfPrimitive(it)
|
||||
case itemArray:
|
||||
array := make([]interface{}, 0)
|
||||
types := make([]tomlType, 0)
|
||||
|
||||
for it = p.next(); it.typ != itemArrayEnd; it = p.next() {
|
||||
if it.typ == itemCommentStart {
|
||||
p.expect(itemText)
|
||||
continue
|
||||
}
|
||||
|
||||
val, typ := p.value(it)
|
||||
array = append(array, val)
|
||||
types = append(types, typ)
|
||||
}
|
||||
return array, p.typeOfArray(types)
|
||||
case itemInlineTableStart:
|
||||
var (
|
||||
hash = make(map[string]interface{})
|
||||
outerContext = p.context
|
||||
outerKey = p.currentKey
|
||||
)
|
||||
|
||||
p.context = append(p.context, p.currentKey)
|
||||
p.currentKey = ""
|
||||
for it := p.next(); it.typ != itemInlineTableEnd; it = p.next() {
|
||||
if it.typ != itemKeyStart {
|
||||
p.bug("Expected key start but instead found %q, around line %d",
|
||||
it.val, p.approxLine)
|
||||
}
|
||||
if it.typ == itemCommentStart {
|
||||
p.expect(itemText)
|
||||
continue
|
||||
}
|
||||
|
||||
// retrieve key
|
||||
k := p.next()
|
||||
p.approxLine = k.line
|
||||
kname := p.keyString(k)
|
||||
|
||||
// retrieve value
|
||||
p.currentKey = kname
|
||||
val, typ := p.value(p.next())
|
||||
// make sure we keep metadata up to date
|
||||
p.setType(kname, typ)
|
||||
p.ordered = append(p.ordered, p.context.add(p.currentKey))
|
||||
hash[kname] = val
|
||||
}
|
||||
p.context = outerContext
|
||||
p.currentKey = outerKey
|
||||
return hash, tomlHash
|
||||
}
|
||||
p.bug("Unexpected value type: %s", it.typ)
|
||||
panic("unreachable")
|
||||
}
|
||||
|
||||
// numUnderscoresOK checks whether each underscore in s is surrounded by
|
||||
// characters that are not underscores.
|
||||
func numUnderscoresOK(s string) bool {
|
||||
accept := false
|
||||
for _, r := range s {
|
||||
if r == '_' {
|
||||
if !accept {
|
||||
return false
|
||||
}
|
||||
accept = false
|
||||
continue
|
||||
}
|
||||
accept = true
|
||||
}
|
||||
return accept
|
||||
}
|
||||
|
||||
// numPeriodsOK checks whether every period in s is followed by a digit.
|
||||
func numPeriodsOK(s string) bool {
|
||||
period := false
|
||||
for _, r := range s {
|
||||
if period && !isDigit(r) {
|
||||
return false
|
||||
}
|
||||
period = r == '.'
|
||||
}
|
||||
return !period
|
||||
}
|
||||
|
||||
// establishContext sets the current context of the parser,
|
||||
// where the context is either a hash or an array of hashes. Which one is
|
||||
// set depends on the value of the `array` parameter.
|
||||
//
|
||||
// Establishing the context also makes sure that the key isn't a duplicate, and
|
||||
// will create implicit hashes automatically.
|
||||
func (p *parser) establishContext(key Key, array bool) {
|
||||
var ok bool
|
||||
|
||||
// Always start at the top level and drill down for our context.
|
||||
hashContext := p.mapping
|
||||
keyContext := make(Key, 0)
|
||||
|
||||
// We only need implicit hashes for key[0:-1]
|
||||
for _, k := range key[0 : len(key)-1] {
|
||||
_, ok = hashContext[k]
|
||||
keyContext = append(keyContext, k)
|
||||
|
||||
// No key? Make an implicit hash and move on.
|
||||
if !ok {
|
||||
p.addImplicit(keyContext)
|
||||
hashContext[k] = make(map[string]interface{})
|
||||
}
|
||||
|
||||
// If the hash context is actually an array of tables, then set
|
||||
// the hash context to the last element in that array.
|
||||
//
|
||||
// Otherwise, it better be a table, since this MUST be a key group (by
|
||||
// virtue of it not being the last element in a key).
|
||||
switch t := hashContext[k].(type) {
|
||||
case []map[string]interface{}:
|
||||
hashContext = t[len(t)-1]
|
||||
case map[string]interface{}:
|
||||
hashContext = t
|
||||
default:
|
||||
p.panicf("Key '%s' was already created as a hash.", keyContext)
|
||||
}
|
||||
}
|
||||
|
||||
p.context = keyContext
|
||||
if array {
|
||||
// If this is the first element for this array, then allocate a new
|
||||
// list of tables for it.
|
||||
k := key[len(key)-1]
|
||||
if _, ok := hashContext[k]; !ok {
|
||||
hashContext[k] = make([]map[string]interface{}, 0, 5)
|
||||
}
|
||||
|
||||
// Add a new table. But make sure the key hasn't already been used
|
||||
// for something else.
|
||||
if hash, ok := hashContext[k].([]map[string]interface{}); ok {
|
||||
hashContext[k] = append(hash, make(map[string]interface{}))
|
||||
} else {
|
||||
p.panicf("Key '%s' was already created and cannot be used as "+
|
||||
"an array.", keyContext)
|
||||
}
|
||||
} else {
|
||||
p.setValue(key[len(key)-1], make(map[string]interface{}))
|
||||
}
|
||||
p.context = append(p.context, key[len(key)-1])
|
||||
}
|
||||
|
||||
// setValue sets the given key to the given value in the current context.
|
||||
// It will make sure that the key hasn't already been defined, accounting for
|
||||
// implicit key groups.
|
||||
func (p *parser) setValue(key string, value interface{}) {
|
||||
var tmpHash interface{}
|
||||
var ok bool
|
||||
|
||||
hash := p.mapping
|
||||
keyContext := make(Key, 0)
|
||||
for _, k := range p.context {
|
||||
keyContext = append(keyContext, k)
|
||||
if tmpHash, ok = hash[k]; !ok {
|
||||
p.bug("Context for key '%s' has not been established.", keyContext)
|
||||
}
|
||||
switch t := tmpHash.(type) {
|
||||
case []map[string]interface{}:
|
||||
// The context is a table of hashes. Pick the most recent table
|
||||
// defined as the current hash.
|
||||
hash = t[len(t)-1]
|
||||
case map[string]interface{}:
|
||||
hash = t
|
||||
default:
|
||||
p.bug("Expected hash to have type 'map[string]interface{}', but "+
|
||||
"it has '%T' instead.", tmpHash)
|
||||
}
|
||||
}
|
||||
keyContext = append(keyContext, key)
|
||||
|
||||
if _, ok := hash[key]; ok {
|
||||
// Typically, if the given key has already been set, then we have
|
||||
// to raise an error since duplicate keys are disallowed. However,
|
||||
// it's possible that a key was previously defined implicitly. In this
|
||||
// case, it is allowed to be redefined concretely. (See the
|
||||
// `tests/valid/implicit-and-explicit-after.toml` test in `toml-test`.)
|
||||
//
|
||||
// But we have to make sure to stop marking it as implicit. (So that
|
||||
// another redefinition provokes an error.)
|
||||
//
|
||||
// Note that since it has already been defined (as a hash), we don't
|
||||
// want to overwrite it. So our business is done.
|
||||
if p.isImplicit(keyContext) {
|
||||
p.removeImplicit(keyContext)
|
||||
return
|
||||
}
|
||||
|
||||
// Otherwise, we have a concrete key trying to override a previous
|
||||
// key, which is *always* wrong.
|
||||
p.panicf("Key '%s' has already been defined.", keyContext)
|
||||
}
|
||||
hash[key] = value
|
||||
}
|
||||
|
||||
// setType sets the type of a particular value at a given key.
|
||||
// It should be called immediately AFTER setValue.
|
||||
//
|
||||
// Note that if `key` is empty, then the type given will be applied to the
|
||||
// current context (which is either a table or an array of tables).
|
||||
func (p *parser) setType(key string, typ tomlType) {
|
||||
keyContext := make(Key, 0, len(p.context)+1)
|
||||
for _, k := range p.context {
|
||||
keyContext = append(keyContext, k)
|
||||
}
|
||||
if len(key) > 0 { // allow type setting for hashes
|
||||
keyContext = append(keyContext, key)
|
||||
}
|
||||
p.types[keyContext.String()] = typ
|
||||
}
|
||||
|
||||
// addImplicit sets the given Key as having been created implicitly.
|
||||
func (p *parser) addImplicit(key Key) {
|
||||
p.implicits[key.String()] = true
|
||||
}
|
||||
|
||||
// removeImplicit stops tagging the given key as having been implicitly
|
||||
// created.
|
||||
func (p *parser) removeImplicit(key Key) {
|
||||
p.implicits[key.String()] = false
|
||||
}
|
||||
|
||||
// isImplicit returns true if the key group pointed to by the key was created
|
||||
// implicitly.
|
||||
func (p *parser) isImplicit(key Key) bool {
|
||||
return p.implicits[key.String()]
|
||||
}
|
||||
|
||||
// current returns the full key name of the current context.
|
||||
func (p *parser) current() string {
|
||||
if len(p.currentKey) == 0 {
|
||||
return p.context.String()
|
||||
}
|
||||
if len(p.context) == 0 {
|
||||
return p.currentKey
|
||||
}
|
||||
return fmt.Sprintf("%s.%s", p.context, p.currentKey)
|
||||
}
|
||||
|
||||
func stripFirstNewline(s string) string {
|
||||
if len(s) == 0 || s[0] != '\n' {
|
||||
return s
|
||||
}
|
||||
return s[1:]
|
||||
}
|
||||
|
||||
func stripEscapedWhitespace(s string) string {
|
||||
esc := strings.Split(s, "\\\n")
|
||||
if len(esc) > 1 {
|
||||
for i := 1; i < len(esc); i++ {
|
||||
esc[i] = strings.TrimLeftFunc(esc[i], unicode.IsSpace)
|
||||
}
|
||||
}
|
||||
return strings.Join(esc, "")
|
||||
}
|
||||
|
||||
func (p *parser) replaceEscapes(str string) string {
|
||||
var replaced []rune
|
||||
s := []byte(str)
|
||||
r := 0
|
||||
for r < len(s) {
|
||||
if s[r] != '\\' {
|
||||
c, size := utf8.DecodeRune(s[r:])
|
||||
r += size
|
||||
replaced = append(replaced, c)
|
||||
continue
|
||||
}
|
||||
r += 1
|
||||
if r >= len(s) {
|
||||
p.bug("Escape sequence at end of string.")
|
||||
return ""
|
||||
}
|
||||
switch s[r] {
|
||||
default:
|
||||
p.bug("Expected valid escape code after \\, but got %q.", s[r])
|
||||
return ""
|
||||
case 'b':
|
||||
replaced = append(replaced, rune(0x0008))
|
||||
r += 1
|
||||
case 't':
|
||||
replaced = append(replaced, rune(0x0009))
|
||||
r += 1
|
||||
case 'n':
|
||||
replaced = append(replaced, rune(0x000A))
|
||||
r += 1
|
||||
case 'f':
|
||||
replaced = append(replaced, rune(0x000C))
|
||||
r += 1
|
||||
case 'r':
|
||||
replaced = append(replaced, rune(0x000D))
|
||||
r += 1
|
||||
case '"':
|
||||
replaced = append(replaced, rune(0x0022))
|
||||
r += 1
|
||||
case '\\':
|
||||
replaced = append(replaced, rune(0x005C))
|
||||
r += 1
|
||||
case 'u':
|
||||
// At this point, we know we have a Unicode escape of the form
|
||||
// `uXXXX` at [r, r+5). (Because the lexer guarantees this
|
||||
// for us.)
|
||||
escaped := p.asciiEscapeToUnicode(s[r+1 : r+5])
|
||||
replaced = append(replaced, escaped)
|
||||
r += 5
|
||||
case 'U':
|
||||
// At this point, we know we have a Unicode escape of the form
|
||||
// `UXXXXXXXX` at [r, r+9). (Because the lexer guarantees this
|
||||
// for us.)
|
||||
escaped := p.asciiEscapeToUnicode(s[r+1 : r+9])
|
||||
replaced = append(replaced, escaped)
|
||||
r += 9
|
||||
}
|
||||
}
|
||||
return string(replaced)
|
||||
}
|
||||
|
||||
func (p *parser) asciiEscapeToUnicode(bs []byte) rune {
|
||||
s := string(bs)
|
||||
hex, err := strconv.ParseUint(strings.ToLower(s), 16, 32)
|
||||
if err != nil {
|
||||
p.bug("Could not parse '%s' as a hexadecimal number, but the "+
|
||||
"lexer claims it's OK: %s", s, err)
|
||||
}
|
||||
if !utf8.ValidRune(rune(hex)) {
|
||||
p.panicf("Escaped character '\\u%s' is not valid UTF-8.", s)
|
||||
}
|
||||
return rune(hex)
|
||||
}
|
||||
|
||||
func isStringType(ty itemType) bool {
|
||||
return ty == itemString || ty == itemMultilineString ||
|
||||
ty == itemRawString || ty == itemRawMultilineString
|
||||
}
|
91
vendor/github.com/BurntSushi/toml/type_check.go
generated
vendored
@ -1,91 +0,0 @@
|
||||
package toml
|
||||
|
||||
// tomlType represents any Go type that corresponds to a TOML type.
|
||||
// While the first draft of the TOML spec has a simplistic type system that
|
||||
// probably doesn't need this level of sophistication, we seem to be militating
|
||||
// toward adding real composite types.
|
||||
type tomlType interface {
|
||||
typeString() string
|
||||
}
|
||||
|
||||
// typeEqual accepts any two types and returns true if they are equal.
|
||||
func typeEqual(t1, t2 tomlType) bool {
|
||||
if t1 == nil || t2 == nil {
|
||||
return false
|
||||
}
|
||||
return t1.typeString() == t2.typeString()
|
||||
}
|
||||
|
||||
func typeIsHash(t tomlType) bool {
|
||||
return typeEqual(t, tomlHash) || typeEqual(t, tomlArrayHash)
|
||||
}
|
||||
|
||||
type tomlBaseType string
|
||||
|
||||
func (btype tomlBaseType) typeString() string {
|
||||
return string(btype)
|
||||
}
|
||||
|
||||
func (btype tomlBaseType) String() string {
|
||||
return btype.typeString()
|
||||
}
|
||||
|
||||
var (
|
||||
tomlInteger tomlBaseType = "Integer"
|
||||
tomlFloat tomlBaseType = "Float"
|
||||
tomlDatetime tomlBaseType = "Datetime"
|
||||
tomlString tomlBaseType = "String"
|
||||
tomlBool tomlBaseType = "Bool"
|
||||
tomlArray tomlBaseType = "Array"
|
||||
tomlHash tomlBaseType = "Hash"
|
||||
tomlArrayHash tomlBaseType = "ArrayHash"
|
||||
)
|
||||
|
||||
// typeOfPrimitive returns a tomlType of any primitive value in TOML.
|
||||
// Primitive values are: Integer, Float, Datetime, String and Bool.
|
||||
//
|
||||
// Passing a lexer item other than the following will cause a BUG message
|
||||
// to occur: itemString, itemBool, itemInteger, itemFloat, itemDatetime.
|
||||
func (p *parser) typeOfPrimitive(lexItem item) tomlType {
|
||||
switch lexItem.typ {
|
||||
case itemInteger:
|
||||
return tomlInteger
|
||||
case itemFloat:
|
||||
return tomlFloat
|
||||
case itemDatetime:
|
||||
return tomlDatetime
|
||||
case itemString:
|
||||
return tomlString
|
||||
case itemMultilineString:
|
||||
return tomlString
|
||||
case itemRawString:
|
||||
return tomlString
|
||||
case itemRawMultilineString:
|
||||
return tomlString
|
||||
case itemBool:
|
||||
return tomlBool
|
||||
}
|
||||
p.bug("Cannot infer primitive type of lex item '%s'.", lexItem)
|
||||
panic("unreachable")
|
||||
}
|
||||
|
||||
// typeOfArray returns a tomlType for an array given a list of types of its
|
||||
// values.
|
||||
//
|
||||
// In the current spec, if an array is homogeneous, then its type is always
|
||||
// "Array". If the array is not homogeneous, an error is generated.
|
||||
func (p *parser) typeOfArray(types []tomlType) tomlType {
|
||||
// Empty arrays are cool.
|
||||
if len(types) == 0 {
|
||||
return tomlArray
|
||||
}
|
||||
|
||||
theType := types[0]
|
||||
for _, t := range types[1:] {
|
||||
if !typeEqual(theType, t) {
|
||||
p.panicf("Array contains values of type '%s' and '%s', but "+
|
||||
"arrays must be homogeneous.", theType, t)
|
||||
}
|
||||
}
|
||||
return tomlArray
|
||||
}
|
242
vendor/github.com/BurntSushi/toml/type_fields.go
generated
vendored
@ -1,242 +0,0 @@
|
||||
package toml
|
||||
|
||||
// Struct field handling is adapted from code in encoding/json:
|
||||
//
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the Go distribution.
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"sort"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// A field represents a single field found in a struct.
|
||||
type field struct {
|
||||
name string // the name of the field (`toml` tag included)
|
||||
tag bool // whether field has a `toml` tag
|
||||
index []int // represents the depth of an anonymous field
|
||||
typ reflect.Type // the type of the field
|
||||
}
|
||||
|
||||
// byName sorts field by name, breaking ties with depth,
|
||||
// then breaking ties with "name came from toml tag", then
|
||||
// breaking ties with index sequence.
|
||||
type byName []field
|
||||
|
||||
func (x byName) Len() int { return len(x) }
|
||||
|
||||
func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
|
||||
|
||||
func (x byName) Less(i, j int) bool {
|
||||
if x[i].name != x[j].name {
|
||||
return x[i].name < x[j].name
|
||||
}
|
||||
if len(x[i].index) != len(x[j].index) {
|
||||
return len(x[i].index) < len(x[j].index)
|
||||
}
|
||||
if x[i].tag != x[j].tag {
|
||||
return x[i].tag
|
||||
}
|
||||
return byIndex(x).Less(i, j)
|
||||
}
|
||||
|
||||
// byIndex sorts field by index sequence.
|
||||
type byIndex []field
|
||||
|
||||
func (x byIndex) Len() int { return len(x) }
|
||||
|
||||
func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
|
||||
|
||||
func (x byIndex) Less(i, j int) bool {
|
||||
for k, xik := range x[i].index {
|
||||
if k >= len(x[j].index) {
|
||||
return false
|
||||
}
|
||||
if xik != x[j].index[k] {
|
||||
return xik < x[j].index[k]
|
||||
}
|
||||
}
|
||||
return len(x[i].index) < len(x[j].index)
|
||||
}
|
||||
|
||||
// typeFields returns a list of fields that TOML should recognize for the given
|
||||
// type. The algorithm is breadth-first search over the set of structs to
|
||||
// include - the top struct and then any reachable anonymous structs.
|
||||
func typeFields(t reflect.Type) []field {
|
||||
// Anonymous fields to explore at the current level and the next.
|
||||
current := []field{}
|
||||
next := []field{{typ: t}}
|
||||
|
||||
// Count of queued names for current level and the next.
|
||||
count := map[reflect.Type]int{}
|
||||
nextCount := map[reflect.Type]int{}
|
||||
|
||||
// Types already visited at an earlier level.
|
||||
visited := map[reflect.Type]bool{}
|
||||
|
||||
// Fields found.
|
||||
var fields []field
|
||||
|
||||
for len(next) > 0 {
|
||||
current, next = next, current[:0]
|
||||
count, nextCount = nextCount, map[reflect.Type]int{}
|
||||
|
||||
for _, f := range current {
|
||||
if visited[f.typ] {
|
||||
continue
|
||||
}
|
||||
visited[f.typ] = true
|
||||
|
||||
// Scan f.typ for fields to include.
|
||||
for i := 0; i < f.typ.NumField(); i++ {
|
||||
sf := f.typ.Field(i)
|
||||
if sf.PkgPath != "" && !sf.Anonymous { // unexported
|
||||
continue
|
||||
}
|
||||
opts := getOptions(sf.Tag)
|
||||
if opts.skip {
|
||||
continue
|
||||
}
|
||||
index := make([]int, len(f.index)+1)
|
||||
copy(index, f.index)
|
||||
index[len(f.index)] = i
|
||||
|
||||
ft := sf.Type
|
||||
if ft.Name() == "" && ft.Kind() == reflect.Ptr {
|
||||
// Follow pointer.
|
||||
ft = ft.Elem()
|
||||
}
|
||||
|
||||
// Record found field and index sequence.
|
||||
if opts.name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct {
|
||||
tagged := opts.name != ""
|
||||
name := opts.name
|
||||
if name == "" {
|
||||
name = sf.Name
|
||||
}
|
||||
fields = append(fields, field{name, tagged, index, ft})
|
||||
if count[f.typ] > 1 {
|
||||
// If there were multiple instances, add a second,
|
||||
// so that the annihilation code will see a duplicate.
|
||||
// It only cares about the distinction between 1 or 2,
|
||||
// so don't bother generating any more copies.
|
||||
fields = append(fields, fields[len(fields)-1])
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// Record new anonymous struct to explore in next round.
|
||||
nextCount[ft]++
|
||||
if nextCount[ft] == 1 {
|
||||
f := field{name: ft.Name(), index: index, typ: ft}
|
||||
next = append(next, f)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
sort.Sort(byName(fields))
|
||||
|
||||
// Delete all fields that are hidden by the Go rules for embedded fields,
|
||||
// except that fields with TOML tags are promoted.
|
||||
|
||||
// The fields are sorted in primary order of name, secondary order
|
||||
// of field index length. Loop over names; for each name, delete
|
||||
// hidden fields by choosing the one dominant field that survives.
|
||||
out := fields[:0]
|
||||
for advance, i := 0, 0; i < len(fields); i += advance {
|
||||
// One iteration per name.
|
||||
// Find the sequence of fields with the name of this first field.
|
||||
fi := fields[i]
|
||||
name := fi.name
|
||||
for advance = 1; i+advance < len(fields); advance++ {
|
||||
fj := fields[i+advance]
|
||||
if fj.name != name {
|
||||
break
|
||||
}
|
||||
}
|
||||
if advance == 1 { // Only one field with this name
|
||||
out = append(out, fi)
|
||||
continue
|
||||
}
|
||||
dominant, ok := dominantField(fields[i : i+advance])
|
||||
if ok {
|
||||
out = append(out, dominant)
|
||||
}
|
||||
}
|
||||
|
||||
fields = out
|
||||
sort.Sort(byIndex(fields))
|
||||
|
||||
return fields
|
||||
}
|
||||
|
||||
// dominantField looks through the fields, all of which are known to
|
||||
// have the same name, to find the single field that dominates the
|
||||
// others using Go's embedding rules, modified by the presence of
|
||||
// TOML tags. If there are multiple top-level fields, the boolean
|
||||
// will be false: This condition is an error in Go and we skip all
|
||||
// the fields.
|
||||
func dominantField(fields []field) (field, bool) {
|
||||
// The fields are sorted in increasing index-length order. The winner
|
||||
// must therefore be one with the shortest index length. Drop all
|
||||
// longer entries, which is easy: just truncate the slice.
|
||||
length := len(fields[0].index)
|
||||
tagged := -1 // Index of first tagged field.
|
||||
for i, f := range fields {
|
||||
if len(f.index) > length {
|
||||
fields = fields[:i]
|
||||
break
|
||||
}
|
||||
if f.tag {
|
||||
if tagged >= 0 {
|
||||
// Multiple tagged fields at the same level: conflict.
|
||||
// Return no field.
|
||||
return field{}, false
|
||||
}
|
||||
tagged = i
|
||||
}
|
||||
}
|
||||
if tagged >= 0 {
|
||||
return fields[tagged], true
|
||||
}
|
||||
// All remaining fields have the same length. If there's more than one,
|
||||
// we have a conflict (two fields named "X" at the same level) and we
|
||||
// return no field.
|
||||
if len(fields) > 1 {
|
||||
return field{}, false
|
||||
}
|
||||
return fields[0], true
|
||||
}
|
||||
|
||||
var fieldCache struct {
|
||||
sync.RWMutex
|
||||
m map[reflect.Type][]field
|
||||
}
|
||||
|
||||
// cachedTypeFields is like typeFields but uses a cache to avoid repeated work.
|
||||
func cachedTypeFields(t reflect.Type) []field {
|
||||
fieldCache.RLock()
|
||||
f := fieldCache.m[t]
|
||||
fieldCache.RUnlock()
|
||||
if f != nil {
|
||||
return f
|
||||
}
|
||||
|
||||
// Compute fields without lock.
|
||||
// Might duplicate effort but won't hold other computations back.
|
||||
f = typeFields(t)
|
||||
if f == nil {
|
||||
f = []field{}
|
||||
}
|
||||
|
||||
fieldCache.Lock()
|
||||
if fieldCache.m == nil {
|
||||
fieldCache.m = map[reflect.Type][]field{}
|
||||
}
|
||||
fieldCache.m[t] = f
|
||||
fieldCache.Unlock()
|
||||
return f
|
||||
}
|
22
vendor/github.com/Microsoft/go-winio/LICENSE
generated
vendored
@ -1,22 +0,0 @@
|
||||
The MIT License (MIT)

Copyright (c) 2015 Microsoft

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
22
vendor/github.com/Microsoft/go-winio/README.md
generated
vendored
@ -1,22 +0,0 @@
|
||||
# go-winio

This repository contains utilities for efficiently performing Win32 IO operations in
Go. Currently, this is focused on accessing named pipes and other file handles, and
for using named pipes as a net transport.

This code relies on IO completion ports to avoid blocking IO on system threads, allowing Go
to reuse the thread to schedule another goroutine. This limits support to Windows Vista and
newer operating systems. This is similar to the implementation of network sockets in Go's net
package.

Please see the LICENSE file for licensing information.

This project has adopted the [Microsoft Open Source Code of
Conduct](https://opensource.microsoft.com/codeofconduct/). For more information
see the [Code of Conduct
FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact
[opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional
questions or comments.

Thanks to natefinch for the inspiration for this library. See https://github.com/natefinch/npipe
for another named pipe implementation.
280
vendor/github.com/Microsoft/go-winio/backup.go
generated
vendored
@ -1,280 +0,0 @@
|
||||
// +build windows
|
||||
|
||||
package winio
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"runtime"
|
||||
"syscall"
|
||||
"unicode/utf16"
|
||||
)
|
||||
|
||||
//sys backupRead(h syscall.Handle, b []byte, bytesRead *uint32, abort bool, processSecurity bool, context *uintptr) (err error) = BackupRead
|
||||
//sys backupWrite(h syscall.Handle, b []byte, bytesWritten *uint32, abort bool, processSecurity bool, context *uintptr) (err error) = BackupWrite
|
||||
|
||||
const (
|
||||
BackupData = uint32(iota + 1)
|
||||
BackupEaData
|
||||
BackupSecurity
|
||||
BackupAlternateData
|
||||
BackupLink
|
||||
BackupPropertyData
|
||||
BackupObjectId
|
||||
BackupReparseData
|
||||
BackupSparseBlock
|
||||
BackupTxfsData
|
||||
)
|
||||
|
||||
const (
|
||||
StreamSparseAttributes = uint32(8)
|
||||
)
|
||||
|
||||
const (
|
||||
WRITE_DAC = 0x40000
|
||||
WRITE_OWNER = 0x80000
|
||||
ACCESS_SYSTEM_SECURITY = 0x1000000
|
||||
)
|
||||
|
||||
// BackupHeader represents a backup stream of a file.
|
||||
type BackupHeader struct {
|
||||
Id uint32 // The backup stream ID
|
||||
Attributes uint32 // Stream attributes
|
||||
Size int64 // The size of the stream in bytes
|
||||
Name string // The name of the stream (for BackupAlternateData only).
|
||||
Offset int64 // The offset of the stream in the file (for BackupSparseBlock only).
|
||||
}
|
||||
|
||||
type win32StreamId struct {
|
||||
StreamId uint32
|
||||
Attributes uint32
|
||||
Size uint64
|
||||
NameSize uint32
|
||||
}
|
||||
|
||||
// BackupStreamReader reads from a stream produced by the BackupRead Win32 API and produces a series
|
||||
// of BackupHeader values.
|
||||
type BackupStreamReader struct {
|
||||
r io.Reader
|
||||
bytesLeft int64
|
||||
}
|
||||
|
||||
// NewBackupStreamReader produces a BackupStreamReader from any io.Reader.
|
||||
func NewBackupStreamReader(r io.Reader) *BackupStreamReader {
|
||||
return &BackupStreamReader{r, 0}
|
||||
}
|
||||
|
||||
// Next returns the next backup stream and prepares for calls to Read(). It skips the remainder of the current stream if
|
||||
// it was not completely read.
|
||||
func (r *BackupStreamReader) Next() (*BackupHeader, error) {
|
||||
if r.bytesLeft > 0 {
|
||||
if s, ok := r.r.(io.Seeker); ok {
|
||||
// Make sure Seek on io.SeekCurrent sometimes succeeds
|
||||
// before trying the actual seek.
|
||||
if _, err := s.Seek(0, io.SeekCurrent); err == nil {
|
||||
if _, err = s.Seek(r.bytesLeft, io.SeekCurrent); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
r.bytesLeft = 0
|
||||
}
|
||||
}
|
||||
if _, err := io.Copy(ioutil.Discard, r); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
var wsi win32StreamId
|
||||
if err := binary.Read(r.r, binary.LittleEndian, &wsi); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
hdr := &BackupHeader{
|
||||
Id: wsi.StreamId,
|
||||
Attributes: wsi.Attributes,
|
||||
Size: int64(wsi.Size),
|
||||
}
|
||||
if wsi.NameSize != 0 {
|
||||
name := make([]uint16, int(wsi.NameSize/2))
|
||||
if err := binary.Read(r.r, binary.LittleEndian, name); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
hdr.Name = syscall.UTF16ToString(name)
|
||||
}
|
||||
if wsi.StreamId == BackupSparseBlock {
|
||||
if err := binary.Read(r.r, binary.LittleEndian, &hdr.Offset); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
hdr.Size -= 8
|
||||
}
|
||||
r.bytesLeft = hdr.Size
|
||||
return hdr, nil
|
||||
}
|
||||
|
||||
// Read reads from the current backup stream.
|
||||
func (r *BackupStreamReader) Read(b []byte) (int, error) {
|
||||
if r.bytesLeft == 0 {
|
||||
return 0, io.EOF
|
||||
}
|
||||
if int64(len(b)) > r.bytesLeft {
|
||||
b = b[:r.bytesLeft]
|
||||
}
|
||||
n, err := r.r.Read(b)
|
||||
r.bytesLeft -= int64(n)
|
||||
if err == io.EOF {
|
||||
err = io.ErrUnexpectedEOF
|
||||
} else if r.bytesLeft == 0 && err == nil {
|
||||
err = io.EOF
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
|
||||
// BackupStreamWriter writes a stream compatible with the BackupWrite Win32 API.
|
||||
type BackupStreamWriter struct {
|
||||
w io.Writer
|
||||
bytesLeft int64
|
||||
}
|
||||
|
||||
// NewBackupStreamWriter produces a BackupStreamWriter on top of an io.Writer.
|
||||
func NewBackupStreamWriter(w io.Writer) *BackupStreamWriter {
|
||||
return &BackupStreamWriter{w, 0}
|
||||
}
|
||||
|
||||
// WriteHeader writes the next backup stream header and prepares for calls to Write().
|
||||
func (w *BackupStreamWriter) WriteHeader(hdr *BackupHeader) error {
|
||||
if w.bytesLeft != 0 {
|
||||
return fmt.Errorf("missing %d bytes", w.bytesLeft)
|
||||
}
|
||||
name := utf16.Encode([]rune(hdr.Name))
|
||||
wsi := win32StreamId{
|
||||
StreamId: hdr.Id,
|
||||
Attributes: hdr.Attributes,
|
||||
Size: uint64(hdr.Size),
|
||||
NameSize: uint32(len(name) * 2),
|
||||
}
|
||||
if hdr.Id == BackupSparseBlock {
|
||||
// Include space for the int64 block offset
|
||||
wsi.Size += 8
|
||||
}
|
||||
if err := binary.Write(w.w, binary.LittleEndian, &wsi); err != nil {
|
||||
return err
|
||||
}
|
||||
if len(name) != 0 {
|
||||
if err := binary.Write(w.w, binary.LittleEndian, name); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if hdr.Id == BackupSparseBlock {
|
||||
if err := binary.Write(w.w, binary.LittleEndian, hdr.Offset); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
w.bytesLeft = hdr.Size
|
||||
return nil
|
||||
}
|
||||
|
||||
// Write writes to the current backup stream.
|
||||
func (w *BackupStreamWriter) Write(b []byte) (int, error) {
|
||||
if w.bytesLeft < int64(len(b)) {
|
||||
return 0, fmt.Errorf("too many bytes by %d", int64(len(b))-w.bytesLeft)
|
||||
}
|
||||
n, err := w.w.Write(b)
|
||||
w.bytesLeft -= int64(n)
|
||||
return n, err
|
||||
}
|
||||
|
||||
// BackupFileReader provides an io.ReadCloser interface on top of the BackupRead Win32 API.
|
||||
type BackupFileReader struct {
|
||||
f *os.File
|
||||
includeSecurity bool
|
||||
ctx uintptr
|
||||
}
|
||||
|
||||
// NewBackupFileReader returns a new BackupFileReader from a file handle. If includeSecurity is true,
|
||||
// Read will attempt to read the security descriptor of the file.
|
||||
func NewBackupFileReader(f *os.File, includeSecurity bool) *BackupFileReader {
|
||||
r := &BackupFileReader{f, includeSecurity, 0}
|
||||
return r
|
||||
}
|
||||
|
||||
// Read reads a backup stream from the file by calling the Win32 API BackupRead().
|
||||
func (r *BackupFileReader) Read(b []byte) (int, error) {
|
||||
var bytesRead uint32
|
||||
err := backupRead(syscall.Handle(r.f.Fd()), b, &bytesRead, false, r.includeSecurity, &r.ctx)
|
||||
if err != nil {
|
||||
return 0, &os.PathError{"BackupRead", r.f.Name(), err}
|
||||
}
|
||||
runtime.KeepAlive(r.f)
|
||||
if bytesRead == 0 {
|
||||
return 0, io.EOF
|
||||
}
|
||||
return int(bytesRead), nil
|
||||
}
|
||||
|
||||
// Close frees Win32 resources associated with the BackupFileReader. It does not close
|
||||
// the underlying file.
|
||||
func (r *BackupFileReader) Close() error {
|
||||
if r.ctx != 0 {
|
||||
backupRead(syscall.Handle(r.f.Fd()), nil, nil, true, false, &r.ctx)
|
||||
runtime.KeepAlive(r.f)
|
||||
r.ctx = 0
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// BackupFileWriter provides an io.WriteCloser interface on top of the BackupWrite Win32 API.
|
||||
type BackupFileWriter struct {
|
||||
f *os.File
|
||||
includeSecurity bool
|
||||
ctx uintptr
|
||||
}
|
||||
|
||||
// NewBackupFileWriter returns a new BackupFileWriter from a file handle. If includeSecurity is true,
|
||||
// Write() will attempt to restore the security descriptor from the stream.
|
||||
func NewBackupFileWriter(f *os.File, includeSecurity bool) *BackupFileWriter {
|
||||
w := &BackupFileWriter{f, includeSecurity, 0}
|
||||
return w
|
||||
}
|
||||
|
||||
// Write restores a portion of the file using the provided backup stream.
|
||||
func (w *BackupFileWriter) Write(b []byte) (int, error) {
|
||||
var bytesWritten uint32
|
||||
err := backupWrite(syscall.Handle(w.f.Fd()), b, &bytesWritten, false, w.includeSecurity, &w.ctx)
|
||||
if err != nil {
|
||||
return 0, &os.PathError{"BackupWrite", w.f.Name(), err}
|
||||
}
|
||||
runtime.KeepAlive(w.f)
|
||||
if int(bytesWritten) != len(b) {
|
||||
return int(bytesWritten), errors.New("not all bytes could be written")
|
||||
}
|
||||
return len(b), nil
|
||||
}
|
||||
|
||||
// Close frees Win32 resources associated with the BackupFileWriter. It does not
|
||||
// close the underlying file.
|
||||
func (w *BackupFileWriter) Close() error {
|
||||
if w.ctx != 0 {
|
||||
backupWrite(syscall.Handle(w.f.Fd()), nil, nil, true, false, &w.ctx)
|
||||
runtime.KeepAlive(w.f)
|
||||
w.ctx = 0
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// OpenForBackup opens a file or directory, potentially skipping access checks if the backup
|
||||
// or restore privileges have been acquired.
|
||||
//
|
||||
// If the file opened was a directory, it cannot be used with Readdir().
|
||||
func OpenForBackup(path string, access uint32, share uint32, createmode uint32) (*os.File, error) {
|
||||
winPath, err := syscall.UTF16FromString(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
h, err := syscall.CreateFile(&winPath[0], access, share, nil, createmode, syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OPEN_REPARSE_POINT, 0)
|
||||
if err != nil {
|
||||
err = &os.PathError{Op: "open", Path: path, Err: err}
|
||||
return nil, err
|
||||
}
|
||||
return os.NewFile(uintptr(h), path), nil
|
||||
}
|
137
vendor/github.com/Microsoft/go-winio/ea.go
generated
vendored
@ -1,137 +0,0 @@
|
||||
package winio
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
)
|
||||
|
||||
type fileFullEaInformation struct {
|
||||
NextEntryOffset uint32
|
||||
Flags uint8
|
||||
NameLength uint8
|
||||
ValueLength uint16
|
||||
}
|
||||
|
||||
var (
|
||||
fileFullEaInformationSize = binary.Size(&fileFullEaInformation{})
|
||||
|
||||
errInvalidEaBuffer = errors.New("invalid extended attribute buffer")
|
||||
errEaNameTooLarge = errors.New("extended attribute name too large")
|
||||
errEaValueTooLarge = errors.New("extended attribute value too large")
|
||||
)
|
||||
|
||||
// ExtendedAttribute represents a single Windows EA.
|
||||
type ExtendedAttribute struct {
|
||||
Name string
|
||||
Value []byte
|
||||
Flags uint8
|
||||
}
|
||||
|
||||
func parseEa(b []byte) (ea ExtendedAttribute, nb []byte, err error) {
|
||||
var info fileFullEaInformation
|
||||
err = binary.Read(bytes.NewReader(b), binary.LittleEndian, &info)
|
||||
if err != nil {
|
||||
err = errInvalidEaBuffer
|
||||
return
|
||||
}
|
||||
|
||||
nameOffset := fileFullEaInformationSize
|
||||
nameLen := int(info.NameLength)
|
||||
valueOffset := nameOffset + int(info.NameLength) + 1
|
||||
valueLen := int(info.ValueLength)
|
||||
nextOffset := int(info.NextEntryOffset)
|
||||
if valueLen+valueOffset > len(b) || nextOffset < 0 || nextOffset > len(b) {
|
||||
err = errInvalidEaBuffer
|
||||
return
|
||||
}
|
||||
|
||||
ea.Name = string(b[nameOffset : nameOffset+nameLen])
|
||||
ea.Value = b[valueOffset : valueOffset+valueLen]
|
||||
ea.Flags = info.Flags
|
||||
if info.NextEntryOffset != 0 {
|
||||
nb = b[info.NextEntryOffset:]
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DecodeExtendedAttributes decodes a list of EAs from a FILE_FULL_EA_INFORMATION
|
||||
// buffer retrieved from BackupRead, ZwQueryEaFile, etc.
|
||||
func DecodeExtendedAttributes(b []byte) (eas []ExtendedAttribute, err error) {
|
||||
for len(b) != 0 {
|
||||
ea, nb, err := parseEa(b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
eas = append(eas, ea)
|
||||
b = nb
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func writeEa(buf *bytes.Buffer, ea *ExtendedAttribute, last bool) error {
|
||||
if int(uint8(len(ea.Name))) != len(ea.Name) {
|
||||
return errEaNameTooLarge
|
||||
}
|
||||
if int(uint16(len(ea.Value))) != len(ea.Value) {
|
||||
return errEaValueTooLarge
|
||||
}
|
||||
entrySize := uint32(fileFullEaInformationSize + len(ea.Name) + 1 + len(ea.Value))
|
||||
withPadding := (entrySize + 3) &^ 3
|
||||
nextOffset := uint32(0)
|
||||
if !last {
|
||||
nextOffset = withPadding
|
||||
}
|
||||
info := fileFullEaInformation{
|
||||
NextEntryOffset: nextOffset,
|
||||
Flags: ea.Flags,
|
||||
NameLength: uint8(len(ea.Name)),
|
||||
ValueLength: uint16(len(ea.Value)),
|
||||
}
|
||||
|
||||
err := binary.Write(buf, binary.LittleEndian, &info)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = buf.Write([]byte(ea.Name))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = buf.WriteByte(0)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = buf.Write(ea.Value)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = buf.Write([]byte{0, 0, 0}[0 : withPadding-entrySize])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// EncodeExtendedAttributes encodes a list of EAs into a FILE_FULL_EA_INFORMATION
|
||||
// buffer for use with BackupWrite, ZwSetEaFile, etc.
|
||||
func EncodeExtendedAttributes(eas []ExtendedAttribute) ([]byte, error) {
|
||||
var buf bytes.Buffer
|
||||
for i := range eas {
|
||||
last := false
|
||||
if i == len(eas)-1 {
|
||||
last = true
|
||||
}
|
||||
|
||||
err := writeEa(&buf, &eas[i], last)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return buf.Bytes(), nil
|
||||
}
|
323
vendor/github.com/Microsoft/go-winio/file.go
generated
vendored
@ -1,323 +0,0 @@
|
||||
// +build windows
|
||||
|
||||
package winio
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"io"
|
||||
"runtime"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"syscall"
|
||||
"time"
|
||||
)
|
||||
|
||||
//sys cancelIoEx(file syscall.Handle, o *syscall.Overlapped) (err error) = CancelIoEx
|
||||
//sys createIoCompletionPort(file syscall.Handle, port syscall.Handle, key uintptr, threadCount uint32) (newport syscall.Handle, err error) = CreateIoCompletionPort
|
||||
//sys getQueuedCompletionStatus(port syscall.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) = GetQueuedCompletionStatus
|
||||
//sys setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err error) = SetFileCompletionNotificationModes
|
||||
//sys wsaGetOverlappedResult(h syscall.Handle, o *syscall.Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) = ws2_32.WSAGetOverlappedResult
|
||||
|
||||
type atomicBool int32
|
||||
|
||||
func (b *atomicBool) isSet() bool { return atomic.LoadInt32((*int32)(b)) != 0 }
|
||||
func (b *atomicBool) setFalse() { atomic.StoreInt32((*int32)(b), 0) }
|
||||
func (b *atomicBool) setTrue() { atomic.StoreInt32((*int32)(b), 1) }
|
||||
func (b *atomicBool) swap(new bool) bool {
|
||||
var newInt int32
|
||||
if new {
|
||||
newInt = 1
|
||||
}
|
||||
return atomic.SwapInt32((*int32)(b), newInt) == 1
|
||||
}
|
||||
|
||||
const (
|
||||
cFILE_SKIP_COMPLETION_PORT_ON_SUCCESS = 1
|
||||
cFILE_SKIP_SET_EVENT_ON_HANDLE = 2
|
||||
)
|
||||
|
||||
var (
|
||||
ErrFileClosed = errors.New("file has already been closed")
|
||||
ErrTimeout = &timeoutError{}
|
||||
)
|
||||
|
||||
type timeoutError struct{}
|
||||
|
||||
func (e *timeoutError) Error() string { return "i/o timeout" }
|
||||
func (e *timeoutError) Timeout() bool { return true }
|
||||
func (e *timeoutError) Temporary() bool { return true }
|
||||
|
||||
type timeoutChan chan struct{}
|
||||
|
||||
var ioInitOnce sync.Once
|
||||
var ioCompletionPort syscall.Handle
|
||||
|
||||
// ioResult contains the result of an asynchronous IO operation
|
||||
type ioResult struct {
|
||||
bytes uint32
|
||||
err error
|
||||
}
|
||||
|
||||
// ioOperation represents an outstanding asynchronous Win32 IO
|
||||
type ioOperation struct {
|
||||
o syscall.Overlapped
|
||||
ch chan ioResult
|
||||
}
|
||||
|
||||
func initIo() {
|
||||
h, err := createIoCompletionPort(syscall.InvalidHandle, 0, 0, 0xffffffff)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
ioCompletionPort = h
|
||||
go ioCompletionProcessor(h)
|
||||
}
|
||||
|
||||
// win32File implements Reader, Writer, and Closer on a Win32 handle without blocking in a syscall.
|
||||
// It takes ownership of this handle and will close it if it is garbage collected.
|
||||
type win32File struct {
|
||||
handle syscall.Handle
|
||||
wg sync.WaitGroup
|
||||
wgLock sync.RWMutex
|
||||
closing atomicBool
|
||||
socket bool
|
||||
readDeadline deadlineHandler
|
||||
writeDeadline deadlineHandler
|
||||
}
|
||||
|
||||
type deadlineHandler struct {
|
||||
setLock sync.Mutex
|
||||
channel timeoutChan
|
||||
channelLock sync.RWMutex
|
||||
timer *time.Timer
|
||||
timedout atomicBool
|
||||
}
|
||||
|
||||
// makeWin32File makes a new win32File from an existing file handle
|
||||
func makeWin32File(h syscall.Handle) (*win32File, error) {
|
||||
f := &win32File{handle: h}
|
||||
ioInitOnce.Do(initIo)
|
||||
_, err := createIoCompletionPort(h, ioCompletionPort, 0, 0xffffffff)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = setFileCompletionNotificationModes(h, cFILE_SKIP_COMPLETION_PORT_ON_SUCCESS|cFILE_SKIP_SET_EVENT_ON_HANDLE)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
f.readDeadline.channel = make(timeoutChan)
|
||||
f.writeDeadline.channel = make(timeoutChan)
|
||||
return f, nil
|
||||
}
|
||||
|
||||
func MakeOpenFile(h syscall.Handle) (io.ReadWriteCloser, error) {
|
||||
// If we return the result of makeWin32File directly, it can result in an
|
||||
// interface-wrapped nil, rather than a nil interface value.
|
||||
f, err := makeWin32File(h)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return f, nil
|
||||
}
|
||||
|
||||
// closeHandle closes the resources associated with a Win32 handle
|
||||
func (f *win32File) closeHandle() {
|
||||
f.wgLock.Lock()
|
||||
// Atomically set that we are closing, releasing the resources only once.
|
||||
if !f.closing.swap(true) {
|
||||
f.wgLock.Unlock()
|
||||
// cancel all IO and wait for it to complete
|
||||
cancelIoEx(f.handle, nil)
|
||||
f.wg.Wait()
|
||||
// at this point, no new IO can start
|
||||
syscall.Close(f.handle)
|
||||
f.handle = 0
|
||||
} else {
|
||||
f.wgLock.Unlock()
|
||||
}
|
||||
}
|
||||
|
||||
// Close closes a win32File.
|
||||
func (f *win32File) Close() error {
|
||||
f.closeHandle()
|
||||
return nil
|
||||
}
|
||||
|
||||
// prepareIo prepares for a new IO operation.
|
||||
// The caller must call f.wg.Done() when the IO is finished, prior to Close() returning.
|
||||
func (f *win32File) prepareIo() (*ioOperation, error) {
|
||||
f.wgLock.RLock()
|
||||
if f.closing.isSet() {
|
||||
f.wgLock.RUnlock()
|
||||
return nil, ErrFileClosed
|
||||
}
|
||||
f.wg.Add(1)
|
||||
f.wgLock.RUnlock()
|
||||
c := &ioOperation{}
|
||||
c.ch = make(chan ioResult)
|
||||
return c, nil
|
||||
}
|
||||
|
||||
// ioCompletionProcessor processes completed async IOs forever
|
||||
func ioCompletionProcessor(h syscall.Handle) {
|
||||
for {
|
||||
var bytes uint32
|
||||
var key uintptr
|
||||
var op *ioOperation
|
||||
err := getQueuedCompletionStatus(h, &bytes, &key, &op, syscall.INFINITE)
|
||||
if op == nil {
|
||||
panic(err)
|
||||
}
|
||||
op.ch <- ioResult{bytes, err}
|
||||
}
|
||||
}
|
||||
|
||||
// asyncIo processes the return value from ReadFile or WriteFile, blocking until
|
||||
// the operation has actually completed.
|
||||
func (f *win32File) asyncIo(c *ioOperation, d *deadlineHandler, bytes uint32, err error) (int, error) {
|
||||
if err != syscall.ERROR_IO_PENDING {
|
||||
return int(bytes), err
|
||||
}
|
||||
|
||||
if f.closing.isSet() {
|
||||
cancelIoEx(f.handle, &c.o)
|
||||
}
|
||||
|
||||
var timeout timeoutChan
|
||||
if d != nil {
|
||||
d.channelLock.Lock()
|
||||
timeout = d.channel
|
||||
d.channelLock.Unlock()
|
||||
}
|
||||
|
||||
var r ioResult
|
||||
select {
|
||||
case r = <-c.ch:
|
||||
err = r.err
|
||||
if err == syscall.ERROR_OPERATION_ABORTED {
|
||||
if f.closing.isSet() {
|
||||
err = ErrFileClosed
|
||||
}
|
||||
} else if err != nil && f.socket {
|
||||
// err is from Win32. Query the overlapped structure to get the winsock error.
|
||||
var bytes, flags uint32
|
||||
err = wsaGetOverlappedResult(f.handle, &c.o, &bytes, false, &flags)
|
||||
}
|
||||
case <-timeout:
|
||||
cancelIoEx(f.handle, &c.o)
|
||||
r = <-c.ch
|
||||
err = r.err
|
||||
if err == syscall.ERROR_OPERATION_ABORTED {
|
||||
err = ErrTimeout
|
||||
}
|
||||
}
|
||||
|
||||
// runtime.KeepAlive is needed, as c is passed via native
|
||||
// code to ioCompletionProcessor, c must remain alive
|
||||
// until the channel read is complete.
|
||||
runtime.KeepAlive(c)
|
||||
return int(r.bytes), err
|
||||
}
|
||||
|
||||
// Read reads from a file handle.
|
||||
func (f *win32File) Read(b []byte) (int, error) {
|
||||
c, err := f.prepareIo()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
defer f.wg.Done()
|
||||
|
||||
if f.readDeadline.timedout.isSet() {
|
||||
return 0, ErrTimeout
|
||||
}
|
||||
|
||||
var bytes uint32
|
||||
err = syscall.ReadFile(f.handle, b, &bytes, &c.o)
|
||||
n, err := f.asyncIo(c, &f.readDeadline, bytes, err)
|
||||
runtime.KeepAlive(b)
|
||||
|
||||
// Handle EOF conditions.
|
||||
if err == nil && n == 0 && len(b) != 0 {
|
||||
return 0, io.EOF
|
||||
} else if err == syscall.ERROR_BROKEN_PIPE {
|
||||
return 0, io.EOF
|
||||
} else {
|
||||
return n, err
|
||||
}
|
||||
}
|
||||
|
||||
// Write writes to a file handle.
|
||||
func (f *win32File) Write(b []byte) (int, error) {
|
||||
c, err := f.prepareIo()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
defer f.wg.Done()
|
||||
|
||||
if f.writeDeadline.timedout.isSet() {
|
||||
return 0, ErrTimeout
|
||||
}
|
||||
|
||||
var bytes uint32
|
||||
err = syscall.WriteFile(f.handle, b, &bytes, &c.o)
|
||||
n, err := f.asyncIo(c, &f.writeDeadline, bytes, err)
|
||||
runtime.KeepAlive(b)
|
||||
return n, err
|
||||
}
|
||||
|
||||
func (f *win32File) SetReadDeadline(deadline time.Time) error {
|
||||
return f.readDeadline.set(deadline)
|
||||
}
|
||||
|
||||
func (f *win32File) SetWriteDeadline(deadline time.Time) error {
|
||||
return f.writeDeadline.set(deadline)
|
||||
}
|
||||
|
||||
func (f *win32File) Flush() error {
|
||||
return syscall.FlushFileBuffers(f.handle)
|
||||
}
|
||||
|
||||
func (f *win32File) Fd() uintptr {
|
||||
return uintptr(f.handle)
|
||||
}
|
||||
|
||||
func (d *deadlineHandler) set(deadline time.Time) error {
|
||||
d.setLock.Lock()
|
||||
defer d.setLock.Unlock()
|
||||
|
||||
if d.timer != nil {
|
||||
if !d.timer.Stop() {
|
||||
<-d.channel
|
||||
}
|
||||
d.timer = nil
|
||||
}
|
||||
d.timedout.setFalse()
|
||||
|
||||
select {
|
||||
case <-d.channel:
|
||||
d.channelLock.Lock()
|
||||
d.channel = make(chan struct{})
|
||||
d.channelLock.Unlock()
|
||||
default:
|
||||
}
|
||||
|
||||
if deadline.IsZero() {
|
||||
return nil
|
||||
}
|
||||
|
||||
timeoutIO := func() {
|
||||
d.timedout.setTrue()
|
||||
close(d.channel)
|
||||
}
|
||||
|
||||
now := time.Now()
|
||||
duration := deadline.Sub(now)
|
||||
if deadline.After(now) {
|
||||
// Deadline is in the future, set a timer to wait
|
||||
d.timer = time.AfterFunc(duration, timeoutIO)
|
||||
} else {
|
||||
// Deadline is in the past. Cancel all pending IO now.
|
||||
timeoutIO()
|
||||
}
|
||||
return nil
|
||||
}
|
61
vendor/github.com/Microsoft/go-winio/fileinfo.go
generated
vendored
@ -1,61 +0,0 @@
|
||||
// +build windows
|
||||
|
||||
package winio
|
||||
|
||||
import (
|
||||
"os"
|
||||
"runtime"
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
//sys getFileInformationByHandleEx(h syscall.Handle, class uint32, buffer *byte, size uint32) (err error) = GetFileInformationByHandleEx
|
||||
//sys setFileInformationByHandle(h syscall.Handle, class uint32, buffer *byte, size uint32) (err error) = SetFileInformationByHandle
|
||||
|
||||
const (
|
||||
fileBasicInfo = 0
|
||||
fileIDInfo = 0x12
|
||||
)
|
||||
|
||||
// FileBasicInfo contains file access time and file attributes information.
|
||||
type FileBasicInfo struct {
|
||||
CreationTime, LastAccessTime, LastWriteTime, ChangeTime syscall.Filetime
|
||||
FileAttributes uint32
|
||||
pad uint32 // padding
|
||||
}
|
||||
|
||||
// GetFileBasicInfo retrieves times and attributes for a file.
|
||||
func GetFileBasicInfo(f *os.File) (*FileBasicInfo, error) {
|
||||
bi := &FileBasicInfo{}
|
||||
if err := getFileInformationByHandleEx(syscall.Handle(f.Fd()), fileBasicInfo, (*byte)(unsafe.Pointer(bi)), uint32(unsafe.Sizeof(*bi))); err != nil {
|
||||
return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err}
|
||||
}
|
||||
runtime.KeepAlive(f)
|
||||
return bi, nil
|
||||
}
|
||||
|
||||
// SetFileBasicInfo sets times and attributes for a file.
|
||||
func SetFileBasicInfo(f *os.File, bi *FileBasicInfo) error {
|
||||
if err := setFileInformationByHandle(syscall.Handle(f.Fd()), fileBasicInfo, (*byte)(unsafe.Pointer(bi)), uint32(unsafe.Sizeof(*bi))); err != nil {
|
||||
return &os.PathError{Op: "SetFileInformationByHandle", Path: f.Name(), Err: err}
|
||||
}
|
||||
runtime.KeepAlive(f)
|
||||
return nil
|
||||
}
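For reference, a minimal sketch of how GetFileBasicInfo and SetFileBasicInfo above were typically used together, assuming a Windows build of this vendored package; the file paths are hypothetical placeholders.

// Minimal sketch (Windows-only): copy timestamps and attributes from one
// file to another using the helpers above. Paths are hypothetical.
package main

import (
	"log"
	"os"

	"github.com/Microsoft/go-winio"
)

func main() {
	src, err := os.Open(`C:\temp\src.txt`)
	if err != nil {
		log.Fatal(err)
	}
	defer src.Close()

	dst, err := os.OpenFile(`C:\temp\dst.txt`, os.O_RDWR, 0)
	if err != nil {
		log.Fatal(err)
	}
	defer dst.Close()

	// Read times and attributes from the source handle...
	bi, err := winio.GetFileBasicInfo(src)
	if err != nil {
		log.Fatal(err)
	}
	// ...and apply them to the destination handle.
	if err := winio.SetFileBasicInfo(dst, bi); err != nil {
		log.Fatal(err)
	}
}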
|
||||
|
||||
// FileIDInfo contains the volume serial number and file ID for a file. This pair should be
|
||||
// unique on a system.
|
||||
type FileIDInfo struct {
|
||||
VolumeSerialNumber uint64
|
||||
FileID [16]byte
|
||||
}
|
||||
|
||||
// GetFileID retrieves the unique (volume, file ID) pair for a file.
|
||||
func GetFileID(f *os.File) (*FileIDInfo, error) {
|
||||
fileID := &FileIDInfo{}
|
||||
if err := getFileInformationByHandleEx(syscall.Handle(f.Fd()), fileIDInfo, (*byte)(unsafe.Pointer(fileID)), uint32(unsafe.Sizeof(*fileID))); err != nil {
|
||||
return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err}
|
||||
}
|
||||
runtime.KeepAlive(f)
|
||||
return fileID, nil
|
||||
}
|
9
vendor/github.com/Microsoft/go-winio/go.mod
generated
vendored
@ -1,9 +0,0 @@
|
||||
module github.com/Microsoft/go-winio
|
||||
|
||||
go 1.12
|
||||
|
||||
require (
|
||||
github.com/pkg/errors v0.8.1
|
||||
github.com/sirupsen/logrus v1.4.1
|
||||
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b
|
||||
)
|
305
vendor/github.com/Microsoft/go-winio/hvsock.go
generated
vendored
@ -1,305 +0,0 @@
|
||||
package winio
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"os"
|
||||
"syscall"
|
||||
"time"
|
||||
"unsafe"
|
||||
|
||||
"github.com/Microsoft/go-winio/pkg/guid"
|
||||
)
|
||||
|
||||
//sys bind(s syscall.Handle, name unsafe.Pointer, namelen int32) (err error) [failretval==socketError] = ws2_32.bind
|
||||
|
||||
const (
|
||||
afHvSock = 34 // AF_HYPERV
|
||||
|
||||
socketError = ^uintptr(0)
|
||||
)
|
||||
|
||||
// An HvsockAddr is an address for a AF_HYPERV socket.
|
||||
type HvsockAddr struct {
|
||||
VMID guid.GUID
|
||||
ServiceID guid.GUID
|
||||
}
|
||||
|
||||
type rawHvsockAddr struct {
|
||||
Family uint16
|
||||
_ uint16
|
||||
VMID guid.GUID
|
||||
ServiceID guid.GUID
|
||||
}
|
||||
|
||||
// Network returns the address's network name, "hvsock".
|
||||
func (addr *HvsockAddr) Network() string {
|
||||
return "hvsock"
|
||||
}
|
||||
|
||||
func (addr *HvsockAddr) String() string {
|
||||
return fmt.Sprintf("%s:%s", &addr.VMID, &addr.ServiceID)
|
||||
}
|
||||
|
||||
// VsockServiceID returns an hvsock service ID corresponding to the specified AF_VSOCK port.
|
||||
func VsockServiceID(port uint32) guid.GUID {
|
||||
g, _ := guid.FromString("00000000-facb-11e6-bd58-64006a7986d3")
|
||||
g.Data1 = port
|
||||
return g
|
||||
}
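A small hedged sketch of VsockServiceID in use, deriving a service GUID from a vsock-style port and placing it in an HvsockAddr; the VM ID shown is a hypothetical placeholder, not a real partition ID.

// Sketch: map an AF_VSOCK-style port onto an hvsock service GUID and build an
// address. The VM ID below is a placeholder.
package main

import (
	"fmt"

	"github.com/Microsoft/go-winio"
	"github.com/Microsoft/go-winio/pkg/guid"
)

func main() {
	// Port 2048 replaces Data1 in the well-known vsock template GUID.
	svc := winio.VsockServiceID(2048)

	vmid, err := guid.FromString("11111111-2222-3333-4444-555555555555") // placeholder
	if err != nil {
		panic(err)
	}

	addr := winio.HvsockAddr{VMID: vmid, ServiceID: svc}
	fmt.Println(addr.Network(), addr.String())
}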
|
||||
|
||||
func (addr *HvsockAddr) raw() rawHvsockAddr {
|
||||
return rawHvsockAddr{
|
||||
Family: afHvSock,
|
||||
VMID: addr.VMID,
|
||||
ServiceID: addr.ServiceID,
|
||||
}
|
||||
}
|
||||
|
||||
func (addr *HvsockAddr) fromRaw(raw *rawHvsockAddr) {
|
||||
addr.VMID = raw.VMID
|
||||
addr.ServiceID = raw.ServiceID
|
||||
}
|
||||
|
||||
// HvsockListener is a socket listener for the AF_HYPERV address family.
|
||||
type HvsockListener struct {
|
||||
sock *win32File
|
||||
addr HvsockAddr
|
||||
}
|
||||
|
||||
// HvsockConn is a connected socket of the AF_HYPERV address family.
|
||||
type HvsockConn struct {
|
||||
sock *win32File
|
||||
local, remote HvsockAddr
|
||||
}
|
||||
|
||||
func newHvSocket() (*win32File, error) {
|
||||
fd, err := syscall.Socket(afHvSock, syscall.SOCK_STREAM, 1)
|
||||
if err != nil {
|
||||
return nil, os.NewSyscallError("socket", err)
|
||||
}
|
||||
f, err := makeWin32File(fd)
|
||||
if err != nil {
|
||||
syscall.Close(fd)
|
||||
return nil, err
|
||||
}
|
||||
f.socket = true
|
||||
return f, nil
|
||||
}
|
||||
|
||||
// ListenHvsock listens for connections on the specified hvsock address.
|
||||
func ListenHvsock(addr *HvsockAddr) (_ *HvsockListener, err error) {
|
||||
l := &HvsockListener{addr: *addr}
|
||||
sock, err := newHvSocket()
|
||||
if err != nil {
|
||||
return nil, l.opErr("listen", err)
|
||||
}
|
||||
sa := addr.raw()
|
||||
err = bind(sock.handle, unsafe.Pointer(&sa), int32(unsafe.Sizeof(sa)))
|
||||
if err != nil {
|
||||
return nil, l.opErr("listen", os.NewSyscallError("socket", err))
|
||||
}
|
||||
err = syscall.Listen(sock.handle, 16)
|
||||
if err != nil {
|
||||
return nil, l.opErr("listen", os.NewSyscallError("listen", err))
|
||||
}
|
||||
return &HvsockListener{sock: sock, addr: *addr}, nil
|
||||
}
|
||||
|
||||
func (l *HvsockListener) opErr(op string, err error) error {
|
||||
return &net.OpError{Op: op, Net: "hvsock", Addr: &l.addr, Err: err}
|
||||
}
|
||||
|
||||
// Addr returns the listener's network address.
|
||||
func (l *HvsockListener) Addr() net.Addr {
|
||||
return &l.addr
|
||||
}
|
||||
|
||||
// Accept waits for the next connection and returns it.
|
||||
func (l *HvsockListener) Accept() (_ net.Conn, err error) {
|
||||
sock, err := newHvSocket()
|
||||
if err != nil {
|
||||
return nil, l.opErr("accept", err)
|
||||
}
|
||||
defer func() {
|
||||
if sock != nil {
|
||||
sock.Close()
|
||||
}
|
||||
}()
|
||||
c, err := l.sock.prepareIo()
|
||||
if err != nil {
|
||||
return nil, l.opErr("accept", err)
|
||||
}
|
||||
defer l.sock.wg.Done()
|
||||
|
||||
// AcceptEx, per documentation, requires an extra 16 bytes per address.
|
||||
const addrlen = uint32(16 + unsafe.Sizeof(rawHvsockAddr{}))
|
||||
var addrbuf [addrlen * 2]byte
|
||||
|
||||
var bytes uint32
|
||||
err = syscall.AcceptEx(l.sock.handle, sock.handle, &addrbuf[0], 0, addrlen, addrlen, &bytes, &c.o)
|
||||
_, err = l.sock.asyncIo(c, nil, bytes, err)
|
||||
if err != nil {
|
||||
return nil, l.opErr("accept", os.NewSyscallError("acceptex", err))
|
||||
}
|
||||
conn := &HvsockConn{
|
||||
sock: sock,
|
||||
}
|
||||
conn.local.fromRaw((*rawHvsockAddr)(unsafe.Pointer(&addrbuf[0])))
|
||||
conn.remote.fromRaw((*rawHvsockAddr)(unsafe.Pointer(&addrbuf[addrlen])))
|
||||
sock = nil
|
||||
return conn, nil
|
||||
}
|
||||
|
||||
// Close closes the listener, causing any pending Accept calls to fail.
|
||||
func (l *HvsockListener) Close() error {
|
||||
return l.sock.Close()
|
||||
}
|
||||
|
||||
/* Need to finish ConnectEx handling
|
||||
func DialHvsock(ctx context.Context, addr *HvsockAddr) (*HvsockConn, error) {
|
||||
sock, err := newHvSocket()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer func() {
|
||||
if sock != nil {
|
||||
sock.Close()
|
||||
}
|
||||
}()
|
||||
c, err := sock.prepareIo()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer sock.wg.Done()
|
||||
var bytes uint32
|
||||
err = windows.ConnectEx(windows.Handle(sock.handle), sa, nil, 0, &bytes, &c.o)
|
||||
_, err = sock.asyncIo(ctx, c, nil, bytes, err)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
conn := &HvsockConn{
|
||||
sock: sock,
|
||||
remote: *addr,
|
||||
}
|
||||
sock = nil
|
||||
return conn, nil
|
||||
}
|
||||
*/
|
||||
|
||||
func (conn *HvsockConn) opErr(op string, err error) error {
|
||||
return &net.OpError{Op: op, Net: "hvsock", Source: &conn.local, Addr: &conn.remote, Err: err}
|
||||
}
|
||||
|
||||
func (conn *HvsockConn) Read(b []byte) (int, error) {
|
||||
c, err := conn.sock.prepareIo()
|
||||
if err != nil {
|
||||
return 0, conn.opErr("read", err)
|
||||
}
|
||||
defer conn.sock.wg.Done()
|
||||
buf := syscall.WSABuf{Buf: &b[0], Len: uint32(len(b))}
|
||||
var flags, bytes uint32
|
||||
err = syscall.WSARecv(conn.sock.handle, &buf, 1, &bytes, &flags, &c.o, nil)
|
||||
n, err := conn.sock.asyncIo(c, &conn.sock.readDeadline, bytes, err)
|
||||
if err != nil {
|
||||
if _, ok := err.(syscall.Errno); ok {
|
||||
err = os.NewSyscallError("wsarecv", err)
|
||||
}
|
||||
return 0, conn.opErr("read", err)
|
||||
} else if n == 0 {
|
||||
err = io.EOF
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
|
||||
func (conn *HvsockConn) Write(b []byte) (int, error) {
|
||||
t := 0
|
||||
for len(b) != 0 {
|
||||
n, err := conn.write(b)
|
||||
if err != nil {
|
||||
return t + n, err
|
||||
}
|
||||
t += n
|
||||
b = b[n:]
|
||||
}
|
||||
return t, nil
|
||||
}
|
||||
|
||||
func (conn *HvsockConn) write(b []byte) (int, error) {
|
||||
c, err := conn.sock.prepareIo()
|
||||
if err != nil {
|
||||
return 0, conn.opErr("write", err)
|
||||
}
|
||||
defer conn.sock.wg.Done()
|
||||
buf := syscall.WSABuf{Buf: &b[0], Len: uint32(len(b))}
|
||||
var bytes uint32
|
||||
err = syscall.WSASend(conn.sock.handle, &buf, 1, &bytes, 0, &c.o, nil)
|
||||
n, err := conn.sock.asyncIo(c, &conn.sock.writeDeadline, bytes, err)
|
||||
if err != nil {
|
||||
if _, ok := err.(syscall.Errno); ok {
|
||||
err = os.NewSyscallError("wsasend", err)
|
||||
}
|
||||
return 0, conn.opErr("write", err)
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
|
||||
// Close closes the socket connection, failing any pending read or write calls.
|
||||
func (conn *HvsockConn) Close() error {
|
||||
return conn.sock.Close()
|
||||
}
|
||||
|
||||
func (conn *HvsockConn) shutdown(how int) error {
|
||||
	err := syscall.Shutdown(conn.sock.handle, how)
|
||||
if err != nil {
|
||||
return os.NewSyscallError("shutdown", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// CloseRead shuts down the read end of the socket.
|
||||
func (conn *HvsockConn) CloseRead() error {
|
||||
err := conn.shutdown(syscall.SHUT_RD)
|
||||
if err != nil {
|
||||
return conn.opErr("close", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// CloseWrite shuts down the write end of the socket, notifying the other endpoint that
|
||||
// no more data will be written.
|
||||
func (conn *HvsockConn) CloseWrite() error {
|
||||
err := conn.shutdown(syscall.SHUT_WR)
|
||||
if err != nil {
|
||||
return conn.opErr("close", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// LocalAddr returns the local address of the connection.
|
||||
func (conn *HvsockConn) LocalAddr() net.Addr {
|
||||
return &conn.local
|
||||
}
|
||||
|
||||
// RemoteAddr returns the remote address of the connection.
|
||||
func (conn *HvsockConn) RemoteAddr() net.Addr {
|
||||
return &conn.remote
|
||||
}
|
||||
|
||||
// SetDeadline implements the net.Conn SetDeadline method.
|
||||
func (conn *HvsockConn) SetDeadline(t time.Time) error {
|
||||
conn.SetReadDeadline(t)
|
||||
conn.SetWriteDeadline(t)
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetReadDeadline implements the net.Conn SetReadDeadline method.
|
||||
func (conn *HvsockConn) SetReadDeadline(t time.Time) error {
|
||||
return conn.sock.SetReadDeadline(t)
|
||||
}
|
||||
|
||||
// SetWriteDeadline implements the net.Conn SetWriteDeadline method.
|
||||
func (conn *HvsockConn) SetWriteDeadline(t time.Time) error {
|
||||
return conn.sock.SetWriteDeadline(t)
|
||||
}
|
510
vendor/github.com/Microsoft/go-winio/pipe.go
generated
vendored
@ -1,510 +0,0 @@
|
||||
// +build windows
|
||||
|
||||
package winio
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"os"
|
||||
"runtime"
|
||||
"syscall"
|
||||
"time"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
//sys connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) = ConnectNamedPipe
|
||||
//sys createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] = CreateNamedPipeW
|
||||
//sys createFile(name string, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] = CreateFileW
|
||||
//sys getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) = GetNamedPipeInfo
|
||||
//sys getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW
|
||||
//sys localAlloc(uFlags uint32, length uint32) (ptr uintptr) = LocalAlloc
|
||||
//sys ntCreateNamedPipeFile(pipe *syscall.Handle, access uint32, oa *objectAttributes, iosb *ioStatusBlock, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntstatus) = ntdll.NtCreateNamedPipeFile
|
||||
//sys rtlNtStatusToDosError(status ntstatus) (winerr error) = ntdll.RtlNtStatusToDosErrorNoTeb
|
||||
//sys rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntstatus) = ntdll.RtlDosPathNameToNtPathName_U
|
||||
//sys rtlDefaultNpAcl(dacl *uintptr) (status ntstatus) = ntdll.RtlDefaultNpAcl
|
||||
|
||||
type ioStatusBlock struct {
|
||||
Status, Information uintptr
|
||||
}
|
||||
|
||||
type objectAttributes struct {
|
||||
Length uintptr
|
||||
RootDirectory uintptr
|
||||
ObjectName *unicodeString
|
||||
Attributes uintptr
|
||||
SecurityDescriptor *securityDescriptor
|
||||
SecurityQoS uintptr
|
||||
}
|
||||
|
||||
type unicodeString struct {
|
||||
Length uint16
|
||||
MaximumLength uint16
|
||||
Buffer uintptr
|
||||
}
|
||||
|
||||
type securityDescriptor struct {
|
||||
Revision byte
|
||||
Sbz1 byte
|
||||
Control uint16
|
||||
Owner uintptr
|
||||
Group uintptr
|
||||
Sacl uintptr
|
||||
Dacl uintptr
|
||||
}
|
||||
|
||||
type ntstatus int32
|
||||
|
||||
func (status ntstatus) Err() error {
|
||||
if status >= 0 {
|
||||
return nil
|
||||
}
|
||||
return rtlNtStatusToDosError(status)
|
||||
}
|
||||
|
||||
const (
|
||||
cERROR_PIPE_BUSY = syscall.Errno(231)
|
||||
cERROR_NO_DATA = syscall.Errno(232)
|
||||
cERROR_PIPE_CONNECTED = syscall.Errno(535)
|
||||
cERROR_SEM_TIMEOUT = syscall.Errno(121)
|
||||
|
||||
cSECURITY_SQOS_PRESENT = 0x100000
|
||||
cSECURITY_ANONYMOUS = 0
|
||||
|
||||
cPIPE_TYPE_MESSAGE = 4
|
||||
|
||||
cPIPE_READMODE_MESSAGE = 2
|
||||
|
||||
cFILE_OPEN = 1
|
||||
cFILE_CREATE = 2
|
||||
|
||||
cFILE_PIPE_MESSAGE_TYPE = 1
|
||||
cFILE_PIPE_REJECT_REMOTE_CLIENTS = 2
|
||||
|
||||
cSE_DACL_PRESENT = 4
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrPipeListenerClosed is returned for pipe operations on listeners that have been closed.
|
||||
// This error should match net.errClosing since docker takes a dependency on its text.
|
||||
ErrPipeListenerClosed = errors.New("use of closed network connection")
|
||||
|
||||
errPipeWriteClosed = errors.New("pipe has been closed for write")
|
||||
)
|
||||
|
||||
type win32Pipe struct {
|
||||
*win32File
|
||||
path string
|
||||
}
|
||||
|
||||
type win32MessageBytePipe struct {
|
||||
win32Pipe
|
||||
writeClosed bool
|
||||
readEOF bool
|
||||
}
|
||||
|
||||
type pipeAddress string
|
||||
|
||||
func (f *win32Pipe) LocalAddr() net.Addr {
|
||||
return pipeAddress(f.path)
|
||||
}
|
||||
|
||||
func (f *win32Pipe) RemoteAddr() net.Addr {
|
||||
return pipeAddress(f.path)
|
||||
}
|
||||
|
||||
func (f *win32Pipe) SetDeadline(t time.Time) error {
|
||||
f.SetReadDeadline(t)
|
||||
f.SetWriteDeadline(t)
|
||||
return nil
|
||||
}
|
||||
|
||||
// CloseWrite closes the write side of a message pipe in byte mode.
|
||||
func (f *win32MessageBytePipe) CloseWrite() error {
|
||||
if f.writeClosed {
|
||||
return errPipeWriteClosed
|
||||
}
|
||||
err := f.win32File.Flush()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = f.win32File.Write(nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
f.writeClosed = true
|
||||
return nil
|
||||
}
|
||||
|
||||
// Write writes bytes to a message pipe in byte mode. Zero-byte writes are ignored, since
|
||||
// they are used to implement CloseWrite().
|
||||
func (f *win32MessageBytePipe) Write(b []byte) (int, error) {
|
||||
if f.writeClosed {
|
||||
return 0, errPipeWriteClosed
|
||||
}
|
||||
if len(b) == 0 {
|
||||
return 0, nil
|
||||
}
|
||||
return f.win32File.Write(b)
|
||||
}
|
||||
|
||||
// Read reads bytes from a message pipe in byte mode. A read of a zero-byte message on a message
|
||||
// mode pipe will return io.EOF, as will all subsequent reads.
|
||||
func (f *win32MessageBytePipe) Read(b []byte) (int, error) {
|
||||
if f.readEOF {
|
||||
return 0, io.EOF
|
||||
}
|
||||
n, err := f.win32File.Read(b)
|
||||
if err == io.EOF {
|
||||
// If this was the result of a zero-byte read, then
|
||||
// it is possible that the read was due to a zero-size
|
||||
// message. Since we are simulating CloseWrite with a
|
||||
// zero-byte message, ensure that all future Read() calls
|
||||
// also return EOF.
|
||||
f.readEOF = true
|
||||
} else if err == syscall.ERROR_MORE_DATA {
|
||||
// ERROR_MORE_DATA indicates that the pipe's read mode is message mode
|
||||
// and the message still has more bytes. Treat this as a success, since
|
||||
// this package presents all named pipes as byte streams.
|
||||
err = nil
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
|
||||
func (s pipeAddress) Network() string {
|
||||
return "pipe"
|
||||
}
|
||||
|
||||
func (s pipeAddress) String() string {
|
||||
return string(s)
|
||||
}
|
||||
|
||||
// tryDialPipe attempts to dial the pipe at `path` until `ctx` cancellation or timeout.
|
||||
func tryDialPipe(ctx context.Context, path *string) (syscall.Handle, error) {
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return syscall.Handle(0), ctx.Err()
|
||||
default:
|
||||
h, err := createFile(*path, syscall.GENERIC_READ|syscall.GENERIC_WRITE, 0, nil, syscall.OPEN_EXISTING, syscall.FILE_FLAG_OVERLAPPED|cSECURITY_SQOS_PRESENT|cSECURITY_ANONYMOUS, 0)
|
||||
if err == nil {
|
||||
return h, nil
|
||||
}
|
||||
if err != cERROR_PIPE_BUSY {
|
||||
return h, &os.PathError{Err: err, Op: "open", Path: *path}
|
||||
}
|
||||
// Wait 10 msec and try again. This is a rather simplistic
|
||||
			// view, as we always retry every 10 milliseconds.
|
||||
time.Sleep(time.Millisecond * 10)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// DialPipe connects to a named pipe by path, timing out if the connection
|
||||
// takes longer than the specified duration. If timeout is nil, then we use
|
||||
// a default timeout of 2 seconds. (We do not use WaitNamedPipe.)
|
||||
func DialPipe(path string, timeout *time.Duration) (net.Conn, error) {
|
||||
var absTimeout time.Time
|
||||
if timeout != nil {
|
||||
absTimeout = time.Now().Add(*timeout)
|
||||
} else {
|
||||
absTimeout = time.Now().Add(time.Second * 2)
|
||||
}
|
||||
ctx, _ := context.WithDeadline(context.Background(), absTimeout)
|
||||
conn, err := DialPipeContext(ctx, path)
|
||||
if err == context.DeadlineExceeded {
|
||||
return nil, ErrTimeout
|
||||
}
|
||||
return conn, err
|
||||
}
|
||||
|
||||
// DialPipeContext attempts to connect to a named pipe by `path` until `ctx`
|
||||
// cancellation or timeout.
|
||||
func DialPipeContext(ctx context.Context, path string) (net.Conn, error) {
|
||||
var err error
|
||||
var h syscall.Handle
|
||||
h, err = tryDialPipe(ctx, &path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var flags uint32
|
||||
err = getNamedPipeInfo(h, &flags, nil, nil, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
f, err := makeWin32File(h)
|
||||
if err != nil {
|
||||
syscall.Close(h)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// If the pipe is in message mode, return a message byte pipe, which
|
||||
// supports CloseWrite().
|
||||
if flags&cPIPE_TYPE_MESSAGE != 0 {
|
||||
return &win32MessageBytePipe{
|
||||
win32Pipe: win32Pipe{win32File: f, path: path},
|
||||
}, nil
|
||||
}
|
||||
return &win32Pipe{win32File: f, path: path}, nil
|
||||
}
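A minimal client-side sketch of DialPipe as documented above, assuming a Windows build; the pipe name and payload are hypothetical.

// Minimal sketch (Windows-only): dial a named pipe with a timeout, set a read
// deadline, and exchange a little data. Pipe name is hypothetical.
package main

import (
	"log"
	"time"

	"github.com/Microsoft/go-winio"
)

func main() {
	timeout := 2 * time.Second
	conn, err := winio.DialPipe(`\\.\pipe\demo`, &timeout)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// Reads that do not complete by this deadline fail with ErrTimeout.
	_ = conn.SetReadDeadline(time.Now().Add(5 * time.Second))

	if _, err := conn.Write([]byte("ping")); err != nil {
		log.Fatal(err)
	}
	buf := make([]byte, 512)
	n, err := conn.Read(buf)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("got %q", buf[:n])
}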
|
||||
|
||||
type acceptResponse struct {
|
||||
f *win32File
|
||||
err error
|
||||
}
|
||||
|
||||
type win32PipeListener struct {
|
||||
firstHandle syscall.Handle
|
||||
path string
|
||||
config PipeConfig
|
||||
acceptCh chan (chan acceptResponse)
|
||||
closeCh chan int
|
||||
doneCh chan int
|
||||
}
|
||||
|
||||
func makeServerPipeHandle(path string, sd []byte, c *PipeConfig, first bool) (syscall.Handle, error) {
|
||||
path16, err := syscall.UTF16FromString(path)
|
||||
if err != nil {
|
||||
return 0, &os.PathError{Op: "open", Path: path, Err: err}
|
||||
}
|
||||
|
||||
var oa objectAttributes
|
||||
oa.Length = unsafe.Sizeof(oa)
|
||||
|
||||
var ntPath unicodeString
|
||||
if err := rtlDosPathNameToNtPathName(&path16[0], &ntPath, 0, 0).Err(); err != nil {
|
||||
return 0, &os.PathError{Op: "open", Path: path, Err: err}
|
||||
}
|
||||
defer localFree(ntPath.Buffer)
|
||||
oa.ObjectName = &ntPath
|
||||
|
||||
// The security descriptor is only needed for the first pipe.
|
||||
if first {
|
||||
if sd != nil {
|
||||
len := uint32(len(sd))
|
||||
sdb := localAlloc(0, len)
|
||||
defer localFree(sdb)
|
||||
copy((*[0xffff]byte)(unsafe.Pointer(sdb))[:], sd)
|
||||
oa.SecurityDescriptor = (*securityDescriptor)(unsafe.Pointer(sdb))
|
||||
} else {
|
||||
// Construct the default named pipe security descriptor.
|
||||
var dacl uintptr
|
||||
if err := rtlDefaultNpAcl(&dacl).Err(); err != nil {
|
||||
return 0, fmt.Errorf("getting default named pipe ACL: %s", err)
|
||||
}
|
||||
defer localFree(dacl)
|
||||
|
||||
sdb := &securityDescriptor{
|
||||
Revision: 1,
|
||||
Control: cSE_DACL_PRESENT,
|
||||
Dacl: dacl,
|
||||
}
|
||||
oa.SecurityDescriptor = sdb
|
||||
}
|
||||
}
|
||||
|
||||
typ := uint32(cFILE_PIPE_REJECT_REMOTE_CLIENTS)
|
||||
if c.MessageMode {
|
||||
typ |= cFILE_PIPE_MESSAGE_TYPE
|
||||
}
|
||||
|
||||
disposition := uint32(cFILE_OPEN)
|
||||
access := uint32(syscall.GENERIC_READ | syscall.GENERIC_WRITE | syscall.SYNCHRONIZE)
|
||||
if first {
|
||||
disposition = cFILE_CREATE
|
||||
// By not asking for read or write access, the named pipe file system
|
||||
// will put this pipe into an initially disconnected state, blocking
|
||||
// client connections until the next call with first == false.
|
||||
access = syscall.SYNCHRONIZE
|
||||
}
|
||||
|
||||
timeout := int64(-50 * 10000) // 50ms
|
||||
|
||||
var (
|
||||
h syscall.Handle
|
||||
iosb ioStatusBlock
|
||||
)
|
||||
err = ntCreateNamedPipeFile(&h, access, &oa, &iosb, syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE, disposition, 0, typ, 0, 0, 0xffffffff, uint32(c.InputBufferSize), uint32(c.OutputBufferSize), &timeout).Err()
|
||||
if err != nil {
|
||||
return 0, &os.PathError{Op: "open", Path: path, Err: err}
|
||||
}
|
||||
|
||||
runtime.KeepAlive(ntPath)
|
||||
return h, nil
|
||||
}
|
||||
|
||||
func (l *win32PipeListener) makeServerPipe() (*win32File, error) {
|
||||
h, err := makeServerPipeHandle(l.path, nil, &l.config, false)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
f, err := makeWin32File(h)
|
||||
if err != nil {
|
||||
syscall.Close(h)
|
||||
return nil, err
|
||||
}
|
||||
return f, nil
|
||||
}
|
||||
|
||||
func (l *win32PipeListener) makeConnectedServerPipe() (*win32File, error) {
|
||||
p, err := l.makeServerPipe()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Wait for the client to connect.
|
||||
ch := make(chan error)
|
||||
go func(p *win32File) {
|
||||
ch <- connectPipe(p)
|
||||
}(p)
|
||||
|
||||
select {
|
||||
case err = <-ch:
|
||||
if err != nil {
|
||||
p.Close()
|
||||
p = nil
|
||||
}
|
||||
case <-l.closeCh:
|
||||
// Abort the connect request by closing the handle.
|
||||
p.Close()
|
||||
p = nil
|
||||
err = <-ch
|
||||
if err == nil || err == ErrFileClosed {
|
||||
err = ErrPipeListenerClosed
|
||||
}
|
||||
}
|
||||
return p, err
|
||||
}
|
||||
|
||||
func (l *win32PipeListener) listenerRoutine() {
|
||||
closed := false
|
||||
for !closed {
|
||||
select {
|
||||
case <-l.closeCh:
|
||||
closed = true
|
||||
case responseCh := <-l.acceptCh:
|
||||
var (
|
||||
p *win32File
|
||||
err error
|
||||
)
|
||||
for {
|
||||
p, err = l.makeConnectedServerPipe()
|
||||
// If the connection was immediately closed by the client, try
|
||||
// again.
|
||||
if err != cERROR_NO_DATA {
|
||||
break
|
||||
}
|
||||
}
|
||||
responseCh <- acceptResponse{p, err}
|
||||
closed = err == ErrPipeListenerClosed
|
||||
}
|
||||
}
|
||||
syscall.Close(l.firstHandle)
|
||||
l.firstHandle = 0
|
||||
// Notify Close() and Accept() callers that the handle has been closed.
|
||||
close(l.doneCh)
|
||||
}
|
||||
|
||||
// PipeConfig contains configuration for the pipe listener.
|
||||
type PipeConfig struct {
|
||||
// SecurityDescriptor contains a Windows security descriptor in SDDL format.
|
||||
SecurityDescriptor string
|
||||
|
||||
// MessageMode determines whether the pipe is in byte or message mode. In either
|
||||
// case the pipe is read in byte mode by default. The only practical difference in
|
||||
// this implementation is that CloseWrite() is only supported for message mode pipes;
|
||||
// CloseWrite() is implemented as a zero-byte write, but zero-byte writes are only
|
||||
// transferred to the reader (and returned as io.EOF in this implementation)
|
||||
// when the pipe is in message mode.
|
||||
MessageMode bool
|
||||
|
||||
	// InputBufferSize specifies the size of the input buffer, in bytes.
|
||||
InputBufferSize int32
|
||||
|
||||
	// OutputBufferSize specifies the size of the output buffer, in bytes.
|
||||
OutputBufferSize int32
|
||||
}
|
||||
|
||||
// ListenPipe creates a listener on a Windows named pipe path, e.g. \\.\pipe\mypipe.
|
||||
// The pipe must not already exist.
|
||||
func ListenPipe(path string, c *PipeConfig) (net.Listener, error) {
|
||||
var (
|
||||
sd []byte
|
||||
err error
|
||||
)
|
||||
if c == nil {
|
||||
c = &PipeConfig{}
|
||||
}
|
||||
if c.SecurityDescriptor != "" {
|
||||
sd, err = SddlToSecurityDescriptor(c.SecurityDescriptor)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
h, err := makeServerPipeHandle(path, sd, c, true)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
l := &win32PipeListener{
|
||||
firstHandle: h,
|
||||
path: path,
|
||||
config: *c,
|
||||
acceptCh: make(chan (chan acceptResponse)),
|
||||
closeCh: make(chan int),
|
||||
doneCh: make(chan int),
|
||||
}
|
||||
go l.listenerRoutine()
|
||||
return l, nil
|
||||
}
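A minimal server-side sketch of ListenPipe with a message-mode PipeConfig, assuming a Windows build; the pipe name is hypothetical and the handler simply echoes one message per connection.

// Minimal sketch (Windows-only): serve a message-mode named pipe and echo one
// message per connection. Pipe name is hypothetical.
package main

import (
	"log"

	"github.com/Microsoft/go-winio"
)

func main() {
	l, err := winio.ListenPipe(`\\.\pipe\demo`, &winio.PipeConfig{MessageMode: true})
	if err != nil {
		log.Fatal(err)
	}
	defer l.Close()

	for {
		conn, err := l.Accept()
		if err != nil {
			log.Fatal(err) // ErrPipeListenerClosed once Close() has been called
		}
		go func() {
			defer conn.Close()
			buf := make([]byte, 4096)
			n, err := conn.Read(buf)
			if err != nil {
				return
			}
			_, _ = conn.Write(buf[:n]) // echo the message back
		}()
	}
}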
|
||||
|
||||
func connectPipe(p *win32File) error {
|
||||
c, err := p.prepareIo()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer p.wg.Done()
|
||||
|
||||
err = connectNamedPipe(p.handle, &c.o)
|
||||
_, err = p.asyncIo(c, nil, 0, err)
|
||||
if err != nil && err != cERROR_PIPE_CONNECTED {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *win32PipeListener) Accept() (net.Conn, error) {
|
||||
ch := make(chan acceptResponse)
|
||||
select {
|
||||
case l.acceptCh <- ch:
|
||||
response := <-ch
|
||||
err := response.err
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if l.config.MessageMode {
|
||||
return &win32MessageBytePipe{
|
||||
win32Pipe: win32Pipe{win32File: response.f, path: l.path},
|
||||
}, nil
|
||||
}
|
||||
return &win32Pipe{win32File: response.f, path: l.path}, nil
|
||||
case <-l.doneCh:
|
||||
return nil, ErrPipeListenerClosed
|
||||
}
|
||||
}
|
||||
|
||||
func (l *win32PipeListener) Close() error {
|
||||
select {
|
||||
case l.closeCh <- 1:
|
||||
<-l.doneCh
|
||||
case <-l.doneCh:
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *win32PipeListener) Addr() net.Addr {
|
||||
return pipeAddress(l.path)
|
||||
}
|
20
vendor/github.com/Microsoft/go-winio/pkg/etw/etw.go
generated
vendored
@ -1,20 +0,0 @@
|
||||
// Package etw provides support for TraceLogging-based ETW (Event Tracing
|
||||
// for Windows). TraceLogging is a format of ETW events that are self-describing
|
||||
// (the event contains information on its own schema). This allows them to be
|
||||
// decoded without needing a separate manifest with event information. The
|
||||
// implementation here is based on the information found in
|
||||
// TraceLoggingProvider.h in the Windows SDK, which implements TraceLogging as a
|
||||
// set of C macros.
|
||||
package etw
|
||||
|
||||
//go:generate go run mksyscall_windows.go -output zsyscall_windows.go etw.go
|
||||
|
||||
//sys eventRegister(providerId *windows.GUID, callback uintptr, callbackContext uintptr, providerHandle *providerHandle) (win32err error) = advapi32.EventRegister
|
||||
|
||||
//sys eventUnregister_64(providerHandle providerHandle) (win32err error) = advapi32.EventUnregister
|
||||
//sys eventWriteTransfer_64(providerHandle providerHandle, descriptor *eventDescriptor, activityID *windows.GUID, relatedActivityID *windows.GUID, dataDescriptorCount uint32, dataDescriptors *eventDataDescriptor) (win32err error) = advapi32.EventWriteTransfer
|
||||
//sys eventSetInformation_64(providerHandle providerHandle, class eventInfoClass, information uintptr, length uint32) (win32err error) = advapi32.EventSetInformation
|
||||
|
||||
//sys eventUnregister_32(providerHandle_low uint32, providerHandle_high uint32) (win32err error) = advapi32.EventUnregister
|
||||
//sys eventWriteTransfer_32(providerHandle_low uint32, providerHandle_high uint32, descriptor *eventDescriptor, activityID *windows.GUID, relatedActivityID *windows.GUID, dataDescriptorCount uint32, dataDescriptors *eventDataDescriptor) (win32err error) = advapi32.EventWriteTransfer
|
||||
//sys eventSetInformation_32(providerHandle_low uint32, providerHandle_high uint32, class eventInfoClass, information uintptr, length uint32) (win32err error) = advapi32.EventSetInformation
|
71
vendor/github.com/Microsoft/go-winio/pkg/etw/eventdata.go
generated
vendored
@ -1,71 +0,0 @@
|
||||
package etw
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
// eventData maintains a buffer which builds up the data for an ETW event. It
|
||||
// needs to be paired with EventMetadata which describes the event.
|
||||
type eventData struct {
|
||||
buffer bytes.Buffer
|
||||
}
|
||||
|
||||
// bytes returns the raw binary data containing the event data. The returned
|
||||
// value is not copied from the internal buffer, so it can be mutated by the
|
||||
// eventData object after it is returned.
|
||||
func (ed *eventData) bytes() []byte {
|
||||
return ed.buffer.Bytes()
|
||||
}
|
||||
|
||||
// writeString appends a string, including the null terminator, to the buffer.
|
||||
func (ed *eventData) writeString(data string) {
|
||||
ed.buffer.WriteString(data)
|
||||
ed.buffer.WriteByte(0)
|
||||
}
|
||||
|
||||
// writeInt8 appends an int8 to the buffer.
|
||||
func (ed *eventData) writeInt8(value int8) {
|
||||
ed.buffer.WriteByte(uint8(value))
|
||||
}
|
||||
|
||||
// writeInt16 appends an int16 to the buffer.
|
||||
func (ed *eventData) writeInt16(value int16) {
|
||||
binary.Write(&ed.buffer, binary.LittleEndian, value)
|
||||
}
|
||||
|
||||
// writeInt32 appends an int32 to the buffer.
|
||||
func (ed *eventData) writeInt32(value int32) {
|
||||
binary.Write(&ed.buffer, binary.LittleEndian, value)
|
||||
}
|
||||
|
||||
// writeInt64 appends an int64 to the buffer.
|
||||
func (ed *eventData) writeInt64(value int64) {
|
||||
binary.Write(&ed.buffer, binary.LittleEndian, value)
|
||||
}
|
||||
|
||||
// writeUint8 appends a uint8 to the buffer.
|
||||
func (ed *eventData) writeUint8(value uint8) {
|
||||
ed.buffer.WriteByte(value)
|
||||
}
|
||||
|
||||
// writeUint16 appends a uint16 to the buffer.
|
||||
func (ed *eventData) writeUint16(value uint16) {
|
||||
binary.Write(&ed.buffer, binary.LittleEndian, value)
|
||||
}
|
||||
|
||||
// writeUint32 appends a uint32 to the buffer.
|
||||
func (ed *eventData) writeUint32(value uint32) {
|
||||
binary.Write(&ed.buffer, binary.LittleEndian, value)
|
||||
}
|
||||
|
||||
// writeUint64 appends a uint64 to the buffer.
|
||||
func (ed *eventData) writeUint64(value uint64) {
|
||||
binary.Write(&ed.buffer, binary.LittleEndian, value)
|
||||
}
|
||||
|
||||
// writeFiletime appends a FILETIME to the buffer.
|
||||
func (ed *eventData) writeFiletime(value syscall.Filetime) {
|
||||
binary.Write(&ed.buffer, binary.LittleEndian, value)
|
||||
}
|
29
vendor/github.com/Microsoft/go-winio/pkg/etw/eventdatadescriptor.go
generated
vendored
@ -1,29 +0,0 @@
|
||||
package etw
|
||||
|
||||
import (
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
type eventDataDescriptorType uint8
|
||||
|
||||
const (
|
||||
eventDataDescriptorTypeUserData eventDataDescriptorType = iota
|
||||
eventDataDescriptorTypeEventMetadata
|
||||
eventDataDescriptorTypeProviderMetadata
|
||||
)
|
||||
|
||||
type eventDataDescriptor struct {
|
||||
ptr ptr64
|
||||
size uint32
|
||||
dataType eventDataDescriptorType
|
||||
reserved1 uint8
|
||||
reserved2 uint16
|
||||
}
|
||||
|
||||
func newEventDataDescriptor(dataType eventDataDescriptorType, buffer []byte) eventDataDescriptor {
|
||||
return eventDataDescriptor{
|
||||
ptr: ptr64{ptr: unsafe.Pointer(&buffer[0])},
|
||||
size: uint32(len(buffer)),
|
||||
dataType: dataType,
|
||||
}
|
||||
}
|
84
vendor/github.com/Microsoft/go-winio/pkg/etw/eventdescriptor.go
generated
vendored
@ -1,84 +0,0 @@
|
||||
package etw
|
||||
|
||||
// Channel represents the ETW logging channel that is used. It can be used by
|
||||
// event consumers to give an event special treatment.
|
||||
type Channel uint8
|
||||
|
||||
const (
|
||||
// ChannelTraceLogging is the default channel for TraceLogging events. It is
|
||||
// not required to be used for TraceLogging, but will prevent decoding
|
||||
// issues for these events on older operating systems.
|
||||
ChannelTraceLogging Channel = 11
|
||||
)
|
||||
|
||||
// Level represents the ETW logging level. There are several predefined levels
|
||||
// that are commonly used, but technically anything from 0-255 is allowed.
|
||||
// Lower levels indicate more important events, and 0 indicates an event that
|
||||
// will always be collected.
|
||||
type Level uint8
|
||||
|
||||
// Predefined ETW log levels from winmeta.xml in the Windows SDK.
|
||||
const (
|
||||
LevelAlways Level = iota
|
||||
LevelCritical
|
||||
LevelError
|
||||
LevelWarning
|
||||
LevelInfo
|
||||
LevelVerbose
|
||||
)
|
||||
|
||||
// Opcode represents the operation that the event indicates is being performed.
|
||||
type Opcode uint8
|
||||
|
||||
// Predefined ETW opcodes from winmeta.xml in the Windows SDK.
|
||||
const (
|
||||
// OpcodeInfo indicates an informational event.
|
||||
OpcodeInfo Opcode = iota
|
||||
// OpcodeStart indicates the start of an operation.
|
||||
OpcodeStart
|
||||
// OpcodeStop indicates the end of an operation.
|
||||
OpcodeStop
|
||||
// OpcodeDCStart indicates the start of a provider capture state operation.
|
||||
OpcodeDCStart
|
||||
// OpcodeDCStop indicates the end of a provider capture state operation.
|
||||
OpcodeDCStop
|
||||
)
|
||||
|
||||
// EventDescriptor represents various metadata for an ETW event.
|
||||
type eventDescriptor struct {
|
||||
id uint16
|
||||
version uint8
|
||||
channel Channel
|
||||
level Level
|
||||
opcode Opcode
|
||||
task uint16
|
||||
keyword uint64
|
||||
}
|
||||
|
||||
// NewEventDescriptor returns an EventDescriptor initialized for use with
|
||||
// TraceLogging.
|
||||
func newEventDescriptor() *eventDescriptor {
|
||||
// Standard TraceLogging events default to the TraceLogging channel, and
|
||||
// verbose level.
|
||||
return &eventDescriptor{
|
||||
channel: ChannelTraceLogging,
|
||||
level: LevelVerbose,
|
||||
}
|
||||
}
|
||||
|
||||
// Identity returns the identity of the event. If the identity is not 0, it
|
||||
// should uniquely identify the other event metadata (contained in
|
||||
// EventDescriptor, and field metadata). Only the lower 24 bits of this value
|
||||
// are relevant.
|
||||
func (ed *eventDescriptor) identity() uint32 {
|
||||
return (uint32(ed.version) << 16) | uint32(ed.id)
|
||||
}
|
||||
|
||||
// SetIdentity sets the identity of the event. If the identity is not 0, it
|
||||
// should uniquely identify the other event metadata (contained in
|
||||
// EventDescriptor, and field metadata). Only the lower 24 bits of this value
|
||||
// are relevant.
|
||||
func (ed *eventDescriptor) setIdentity(identity uint32) {
|
||||
ed.id = uint16(identity)
|
||||
ed.version = uint8(identity >> 16)
|
||||
}
|
177
vendor/github.com/Microsoft/go-winio/pkg/etw/eventmetadata.go
generated
vendored
@ -1,177 +0,0 @@
|
||||
package etw
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
)
|
||||
|
||||
// inType indicates the type of data contained in the ETW event.
|
||||
type inType byte
|
||||
|
||||
// Various inType definitions for TraceLogging. These must match the definitions
|
||||
// found in TraceLoggingProvider.h in the Windows SDK.
|
||||
const (
|
||||
inTypeNull inType = iota
|
||||
inTypeUnicodeString
|
||||
inTypeANSIString
|
||||
inTypeInt8
|
||||
inTypeUint8
|
||||
inTypeInt16
|
||||
inTypeUint16
|
||||
inTypeInt32
|
||||
inTypeUint32
|
||||
inTypeInt64
|
||||
inTypeUint64
|
||||
inTypeFloat
|
||||
inTypeDouble
|
||||
inTypeBool32
|
||||
inTypeBinary
|
||||
inTypeGUID
|
||||
inTypePointerUnsupported
|
||||
inTypeFileTime
|
||||
inTypeSystemTime
|
||||
inTypeSID
|
||||
inTypeHexInt32
|
||||
inTypeHexInt64
|
||||
inTypeCountedString
|
||||
inTypeCountedANSIString
|
||||
inTypeStruct
|
||||
inTypeCountedBinary
|
||||
inTypeCountedArray inType = 32
|
||||
inTypeArray inType = 64
|
||||
)
|
||||
|
||||
// outType specifies a hint to the event decoder for how the value should be
|
||||
// formatted.
|
||||
type outType byte
|
||||
|
||||
// Various outType definitions for TraceLogging. These must match the
|
||||
// definitions found in TraceLoggingProvider.h in the Windows SDK.
|
||||
const (
|
||||
// outTypeDefault indicates that the default formatting for the inType will
|
||||
// be used by the event decoder.
|
||||
outTypeDefault outType = iota
|
||||
outTypeNoPrint
|
||||
outTypeString
|
||||
outTypeBoolean
|
||||
outTypeHex
|
||||
outTypePID
|
||||
outTypeTID
|
||||
outTypePort
|
||||
outTypeIPv4
|
||||
outTypeIPv6
|
||||
outTypeSocketAddress
|
||||
outTypeXML
|
||||
outTypeJSON
|
||||
outTypeWin32Error
|
||||
outTypeNTStatus
|
||||
outTypeHResult
|
||||
outTypeFileTime
|
||||
outTypeSigned
|
||||
outTypeUnsigned
|
||||
outTypeUTF8 outType = 35
|
||||
outTypePKCS7WithTypeInfo outType = 36
|
||||
outTypeCodePointer outType = 37
|
||||
outTypeDateTimeUTC outType = 38
|
||||
)
|
||||
|
||||
// eventMetadata maintains a buffer which builds up the metadata for an ETW
|
||||
// event. It needs to be paired with EventData which describes the event.
|
||||
type eventMetadata struct {
|
||||
buffer bytes.Buffer
|
||||
}
|
||||
|
||||
// bytes returns the raw binary data containing the event metadata. Before being
|
||||
// returned, the current size of the buffer is written to the start of the
|
||||
// buffer. The returned value is not copied from the internal buffer, so it can
|
||||
// be mutated by the eventMetadata object after it is returned.
|
||||
func (em *eventMetadata) bytes() []byte {
|
||||
// Finalize the event metadata buffer by filling in the buffer length at the
|
||||
// beginning.
|
||||
binary.LittleEndian.PutUint16(em.buffer.Bytes(), uint16(em.buffer.Len()))
|
||||
return em.buffer.Bytes()
|
||||
}
|
||||
|
||||
// writeEventHeader writes the metadata for the start of an event to the buffer.
|
||||
// This specifies the event name and tags.
|
||||
func (em *eventMetadata) writeEventHeader(name string, tags uint32) {
|
||||
binary.Write(&em.buffer, binary.LittleEndian, uint16(0)) // Length placeholder
|
||||
em.writeTags(tags)
|
||||
em.buffer.WriteString(name)
|
||||
em.buffer.WriteByte(0) // Null terminator for name
|
||||
}
|
||||
|
||||
func (em *eventMetadata) writeFieldInner(name string, inType inType, outType outType, tags uint32, arrSize uint16) {
|
||||
em.buffer.WriteString(name)
|
||||
em.buffer.WriteByte(0) // Null terminator for name
|
||||
|
||||
if outType == outTypeDefault && tags == 0 {
|
||||
em.buffer.WriteByte(byte(inType))
|
||||
} else {
|
||||
em.buffer.WriteByte(byte(inType | 128))
|
||||
if tags == 0 {
|
||||
em.buffer.WriteByte(byte(outType))
|
||||
} else {
|
||||
em.buffer.WriteByte(byte(outType | 128))
|
||||
em.writeTags(tags)
|
||||
}
|
||||
}
|
||||
|
||||
if arrSize != 0 {
|
||||
binary.Write(&em.buffer, binary.LittleEndian, arrSize)
|
||||
}
|
||||
}
|
||||
|
||||
// writeTags writes out the tags value to the event metadata. Tags is a 28-bit
|
||||
// value, interpreted as bit flags, which are only relevant to the event
|
||||
// consumer. The event consumer may choose to attribute special meaning to tags
|
||||
// (e.g. 0x4 could mean the field contains PII). Tags are written as a series of
|
||||
// bytes, each containing 7 bits of tag value, with the high bit set if there is
|
||||
// more tag data in the following byte. This allows for a more compact
|
||||
// representation when not all of the tag bits are needed.
|
||||
func (em *eventMetadata) writeTags(tags uint32) {
|
||||
// Only use the top 28 bits of the tags value.
|
||||
tags &= 0xfffffff
|
||||
|
||||
for {
|
||||
// Tags are written with the most significant bits (e.g. 21-27) first.
|
||||
val := tags >> 21
|
||||
|
||||
if tags&0x1fffff == 0 {
|
||||
// If there is no more data to write after this, write this value
|
||||
// without the high bit set, and return.
|
||||
em.buffer.WriteByte(byte(val & 0x7f))
|
||||
return
|
||||
}
|
||||
|
||||
em.buffer.WriteByte(byte(val | 0x80))
|
||||
|
||||
tags <<= 7
|
||||
}
|
||||
}
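A standalone sketch of the tag encoding that writeTags above describes: 7 bits per byte, most significant groups first, with the high bit marking continuation. It mirrors the loop above outside the package, purely for illustration.

// Illustration of the tag byte layout used by writeTags.
package main

import "fmt"

func encodeTags(tags uint32) []byte {
	tags &= 0xfffffff // only the low 28 bits are meaningful
	var out []byte
	for {
		val := tags >> 21 // next 7-bit group, most significant first
		if tags&0x1fffff == 0 {
			// Nothing left after this group: emit it without the continuation bit.
			return append(out, byte(val&0x7f))
		}
		out = append(out, byte(val|0x80)) // continuation bit set
		tags <<= 7
	}
}

func main() {
	fmt.Printf("% x\n", encodeTags(0x1000000)) // 08 — high-group-only tags stay compact
	fmt.Printf("% x\n", encodeTags(0xfffffff)) // ff ff ff 7f — all 28 bits in use
}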
|
||||
|
||||
// writeField writes the metadata for a simple field to the buffer.
|
||||
func (em *eventMetadata) writeField(name string, inType inType, outType outType, tags uint32) {
|
||||
em.writeFieldInner(name, inType, outType, tags, 0)
|
||||
}
|
||||
|
||||
// writeArray writes the metadata for an array field to the buffer. The number
|
||||
// of elements in the array must be written as a uint16 in the event data,
|
||||
// immediately preceding the event data.
|
||||
func (em *eventMetadata) writeArray(name string, inType inType, outType outType, tags uint32) {
|
||||
em.writeFieldInner(name, inType|inTypeArray, outType, tags, 0)
|
||||
}
|
||||
|
||||
// writeCountedArray writes the metadata for an array field to the buffer. The
|
||||
// size of a counted array is fixed, and the size is written into the metadata
|
||||
// directly.
|
||||
func (em *eventMetadata) writeCountedArray(name string, count uint16, inType inType, outType outType, tags uint32) {
|
||||
em.writeFieldInner(name, inType|inTypeCountedArray, outType, tags, count)
|
||||
}
|
||||
|
||||
// writeStruct writes the metadata for a nested struct to the buffer. The struct
|
||||
// contains the next N fields in the metadata, where N is specified by the
|
||||
// fieldCount argument.
|
||||
func (em *eventMetadata) writeStruct(name string, fieldCount uint8, tags uint32) {
|
||||
em.writeFieldInner(name, inTypeStruct, outType(fieldCount), tags, 0)
|
||||
}
|
73
vendor/github.com/Microsoft/go-winio/pkg/etw/eventopt.go
generated
vendored
@ -1,73 +0,0 @@
|
||||
package etw
|
||||
|
||||
import (
|
||||
"github.com/Microsoft/go-winio/pkg/guid"
|
||||
)
|
||||
|
||||
type eventOptions struct {
|
||||
descriptor *eventDescriptor
|
||||
activityID guid.GUID
|
||||
relatedActivityID guid.GUID
|
||||
tags uint32
|
||||
}
|
||||
|
||||
// EventOpt defines the option function type that can be passed to
|
||||
// Provider.WriteEvent to specify general event options, such as level and
|
||||
// keyword.
|
||||
type EventOpt func(options *eventOptions)
|
||||
|
||||
// WithEventOpts returns the variadic arguments as a single slice.
|
||||
func WithEventOpts(opts ...EventOpt) []EventOpt {
|
||||
return opts
|
||||
}
|
||||
|
||||
// WithLevel specifies the level of the event to be written.
|
||||
func WithLevel(level Level) EventOpt {
|
||||
return func(options *eventOptions) {
|
||||
options.descriptor.level = level
|
||||
}
|
||||
}
|
||||
|
||||
// WithKeyword specifies the keywords of the event to be written. Multiple uses
|
||||
// of this option are OR'd together.
|
||||
func WithKeyword(keyword uint64) EventOpt {
|
||||
return func(options *eventOptions) {
|
||||
options.descriptor.keyword |= keyword
|
||||
}
|
||||
}
|
||||
|
||||
// WithChannel specifies the channel of the event to be written.
|
||||
func WithChannel(channel Channel) EventOpt {
|
||||
return func(options *eventOptions) {
|
||||
options.descriptor.channel = channel
|
||||
}
|
||||
}
|
||||
|
||||
// WithOpcode specifies the opcode of the event to be written.
|
||||
func WithOpcode(opcode Opcode) EventOpt {
|
||||
return func(options *eventOptions) {
|
||||
options.descriptor.opcode = opcode
|
||||
}
|
||||
}
|
||||
|
||||
// WithTags specifies the tags of the event to be written. Tags is a 28-bit
|
||||
// value (top 4 bits are ignored) that is interpreted by the event consumer.
|
||||
func WithTags(newTags uint32) EventOpt {
|
||||
return func(options *eventOptions) {
|
||||
options.tags |= newTags
|
||||
}
|
||||
}
|
||||
|
||||
// WithActivityID specifies the activity ID of the event to be written.
|
||||
func WithActivityID(activityID guid.GUID) EventOpt {
|
||||
return func(options *eventOptions) {
|
||||
options.activityID = activityID
|
||||
}
|
||||
}
|
||||
|
||||
// WithRelatedActivityID specifies the parent activity ID of the event to be written.
|
||||
func WithRelatedActivityID(activityID guid.GUID) EventOpt {
|
||||
return func(options *eventOptions) {
|
||||
options.relatedActivityID = activityID
|
||||
}
|
||||
}
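A hedged sketch of composing the event and field options defined in this vendored etw package; handing the resulting slices to a provider's WriteEvent is outside the lines shown here, so it is only referenced in a comment.

// Sketch: build option slices with the helpers in this package.
package main

import (
	"fmt"

	"github.com/Microsoft/go-winio/pkg/etw"
)

func main() {
	// General event options: level, keyword, opcode.
	opts := etw.WithEventOpts(
		etw.WithLevel(etw.LevelInfo),
		etw.WithKeyword(0x1),
		etw.WithOpcode(etw.OpcodeStart),
	)

	// Field options describing the event payload.
	fields := etw.WithFields(
		etw.StringField("operation", "pull"),
		etw.Uint32Field("attempt", 1),
		etw.BoolField("cached", false),
	)

	// A real provider would hand both slices to its WriteEvent method; here we
	// only show that the options compose into plain slices.
	fmt.Println(len(opts), "event options,", len(fields), "field options")
}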
|
514
vendor/github.com/Microsoft/go-winio/pkg/etw/fieldopt.go
generated
vendored
@ -1,514 +0,0 @@
|
||||
package etw
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"reflect"
|
||||
"syscall"
|
||||
"time"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// FieldOpt defines the option function type that can be passed to
|
||||
// Provider.WriteEvent to add fields to the event.
|
||||
type FieldOpt func(em *eventMetadata, ed *eventData)
|
||||
|
||||
// WithFields returns the variadic arguments as a single slice.
|
||||
func WithFields(opts ...FieldOpt) []FieldOpt {
|
||||
return opts
|
||||
}
|
||||
|
||||
// BoolField adds a single bool field to the event.
|
||||
func BoolField(name string, value bool) FieldOpt {
|
||||
return func(em *eventMetadata, ed *eventData) {
|
||||
em.writeField(name, inTypeUint8, outTypeBoolean, 0)
|
||||
bool8 := uint8(0)
|
||||
if value {
|
||||
bool8 = uint8(1)
|
||||
}
|
||||
ed.writeUint8(bool8)
|
||||
}
|
||||
}
|
||||
|
||||
// BoolArray adds an array of bool to the event.
|
||||
func BoolArray(name string, values []bool) FieldOpt {
|
||||
return func(em *eventMetadata, ed *eventData) {
|
||||
em.writeArray(name, inTypeUint8, outTypeBoolean, 0)
|
||||
ed.writeUint16(uint16(len(values)))
|
||||
for _, v := range values {
|
||||
bool8 := uint8(0)
|
||||
if v {
|
||||
bool8 = uint8(1)
|
||||
}
|
||||
ed.writeUint8(bool8)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// StringField adds a single string field to the event.
|
||||
func StringField(name string, value string) FieldOpt {
|
||||
return func(em *eventMetadata, ed *eventData) {
|
||||
em.writeField(name, inTypeANSIString, outTypeUTF8, 0)
|
||||
ed.writeString(value)
|
||||
}
|
||||
}
|
||||
|
||||
// StringArray adds an array of string to the event.
|
||||
func StringArray(name string, values []string) FieldOpt {
|
||||
return func(em *eventMetadata, ed *eventData) {
|
||||
em.writeArray(name, inTypeANSIString, outTypeUTF8, 0)
|
||||
ed.writeUint16(uint16(len(values)))
|
||||
for _, v := range values {
|
||||
ed.writeString(v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// IntField adds a single int field to the event.
|
||||
func IntField(name string, value int) FieldOpt {
|
||||
switch unsafe.Sizeof(value) {
|
||||
case 4:
|
||||
return Int32Field(name, int32(value))
|
||||
case 8:
|
||||
return Int64Field(name, int64(value))
|
||||
default:
|
||||
panic("Unsupported int size")
|
||||
}
|
||||
}
|
||||
|
||||
// IntArray adds an array of int to the event.
|
||||
func IntArray(name string, values []int) FieldOpt {
|
||||
inType := inTypeNull
|
||||
var writeItem func(*eventData, int)
|
||||
switch unsafe.Sizeof(values[0]) {
|
||||
case 4:
|
||||
inType = inTypeInt32
|
||||
writeItem = func(ed *eventData, item int) { ed.writeInt32(int32(item)) }
|
||||
case 8:
|
||||
inType = inTypeInt64
|
||||
writeItem = func(ed *eventData, item int) { ed.writeInt64(int64(item)) }
|
||||
default:
|
||||
panic("Unsupported int size")
|
||||
}
|
||||
|
||||
return func(em *eventMetadata, ed *eventData) {
|
||||
em.writeArray(name, inType, outTypeDefault, 0)
|
||||
ed.writeUint16(uint16(len(values)))
|
||||
for _, v := range values {
|
||||
writeItem(ed, v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Int8Field adds a single int8 field to the event.
|
||||
func Int8Field(name string, value int8) FieldOpt {
|
||||
return func(em *eventMetadata, ed *eventData) {
|
||||
em.writeField(name, inTypeInt8, outTypeDefault, 0)
|
||||
ed.writeInt8(value)
|
||||
}
|
||||
}
|
||||
|
||||
// Int8Array adds an array of int8 to the event.
|
||||
func Int8Array(name string, values []int8) FieldOpt {
|
||||
return func(em *eventMetadata, ed *eventData) {
|
||||
		em.writeArray(name, inTypeInt8, outTypeDefault, 0)
		ed.writeUint16(uint16(len(values)))
		for _, v := range values {
			ed.writeInt8(v)
		}
	}
}

// Int16Field adds a single int16 field to the event.
func Int16Field(name string, value int16) FieldOpt {
	return func(em *eventMetadata, ed *eventData) {
		em.writeField(name, inTypeInt16, outTypeDefault, 0)
		ed.writeInt16(value)
	}
}

// Int16Array adds an array of int16 to the event.
func Int16Array(name string, values []int16) FieldOpt {
	return func(em *eventMetadata, ed *eventData) {
		em.writeArray(name, inTypeInt16, outTypeDefault, 0)
		ed.writeUint16(uint16(len(values)))
		for _, v := range values {
			ed.writeInt16(v)
		}
	}
}

// Int32Field adds a single int32 field to the event.
func Int32Field(name string, value int32) FieldOpt {
	return func(em *eventMetadata, ed *eventData) {
		em.writeField(name, inTypeInt32, outTypeDefault, 0)
		ed.writeInt32(value)
	}
}

// Int32Array adds an array of int32 to the event.
func Int32Array(name string, values []int32) FieldOpt {
	return func(em *eventMetadata, ed *eventData) {
		em.writeArray(name, inTypeInt32, outTypeDefault, 0)
		ed.writeUint16(uint16(len(values)))
		for _, v := range values {
			ed.writeInt32(v)
		}
	}
}

// Int64Field adds a single int64 field to the event.
func Int64Field(name string, value int64) FieldOpt {
	return func(em *eventMetadata, ed *eventData) {
		em.writeField(name, inTypeInt64, outTypeDefault, 0)
		ed.writeInt64(value)
	}
}

// Int64Array adds an array of int64 to the event.
func Int64Array(name string, values []int64) FieldOpt {
	return func(em *eventMetadata, ed *eventData) {
		em.writeArray(name, inTypeInt64, outTypeDefault, 0)
		ed.writeUint16(uint16(len(values)))
		for _, v := range values {
			ed.writeInt64(v)
		}
	}
}

// UintField adds a single uint field to the event.
func UintField(name string, value uint) FieldOpt {
	switch unsafe.Sizeof(value) {
	case 4:
		return Uint32Field(name, uint32(value))
	case 8:
		return Uint64Field(name, uint64(value))
	default:
		panic("Unsupported uint size")
	}
}

// UintArray adds an array of uint to the event.
func UintArray(name string, values []uint) FieldOpt {
	inType := inTypeNull
	var writeItem func(*eventData, uint)
	switch unsafe.Sizeof(values[0]) {
	case 4:
		inType = inTypeUint32
		writeItem = func(ed *eventData, item uint) { ed.writeUint32(uint32(item)) }
	case 8:
		inType = inTypeUint64
		writeItem = func(ed *eventData, item uint) { ed.writeUint64(uint64(item)) }
	default:
		panic("Unsupported uint size")
	}

	return func(em *eventMetadata, ed *eventData) {
		em.writeArray(name, inType, outTypeDefault, 0)
		ed.writeUint16(uint16(len(values)))
		for _, v := range values {
			writeItem(ed, v)
		}
	}
}

// Uint8Field adds a single uint8 field to the event.
func Uint8Field(name string, value uint8) FieldOpt {
	return func(em *eventMetadata, ed *eventData) {
		em.writeField(name, inTypeUint8, outTypeDefault, 0)
		ed.writeUint8(value)
	}
}

// Uint8Array adds an array of uint8 to the event.
func Uint8Array(name string, values []uint8) FieldOpt {
	return func(em *eventMetadata, ed *eventData) {
		em.writeArray(name, inTypeUint8, outTypeDefault, 0)
		ed.writeUint16(uint16(len(values)))
		for _, v := range values {
			ed.writeUint8(v)
		}
	}
}

// Uint16Field adds a single uint16 field to the event.
func Uint16Field(name string, value uint16) FieldOpt {
	return func(em *eventMetadata, ed *eventData) {
		em.writeField(name, inTypeUint16, outTypeDefault, 0)
		ed.writeUint16(value)
	}
}

// Uint16Array adds an array of uint16 to the event.
func Uint16Array(name string, values []uint16) FieldOpt {
	return func(em *eventMetadata, ed *eventData) {
		em.writeArray(name, inTypeUint16, outTypeDefault, 0)
		ed.writeUint16(uint16(len(values)))
		for _, v := range values {
			ed.writeUint16(v)
		}
	}
}

// Uint32Field adds a single uint32 field to the event.
func Uint32Field(name string, value uint32) FieldOpt {
	return func(em *eventMetadata, ed *eventData) {
		em.writeField(name, inTypeUint32, outTypeDefault, 0)
		ed.writeUint32(value)
	}
}

// Uint32Array adds an array of uint32 to the event.
func Uint32Array(name string, values []uint32) FieldOpt {
	return func(em *eventMetadata, ed *eventData) {
		em.writeArray(name, inTypeUint32, outTypeDefault, 0)
		ed.writeUint16(uint16(len(values)))
		for _, v := range values {
			ed.writeUint32(v)
		}
	}
}

// Uint64Field adds a single uint64 field to the event.
func Uint64Field(name string, value uint64) FieldOpt {
	return func(em *eventMetadata, ed *eventData) {
		em.writeField(name, inTypeUint64, outTypeDefault, 0)
		ed.writeUint64(value)
	}
}

// Uint64Array adds an array of uint64 to the event.
func Uint64Array(name string, values []uint64) FieldOpt {
	return func(em *eventMetadata, ed *eventData) {
		em.writeArray(name, inTypeUint64, outTypeDefault, 0)
		ed.writeUint16(uint16(len(values)))
		for _, v := range values {
			ed.writeUint64(v)
		}
	}
}

// UintptrField adds a single uintptr field to the event.
func UintptrField(name string, value uintptr) FieldOpt {
	inType := inTypeNull
	var writeItem func(*eventData, uintptr)
	switch unsafe.Sizeof(value) {
	case 4:
		inType = inTypeHexInt32
		writeItem = func(ed *eventData, item uintptr) { ed.writeUint32(uint32(item)) }
	case 8:
		inType = inTypeHexInt64
		writeItem = func(ed *eventData, item uintptr) { ed.writeUint64(uint64(item)) }
	default:
		panic("Unsupported uintptr size")
	}

	return func(em *eventMetadata, ed *eventData) {
		em.writeField(name, inType, outTypeDefault, 0)
		writeItem(ed, value)
	}
}

// UintptrArray adds an array of uintptr to the event.
func UintptrArray(name string, values []uintptr) FieldOpt {
	inType := inTypeNull
	var writeItem func(*eventData, uintptr)
	switch unsafe.Sizeof(values[0]) {
	case 4:
		inType = inTypeHexInt32
		writeItem = func(ed *eventData, item uintptr) { ed.writeUint32(uint32(item)) }
	case 8:
		inType = inTypeHexInt64
		writeItem = func(ed *eventData, item uintptr) { ed.writeUint64(uint64(item)) }
	default:
		panic("Unsupported uintptr size")
	}

	return func(em *eventMetadata, ed *eventData) {
		em.writeArray(name, inType, outTypeDefault, 0)
		ed.writeUint16(uint16(len(values)))
		for _, v := range values {
			writeItem(ed, v)
		}
	}
}

// Float32Field adds a single float32 field to the event.
func Float32Field(name string, value float32) FieldOpt {
	return func(em *eventMetadata, ed *eventData) {
		em.writeField(name, inTypeFloat, outTypeDefault, 0)
		ed.writeUint32(math.Float32bits(value))
	}
}

// Float32Array adds an array of float32 to the event.
func Float32Array(name string, values []float32) FieldOpt {
	return func(em *eventMetadata, ed *eventData) {
		em.writeArray(name, inTypeFloat, outTypeDefault, 0)
		ed.writeUint16(uint16(len(values)))
		for _, v := range values {
			ed.writeUint32(math.Float32bits(v))
		}
	}
}

// Float64Field adds a single float64 field to the event.
func Float64Field(name string, value float64) FieldOpt {
	return func(em *eventMetadata, ed *eventData) {
		em.writeField(name, inTypeDouble, outTypeDefault, 0)
		ed.writeUint64(math.Float64bits(value))
	}
}

// Float64Array adds an array of float64 to the event.
func Float64Array(name string, values []float64) FieldOpt {
	return func(em *eventMetadata, ed *eventData) {
		em.writeArray(name, inTypeDouble, outTypeDefault, 0)
		ed.writeUint16(uint16(len(values)))
		for _, v := range values {
			ed.writeUint64(math.Float64bits(v))
		}
	}
}

// Struct adds a nested struct to the event, the FieldOpts in the opts argument
// are used to specify the fields of the struct.
func Struct(name string, opts ...FieldOpt) FieldOpt {
	return func(em *eventMetadata, ed *eventData) {
		em.writeStruct(name, uint8(len(opts)), 0)
		for _, opt := range opts {
			opt(em, ed)
		}
	}
}

// Time adds a time to the event.
func Time(name string, value time.Time) FieldOpt {
	return func(em *eventMetadata, ed *eventData) {
		em.writeField(name, inTypeFileTime, outTypeDateTimeUTC, 0)
		ed.writeFiletime(syscall.NsecToFiletime(value.UTC().UnixNano()))
	}
}

// Currently, we support logging basic builtin types (int, string, etc), slices
// of basic builtin types, error, types derived from the basic types (e.g. "type
// foo int"), and structs (recursively logging their fields). We do not support
// slices of derived types (e.g. "[]foo").
//
// For types that we don't support, the value is formatted via fmt.Sprint, and
// we also log a message that the type is unsupported along with the formatted
// type. The intent of this is to make it easier to see which types are not
// supported in traces, so we can evaluate adding support for more types in the
// future.
func SmartField(name string, v interface{}) FieldOpt {
	switch v := v.(type) {
	case bool:
		return BoolField(name, v)
	case []bool:
		return BoolArray(name, v)
	case string:
		return StringField(name, v)
	case []string:
		return StringArray(name, v)
	case int:
		return IntField(name, v)
	case []int:
		return IntArray(name, v)
	case int8:
		return Int8Field(name, v)
	case []int8:
		return Int8Array(name, v)
	case int16:
		return Int16Field(name, v)
	case []int16:
		return Int16Array(name, v)
	case int32:
		return Int32Field(name, v)
	case []int32:
		return Int32Array(name, v)
	case int64:
		return Int64Field(name, v)
	case []int64:
		return Int64Array(name, v)
	case uint:
		return UintField(name, v)
	case []uint:
		return UintArray(name, v)
	case uint8:
		return Uint8Field(name, v)
	case []uint8:
		return Uint8Array(name, v)
	case uint16:
		return Uint16Field(name, v)
	case []uint16:
		return Uint16Array(name, v)
	case uint32:
		return Uint32Field(name, v)
	case []uint32:
		return Uint32Array(name, v)
	case uint64:
		return Uint64Field(name, v)
	case []uint64:
		return Uint64Array(name, v)
	case uintptr:
		return UintptrField(name, v)
	case []uintptr:
		return UintptrArray(name, v)
	case float32:
		return Float32Field(name, v)
	case []float32:
		return Float32Array(name, v)
	case float64:
		return Float64Field(name, v)
	case []float64:
		return Float64Array(name, v)
	case error:
		return StringField(name, v.Error())
	case time.Time:
		return Time(name, v)
	default:
		switch rv := reflect.ValueOf(v); rv.Kind() {
		case reflect.Bool:
			return SmartField(name, rv.Bool())
		case reflect.Int:
			return SmartField(name, int(rv.Int()))
		case reflect.Int8:
			return SmartField(name, int8(rv.Int()))
		case reflect.Int16:
			return SmartField(name, int16(rv.Int()))
		case reflect.Int32:
			return SmartField(name, int32(rv.Int()))
		case reflect.Int64:
			return SmartField(name, int64(rv.Int()))
		case reflect.Uint:
			return SmartField(name, uint(rv.Uint()))
		case reflect.Uint8:
			return SmartField(name, uint8(rv.Uint()))
		case reflect.Uint16:
			return SmartField(name, uint16(rv.Uint()))
		case reflect.Uint32:
			return SmartField(name, uint32(rv.Uint()))
		case reflect.Uint64:
			return SmartField(name, uint64(rv.Uint()))
		case reflect.Uintptr:
			return SmartField(name, uintptr(rv.Uint()))
		case reflect.Float32:
			return SmartField(name, float32(rv.Float()))
		case reflect.Float64:
			return SmartField(name, float64(rv.Float()))
		case reflect.String:
			return SmartField(name, rv.String())
		case reflect.Struct:
			fields := make([]FieldOpt, 0, rv.NumField())
			for i := 0; i < rv.NumField(); i++ {
				field := rv.Field(i)
				if field.CanInterface() {
					fields = append(fields, SmartField(name, field.Interface()))
				}
			}
			return Struct(name, fields...)
		}
	}

	return StringField(name, fmt.Sprintf("(Unsupported: %T) %v", v, v))
}
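For context on how the FieldOpt constructors above are meant to be combined, here is a minimal usage sketch. It is not part of the commit: the provider name, event name, and field values are made up for illustration, and it only builds on Windows where this vendored etw package applies.

package main

import (
	"log"
	"time"

	"github.com/Microsoft/go-winio/pkg/etw"
)

func main() {
	// Register a provider; its GUID is derived from the name (see provider.go below).
	provider, err := etw.NewProvider("Containerd-Example-Provider", nil)
	if err != nil {
		log.Fatal(err)
	}
	defer provider.Close()

	// SmartField picks the right typed FieldOpt at runtime; unsupported types
	// fall back to fmt.Sprint with an "(Unsupported: %T)" marker.
	_ = provider.WriteEvent(
		"ExampleEvent",
		nil, // no EventOpts: default descriptor (level, keywords)
		[]etw.FieldOpt{
			etw.StringField("operation", "image.pull"),
			etw.Uint64Field("bytes", 1024),
			etw.Time("finishedAt", time.Now()),
			etw.SmartField("durationSeconds", 1.5),
		},
	)
}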
53
vendor/github.com/Microsoft/go-winio/pkg/etw/newprovider.go
generated
vendored
@ -1,53 +0,0 @@
// +build amd64 arm64 386

package etw

import (
	"bytes"
	"encoding/binary"
	"unsafe"

	"github.com/Microsoft/go-winio/pkg/guid"
	"golang.org/x/sys/windows"
)

// NewProviderWithID creates and registers a new ETW provider, allowing the
// provider ID to be manually specified. This is most useful when there is an
// existing provider ID that must be used to conform to existing diagnostic
// infrastructure.
func NewProviderWithID(name string, id guid.GUID, callback EnableCallback) (provider *Provider, err error) {
	providerCallbackOnce.Do(func() {
		globalProviderCallback = windows.NewCallback(providerCallbackAdapter)
	})

	provider = providers.newProvider()
	defer func(provider *Provider) {
		if err != nil {
			providers.removeProvider(provider)
		}
	}(provider)
	provider.ID = id
	provider.callback = callback

	if err := eventRegister((*windows.GUID)(&provider.ID), globalProviderCallback, uintptr(provider.index), &provider.handle); err != nil {
		return nil, err
	}

	metadata := &bytes.Buffer{}
	binary.Write(metadata, binary.LittleEndian, uint16(0)) // Write empty size for buffer (to update later)
	metadata.WriteString(name)
	metadata.WriteByte(0) // Null terminator for name
	binary.LittleEndian.PutUint16(metadata.Bytes(), uint16(metadata.Len())) // Update the size at the beginning of the buffer
	provider.metadata = metadata.Bytes()

	if err := eventSetInformation(
		provider.handle,
		eventInfoClassProviderSetTraits,
		uintptr(unsafe.Pointer(&provider.metadata[0])),
		uint32(len(provider.metadata))); err != nil {

		return nil, err
	}

	return provider, nil
}
12
vendor/github.com/Microsoft/go-winio/pkg/etw/newprovider_unsupported.go
generated
vendored
@ -1,12 +0,0 @@
// +build arm

package etw

import (
	"github.com/Microsoft/go-winio/pkg/guid"
)

// NewProviderWithID returns a nil provider on unsupported platforms.
func NewProviderWithID(name string, id guid.GUID, callback EnableCallback) (provider *Provider, err error) {
	return nil, nil
}
240
vendor/github.com/Microsoft/go-winio/pkg/etw/provider.go
generated
vendored
@ -1,240 +0,0 @@
package etw

import (
	"crypto/sha1"
	"encoding/binary"
	"strings"
	"unicode/utf16"

	"github.com/Microsoft/go-winio/pkg/guid"
	"golang.org/x/sys/windows"
)

// Provider represents an ETW event provider. It is identified by a provider
// name and ID (GUID), which should always have a 1:1 mapping to each other
// (e.g. don't use multiple provider names with the same ID, or vice versa).
type Provider struct {
	ID         guid.GUID
	handle     providerHandle
	metadata   []byte
	callback   EnableCallback
	index      uint
	enabled    bool
	level      Level
	keywordAny uint64
	keywordAll uint64
}

// String returns the `provider`.ID as a string
func (provider *Provider) String() string {
	if provider == nil {
		return "<nil>"
	}

	return provider.ID.String()
}

type providerHandle uint64

// ProviderState informs the provider EnableCallback what action is being
// performed.
type ProviderState uint32

const (
	// ProviderStateDisable indicates the provider is being disabled.
	ProviderStateDisable ProviderState = iota
	// ProviderStateEnable indicates the provider is being enabled.
	ProviderStateEnable
	// ProviderStateCaptureState indicates the provider is having its current
	// state snap-shotted.
	ProviderStateCaptureState
)

type eventInfoClass uint32

const (
	eventInfoClassProviderBinaryTrackInfo eventInfoClass = iota
	eventInfoClassProviderSetReserved1
	eventInfoClassProviderSetTraits
	eventInfoClassProviderUseDescriptorType
)

// EnableCallback is the form of the callback function that receives provider
// enable/disable notifications from ETW.
type EnableCallback func(guid.GUID, ProviderState, Level, uint64, uint64, uintptr)

func providerCallback(sourceID guid.GUID, state ProviderState, level Level, matchAnyKeyword uint64, matchAllKeyword uint64, filterData uintptr, i uintptr) {
	provider := providers.getProvider(uint(i))

	switch state {
	case ProviderStateDisable:
		provider.enabled = false
	case ProviderStateEnable:
		provider.enabled = true
		provider.level = level
		provider.keywordAny = matchAnyKeyword
		provider.keywordAll = matchAllKeyword
	}

	if provider.callback != nil {
		provider.callback(sourceID, state, level, matchAnyKeyword, matchAllKeyword, filterData)
	}
}

// providerCallbackAdapter acts as the first-level callback from the C/ETW side
// for provider notifications. Because Go has trouble with callback arguments of
// different size, it has only pointer-sized arguments, which are then cast to
// the appropriate types when calling providerCallback.
func providerCallbackAdapter(sourceID *guid.GUID, state uintptr, level uintptr, matchAnyKeyword uintptr, matchAllKeyword uintptr, filterData uintptr, i uintptr) uintptr {
	providerCallback(*sourceID, ProviderState(state), Level(level), uint64(matchAnyKeyword), uint64(matchAllKeyword), filterData, i)
	return 0
}

// providerIDFromName generates a provider ID based on the provider name. It
// uses the same algorithm as used by .NET's EventSource class, which is based
// on RFC 4122. More information on the algorithm can be found here:
// https://blogs.msdn.microsoft.com/dcook/2015/09/08/etw-provider-names-and-guids/
//
// The algorithm is roughly the RFC 4122 algorithm for a V5 UUID, but differs in
// the following ways:
// - The input name is first upper-cased, UTF16-encoded, and converted to
//   big-endian.
// - No variant is set on the result UUID.
// - The result UUID is treated as being in little-endian format, rather than
//   big-endian.
func providerIDFromName(name string) guid.GUID {
	buffer := sha1.New()
	namespace := guid.GUID{0x482C2DB2, 0xC390, 0x47C8, [8]byte{0x87, 0xF8, 0x1A, 0x15, 0xBF, 0xC1, 0x30, 0xFB}}
	namespaceBytes := namespace.ToArray()
	buffer.Write(namespaceBytes[:])
	binary.Write(buffer, binary.BigEndian, utf16.Encode([]rune(strings.ToUpper(name))))

	sum := buffer.Sum(nil)
	sum[7] = (sum[7] & 0xf) | 0x50

	a := [16]byte{}
	copy(a[:], sum)
	return guid.FromWindowsArray(a)
}

// NewProvider creates and registers a new ETW provider. The provider ID is
// generated based on the provider name.
func NewProvider(name string, callback EnableCallback) (provider *Provider, err error) {
	return NewProviderWithID(name, providerIDFromName(name), callback)
}

// Close unregisters the provider.
func (provider *Provider) Close() error {
	if provider == nil {
		return nil
	}

	providers.removeProvider(provider)
	return eventUnregister(provider.handle)
}

// IsEnabled calls IsEnabledForLevelAndKeywords with LevelAlways and all
// keywords set.
func (provider *Provider) IsEnabled() bool {
	return provider.IsEnabledForLevelAndKeywords(LevelAlways, ^uint64(0))
}

// IsEnabledForLevel calls IsEnabledForLevelAndKeywords with the specified level
// and all keywords set.
func (provider *Provider) IsEnabledForLevel(level Level) bool {
	return provider.IsEnabledForLevelAndKeywords(level, ^uint64(0))
}

// IsEnabledForLevelAndKeywords allows event producer code to check if there are
// any event sessions that are interested in an event, based on the event level
// and keywords. Although this check happens automatically in the ETW
// infrastructure, it can be useful to check if an event will actually be
// consumed before doing expensive work to build the event data.
func (provider *Provider) IsEnabledForLevelAndKeywords(level Level, keywords uint64) bool {
	if provider == nil {
		return false
	}

	if !provider.enabled {
		return false
	}

	// ETW automatically sets the level to 255 if it is specified as 0, so we
	// don't need to worry about the level=0 (all events) case.
	if level > provider.level {
		return false
	}

	if keywords != 0 && (keywords&provider.keywordAny == 0 || keywords&provider.keywordAll != provider.keywordAll) {
		return false
	}

	return true
}

// WriteEvent writes a single ETW event from the provider. The event is
// constructed based on the EventOpt and FieldOpt values that are passed as
// opts.
func (provider *Provider) WriteEvent(name string, eventOpts []EventOpt, fieldOpts []FieldOpt) error {
	if provider == nil {
		return nil
	}

	options := eventOptions{descriptor: newEventDescriptor()}
	em := &eventMetadata{}
	ed := &eventData{}

	// We need to evaluate the EventOpts first since they might change tags, and
	// we write out the tags before evaluating FieldOpts.
	for _, opt := range eventOpts {
		opt(&options)
	}

	if !provider.IsEnabledForLevelAndKeywords(options.descriptor.level, options.descriptor.keyword) {
		return nil
	}

	em.writeEventHeader(name, options.tags)

	for _, opt := range fieldOpts {
		opt(em, ed)
	}

	// Don't pass a data blob if there is no event data. There will always be
	// event metadata (e.g. for the name) so we don't need to do this check for
	// the metadata.
	dataBlobs := [][]byte{}
	if len(ed.bytes()) > 0 {
		dataBlobs = [][]byte{ed.bytes()}
	}

	return provider.writeEventRaw(options.descriptor, options.activityID, options.relatedActivityID, [][]byte{em.bytes()}, dataBlobs)
}

// writeEventRaw writes a single ETW event from the provider. This function is
// less abstracted than WriteEvent, and presents a fairly direct interface to
// the event writing functionality. It expects a series of event metadata and
// event data blobs to be passed in, which must conform to the TraceLogging
// schema. The functions on EventMetadata and EventData can help with creating
// these blobs. The blobs of each type are effectively concatenated together by
// the ETW infrastructure.
func (provider *Provider) writeEventRaw(
	descriptor *eventDescriptor,
	activityID guid.GUID,
	relatedActivityID guid.GUID,
	metadataBlobs [][]byte,
	dataBlobs [][]byte) error {

	dataDescriptorCount := uint32(1 + len(metadataBlobs) + len(dataBlobs))
	dataDescriptors := make([]eventDataDescriptor, 0, dataDescriptorCount)

	dataDescriptors = append(dataDescriptors, newEventDataDescriptor(eventDataDescriptorTypeProviderMetadata, provider.metadata))
	for _, blob := range metadataBlobs {
		dataDescriptors = append(dataDescriptors, newEventDataDescriptor(eventDataDescriptorTypeEventMetadata, blob))
	}
	for _, blob := range dataBlobs {
		dataDescriptors = append(dataDescriptors, newEventDataDescriptor(eventDataDescriptorTypeUserData, blob))
	}

	return eventWriteTransfer(provider.handle, descriptor, (*windows.GUID)(&activityID), (*windows.GUID)(&relatedActivityID), dataDescriptorCount, &dataDescriptors[0])
}
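A small sketch of the intended calling pattern for the Provider API deleted above: gate expensive field construction on IsEnabledForLevelAndKeywords, then write the event. The package name, function, and keyword constant are invented for illustration; only the etw identifiers come from this vendored package (WithEventOpts and WithLevel are used the same way in etwlogrus/hook.go further down).

package exampletrace

import "github.com/Microsoft/go-winio/pkg/etw"

// keywordTransfer is a hypothetical keyword bit used only for this sketch.
const keywordTransfer uint64 = 0x1

// logTransfer writes a "Transfer" event, but skips building the event data
// entirely when no ETW session is listening at this level/keyword combination.
func logTransfer(p *etw.Provider, bytes uint64) {
	if !p.IsEnabledForLevelAndKeywords(etw.LevelInfo, keywordTransfer) {
		return
	}
	_ = p.WriteEvent(
		"Transfer",
		etw.WithEventOpts(etw.WithLevel(etw.LevelInfo)),
		[]etw.FieldOpt{etw.Uint64Field("bytes", bytes)},
	)
}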
52
vendor/github.com/Microsoft/go-winio/pkg/etw/providerglobal.go
generated
vendored
@ -1,52 +0,0 @@
package etw

import (
	"sync"
)

// Because the provider callback function needs to be able to access the
// provider data when it is invoked by ETW, we need to keep provider data stored
// in a global map based on an index. The index is passed as the callback
// context to ETW.
type providerMap struct {
	m    map[uint]*Provider
	i    uint
	lock sync.Mutex
	once sync.Once
}

var providers = providerMap{
	m: make(map[uint]*Provider),
}

func (p *providerMap) newProvider() *Provider {
	p.lock.Lock()
	defer p.lock.Unlock()

	i := p.i
	p.i++

	provider := &Provider{
		index: i,
	}

	p.m[i] = provider
	return provider
}

func (p *providerMap) removeProvider(provider *Provider) {
	p.lock.Lock()
	defer p.lock.Unlock()

	delete(p.m, provider.index)
}

func (p *providerMap) getProvider(index uint) *Provider {
	p.lock.Lock()
	defer p.lock.Unlock()

	return p.m[index]
}

var providerCallbackOnce sync.Once
var globalProviderCallback uintptr
16
vendor/github.com/Microsoft/go-winio/pkg/etw/ptr64_32.go
generated
vendored
@ -1,16 +0,0 @@
// +build 386 arm

package etw

import (
	"unsafe"
)

// byteptr64 defines a struct containing a pointer. The struct is guaranteed to
// be 64 bits, regardless of the actual size of a pointer on the platform. This
// is intended for use with certain Windows APIs that expect a pointer as a
// ULONGLONG.
type ptr64 struct {
	ptr unsafe.Pointer
	_   uint32
}
15
vendor/github.com/Microsoft/go-winio/pkg/etw/ptr64_64.go
generated
vendored
@ -1,15 +0,0 @@
// +build amd64 arm64

package etw

import (
	"unsafe"
)

// byteptr64 defines a struct containing a pointer. The struct is guaranteed to
// be 64 bits, regardless of the actual size of a pointer on the platform. This
// is intended for use with certain Windows APIs that expect a pointer as a
// ULONGLONG.
type ptr64 struct {
	ptr unsafe.Pointer
}
51
vendor/github.com/Microsoft/go-winio/pkg/etw/wrapper_32.go
generated
vendored
@ -1,51 +0,0 @@
// +build 386 arm

package etw

import (
	"golang.org/x/sys/windows"
)

func low(v providerHandle) uint32 {
	return uint32(v & 0xffffffff)
}

func high(v providerHandle) uint32 {
	return low(v >> 32)
}

func eventUnregister(providerHandle providerHandle) (win32err error) {
	return eventUnregister_32(low(providerHandle), high(providerHandle))
}

func eventWriteTransfer(
	providerHandle providerHandle,
	descriptor *eventDescriptor,
	activityID *windows.GUID,
	relatedActivityID *windows.GUID,
	dataDescriptorCount uint32,
	dataDescriptors *eventDataDescriptor) (win32err error) {

	return eventWriteTransfer_32(
		low(providerHandle),
		high(providerHandle),
		descriptor,
		activityID,
		relatedActivityID,
		dataDescriptorCount,
		dataDescriptors)
}

func eventSetInformation(
	providerHandle providerHandle,
	class eventInfoClass,
	information uintptr,
	length uint32) (win32err error) {

	return eventSetInformation_32(
		low(providerHandle),
		high(providerHandle),
		class,
		information,
		length)
}
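The low/high helpers above exist because, on 386 and arm, a 64-bit ETW registration handle must be passed to the syscall layer as two pointer-sized halves. A standalone illustration of that split (plain Go, no Windows dependency; the handle value is made up):

package main

import "fmt"

func main() {
	handle := uint64(0x1122334455667788)
	low := uint32(handle & 0xffffffff) // low half, passed first
	high := uint32(handle >> 32)       // high half, passed second
	fmt.Printf("low=%#x high=%#x\n", low, high)
	fmt.Println(uint64(high)<<32|uint64(low) == handle) // true: the halves round-trip
}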
41
vendor/github.com/Microsoft/go-winio/pkg/etw/wrapper_64.go
generated
vendored
@ -1,41 +0,0 @@
// +build amd64 arm64

package etw

import (
	"golang.org/x/sys/windows"
)

func eventUnregister(providerHandle providerHandle) (win32err error) {
	return eventUnregister_64(providerHandle)
}

func eventWriteTransfer(
	providerHandle providerHandle,
	descriptor *eventDescriptor,
	activityID *windows.GUID,
	relatedActivityID *windows.GUID,
	dataDescriptorCount uint32,
	dataDescriptors *eventDataDescriptor) (win32err error) {

	return eventWriteTransfer_64(
		providerHandle,
		descriptor,
		activityID,
		relatedActivityID,
		dataDescriptorCount,
		dataDescriptors)
}

func eventSetInformation(
	providerHandle providerHandle,
	class eventInfoClass,
	information uintptr,
	length uint32) (win32err error) {

	return eventSetInformation_64(
		providerHandle,
		class,
		information,
		length)
}
102
vendor/github.com/Microsoft/go-winio/pkg/etw/zsyscall_windows.go
generated
vendored
@ -1,102 +0,0 @@
// Code generated by 'go generate'; DO NOT EDIT.
|
||||
|
||||
package etw
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
"unsafe"
|
||||
|
||||
"golang.org/x/sys/windows"
|
||||
)
|
||||
|
||||
var _ unsafe.Pointer
|
||||
|
||||
// Do the interface allocations only once for common
|
||||
// Errno values.
|
||||
const (
|
||||
errnoERROR_IO_PENDING = 997
|
||||
)
|
||||
|
||||
var (
|
||||
errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING)
|
||||
)
|
||||
|
||||
// errnoErr returns common boxed Errno values, to prevent
|
||||
// allocations at runtime.
|
||||
func errnoErr(e syscall.Errno) error {
|
||||
switch e {
|
||||
case 0:
|
||||
return nil
|
||||
case errnoERROR_IO_PENDING:
|
||||
return errERROR_IO_PENDING
|
||||
}
|
||||
// TODO: add more here, after collecting data on the common
|
||||
// error values see on Windows. (perhaps when running
|
||||
// all.bat?)
|
||||
return e
|
||||
}
|
||||
|
||||
var (
|
||||
modadvapi32 = windows.NewLazySystemDLL("advapi32.dll")
|
||||
|
||||
procEventRegister = modadvapi32.NewProc("EventRegister")
|
||||
procEventUnregister = modadvapi32.NewProc("EventUnregister")
|
||||
procEventWriteTransfer = modadvapi32.NewProc("EventWriteTransfer")
|
||||
procEventSetInformation = modadvapi32.NewProc("EventSetInformation")
|
||||
)
|
||||
|
||||
func eventRegister(providerId *windows.GUID, callback uintptr, callbackContext uintptr, providerHandle *providerHandle) (win32err error) {
|
||||
r0, _, _ := syscall.Syscall6(procEventRegister.Addr(), 4, uintptr(unsafe.Pointer(providerId)), uintptr(callback), uintptr(callbackContext), uintptr(unsafe.Pointer(providerHandle)), 0, 0)
|
||||
if r0 != 0 {
|
||||
win32err = syscall.Errno(r0)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func eventUnregister_64(providerHandle providerHandle) (win32err error) {
|
||||
r0, _, _ := syscall.Syscall(procEventUnregister.Addr(), 1, uintptr(providerHandle), 0, 0)
|
||||
if r0 != 0 {
|
||||
win32err = syscall.Errno(r0)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func eventWriteTransfer_64(providerHandle providerHandle, descriptor *eventDescriptor, activityID *windows.GUID, relatedActivityID *windows.GUID, dataDescriptorCount uint32, dataDescriptors *eventDataDescriptor) (win32err error) {
|
||||
r0, _, _ := syscall.Syscall6(procEventWriteTransfer.Addr(), 6, uintptr(providerHandle), uintptr(unsafe.Pointer(descriptor)), uintptr(unsafe.Pointer(activityID)), uintptr(unsafe.Pointer(relatedActivityID)), uintptr(dataDescriptorCount), uintptr(unsafe.Pointer(dataDescriptors)))
|
||||
if r0 != 0 {
|
||||
win32err = syscall.Errno(r0)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func eventSetInformation_64(providerHandle providerHandle, class eventInfoClass, information uintptr, length uint32) (win32err error) {
|
||||
r0, _, _ := syscall.Syscall6(procEventSetInformation.Addr(), 4, uintptr(providerHandle), uintptr(class), uintptr(information), uintptr(length), 0, 0)
|
||||
if r0 != 0 {
|
||||
win32err = syscall.Errno(r0)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func eventUnregister_32(providerHandle_low uint32, providerHandle_high uint32) (win32err error) {
|
||||
r0, _, _ := syscall.Syscall(procEventUnregister.Addr(), 2, uintptr(providerHandle_low), uintptr(providerHandle_high), 0)
|
||||
if r0 != 0 {
|
||||
win32err = syscall.Errno(r0)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func eventWriteTransfer_32(providerHandle_low uint32, providerHandle_high uint32, descriptor *eventDescriptor, activityID *windows.GUID, relatedActivityID *windows.GUID, dataDescriptorCount uint32, dataDescriptors *eventDataDescriptor) (win32err error) {
|
||||
r0, _, _ := syscall.Syscall9(procEventWriteTransfer.Addr(), 7, uintptr(providerHandle_low), uintptr(providerHandle_high), uintptr(unsafe.Pointer(descriptor)), uintptr(unsafe.Pointer(activityID)), uintptr(unsafe.Pointer(relatedActivityID)), uintptr(dataDescriptorCount), uintptr(unsafe.Pointer(dataDescriptors)), 0, 0)
|
||||
if r0 != 0 {
|
||||
win32err = syscall.Errno(r0)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func eventSetInformation_32(providerHandle_low uint32, providerHandle_high uint32, class eventInfoClass, information uintptr, length uint32) (win32err error) {
|
||||
r0, _, _ := syscall.Syscall6(procEventSetInformation.Addr(), 5, uintptr(providerHandle_low), uintptr(providerHandle_high), uintptr(class), uintptr(information), uintptr(length), 0)
|
||||
if r0 != 0 {
|
||||
win32err = syscall.Errno(r0)
|
||||
}
|
||||
return
|
||||
}
|
105
vendor/github.com/Microsoft/go-winio/pkg/etwlogrus/hook.go
generated
vendored
@ -1,105 +0,0 @@
package etwlogrus

import (
	"sort"

	"github.com/Microsoft/go-winio/pkg/etw"
	"github.com/sirupsen/logrus"
)

// Hook is a Logrus hook which logs received events to ETW.
type Hook struct {
	provider      *etw.Provider
	closeProvider bool
}

// NewHook registers a new ETW provider and returns a hook to log from it. The
// provider will be closed when the hook is closed.
func NewHook(providerName string) (*Hook, error) {
	provider, err := etw.NewProvider(providerName, nil)
	if err != nil {
		return nil, err
	}

	return &Hook{provider, true}, nil
}

// NewHookFromProvider creates a new hook based on an existing ETW provider. The
// provider will not be closed when the hook is closed.
func NewHookFromProvider(provider *etw.Provider) (*Hook, error) {
	return &Hook{provider, false}, nil
}

// Levels returns the set of levels that this hook wants to receive log entries
// for.
func (h *Hook) Levels() []logrus.Level {
	return logrus.AllLevels
}

var logrusToETWLevelMap = map[logrus.Level]etw.Level{
	logrus.PanicLevel: etw.LevelAlways,
	logrus.FatalLevel: etw.LevelCritical,
	logrus.ErrorLevel: etw.LevelError,
	logrus.WarnLevel:  etw.LevelWarning,
	logrus.InfoLevel:  etw.LevelInfo,
	logrus.DebugLevel: etw.LevelVerbose,
	logrus.TraceLevel: etw.LevelVerbose,
}

// Fire receives each Logrus entry as it is logged, and logs it to ETW.
func (h *Hook) Fire(e *logrus.Entry) error {
	// Logrus defines more levels than ETW typically uses, but analysis is
	// easiest when using a consistent set of levels across ETW providers, so we
	// map the Logrus levels to ETW levels.
	level := logrusToETWLevelMap[e.Level]
	if !h.provider.IsEnabledForLevel(level) {
		return nil
	}

	// Sort the fields by name so they are consistent in each instance
	// of an event. Otherwise, the fields don't line up in WPA.
	names := make([]string, 0, len(e.Data))
	hasError := false
	for k := range e.Data {
		if k == logrus.ErrorKey {
			// Always put the error last because it is optional in some events.
			hasError = true
		} else {
			names = append(names, k)
		}
	}
	sort.Strings(names)

	// Reserve extra space for the message and time fields.
	fields := make([]etw.FieldOpt, 0, len(e.Data)+2)
	fields = append(fields, etw.StringField("Message", e.Message))
	fields = append(fields, etw.Time("Time", e.Time))
	for _, k := range names {
		fields = append(fields, etw.SmartField(k, e.Data[k]))
	}
	if hasError {
		fields = append(fields, etw.SmartField(logrus.ErrorKey, e.Data[logrus.ErrorKey]))
	}

	// Firing an ETW event is essentially best effort, as the event write can
	// fail for reasons completely out of the control of the event writer (such
	// as a session listening for the event having no available space in its
	// buffers). Therefore, we don't return the error from WriteEvent, as it is
	// just noise in many cases.
	h.provider.WriteEvent(
		"LogrusEntry",
		etw.WithEventOpts(etw.WithLevel(level)),
		fields)

	return nil
}

// Close cleans up the hook and closes the ETW provider. If the provider was
// registered by etwlogrus, it will be closed as part of `Close`. If the
// provider was passed in, it will not be closed.
func (h *Hook) Close() error {
	if h.closeProvider {
		return h.provider.Close()
	}
	return nil
}
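A minimal sketch of wiring the hook above into logrus on a Windows build; the provider name is illustrative and not taken from the commit.

package main

import (
	"github.com/Microsoft/go-winio/pkg/etwlogrus"
	"github.com/sirupsen/logrus"
)

func main() {
	// NewHook registers a provider with the given name and closes it when the
	// hook is closed; use NewHookFromProvider to keep ownership of the provider.
	hook, err := etwlogrus.NewHook("Containerd-Example-Logs")
	if err != nil {
		logrus.WithError(err).Warn("ETW hook unavailable")
		return
	}
	defer hook.Close()

	logrus.AddHook(hook)
	logrus.WithField("answer", 42).Info("hello from logrus via ETW")
}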
47
vendor/github.com/Microsoft/go-winio/pkg/fs/fs_windows.go
generated
vendored
@ -1,47 +0,0 @@
package fs

import (
	"errors"
	"path/filepath"
	"syscall"
	"unsafe"

	"golang.org/x/sys/windows"
)

var (
	// ErrInvalidPath is returned when the location of a file path doesn't begin with a drive letter.
	ErrInvalidPath = errors.New("the path provided to GetFileSystemType must start with a drive letter")
)

// GetFileSystemType obtains the type of a file system through GetVolumeInformation.
// https://msdn.microsoft.com/en-us/library/windows/desktop/aa364993(v=vs.85).aspx
func GetFileSystemType(path string) (fsType string, hr error) {
	drive := filepath.VolumeName(path)
	if len(drive) != 2 {
		return "", ErrInvalidPath
	}

	var (
		modkernel32              = windows.NewLazySystemDLL("kernel32.dll")
		procGetVolumeInformation = modkernel32.NewProc("GetVolumeInformationW")
		buf                      = make([]uint16, 255)
		size                     = windows.MAX_PATH + 1
	)
	drive += `\`
	n := uintptr(unsafe.Pointer(nil))
	r0, _, _ := syscall.Syscall9(procGetVolumeInformation.Addr(), 8, uintptr(unsafe.Pointer(windows.StringToUTF16Ptr(drive))), n, n, n, n, n, uintptr(unsafe.Pointer(&buf[0])), uintptr(size), 0)
	if int32(r0) < 0 {
		hr = syscall.Errno(win32FromHresult(r0))
	}
	fsType = windows.UTF16ToString(buf)
	return
}

// win32FromHresult is a helper function to get the win32 error code from an HRESULT.
func win32FromHresult(hr uintptr) uintptr {
	if hr&0x1fff0000 == 0x00070000 {
		return hr & 0xffff
	}
	return hr
}
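A usage sketch for GetFileSystemType, deleted above; the path is made up and the call only works on Windows.

package main

import (
	"fmt"

	"github.com/Microsoft/go-winio/pkg/fs"
)

func main() {
	// The path must start with a drive letter, otherwise ErrInvalidPath is returned.
	fsType, err := fs.GetFileSystemType(`C:\ProgramData\containerd`)
	if err != nil {
		fmt.Println("lookup failed:", err)
		return
	}
	fmt.Println("filesystem:", fsType) // e.g. "NTFS" or "ReFS"
}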
235
vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go
generated
vendored
@ -1,235 +0,0 @@
// Package guid provides a GUID type. The backing structure for a GUID is
|
||||
// identical to that used by the golang.org/x/sys/windows GUID type.
|
||||
// There are two main binary encodings used for a GUID, the big-endian encoding,
|
||||
// and the Windows (mixed-endian) encoding. See here for details:
|
||||
// https://en.wikipedia.org/wiki/Universally_unique_identifier#Encoding
|
||||
package guid
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"crypto/sha1"
|
||||
"encoding"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"strconv"
|
||||
|
||||
"golang.org/x/sys/windows"
|
||||
)
|
||||
|
||||
// Variant specifies which GUID variant (or "type") of the GUID. It determines
|
||||
// how the entirety of the rest of the GUID is interpreted.
|
||||
type Variant uint8
|
||||
|
||||
// The variants specified by RFC 4122.
|
||||
const (
|
||||
// VariantUnknown specifies a GUID variant which does not conform to one of
|
||||
// the variant encodings specified in RFC 4122.
|
||||
VariantUnknown Variant = iota
|
||||
VariantNCS
|
||||
VariantRFC4122
|
||||
VariantMicrosoft
|
||||
VariantFuture
|
||||
)
|
||||
|
||||
// Version specifies how the bits in the GUID were generated. For instance, a
|
||||
// version 4 GUID is randomly generated, and a version 5 is generated from the
|
||||
// hash of an input string.
|
||||
type Version uint8
|
||||
|
||||
var _ = (encoding.TextMarshaler)(GUID{})
|
||||
var _ = (encoding.TextUnmarshaler)(&GUID{})
|
||||
|
||||
// GUID represents a GUID/UUID. It has the same structure as
|
||||
// golang.org/x/sys/windows.GUID so that it can be used with functions expecting
|
||||
// that type. It is defined as its own type so that stringification and
|
||||
// marshaling can be supported. The representation matches that used by native
|
||||
// Windows code.
|
||||
type GUID windows.GUID
|
||||
|
||||
// NewV4 returns a new version 4 (pseudorandom) GUID, as defined by RFC 4122.
|
||||
func NewV4() (GUID, error) {
|
||||
var b [16]byte
|
||||
if _, err := rand.Read(b[:]); err != nil {
|
||||
return GUID{}, err
|
||||
}
|
||||
|
||||
g := FromArray(b)
|
||||
g.setVersion(4) // Version 4 means randomly generated.
|
||||
g.setVariant(VariantRFC4122)
|
||||
|
||||
return g, nil
|
||||
}
|
||||
|
||||
// NewV5 returns a new version 5 (generated from a string via SHA-1 hashing)
|
||||
// GUID, as defined by RFC 4122. The RFC is unclear on the encoding of the name,
|
||||
// and the sample code treats it as a series of bytes, so we do the same here.
|
||||
//
|
||||
// Some implementations, such as those found on Windows, treat the name as a
|
||||
// big-endian UTF16 stream of bytes. If that is desired, the string can be
|
||||
// encoded as such before being passed to this function.
|
||||
func NewV5(namespace GUID, name []byte) (GUID, error) {
|
||||
b := sha1.New()
|
||||
namespaceBytes := namespace.ToArray()
|
||||
b.Write(namespaceBytes[:])
|
||||
b.Write(name)
|
||||
|
||||
a := [16]byte{}
|
||||
copy(a[:], b.Sum(nil))
|
||||
|
||||
g := FromArray(a)
|
||||
g.setVersion(5) // Version 5 means generated from a string.
|
||||
g.setVariant(VariantRFC4122)
|
||||
|
||||
return g, nil
|
||||
}
|
||||
|
||||
func fromArray(b [16]byte, order binary.ByteOrder) GUID {
|
||||
var g GUID
|
||||
g.Data1 = order.Uint32(b[0:4])
|
||||
g.Data2 = order.Uint16(b[4:6])
|
||||
g.Data3 = order.Uint16(b[6:8])
|
||||
copy(g.Data4[:], b[8:16])
|
||||
return g
|
||||
}
|
||||
|
||||
func (g GUID) toArray(order binary.ByteOrder) [16]byte {
|
||||
b := [16]byte{}
|
||||
order.PutUint32(b[0:4], g.Data1)
|
||||
order.PutUint16(b[4:6], g.Data2)
|
||||
order.PutUint16(b[6:8], g.Data3)
|
||||
copy(b[8:16], g.Data4[:])
|
||||
return b
|
||||
}
|
||||
|
||||
// FromArray constructs a GUID from a big-endian encoding array of 16 bytes.
|
||||
func FromArray(b [16]byte) GUID {
|
||||
return fromArray(b, binary.BigEndian)
|
||||
}
|
||||
|
||||
// ToArray returns an array of 16 bytes representing the GUID in big-endian
|
||||
// encoding.
|
||||
func (g GUID) ToArray() [16]byte {
|
||||
return g.toArray(binary.BigEndian)
|
||||
}
|
||||
|
||||
// FromWindowsArray constructs a GUID from a Windows encoding array of bytes.
|
||||
func FromWindowsArray(b [16]byte) GUID {
|
||||
return fromArray(b, binary.LittleEndian)
|
||||
}
|
||||
|
||||
// ToWindowsArray returns an array of 16 bytes representing the GUID in Windows
|
||||
// encoding.
|
||||
func (g GUID) ToWindowsArray() [16]byte {
|
||||
return g.toArray(binary.LittleEndian)
|
||||
}
|
||||
|
||||
func (g GUID) String() string {
|
||||
return fmt.Sprintf(
|
||||
"%08x-%04x-%04x-%04x-%012x",
|
||||
g.Data1,
|
||||
g.Data2,
|
||||
g.Data3,
|
||||
g.Data4[:2],
|
||||
g.Data4[2:])
|
||||
}
|
||||
|
||||
// FromString parses a string containing a GUID and returns the GUID. The only
|
||||
// format currently supported is the `xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx`
|
||||
// format.
|
||||
func FromString(s string) (GUID, error) {
|
||||
if len(s) != 36 {
|
||||
return GUID{}, fmt.Errorf("invalid GUID %q", s)
|
||||
}
|
||||
if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' {
|
||||
return GUID{}, fmt.Errorf("invalid GUID %q", s)
|
||||
}
|
||||
|
||||
var g GUID
|
||||
|
||||
data1, err := strconv.ParseUint(s[0:8], 16, 32)
|
||||
if err != nil {
|
||||
return GUID{}, fmt.Errorf("invalid GUID %q", s)
|
||||
}
|
||||
g.Data1 = uint32(data1)
|
||||
|
||||
data2, err := strconv.ParseUint(s[9:13], 16, 16)
|
||||
if err != nil {
|
||||
return GUID{}, fmt.Errorf("invalid GUID %q", s)
|
||||
}
|
||||
g.Data2 = uint16(data2)
|
||||
|
||||
data3, err := strconv.ParseUint(s[14:18], 16, 16)
|
||||
if err != nil {
|
||||
return GUID{}, fmt.Errorf("invalid GUID %q", s)
|
||||
}
|
||||
g.Data3 = uint16(data3)
|
||||
|
||||
for i, x := range []int{19, 21, 24, 26, 28, 30, 32, 34} {
|
||||
v, err := strconv.ParseUint(s[x:x+2], 16, 8)
|
||||
if err != nil {
|
||||
return GUID{}, fmt.Errorf("invalid GUID %q", s)
|
||||
}
|
||||
g.Data4[i] = uint8(v)
|
||||
}
|
||||
|
||||
return g, nil
|
||||
}
|
||||
|
||||
func (g *GUID) setVariant(v Variant) {
|
||||
d := g.Data4[0]
|
||||
switch v {
|
||||
case VariantNCS:
|
||||
d = (d & 0x7f)
|
||||
case VariantRFC4122:
|
||||
d = (d & 0x3f) | 0x80
|
||||
case VariantMicrosoft:
|
||||
d = (d & 0x1f) | 0xc0
|
||||
case VariantFuture:
|
||||
d = (d & 0x0f) | 0xe0
|
||||
case VariantUnknown:
|
||||
fallthrough
|
||||
default:
|
||||
panic(fmt.Sprintf("invalid variant: %d", v))
|
||||
}
|
||||
g.Data4[0] = d
|
||||
}
|
||||
|
||||
// Variant returns the GUID variant, as defined in RFC 4122.
|
||||
func (g GUID) Variant() Variant {
|
||||
b := g.Data4[0]
|
||||
if b&0x80 == 0 {
|
||||
return VariantNCS
|
||||
} else if b&0xc0 == 0x80 {
|
||||
return VariantRFC4122
|
||||
} else if b&0xe0 == 0xc0 {
|
||||
return VariantMicrosoft
|
||||
} else if b&0xe0 == 0xe0 {
|
||||
return VariantFuture
|
||||
}
|
||||
return VariantUnknown
|
||||
}
|
||||
|
||||
func (g *GUID) setVersion(v Version) {
|
||||
g.Data3 = (g.Data3 & 0x0fff) | (uint16(v) << 12)
|
||||
}
|
||||
|
||||
// Version returns the GUID version, as defined in RFC 4122.
|
||||
func (g GUID) Version() Version {
|
||||
return Version((g.Data3 & 0xF000) >> 12)
|
||||
}
|
||||
|
||||
// MarshalText returns the textual representation of the GUID.
|
||||
func (g GUID) MarshalText() ([]byte, error) {
|
||||
return []byte(g.String()), nil
|
||||
}
|
||||
|
||||
// UnmarshalText takes the textual representation of a GUID, and unmarhals it
|
||||
// into this GUID.
|
||||
func (g *GUID) UnmarshalText(text []byte) error {
|
||||
g2, err := FromString(string(text))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*g = g2
|
||||
return nil
|
||||
}
|
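A short sketch of the guid package API removed above, showing string parsing and a name-based (version 5) GUID on a Windows build; the namespace value and name are illustrative only.

package main

import (
	"fmt"

	"github.com/Microsoft/go-winio/pkg/guid"
)

func main() {
	ns, err := guid.FromString("482c2db2-c390-47c8-87f8-1a15bfc130fb")
	if err != nil {
		panic(err)
	}
	fmt.Println(ns, ns.Version(), ns.Variant())

	// Derive a stable GUID from a namespace GUID and a name (RFC 4122 v5).
	derived, err := guid.NewV5(ns, []byte("containerd"))
	if err != nil {
		panic(err)
	}
	fmt.Println(derived)
}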
159
vendor/github.com/Microsoft/go-winio/pkg/security/grantvmgroupaccess.go
generated
vendored
@ -1,159 +0,0 @@
package security
|
||||
|
||||
import (
|
||||
"os"
|
||||
"syscall"
|
||||
"unsafe"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
type (
|
||||
accessMask uint32
|
||||
accessMode uint32
|
||||
desiredAccess uint32
|
||||
inheritMode uint32
|
||||
objectType uint32
|
||||
shareMode uint32
|
||||
securityInformation uint32
|
||||
trusteeForm uint32
|
||||
trusteeType uint32
|
||||
|
||||
explicitAccess struct {
|
||||
accessPermissions accessMask
|
||||
accessMode accessMode
|
||||
inheritance inheritMode
|
||||
trustee trustee
|
||||
}
|
||||
|
||||
trustee struct {
|
||||
multipleTrustee *trustee
|
||||
multipleTrusteeOperation int32
|
||||
trusteeForm trusteeForm
|
||||
trusteeType trusteeType
|
||||
name uintptr
|
||||
}
|
||||
)
|
||||
|
||||
const (
|
||||
accessMaskDesiredPermission accessMask = 1 << 31 // GENERIC_READ
|
||||
|
||||
accessModeGrant accessMode = 1
|
||||
|
||||
desiredAccessReadControl desiredAccess = 0x20000
|
||||
desiredAccessWriteDac desiredAccess = 0x40000
|
||||
|
||||
gvmga = "GrantVmGroupAccess:"
|
||||
|
||||
inheritModeNoInheritance inheritMode = 0x0
|
||||
inheritModeSubContainersAndObjectsInherit inheritMode = 0x3
|
||||
|
||||
objectTypeFileObject objectType = 0x1
|
||||
|
||||
securityInformationDACL securityInformation = 0x4
|
||||
|
||||
shareModeRead shareMode = 0x1
|
||||
shareModeWrite shareMode = 0x2
|
||||
|
||||
sidVmGroup = "S-1-5-83-0"
|
||||
|
||||
trusteeFormIsSid trusteeForm = 0
|
||||
|
||||
trusteeTypeWellKnownGroup trusteeType = 5
|
||||
)
|
||||
|
||||
// GrantVMGroupAccess sets the DACL for a specified file or directory to
|
||||
// include Grant ACE entries for the VM Group SID. This is a golang re-
|
||||
// implementation of the same function in vmcompute, just not exported in
|
||||
// RS5. Which kind of sucks. Sucks a lot :/
|
||||
func GrantVmGroupAccess(name string) error {
|
||||
// Stat (to determine if `name` is a directory).
|
||||
s, err := os.Stat(name)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "%s os.Stat %s", gvmga, name)
|
||||
}
|
||||
|
||||
// Get a handle to the file/directory. Must defer Close on success.
|
||||
fd, err := createFile(name, s.IsDir())
|
||||
if err != nil {
|
||||
return err // Already wrapped
|
||||
}
|
||||
defer syscall.CloseHandle(fd)
|
||||
|
||||
// Get the current DACL and Security Descriptor. Must defer LocalFree on success.
|
||||
ot := objectTypeFileObject
|
||||
si := securityInformationDACL
|
||||
sd := uintptr(0)
|
||||
origDACL := uintptr(0)
|
||||
if err := getSecurityInfo(fd, uint32(ot), uint32(si), nil, nil, &origDACL, nil, &sd); err != nil {
|
||||
return errors.Wrapf(err, "%s GetSecurityInfo %s", gvmga, name)
|
||||
}
|
||||
defer syscall.LocalFree((syscall.Handle)(unsafe.Pointer(sd)))
|
||||
|
||||
// Generate a new DACL which is the current DACL with the required ACEs added.
|
||||
// Must defer LocalFree on success.
|
||||
newDACL, err := generateDACLWithAcesAdded(name, s.IsDir(), origDACL)
|
||||
if err != nil {
|
||||
return err // Already wrapped
|
||||
}
|
||||
defer syscall.LocalFree((syscall.Handle)(unsafe.Pointer(newDACL)))
|
||||
|
||||
// And finally use SetSecurityInfo to apply the updated DACL.
|
||||
if err := setSecurityInfo(fd, uint32(ot), uint32(si), uintptr(0), uintptr(0), newDACL, uintptr(0)); err != nil {
|
||||
return errors.Wrapf(err, "%s SetSecurityInfo %s", gvmga, name)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// createFile is a helper function to call [Nt]CreateFile to get a handle to
|
||||
// the file or directory.
|
||||
func createFile(name string, isDir bool) (syscall.Handle, error) {
|
||||
namep := syscall.StringToUTF16(name)
|
||||
da := uint32(desiredAccessReadControl | desiredAccessWriteDac)
|
||||
sm := uint32(shareModeRead | shareModeWrite)
|
||||
fa := uint32(syscall.FILE_ATTRIBUTE_NORMAL)
|
||||
if isDir {
|
||||
fa = uint32(fa | syscall.FILE_FLAG_BACKUP_SEMANTICS)
|
||||
}
|
||||
fd, err := syscall.CreateFile(&namep[0], da, sm, nil, syscall.OPEN_EXISTING, fa, 0)
|
||||
if err != nil {
|
||||
return 0, errors.Wrapf(err, "%s syscall.CreateFile %s", gvmga, name)
|
||||
}
|
||||
return fd, nil
|
||||
}
|
||||
|
||||
// generateDACLWithAcesAdded generates a new DACL with the two needed ACEs added.
|
||||
// The caller is responsible for LocalFree of the returned DACL on success.
|
||||
func generateDACLWithAcesAdded(name string, isDir bool, origDACL uintptr) (uintptr, error) {
|
||||
// Generate pointers to the SIDs based on the string SIDs
|
||||
sid, err := syscall.StringToSid(sidVmGroup)
|
||||
if err != nil {
|
||||
return 0, errors.Wrapf(err, "%s syscall.StringToSid %s %s", gvmga, name, sidVmGroup)
|
||||
}
|
||||
|
||||
inheritance := inheritModeNoInheritance
|
||||
if isDir {
|
||||
inheritance = inheritModeSubContainersAndObjectsInherit
|
||||
}
|
||||
|
||||
eaArray := []explicitAccess{
|
||||
explicitAccess{
|
||||
accessPermissions: accessMaskDesiredPermission,
|
||||
accessMode: accessModeGrant,
|
||||
inheritance: inheritance,
|
||||
trustee: trustee{
|
||||
trusteeForm: trusteeFormIsSid,
|
||||
trusteeType: trusteeTypeWellKnownGroup,
|
||||
name: uintptr(unsafe.Pointer(sid)),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
modifiedDACL := uintptr(0)
|
||||
if err := setEntriesInAcl(uintptr(uint32(1)), uintptr(unsafe.Pointer(&eaArray[0])), origDACL, &modifiedDACL); err != nil {
|
||||
return 0, errors.Wrapf(err, "%s SetEntriesInAcl %s", gvmga, name)
|
||||
}
|
||||
|
||||
return modifiedDACL, nil
|
||||
}
|
7
vendor/github.com/Microsoft/go-winio/pkg/security/syscall_windows.go
generated
vendored
@ -1,7 +0,0 @@
package security

//go:generate go run mksyscall_windows.go -output zsyscall_windows.go syscall_windows.go

//sys getSecurityInfo(handle syscall.Handle, objectType uint32, si uint32, ppsidOwner **uintptr, ppsidGroup **uintptr, ppDacl *uintptr, ppSacl *uintptr, ppSecurityDescriptor *uintptr) (err error) [failretval!=0] = advapi32.GetSecurityInfo
//sys setSecurityInfo(handle syscall.Handle, objectType uint32, si uint32, psidOwner uintptr, psidGroup uintptr, pDacl uintptr, pSacl uintptr) (err error) [failretval!=0] = advapi32.SetSecurityInfo
//sys setEntriesInAcl(count uintptr, pListOfEEs uintptr, oldAcl uintptr, newAcl *uintptr) (err error) [failretval!=0] = advapi32.SetEntriesInAclW
81
vendor/github.com/Microsoft/go-winio/pkg/security/zsyscall_windows.go
generated
vendored
@ -1,81 +0,0 @@
// Code generated mksyscall_windows.exe DO NOT EDIT
|
||||
|
||||
package security
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
"unsafe"
|
||||
|
||||
"golang.org/x/sys/windows"
|
||||
)
|
||||
|
||||
var _ unsafe.Pointer
|
||||
|
||||
// Do the interface allocations only once for common
|
||||
// Errno values.
|
||||
const (
|
||||
errnoERROR_IO_PENDING = 997
|
||||
)
|
||||
|
||||
var (
|
||||
errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING)
|
||||
)
|
||||
|
||||
// errnoErr returns common boxed Errno values, to prevent
|
||||
// allocations at runtime.
|
||||
func errnoErr(e syscall.Errno) error {
|
||||
switch e {
|
||||
case 0:
|
||||
return nil
|
||||
case errnoERROR_IO_PENDING:
|
||||
return errERROR_IO_PENDING
|
||||
}
|
||||
// TODO: add more here, after collecting data on the common
|
||||
// error values see on Windows. (perhaps when running
|
||||
// all.bat?)
|
||||
return e
|
||||
}
|
||||
|
||||
var (
|
||||
modadvapi32 = windows.NewLazySystemDLL("advapi32.dll")
|
||||
|
||||
procGetSecurityInfo = modadvapi32.NewProc("GetSecurityInfo")
|
||||
procSetSecurityInfo = modadvapi32.NewProc("SetSecurityInfo")
|
||||
procSetEntriesInAclW = modadvapi32.NewProc("SetEntriesInAclW")
|
||||
)
|
||||
|
||||
func getSecurityInfo(handle syscall.Handle, objectType uint32, si uint32, ppsidOwner **uintptr, ppsidGroup **uintptr, ppDacl *uintptr, ppSacl *uintptr, ppSecurityDescriptor *uintptr) (err error) {
|
||||
r1, _, e1 := syscall.Syscall9(procGetSecurityInfo.Addr(), 8, uintptr(handle), uintptr(objectType), uintptr(si), uintptr(unsafe.Pointer(ppsidOwner)), uintptr(unsafe.Pointer(ppsidGroup)), uintptr(unsafe.Pointer(ppDacl)), uintptr(unsafe.Pointer(ppSacl)), uintptr(unsafe.Pointer(ppSecurityDescriptor)), 0)
|
||||
if r1 != 0 {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func setSecurityInfo(handle syscall.Handle, objectType uint32, si uint32, psidOwner uintptr, psidGroup uintptr, pDacl uintptr, pSacl uintptr) (err error) {
|
||||
r1, _, e1 := syscall.Syscall9(procSetSecurityInfo.Addr(), 7, uintptr(handle), uintptr(objectType), uintptr(si), uintptr(psidOwner), uintptr(psidGroup), uintptr(pDacl), uintptr(pSacl), 0, 0)
|
||||
if r1 != 0 {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func setEntriesInAcl(count uintptr, pListOfEEs uintptr, oldAcl uintptr, newAcl *uintptr) (err error) {
|
||||
r1, _, e1 := syscall.Syscall6(procSetEntriesInAclW.Addr(), 4, uintptr(count), uintptr(pListOfEEs), uintptr(oldAcl), uintptr(unsafe.Pointer(newAcl)), 0, 0)
|
||||
if r1 != 0 {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
202
vendor/github.com/Microsoft/go-winio/privilege.go
generated
vendored
@ -1,202 +0,0 @@
// +build windows
|
||||
|
||||
package winio
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"runtime"
|
||||
"sync"
|
||||
"syscall"
|
||||
"unicode/utf16"
|
||||
|
||||
"golang.org/x/sys/windows"
|
||||
)
|
||||
|
||||
//sys adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) [true] = advapi32.AdjustTokenPrivileges
|
||||
//sys impersonateSelf(level uint32) (err error) = advapi32.ImpersonateSelf
|
||||
//sys revertToSelf() (err error) = advapi32.RevertToSelf
|
||||
//sys openThreadToken(thread syscall.Handle, accessMask uint32, openAsSelf bool, token *windows.Token) (err error) = advapi32.OpenThreadToken
|
||||
//sys getCurrentThread() (h syscall.Handle) = GetCurrentThread
|
||||
//sys lookupPrivilegeValue(systemName string, name string, luid *uint64) (err error) = advapi32.LookupPrivilegeValueW
|
||||
//sys lookupPrivilegeName(systemName string, luid *uint64, buffer *uint16, size *uint32) (err error) = advapi32.LookupPrivilegeNameW
|
||||
//sys lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) = advapi32.LookupPrivilegeDisplayNameW
|
||||
|
||||
const (
|
||||
SE_PRIVILEGE_ENABLED = 2
|
||||
|
||||
ERROR_NOT_ALL_ASSIGNED syscall.Errno = 1300
|
||||
|
||||
SeBackupPrivilege = "SeBackupPrivilege"
|
||||
SeRestorePrivilege = "SeRestorePrivilege"
|
||||
)
|
||||
|
||||
const (
|
||||
securityAnonymous = iota
|
||||
securityIdentification
|
||||
securityImpersonation
|
||||
securityDelegation
|
||||
)
|
||||
|
||||
var (
|
||||
privNames = make(map[string]uint64)
|
||||
privNameMutex sync.Mutex
|
||||
)
|
||||
|
||||
// PrivilegeError represents an error enabling privileges.
|
||||
type PrivilegeError struct {
|
||||
privileges []uint64
|
||||
}
|
||||
|
||||
func (e *PrivilegeError) Error() string {
|
||||
s := ""
|
||||
if len(e.privileges) > 1 {
|
||||
s = "Could not enable privileges "
|
||||
} else {
|
||||
s = "Could not enable privilege "
|
||||
}
|
||||
for i, p := range e.privileges {
|
||||
if i != 0 {
|
||||
s += ", "
|
||||
}
|
||||
s += `"`
|
||||
s += getPrivilegeName(p)
|
||||
s += `"`
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// RunWithPrivilege enables a single privilege for a function call.
|
||||
func RunWithPrivilege(name string, fn func() error) error {
|
||||
return RunWithPrivileges([]string{name}, fn)
|
||||
}
|
||||
|
||||
// RunWithPrivileges enables privileges for a function call.
|
||||
func RunWithPrivileges(names []string, fn func() error) error {
|
||||
privileges, err := mapPrivileges(names)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
runtime.LockOSThread()
|
||||
defer runtime.UnlockOSThread()
|
||||
token, err := newThreadToken()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer releaseThreadToken(token)
|
||||
err = adjustPrivileges(token, privileges, SE_PRIVILEGE_ENABLED)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return fn()
|
||||
}
|
||||
|
||||
func mapPrivileges(names []string) ([]uint64, error) {
|
||||
var privileges []uint64
|
||||
privNameMutex.Lock()
|
||||
defer privNameMutex.Unlock()
|
||||
for _, name := range names {
|
||||
p, ok := privNames[name]
|
||||
if !ok {
|
||||
err := lookupPrivilegeValue("", name, &p)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
privNames[name] = p
|
||||
}
|
||||
privileges = append(privileges, p)
|
||||
}
|
||||
return privileges, nil
|
||||
}
|
||||
|
||||
// EnableProcessPrivileges enables privileges globally for the process.
|
||||
func EnableProcessPrivileges(names []string) error {
|
||||
return enableDisableProcessPrivilege(names, SE_PRIVILEGE_ENABLED)
|
||||
}
|
||||
|
||||
// DisableProcessPrivileges disables privileges globally for the process.
|
||||
func DisableProcessPrivileges(names []string) error {
|
||||
return enableDisableProcessPrivilege(names, 0)
|
||||
}
|
||||
|
||||
func enableDisableProcessPrivilege(names []string, action uint32) error {
|
||||
privileges, err := mapPrivileges(names)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
p, _ := windows.GetCurrentProcess()
|
||||
var token windows.Token
|
||||
err = windows.OpenProcessToken(p, windows.TOKEN_ADJUST_PRIVILEGES|windows.TOKEN_QUERY, &token)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
defer token.Close()
|
||||
return adjustPrivileges(token, privileges, action)
|
||||
}
|
||||
|
||||
func adjustPrivileges(token windows.Token, privileges []uint64, action uint32) error {
|
||||
var b bytes.Buffer
|
||||
binary.Write(&b, binary.LittleEndian, uint32(len(privileges)))
|
||||
for _, p := range privileges {
|
||||
binary.Write(&b, binary.LittleEndian, p)
|
||||
binary.Write(&b, binary.LittleEndian, action)
|
||||
}
|
||||
prevState := make([]byte, b.Len())
|
||||
reqSize := uint32(0)
|
||||
success, err := adjustTokenPrivileges(token, false, &b.Bytes()[0], uint32(len(prevState)), &prevState[0], &reqSize)
|
||||
if !success {
|
||||
return err
|
||||
}
|
||||
if err == ERROR_NOT_ALL_ASSIGNED {
|
||||
return &PrivilegeError{privileges}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func getPrivilegeName(luid uint64) string {
|
||||
var nameBuffer [256]uint16
|
||||
bufSize := uint32(len(nameBuffer))
|
||||
err := lookupPrivilegeName("", &luid, &nameBuffer[0], &bufSize)
|
||||
if err != nil {
|
||||
return fmt.Sprintf("<unknown privilege %d>", luid)
|
||||
}
|
||||
|
||||
var displayNameBuffer [256]uint16
|
||||
displayBufSize := uint32(len(displayNameBuffer))
|
||||
var langID uint32
|
||||
err = lookupPrivilegeDisplayName("", &nameBuffer[0], &displayNameBuffer[0], &displayBufSize, &langID)
|
||||
if err != nil {
|
||||
return fmt.Sprintf("<unknown privilege %s>", string(utf16.Decode(nameBuffer[:bufSize])))
|
||||
}
|
||||
|
||||
return string(utf16.Decode(displayNameBuffer[:displayBufSize]))
|
||||
}
|
||||
|
||||
func newThreadToken() (windows.Token, error) {
|
||||
err := impersonateSelf(securityImpersonation)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
var token windows.Token
|
||||
err = openThreadToken(getCurrentThread(), syscall.TOKEN_ADJUST_PRIVILEGES|syscall.TOKEN_QUERY, false, &token)
|
||||
if err != nil {
|
||||
rerr := revertToSelf()
|
||||
if rerr != nil {
|
||||
panic(rerr)
|
||||
}
|
||||
return 0, err
|
||||
}
|
||||
return token, nil
|
||||
}
|
||||
|
||||
func releaseThreadToken(h windows.Token) {
|
||||
err := revertToSelf()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
h.Close()
|
||||
}
|
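
For context, a minimal usage sketch of the privilege helpers removed above (not part of this commit; the `package main` wrapper and the file it would live in are assumptions). RunWithPrivilege pins the goroutine to its OS thread, impersonates the current thread token, enables the named privilege, and then invokes the callback:

```go
package main

import (
	"fmt"

	winio "github.com/Microsoft/go-winio"
)

func main() {
	// Enable SeBackupPrivilege only for the duration of the callback.
	err := winio.RunWithPrivilege(winio.SeBackupPrivilege, func() error {
		// Privileged work (for example, opening files with backup semantics)
		// would go here.
		return nil
	})
	if err != nil {
		// A *winio.PrivilegeError names the privileges that could not be enabled.
		fmt.Println("privilege error:", err)
	}
}
```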
128
vendor/github.com/Microsoft/go-winio/reparse.go
generated
vendored
@ -1,128 +0,0 @@
|
||||
package winio
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"strings"
|
||||
"unicode/utf16"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
const (
|
||||
reparseTagMountPoint = 0xA0000003
|
||||
reparseTagSymlink = 0xA000000C
|
||||
)
|
||||
|
||||
type reparseDataBuffer struct {
|
||||
ReparseTag uint32
|
||||
ReparseDataLength uint16
|
||||
Reserved uint16
|
||||
SubstituteNameOffset uint16
|
||||
SubstituteNameLength uint16
|
||||
PrintNameOffset uint16
|
||||
PrintNameLength uint16
|
||||
}
|
||||
|
||||
// ReparsePoint describes a Win32 symlink or mount point.
|
||||
type ReparsePoint struct {
|
||||
Target string
|
||||
IsMountPoint bool
|
||||
}
|
||||
|
||||
// UnsupportedReparsePointError is returned when trying to decode a non-symlink or
|
||||
// mount point reparse point.
|
||||
type UnsupportedReparsePointError struct {
|
||||
Tag uint32
|
||||
}
|
||||
|
||||
func (e *UnsupportedReparsePointError) Error() string {
|
||||
return fmt.Sprintf("unsupported reparse point %x", e.Tag)
|
||||
}
|
||||
|
||||
// DecodeReparsePoint decodes a Win32 REPARSE_DATA_BUFFER structure containing either a symlink
|
||||
// or a mount point.
|
||||
func DecodeReparsePoint(b []byte) (*ReparsePoint, error) {
|
||||
tag := binary.LittleEndian.Uint32(b[0:4])
|
||||
return DecodeReparsePointData(tag, b[8:])
|
||||
}
|
||||
|
||||
func DecodeReparsePointData(tag uint32, b []byte) (*ReparsePoint, error) {
|
||||
isMountPoint := false
|
||||
switch tag {
|
||||
case reparseTagMountPoint:
|
||||
isMountPoint = true
|
||||
case reparseTagSymlink:
|
||||
default:
|
||||
return nil, &UnsupportedReparsePointError{tag}
|
||||
}
|
||||
nameOffset := 8 + binary.LittleEndian.Uint16(b[4:6])
|
||||
if !isMountPoint {
|
||||
nameOffset += 4
|
||||
}
|
||||
nameLength := binary.LittleEndian.Uint16(b[6:8])
|
||||
name := make([]uint16, nameLength/2)
|
||||
err := binary.Read(bytes.NewReader(b[nameOffset:nameOffset+nameLength]), binary.LittleEndian, &name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &ReparsePoint{string(utf16.Decode(name)), isMountPoint}, nil
|
||||
}
|
||||
|
||||
func isDriveLetter(c byte) bool {
|
||||
return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z')
|
||||
}
|
||||
|
||||
// EncodeReparsePoint encodes a Win32 REPARSE_DATA_BUFFER structure describing a symlink or
|
||||
// mount point.
|
||||
func EncodeReparsePoint(rp *ReparsePoint) []byte {
|
||||
// Generate an NT path and determine if this is a relative path.
|
||||
var ntTarget string
|
||||
relative := false
|
||||
if strings.HasPrefix(rp.Target, `\\?\`) {
|
||||
ntTarget = `\??\` + rp.Target[4:]
|
||||
} else if strings.HasPrefix(rp.Target, `\\`) {
|
||||
ntTarget = `\??\UNC\` + rp.Target[2:]
|
||||
} else if len(rp.Target) >= 2 && isDriveLetter(rp.Target[0]) && rp.Target[1] == ':' {
|
||||
ntTarget = `\??\` + rp.Target
|
||||
} else {
|
||||
ntTarget = rp.Target
|
||||
relative = true
|
||||
}
|
||||
|
||||
// The paths must be NUL-terminated even though they are counted strings.
|
||||
target16 := utf16.Encode([]rune(rp.Target + "\x00"))
|
||||
ntTarget16 := utf16.Encode([]rune(ntTarget + "\x00"))
|
||||
|
||||
size := int(unsafe.Sizeof(reparseDataBuffer{})) - 8
|
||||
size += len(ntTarget16)*2 + len(target16)*2
|
||||
|
||||
tag := uint32(reparseTagMountPoint)
|
||||
if !rp.IsMountPoint {
|
||||
tag = reparseTagSymlink
|
||||
size += 4 // Add room for symlink flags
|
||||
}
|
||||
|
||||
data := reparseDataBuffer{
|
||||
ReparseTag: tag,
|
||||
ReparseDataLength: uint16(size),
|
||||
SubstituteNameOffset: 0,
|
||||
SubstituteNameLength: uint16((len(ntTarget16) - 1) * 2),
|
||||
PrintNameOffset: uint16(len(ntTarget16) * 2),
|
||||
PrintNameLength: uint16((len(target16) - 1) * 2),
|
||||
}
|
||||
|
||||
var b bytes.Buffer
|
||||
binary.Write(&b, binary.LittleEndian, &data)
|
||||
if !rp.IsMountPoint {
|
||||
flags := uint32(0)
|
||||
if relative {
|
||||
flags |= 1
|
||||
}
|
||||
binary.Write(&b, binary.LittleEndian, flags)
|
||||
}
|
||||
|
||||
binary.Write(&b, binary.LittleEndian, ntTarget16)
|
||||
binary.Write(&b, binary.LittleEndian, target16)
|
||||
return b.Bytes()
|
||||
}
|
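
As a hedged illustration of the reparse-point helpers deleted above (again, not part of this commit; the target path is a placeholder), EncodeReparsePoint and DecodeReparsePoint round-trip a symlink target:

```go
package main

import (
	"fmt"
	"log"

	winio "github.com/Microsoft/go-winio"
)

func main() {
	// Encode a symlink reparse point, then decode it back.
	rp := winio.ReparsePoint{Target: `C:\data\target`, IsMountPoint: false}
	buf := winio.EncodeReparsePoint(&rp)

	decoded, err := winio.DecodeReparsePoint(buf)
	if err != nil {
		log.Fatalf("decode reparse point: %v", err)
	}
	fmt.Println(decoded.Target, decoded.IsMountPoint) // C:\data\target false
}
```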
98
vendor/github.com/Microsoft/go-winio/sd.go
generated
vendored
@ -1,98 +0,0 @@
|
||||
// +build windows
|
||||
|
||||
package winio
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
//sys lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) = advapi32.LookupAccountNameW
|
||||
//sys convertSidToStringSid(sid *byte, str **uint16) (err error) = advapi32.ConvertSidToStringSidW
|
||||
//sys convertStringSecurityDescriptorToSecurityDescriptor(str string, revision uint32, sd *uintptr, size *uint32) (err error) = advapi32.ConvertStringSecurityDescriptorToSecurityDescriptorW
|
||||
//sys convertSecurityDescriptorToStringSecurityDescriptor(sd *byte, revision uint32, secInfo uint32, sddl **uint16, sddlSize *uint32) (err error) = advapi32.ConvertSecurityDescriptorToStringSecurityDescriptorW
|
||||
//sys localFree(mem uintptr) = LocalFree
|
||||
//sys getSecurityDescriptorLength(sd uintptr) (len uint32) = advapi32.GetSecurityDescriptorLength
|
||||
|
||||
const (
|
||||
cERROR_NONE_MAPPED = syscall.Errno(1332)
|
||||
)
|
||||
|
||||
type AccountLookupError struct {
|
||||
Name string
|
||||
Err error
|
||||
}
|
||||
|
||||
func (e *AccountLookupError) Error() string {
|
||||
if e.Name == "" {
|
||||
return "lookup account: empty account name specified"
|
||||
}
|
||||
var s string
|
||||
switch e.Err {
|
||||
case cERROR_NONE_MAPPED:
|
||||
s = "not found"
|
||||
default:
|
||||
s = e.Err.Error()
|
||||
}
|
||||
return "lookup account " + e.Name + ": " + s
|
||||
}
|
||||
|
||||
type SddlConversionError struct {
|
||||
Sddl string
|
||||
Err error
|
||||
}
|
||||
|
||||
func (e *SddlConversionError) Error() string {
|
||||
return "convert " + e.Sddl + ": " + e.Err.Error()
|
||||
}
|
||||
|
||||
// LookupSidByName looks up the SID of an account by name
|
||||
func LookupSidByName(name string) (sid string, err error) {
|
||||
if name == "" {
|
||||
return "", &AccountLookupError{name, cERROR_NONE_MAPPED}
|
||||
}
|
||||
|
||||
var sidSize, sidNameUse, refDomainSize uint32
|
||||
err = lookupAccountName(nil, name, nil, &sidSize, nil, &refDomainSize, &sidNameUse)
|
||||
if err != nil && err != syscall.ERROR_INSUFFICIENT_BUFFER {
|
||||
return "", &AccountLookupError{name, err}
|
||||
}
|
||||
sidBuffer := make([]byte, sidSize)
|
||||
refDomainBuffer := make([]uint16, refDomainSize)
|
||||
err = lookupAccountName(nil, name, &sidBuffer[0], &sidSize, &refDomainBuffer[0], &refDomainSize, &sidNameUse)
|
||||
if err != nil {
|
||||
return "", &AccountLookupError{name, err}
|
||||
}
|
||||
var strBuffer *uint16
|
||||
err = convertSidToStringSid(&sidBuffer[0], &strBuffer)
|
||||
if err != nil {
|
||||
return "", &AccountLookupError{name, err}
|
||||
}
|
||||
sid = syscall.UTF16ToString((*[0xffff]uint16)(unsafe.Pointer(strBuffer))[:])
|
||||
localFree(uintptr(unsafe.Pointer(strBuffer)))
|
||||
return sid, nil
|
||||
}
|
||||
|
||||
func SddlToSecurityDescriptor(sddl string) ([]byte, error) {
|
||||
var sdBuffer uintptr
|
||||
err := convertStringSecurityDescriptorToSecurityDescriptor(sddl, 1, &sdBuffer, nil)
|
||||
if err != nil {
|
||||
return nil, &SddlConversionError{sddl, err}
|
||||
}
|
||||
defer localFree(sdBuffer)
|
||||
sd := make([]byte, getSecurityDescriptorLength(sdBuffer))
|
||||
copy(sd, (*[0xffff]byte)(unsafe.Pointer(sdBuffer))[:len(sd)])
|
||||
return sd, nil
|
||||
}
|
||||
|
||||
func SecurityDescriptorToSddl(sd []byte) (string, error) {
|
||||
var sddl *uint16
|
||||
// The returned string length seems to including an aribtrary number of terminating NULs.
|
||||
// Don't use it.
|
||||
err := convertSecurityDescriptorToStringSecurityDescriptor(&sd[0], 1, 0xff, &sddl, nil)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
defer localFree(uintptr(unsafe.Pointer(sddl)))
|
||||
return syscall.UTF16ToString((*[0xffff]uint16)(unsafe.Pointer(sddl))[:]), nil
|
||||
}
|
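
A brief, hedged sketch of how the SID/SDDL helpers deleted above are typically called (the account name and SDDL string are illustrative only, and the calls require Windows):

```go
package main

import (
	"fmt"
	"log"

	winio "github.com/Microsoft/go-winio"
)

func main() {
	// Resolve a well-known local group to its SID string.
	sid, err := winio.LookupSidByName("Administrators")
	if err != nil {
		log.Fatalf("lookup SID: %v", err)
	}
	fmt.Println("SID:", sid)

	// Convert an SDDL string to a binary security descriptor and back.
	sd, err := winio.SddlToSecurityDescriptor("D:P(A;;GA;;;WD)")
	if err != nil {
		log.Fatalf("SDDL to descriptor: %v", err)
	}
	sddl, err := winio.SecurityDescriptorToSddl(sd)
	if err != nil {
		log.Fatalf("descriptor to SDDL: %v", err)
	}
	fmt.Println("SDDL:", sddl)
}
```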
3
vendor/github.com/Microsoft/go-winio/syscall.go
generated
vendored
@ -1,3 +0,0 @@
package winio

//go:generate go run $GOROOT/src/syscall/mksyscall_windows.go -output zsyscall_windows.go file.go pipe.go sd.go fileinfo.go privilege.go backup.go hvsock.go
151
vendor/github.com/Microsoft/go-winio/vhd/vhd.go
generated
vendored
@ -1,151 +0,0 @@
|
||||
// +build windows
|
||||
|
||||
package vhd
|
||||
|
||||
import "syscall"
|
||||
|
||||
//go:generate go run mksyscall_windows.go -output zvhd.go vhd.go
|
||||
|
||||
//sys createVirtualDisk(virtualStorageType *virtualStorageType, path string, virtualDiskAccessMask uint32, securityDescriptor *uintptr, flags uint32, providerSpecificFlags uint32, parameters *createVirtualDiskParameters, o *syscall.Overlapped, handle *syscall.Handle) (err error) [failretval != 0] = VirtDisk.CreateVirtualDisk
|
||||
//sys openVirtualDisk(virtualStorageType *virtualStorageType, path string, virtualDiskAccessMask uint32, flags uint32, parameters *openVirtualDiskParameters, handle *syscall.Handle) (err error) [failretval != 0] = VirtDisk.OpenVirtualDisk
|
||||
//sys detachVirtualDisk(handle syscall.Handle, flags uint32, providerSpecificFlags uint32) (err error) [failretval != 0] = VirtDisk.DetachVirtualDisk
|
||||
|
||||
type virtualStorageType struct {
|
||||
DeviceID uint32
|
||||
VendorID [16]byte
|
||||
}
|
||||
|
||||
type (
|
||||
createVirtualDiskFlag uint32
|
||||
VirtualDiskAccessMask uint32
|
||||
VirtualDiskFlag uint32
|
||||
)
|
||||
|
||||
const (
|
||||
// Flags for creating a VHD (not exported)
|
||||
createVirtualDiskFlagNone createVirtualDiskFlag = 0
|
||||
createVirtualDiskFlagFullPhysicalAllocation createVirtualDiskFlag = 1
|
||||
createVirtualDiskFlagPreventWritesToSourceDisk createVirtualDiskFlag = 2
|
||||
createVirtualDiskFlagDoNotCopyMetadataFromParent createVirtualDiskFlag = 4
|
||||
|
||||
// Access Mask for opening a VHD
|
||||
VirtualDiskAccessNone VirtualDiskAccessMask = 0
|
||||
VirtualDiskAccessAttachRO VirtualDiskAccessMask = 65536
|
||||
VirtualDiskAccessAttachRW VirtualDiskAccessMask = 131072
|
||||
VirtualDiskAccessDetach VirtualDiskAccessMask = 262144
|
||||
VirtualDiskAccessGetInfo VirtualDiskAccessMask = 524288
|
||||
VirtualDiskAccessCreate VirtualDiskAccessMask = 1048576
|
||||
VirtualDiskAccessMetaOps VirtualDiskAccessMask = 2097152
|
||||
VirtualDiskAccessRead VirtualDiskAccessMask = 851968
|
||||
VirtualDiskAccessAll VirtualDiskAccessMask = 4128768
|
||||
VirtualDiskAccessWritable VirtualDiskAccessMask = 3276800
|
||||
|
||||
// Flags for opening a VHD
|
||||
OpenVirtualDiskFlagNone VirtualDiskFlag = 0
|
||||
OpenVirtualDiskFlagNoParents VirtualDiskFlag = 0x1
|
||||
OpenVirtualDiskFlagBlankFile VirtualDiskFlag = 0x2
|
||||
OpenVirtualDiskFlagBootDrive VirtualDiskFlag = 0x4
|
||||
OpenVirtualDiskFlagCachedIO VirtualDiskFlag = 0x8
|
||||
OpenVirtualDiskFlagCustomDiffChain VirtualDiskFlag = 0x10
|
||||
OpenVirtualDiskFlagParentCachedIO VirtualDiskFlag = 0x20
|
||||
OpenVirtualDiskFlagVhdSetFileOnly VirtualDiskFlag = 0x40
|
||||
OpenVirtualDiskFlagIgnoreRelativeParentLocator VirtualDiskFlag = 0x80
|
||||
OpenVirtualDiskFlagNoWriteHardening VirtualDiskFlag = 0x100
|
||||
)
|
||||
|
||||
type createVersion2 struct {
|
||||
UniqueID [16]byte // GUID
|
||||
MaximumSize uint64
|
||||
BlockSizeInBytes uint32
|
||||
SectorSizeInBytes uint32
|
||||
ParentPath *uint16 // string
|
||||
SourcePath *uint16 // string
|
||||
OpenFlags uint32
|
||||
ParentVirtualStorageType virtualStorageType
|
||||
SourceVirtualStorageType virtualStorageType
|
||||
ResiliencyGUID [16]byte // GUID
|
||||
}
|
||||
|
||||
type createVirtualDiskParameters struct {
|
||||
Version uint32 // Must always be set to 2
|
||||
Version2 createVersion2
|
||||
}
|
||||
|
||||
type openVersion2 struct {
|
||||
GetInfoOnly int32 // bool but 4-byte aligned
|
||||
ReadOnly int32 // bool but 4-byte aligned
|
||||
ResiliencyGUID [16]byte // GUID
|
||||
}
|
||||
|
||||
type openVirtualDiskParameters struct {
|
||||
Version uint32 // Must always be set to 2
|
||||
Version2 openVersion2
|
||||
}
|
||||
|
||||
// CreateVhdx will create a simple vhdx file at the given path using default values.
|
||||
func CreateVhdx(path string, maxSizeInGb, blockSizeInMb uint32) error {
|
||||
var (
|
||||
defaultType virtualStorageType
|
||||
handle syscall.Handle
|
||||
)
|
||||
|
||||
parameters := createVirtualDiskParameters{
|
||||
Version: 2,
|
||||
Version2: createVersion2{
|
||||
MaximumSize: uint64(maxSizeInGb) * 1024 * 1024 * 1024,
|
||||
BlockSizeInBytes: blockSizeInMb * 1024 * 1024,
|
||||
},
|
||||
}
|
||||
|
||||
if err := createVirtualDisk(
|
||||
&defaultType,
|
||||
path,
|
||||
uint32(VirtualDiskAccessNone),
|
||||
nil,
|
||||
uint32(createVirtualDiskFlagNone),
|
||||
0,
|
||||
&parameters,
|
||||
nil,
|
||||
&handle); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := syscall.CloseHandle(handle); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// DetachVhd detaches a mounted container layer vhd found at `path`.
|
||||
func DetachVhd(path string) error {
|
||||
handle, err := OpenVirtualDisk(
|
||||
path,
|
||||
VirtualDiskAccessNone,
|
||||
OpenVirtualDiskFlagCachedIO|OpenVirtualDiskFlagIgnoreRelativeParentLocator)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer syscall.CloseHandle(handle)
|
||||
return detachVirtualDisk(handle, 0, 0)
|
||||
}
|
||||
|
||||
// OpenVirtualDisk obtains a handle to a VHD opened with supplied access mask and flags.
|
||||
func OpenVirtualDisk(path string, accessMask VirtualDiskAccessMask, flag VirtualDiskFlag) (syscall.Handle, error) {
|
||||
var (
|
||||
defaultType virtualStorageType
|
||||
handle syscall.Handle
|
||||
)
|
||||
parameters := openVirtualDiskParameters{Version: 2}
|
||||
if err := openVirtualDisk(
|
||||
&defaultType,
|
||||
path,
|
||||
uint32(accessMask),
|
||||
uint32(flag),
|
||||
&parameters,
|
||||
&handle); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return handle, nil
|
||||
}
|
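
Finally, a hedged sketch of the VHD helpers deleted above; the path, size, and block size are placeholders, and the code is Windows-only:

```go
package main

import (
	"log"

	"github.com/Microsoft/go-winio/vhd"
)

func main() {
	// Create a 10 GB VHDX with a 1 MB block size at an illustrative path.
	if err := vhd.CreateVhdx(`C:\disks\example.vhdx`, 10, 1); err != nil {
		log.Fatalf("create vhdx: %v", err)
	}

	// Detach a previously attached VHD by path; returns an error if the
	// disk cannot be opened or detached.
	if err := vhd.DetachVhd(`C:\disks\example.vhdx`); err != nil {
		log.Printf("detach vhd: %v", err)
	}
}
```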
99
vendor/github.com/Microsoft/go-winio/vhd/zvhd.go
generated
vendored
@ -1,99 +0,0 @@
|
||||
// MACHINE GENERATED BY 'go generate' COMMAND; DO NOT EDIT
|
||||
|
||||
package vhd
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
"unsafe"
|
||||
|
||||
"golang.org/x/sys/windows"
|
||||
)
|
||||
|
||||
var _ unsafe.Pointer
|
||||
|
||||
// Do the interface allocations only once for common
|
||||
// Errno values.
|
||||
const (
|
||||
errnoERROR_IO_PENDING = 997
|
||||
)
|
||||
|
||||
var (
|
||||
errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING)
|
||||
)
|
||||
|
||||
// errnoErr returns common boxed Errno values, to prevent
|
||||
// allocations at runtime.
|
||||
func errnoErr(e syscall.Errno) error {
|
||||
switch e {
|
||||
case 0:
|
||||
return nil
|
||||
case errnoERROR_IO_PENDING:
|
||||
return errERROR_IO_PENDING
|
||||
}
|
||||
// TODO: add more here, after collecting data on the common
|
||||
// error values see on Windows. (perhaps when running
|
||||
// all.bat?)
|
||||
return e
|
||||
}
|
||||
|
||||
var (
|
||||
modVirtDisk = windows.NewLazySystemDLL("VirtDisk.dll")
|
||||
|
||||
procCreateVirtualDisk = modVirtDisk.NewProc("CreateVirtualDisk")
|
||||
procOpenVirtualDisk = modVirtDisk.NewProc("OpenVirtualDisk")
|
||||
procDetachVirtualDisk = modVirtDisk.NewProc("DetachVirtualDisk")
|
||||
)
|
||||
|
||||
func createVirtualDisk(virtualStorageType *virtualStorageType, path string, virtualDiskAccessMask uint32, securityDescriptor *uintptr, flags uint32, providerSpecificFlags uint32, parameters *createVirtualDiskParameters, o *syscall.Overlapped, handle *syscall.Handle) (err error) {
|
||||
var _p0 *uint16
|
||||
_p0, err = syscall.UTF16PtrFromString(path)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
return _createVirtualDisk(virtualStorageType, _p0, virtualDiskAccessMask, securityDescriptor, flags, providerSpecificFlags, parameters, o, handle)
|
||||
}
|
||||
|
||||
func _createVirtualDisk(virtualStorageType *virtualStorageType, path *uint16, virtualDiskAccessMask uint32, securityDescriptor *uintptr, flags uint32, providerSpecificFlags uint32, parameters *createVirtualDiskParameters, o *syscall.Overlapped, handle *syscall.Handle) (err error) {
|
||||
r1, _, e1 := syscall.Syscall9(procCreateVirtualDisk.Addr(), 9, uintptr(unsafe.Pointer(virtualStorageType)), uintptr(unsafe.Pointer(path)), uintptr(virtualDiskAccessMask), uintptr(unsafe.Pointer(securityDescriptor)), uintptr(flags), uintptr(providerSpecificFlags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(o)), uintptr(unsafe.Pointer(handle)))
|
||||
if r1 != 0 {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func openVirtualDisk(virtualStorageType *virtualStorageType, path string, virtualDiskAccessMask uint32, flags uint32, parameters *openVirtualDiskParameters, handle *syscall.Handle) (err error) {
|
||||
var _p0 *uint16
|
||||
_p0, err = syscall.UTF16PtrFromString(path)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
return _openVirtualDisk(virtualStorageType, _p0, virtualDiskAccessMask, flags, parameters, handle)
|
||||
}
|
||||
|
||||
func _openVirtualDisk(virtualStorageType *virtualStorageType, path *uint16, virtualDiskAccessMask uint32, flags uint32, parameters *openVirtualDiskParameters, handle *syscall.Handle) (err error) {
|
||||
r1, _, e1 := syscall.Syscall6(procOpenVirtualDisk.Addr(), 6, uintptr(unsafe.Pointer(virtualStorageType)), uintptr(unsafe.Pointer(path)), uintptr(virtualDiskAccessMask), uintptr(flags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(handle)))
|
||||
if r1 != 0 {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func detachVirtualDisk(handle syscall.Handle, flags uint32, providerSpecificFlags uint32) (err error) {
|
||||
r1, _, e1 := syscall.Syscall(procDetachVirtualDisk.Addr(), 3, uintptr(handle), uintptr(flags), uintptr(providerSpecificFlags))
|
||||
if r1 != 0 {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
562
vendor/github.com/Microsoft/go-winio/zsyscall_windows.go
generated
vendored
@ -1,562 +0,0 @@
|
||||
// Code generated by 'go generate'; DO NOT EDIT.
|
||||
|
||||
package winio
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
"unsafe"
|
||||
|
||||
"golang.org/x/sys/windows"
|
||||
)
|
||||
|
||||
var _ unsafe.Pointer
|
||||
|
||||
// Do the interface allocations only once for common
|
||||
// Errno values.
|
||||
const (
|
||||
errnoERROR_IO_PENDING = 997
|
||||
)
|
||||
|
||||
var (
|
||||
errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING)
|
||||
)
|
||||
|
||||
// errnoErr returns common boxed Errno values, to prevent
|
||||
// allocations at runtime.
|
||||
func errnoErr(e syscall.Errno) error {
|
||||
switch e {
|
||||
case 0:
|
||||
return nil
|
||||
case errnoERROR_IO_PENDING:
|
||||
return errERROR_IO_PENDING
|
||||
}
|
||||
// TODO: add more here, after collecting data on the common
|
||||
// error values see on Windows. (perhaps when running
|
||||
// all.bat?)
|
||||
return e
|
||||
}
|
||||
|
||||
var (
|
||||
modkernel32 = windows.NewLazySystemDLL("kernel32.dll")
|
||||
modws2_32 = windows.NewLazySystemDLL("ws2_32.dll")
|
||||
modntdll = windows.NewLazySystemDLL("ntdll.dll")
|
||||
modadvapi32 = windows.NewLazySystemDLL("advapi32.dll")
|
||||
|
||||
procCancelIoEx = modkernel32.NewProc("CancelIoEx")
|
||||
procCreateIoCompletionPort = modkernel32.NewProc("CreateIoCompletionPort")
|
||||
procGetQueuedCompletionStatus = modkernel32.NewProc("GetQueuedCompletionStatus")
|
||||
procSetFileCompletionNotificationModes = modkernel32.NewProc("SetFileCompletionNotificationModes")
|
||||
procWSAGetOverlappedResult = modws2_32.NewProc("WSAGetOverlappedResult")
|
||||
procConnectNamedPipe = modkernel32.NewProc("ConnectNamedPipe")
|
||||
procCreateNamedPipeW = modkernel32.NewProc("CreateNamedPipeW")
|
||||
procCreateFileW = modkernel32.NewProc("CreateFileW")
|
||||
procGetNamedPipeInfo = modkernel32.NewProc("GetNamedPipeInfo")
|
||||
procGetNamedPipeHandleStateW = modkernel32.NewProc("GetNamedPipeHandleStateW")
|
||||
procLocalAlloc = modkernel32.NewProc("LocalAlloc")
|
||||
procNtCreateNamedPipeFile = modntdll.NewProc("NtCreateNamedPipeFile")
|
||||
procRtlNtStatusToDosErrorNoTeb = modntdll.NewProc("RtlNtStatusToDosErrorNoTeb")
|
||||
procRtlDosPathNameToNtPathName_U = modntdll.NewProc("RtlDosPathNameToNtPathName_U")
|
||||
procRtlDefaultNpAcl = modntdll.NewProc("RtlDefaultNpAcl")
|
||||
procLookupAccountNameW = modadvapi32.NewProc("LookupAccountNameW")
|
||||
procConvertSidToStringSidW = modadvapi32.NewProc("ConvertSidToStringSidW")
|
||||
procConvertStringSecurityDescriptorToSecurityDescriptorW = modadvapi32.NewProc("ConvertStringSecurityDescriptorToSecurityDescriptorW")
|
||||
procConvertSecurityDescriptorToStringSecurityDescriptorW = modadvapi32.NewProc("ConvertSecurityDescriptorToStringSecurityDescriptorW")
|
||||
procLocalFree = modkernel32.NewProc("LocalFree")
|
||||
procGetSecurityDescriptorLength = modadvapi32.NewProc("GetSecurityDescriptorLength")
|
||||
procGetFileInformationByHandleEx = modkernel32.NewProc("GetFileInformationByHandleEx")
|
||||
procSetFileInformationByHandle = modkernel32.NewProc("SetFileInformationByHandle")
|
||||
procAdjustTokenPrivileges = modadvapi32.NewProc("AdjustTokenPrivileges")
|
||||
procImpersonateSelf = modadvapi32.NewProc("ImpersonateSelf")
|
||||
procRevertToSelf = modadvapi32.NewProc("RevertToSelf")
|
||||
procOpenThreadToken = modadvapi32.NewProc("OpenThreadToken")
|
||||
procGetCurrentThread = modkernel32.NewProc("GetCurrentThread")
|
||||
procLookupPrivilegeValueW = modadvapi32.NewProc("LookupPrivilegeValueW")
|
||||
procLookupPrivilegeNameW = modadvapi32.NewProc("LookupPrivilegeNameW")
|
||||
procLookupPrivilegeDisplayNameW = modadvapi32.NewProc("LookupPrivilegeDisplayNameW")
|
||||
procBackupRead = modkernel32.NewProc("BackupRead")
|
||||
procBackupWrite = modkernel32.NewProc("BackupWrite")
|
||||
procbind = modws2_32.NewProc("bind")
|
||||
)
|
||||
|
||||
func cancelIoEx(file syscall.Handle, o *syscall.Overlapped) (err error) {
|
||||
r1, _, e1 := syscall.Syscall(procCancelIoEx.Addr(), 2, uintptr(file), uintptr(unsafe.Pointer(o)), 0)
|
||||
if r1 == 0 {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func createIoCompletionPort(file syscall.Handle, port syscall.Handle, key uintptr, threadCount uint32) (newport syscall.Handle, err error) {
|
||||
r0, _, e1 := syscall.Syscall6(procCreateIoCompletionPort.Addr(), 4, uintptr(file), uintptr(port), uintptr(key), uintptr(threadCount), 0, 0)
|
||||
newport = syscall.Handle(r0)
|
||||
if newport == 0 {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func getQueuedCompletionStatus(port syscall.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) {
|
||||
r1, _, e1 := syscall.Syscall6(procGetQueuedCompletionStatus.Addr(), 5, uintptr(port), uintptr(unsafe.Pointer(bytes)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(o)), uintptr(timeout), 0)
|
||||
if r1 == 0 {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err error) {
|
||||
r1, _, e1 := syscall.Syscall(procSetFileCompletionNotificationModes.Addr(), 2, uintptr(h), uintptr(flags), 0)
|
||||
if r1 == 0 {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func wsaGetOverlappedResult(h syscall.Handle, o *syscall.Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) {
|
||||
var _p0 uint32
|
||||
if wait {
|
||||
_p0 = 1
|
||||
} else {
|
||||
_p0 = 0
|
||||
}
|
||||
r1, _, e1 := syscall.Syscall6(procWSAGetOverlappedResult.Addr(), 5, uintptr(h), uintptr(unsafe.Pointer(o)), uintptr(unsafe.Pointer(bytes)), uintptr(_p0), uintptr(unsafe.Pointer(flags)), 0)
|
||||
if r1 == 0 {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) {
|
||||
r1, _, e1 := syscall.Syscall(procConnectNamedPipe.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(o)), 0)
|
||||
if r1 == 0 {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) {
|
||||
var _p0 *uint16
|
||||
_p0, err = syscall.UTF16PtrFromString(name)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
return _createNamedPipe(_p0, flags, pipeMode, maxInstances, outSize, inSize, defaultTimeout, sa)
|
||||
}
|
||||
|
||||
func _createNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) {
|
||||
r0, _, e1 := syscall.Syscall9(procCreateNamedPipeW.Addr(), 8, uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(pipeMode), uintptr(maxInstances), uintptr(outSize), uintptr(inSize), uintptr(defaultTimeout), uintptr(unsafe.Pointer(sa)), 0)
|
||||
handle = syscall.Handle(r0)
|
||||
if handle == syscall.InvalidHandle {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func createFile(name string, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) {
|
||||
var _p0 *uint16
|
||||
_p0, err = syscall.UTF16PtrFromString(name)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
return _createFile(_p0, access, mode, sa, createmode, attrs, templatefile)
|
||||
}
|
||||
|
||||
func _createFile(name *uint16, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) {
|
||||
r0, _, e1 := syscall.Syscall9(procCreateFileW.Addr(), 7, uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile), 0, 0)
|
||||
handle = syscall.Handle(r0)
|
||||
if handle == syscall.InvalidHandle {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) {
|
||||
r1, _, e1 := syscall.Syscall6(procGetNamedPipeInfo.Addr(), 5, uintptr(pipe), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(outSize)), uintptr(unsafe.Pointer(inSize)), uintptr(unsafe.Pointer(maxInstances)), 0)
|
||||
if r1 == 0 {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) {
|
||||
r1, _, e1 := syscall.Syscall9(procGetNamedPipeHandleStateW.Addr(), 7, uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(curInstances)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), uintptr(unsafe.Pointer(userName)), uintptr(maxUserNameSize), 0, 0)
|
||||
if r1 == 0 {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func localAlloc(uFlags uint32, length uint32) (ptr uintptr) {
|
||||
r0, _, _ := syscall.Syscall(procLocalAlloc.Addr(), 2, uintptr(uFlags), uintptr(length), 0)
|
||||
ptr = uintptr(r0)
|
||||
return
|
||||
}
|
||||
|
||||
func ntCreateNamedPipeFile(pipe *syscall.Handle, access uint32, oa *objectAttributes, iosb *ioStatusBlock, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntstatus) {
|
||||
r0, _, _ := syscall.Syscall15(procNtCreateNamedPipeFile.Addr(), 14, uintptr(unsafe.Pointer(pipe)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(share), uintptr(disposition), uintptr(options), uintptr(typ), uintptr(readMode), uintptr(completionMode), uintptr(maxInstances), uintptr(inboundQuota), uintptr(outputQuota), uintptr(unsafe.Pointer(timeout)), 0)
|
||||
status = ntstatus(r0)
|
||||
return
|
||||
}
|
||||
|
||||
func rtlNtStatusToDosError(status ntstatus) (winerr error) {
|
||||
r0, _, _ := syscall.Syscall(procRtlNtStatusToDosErrorNoTeb.Addr(), 1, uintptr(status), 0, 0)
|
||||
if r0 != 0 {
|
||||
winerr = syscall.Errno(r0)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntstatus) {
|
||||
r0, _, _ := syscall.Syscall6(procRtlDosPathNameToNtPathName_U.Addr(), 4, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(ntName)), uintptr(filePart), uintptr(reserved), 0, 0)
|
||||
status = ntstatus(r0)
|
||||
return
|
||||
}
|
||||
|
||||
func rtlDefaultNpAcl(dacl *uintptr) (status ntstatus) {
|
||||
r0, _, _ := syscall.Syscall(procRtlDefaultNpAcl.Addr(), 1, uintptr(unsafe.Pointer(dacl)), 0, 0)
|
||||
status = ntstatus(r0)
|
||||
return
|
||||
}
|
||||
|
||||
func lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) {
|
||||
var _p0 *uint16
|
||||
_p0, err = syscall.UTF16PtrFromString(accountName)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
return _lookupAccountName(systemName, _p0, sid, sidSize, refDomain, refDomainSize, sidNameUse)
|
||||
}
|
||||
|
||||
func _lookupAccountName(systemName *uint16, accountName *uint16, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) {
|
||||
r1, _, e1 := syscall.Syscall9(procLookupAccountNameW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(accountName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sidSize)), uintptr(unsafe.Pointer(refDomain)), uintptr(unsafe.Pointer(refDomainSize)), uintptr(unsafe.Pointer(sidNameUse)), 0, 0)
|
||||
if r1 == 0 {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func convertSidToStringSid(sid *byte, str **uint16) (err error) {
|
||||
r1, _, e1 := syscall.Syscall(procConvertSidToStringSidW.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(str)), 0)
|
||||
if r1 == 0 {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func convertStringSecurityDescriptorToSecurityDescriptor(str string, revision uint32, sd *uintptr, size *uint32) (err error) {
|
||||
var _p0 *uint16
|
||||
_p0, err = syscall.UTF16PtrFromString(str)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
return _convertStringSecurityDescriptorToSecurityDescriptor(_p0, revision, sd, size)
|
||||
}
|
||||
|
||||
func _convertStringSecurityDescriptorToSecurityDescriptor(str *uint16, revision uint32, sd *uintptr, size *uint32) (err error) {
|
||||
r1, _, e1 := syscall.Syscall6(procConvertStringSecurityDescriptorToSecurityDescriptorW.Addr(), 4, uintptr(unsafe.Pointer(str)), uintptr(revision), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(size)), 0, 0)
|
||||
if r1 == 0 {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func convertSecurityDescriptorToStringSecurityDescriptor(sd *byte, revision uint32, secInfo uint32, sddl **uint16, sddlSize *uint32) (err error) {
|
||||
r1, _, e1 := syscall.Syscall6(procConvertSecurityDescriptorToStringSecurityDescriptorW.Addr(), 5, uintptr(unsafe.Pointer(sd)), uintptr(revision), uintptr(secInfo), uintptr(unsafe.Pointer(sddl)), uintptr(unsafe.Pointer(sddlSize)), 0)
|
||||
if r1 == 0 {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func localFree(mem uintptr) {
|
||||
syscall.Syscall(procLocalFree.Addr(), 1, uintptr(mem), 0, 0)
|
||||
return
|
||||
}
|
||||
|
||||
func getSecurityDescriptorLength(sd uintptr) (len uint32) {
|
||||
r0, _, _ := syscall.Syscall(procGetSecurityDescriptorLength.Addr(), 1, uintptr(sd), 0, 0)
|
||||
len = uint32(r0)
|
||||
return
|
||||
}
|
||||
|
||||
func getFileInformationByHandleEx(h syscall.Handle, class uint32, buffer *byte, size uint32) (err error) {
|
||||
r1, _, e1 := syscall.Syscall6(procGetFileInformationByHandleEx.Addr(), 4, uintptr(h), uintptr(class), uintptr(unsafe.Pointer(buffer)), uintptr(size), 0, 0)
|
||||
if r1 == 0 {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func setFileInformationByHandle(h syscall.Handle, class uint32, buffer *byte, size uint32) (err error) {
|
||||
r1, _, e1 := syscall.Syscall6(procSetFileInformationByHandle.Addr(), 4, uintptr(h), uintptr(class), uintptr(unsafe.Pointer(buffer)), uintptr(size), 0, 0)
|
||||
if r1 == 0 {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) {
|
||||
var _p0 uint32
|
||||
if releaseAll {
|
||||
_p0 = 1
|
||||
} else {
|
||||
_p0 = 0
|
||||
}
|
||||
r0, _, e1 := syscall.Syscall6(procAdjustTokenPrivileges.Addr(), 6, uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(input)), uintptr(outputSize), uintptr(unsafe.Pointer(output)), uintptr(unsafe.Pointer(requiredSize)))
|
||||
success = r0 != 0
|
||||
if true {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func impersonateSelf(level uint32) (err error) {
|
||||
r1, _, e1 := syscall.Syscall(procImpersonateSelf.Addr(), 1, uintptr(level), 0, 0)
|
||||
if r1 == 0 {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func revertToSelf() (err error) {
|
||||
r1, _, e1 := syscall.Syscall(procRevertToSelf.Addr(), 0, 0, 0, 0)
|
||||
if r1 == 0 {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func openThreadToken(thread syscall.Handle, accessMask uint32, openAsSelf bool, token *windows.Token) (err error) {
|
||||
var _p0 uint32
|
||||
if openAsSelf {
|
||||
_p0 = 1
|
||||
} else {
|
||||
_p0 = 0
|
||||
}
|
||||
r1, _, e1 := syscall.Syscall6(procOpenThreadToken.Addr(), 4, uintptr(thread), uintptr(accessMask), uintptr(_p0), uintptr(unsafe.Pointer(token)), 0, 0)
|
||||
if r1 == 0 {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func getCurrentThread() (h syscall.Handle) {
|
||||
r0, _, _ := syscall.Syscall(procGetCurrentThread.Addr(), 0, 0, 0, 0)
|
||||
h = syscall.Handle(r0)
|
||||
return
|
||||
}
|
||||
|
||||
func lookupPrivilegeValue(systemName string, name string, luid *uint64) (err error) {
|
||||
var _p0 *uint16
|
||||
_p0, err = syscall.UTF16PtrFromString(systemName)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
var _p1 *uint16
|
||||
_p1, err = syscall.UTF16PtrFromString(name)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
return _lookupPrivilegeValue(_p0, _p1, luid)
|
||||
}
|
||||
|
||||
func _lookupPrivilegeValue(systemName *uint16, name *uint16, luid *uint64) (err error) {
|
||||
r1, _, e1 := syscall.Syscall(procLookupPrivilegeValueW.Addr(), 3, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(luid)))
|
||||
if r1 == 0 {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func lookupPrivilegeName(systemName string, luid *uint64, buffer *uint16, size *uint32) (err error) {
|
||||
var _p0 *uint16
|
||||
_p0, err = syscall.UTF16PtrFromString(systemName)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
return _lookupPrivilegeName(_p0, luid, buffer, size)
|
||||
}
|
||||
|
||||
func _lookupPrivilegeName(systemName *uint16, luid *uint64, buffer *uint16, size *uint32) (err error) {
|
||||
r1, _, e1 := syscall.Syscall6(procLookupPrivilegeNameW.Addr(), 4, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(luid)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), 0, 0)
|
||||
if r1 == 0 {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) {
|
||||
var _p0 *uint16
|
||||
_p0, err = syscall.UTF16PtrFromString(systemName)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
return _lookupPrivilegeDisplayName(_p0, name, buffer, size, languageId)
|
||||
}
|
||||
|
||||
func _lookupPrivilegeDisplayName(systemName *uint16, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) {
|
||||
r1, _, e1 := syscall.Syscall6(procLookupPrivilegeDisplayNameW.Addr(), 5, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), uintptr(unsafe.Pointer(languageId)), 0)
|
||||
if r1 == 0 {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func backupRead(h syscall.Handle, b []byte, bytesRead *uint32, abort bool, processSecurity bool, context *uintptr) (err error) {
|
||||
var _p0 *byte
|
||||
if len(b) > 0 {
|
||||
_p0 = &b[0]
|
||||
}
|
||||
var _p1 uint32
|
||||
if abort {
|
||||
_p1 = 1
|
||||
} else {
|
||||
_p1 = 0
|
||||
}
|
||||
var _p2 uint32
|
||||
if processSecurity {
|
||||
_p2 = 1
|
||||
} else {
|
||||
_p2 = 0
|
||||
}
|
||||
r1, _, e1 := syscall.Syscall9(procBackupRead.Addr(), 7, uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesRead)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context)), 0, 0)
|
||||
if r1 == 0 {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func backupWrite(h syscall.Handle, b []byte, bytesWritten *uint32, abort bool, processSecurity bool, context *uintptr) (err error) {
|
||||
var _p0 *byte
|
||||
if len(b) > 0 {
|
||||
_p0 = &b[0]
|
||||
}
|
||||
var _p1 uint32
|
||||
if abort {
|
||||
_p1 = 1
|
||||
} else {
|
||||
_p1 = 0
|
||||
}
|
||||
var _p2 uint32
|
||||
if processSecurity {
|
||||
_p2 = 1
|
||||
} else {
|
||||
_p2 = 0
|
||||
}
|
||||
r1, _, e1 := syscall.Syscall9(procBackupWrite.Addr(), 7, uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesWritten)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context)), 0, 0)
|
||||
if r1 == 0 {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func bind(s syscall.Handle, name unsafe.Pointer, namelen int32) (err error) {
|
||||
r1, _, e1 := syscall.Syscall(procbind.Addr(), 3, uintptr(s), uintptr(name), uintptr(namelen))
|
||||
if r1 == socketError {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
21
vendor/github.com/Microsoft/hcsshim/LICENSE
generated
vendored
@ -1,21 +0,0 @@
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2015 Microsoft
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
46
vendor/github.com/Microsoft/hcsshim/README.md
generated
vendored
@ -1,46 +0,0 @@
|
||||
# hcsshim
|
||||
|
||||
[](https://ci.appveyor.com/project/WindowsVirtualization/hcsshim/branch/master)
|
||||
|
||||
This package contains the Golang interface for using the Windows [Host Compute Service](https://techcommunity.microsoft.com/t5/containers/introducing-the-host-compute-service-hcs/ba-p/382332) (HCS) to launch and manage [Windows Containers](https://docs.microsoft.com/en-us/virtualization/windowscontainers/about/). It also contains other helpers and functions for managing Windows Containers such as the Golang interface for the Host Network Service (HNS).
|
||||
|
||||
It is primarily used in the [Moby Project](https://github.com/moby/moby), but it can be freely used by other projects as well.
|
||||
|
||||
## Contributing
|
||||
|
||||
This project welcomes contributions and suggestions. Most contributions require you to agree to a
|
||||
Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us
|
||||
the rights to use your contribution. For details, visit https://cla.microsoft.com.
|
||||
|
||||
When you submit a pull request, a CLA-bot will automatically determine whether you need to provide
|
||||
a CLA and decorate the PR appropriately (e.g., label, comment). Simply follow the instructions
|
||||
provided by the bot. You will only need to do this once across all repos using our CLA.
|
||||
|
||||
We also ask that contributors [sign their commits](https://git-scm.com/docs/git-commit) using `git commit -s` or `git commit --signoff` to certify they either authored the work themselves or otherwise have permission to use it in this project.
|
||||
|
||||
|
||||
## Code of Conduct
|
||||
|
||||
This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
|
||||
For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or
|
||||
contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.
|
||||
|
||||
## Dependencies
|
||||
|
||||
This project requires Golang 1.9 or newer to build.
|
||||
|
||||
For system requirements to run this project, see the Microsoft docs on [Windows Container requirements](https://docs.microsoft.com/en-us/virtualization/windowscontainers/deploy-containers/system-requirements).
|
||||
|
||||
## Reporting Security Issues
|
||||
|
||||
Security issues and bugs should be reported privately, via email, to the Microsoft Security
|
||||
Response Center (MSRC) at [secure@microsoft.com](mailto:secure@microsoft.com). You should
|
||||
receive a response within 24 hours. If for some reason you do not, please follow up via
|
||||
email to ensure we received your original message. Further information, including the
|
||||
[MSRC PGP](https://technet.microsoft.com/en-us/security/dn606155) key, can be found in
|
||||
the [Security TechCenter](https://technet.microsoft.com/en-us/security/default).
|
||||
|
||||
For additional details, see [Report a Computer Security Vulnerability](https://technet.microsoft.com/en-us/security/ff852094.aspx) on Technet
|
||||
|
||||
---------------
|
||||
Copyright (c) 2018 Microsoft Corp. All rights reserved.
|
1
vendor/github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/options/doc.go
generated
vendored
@ -1 +0,0 @@
package options
1261
vendor/github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/options/runhcs.pb.go
generated
vendored
File diff suppressed because it is too large
80
vendor/github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/options/runhcs.proto
generated
vendored
@ -1,80 +0,0 @@
|
||||
syntax = "proto3";
|
||||
|
||||
package containerd.runhcs.v1;
|
||||
|
||||
import weak "gogoproto/gogo.proto";
|
||||
import "google/protobuf/timestamp.proto";
|
||||
|
||||
option go_package = "github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/options;options";
|
||||
|
||||
// Options are the set of customizations that can be passed at Create time.
|
||||
message Options {
|
||||
// enable debug tracing
|
||||
bool debug = 1;
|
||||
|
||||
enum DebugType {
|
||||
NPIPE = 0;
|
||||
FILE = 1;
|
||||
ETW = 2;
|
||||
}
|
||||
|
||||
// debug tracing output type
|
||||
DebugType debug_type = 2;
|
||||
|
||||
// registry key root for storage of the runhcs container state
|
||||
string registry_root = 3;
|
||||
|
||||
// sandbox_image is the image to use for the sandbox that matches the
|
||||
// sandbox_platform.
|
||||
string sandbox_image = 4;
|
||||
|
||||
// sandbox_platform is a CRI setting that specifies the platform
|
||||
// architecture for all sandbox's in this runtime. Values are
|
||||
// 'windows/amd64' and 'linux/amd64'.
|
||||
string sandbox_platform = 5;
|
||||
|
||||
enum SandboxIsolation {
|
||||
PROCESS = 0;
|
||||
HYPERVISOR = 1;
|
||||
}
|
||||
|
||||
// sandbox_isolation is a CRI setting that specifies the isolation level of
|
||||
// the sandbox. For Windows runtime PROCESS and HYPERVISOR are valid. For
|
||||
// LCOW only HYPERVISOR is valid and default if omitted.
|
||||
SandboxIsolation sandbox_isolation = 6;
|
||||
|
||||
// boot_files_root_path is the path to the directory containing the LCOW
|
||||
// kernel and root FS files.
|
||||
string boot_files_root_path = 7;
|
||||
|
||||
// vm_processor_count is the default number of processors to create for the
|
||||
// hypervisor isolated utility vm.
|
||||
//
|
||||
// The platform default if omitted is 2, unless the host only has a single
|
||||
// core in which case it is 1.
|
||||
int32 vm_processor_count = 8;
|
||||
|
||||
// vm_memory_size_in_mb is the default amount of memory to assign to the
|
||||
// hypervisor isolated utility vm.
|
||||
//
|
||||
// The platform default is 1024MB if omitted.
|
||||
int32 vm_memory_size_in_mb = 9;
|
||||
|
||||
// GPUVHDPath is the path to the gpu vhd to add to the uvm
|
||||
// when a container requests a gpu
|
||||
string GPUVHDPath = 10;
|
||||
}
|
||||
|
||||
// ProcessDetails contains additional information about a process. This is the additional
|
||||
// info returned in the Pids query.
|
||||
message ProcessDetails {
|
||||
string image_name = 1;
|
||||
google.protobuf.Timestamp created_at = 2 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
|
||||
uint64 kernel_time_100_ns = 3;
|
||||
uint64 memory_commit_bytes = 4;
|
||||
uint64 memory_working_set_private_bytes = 5;
|
||||
uint64 memory_working_set_shared_bytes = 6;
|
||||
uint32 process_id = 7;
|
||||
uint64 user_time_100_ns = 8;
|
||||
string exec_id = 9;
|
||||
}
|
6
vendor/github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/stats/doc.go
generated
vendored
@ -1,6 +0,0 @@
package stats

import (
	// go mod will not vendor without an import for metrics.proto
	_ "github.com/containerd/cgroups/stats/v1"
)
2819
vendor/github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/stats/stats.pb.go
generated
vendored
File diff suppressed because it is too large
70
vendor/github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/stats/stats.proto
generated
vendored
@ -1,70 +0,0 @@
syntax = "proto3";

package containerd.runhcs.stats.v1;

import weak "gogoproto/gogo.proto";
import "google/protobuf/timestamp.proto";
import "github.com/containerd/cgroups/stats/v1/metrics.proto";

option go_package = "github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/stats;stats";

message Statistics {
	oneof container {
		WindowsContainerStatistics windows = 1;
		io.containerd.cgroups.v1.Metrics linux = 2;
	}
	VirtualMachineStatistics vm = 3 [(gogoproto.customname) = "VM"];
}

message WindowsContainerStatistics {
	google.protobuf.Timestamp timestamp = 1 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
	google.protobuf.Timestamp container_start_time = 2 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
	uint64 uptime_ns = 3 [(gogoproto.customname) = "UptimeNS"];
	WindowsContainerProcessorStatistics processor = 4;
	WindowsContainerMemoryStatistics memory = 5;
	WindowsContainerStorageStatistics storage = 6;
}

message WindowsContainerProcessorStatistics {
	uint64 total_runtime_ns = 1 [(gogoproto.customname) = "TotalRuntimeNS"];
	uint64 runtime_user_ns = 2 [(gogoproto.customname) = "RuntimeUserNS"];
	uint64 runtime_kernel_ns = 3 [(gogoproto.customname) = "RuntimeKernelNS"];
}

message WindowsContainerMemoryStatistics {
	uint64 memory_usage_commit_bytes = 1;
	uint64 memory_usage_commit_peak_bytes = 2;
	uint64 memory_usage_private_working_set_bytes = 3;
}

message WindowsContainerStorageStatistics {
	uint64 read_count_normalized = 1;
	uint64 read_size_bytes = 2;
	uint64 write_count_normalized = 3;
	uint64 write_size_bytes = 4;
}

message VirtualMachineStatistics {
	VirtualMachineProcessorStatistics processor = 1;
	VirtualMachineMemoryStatistics memory = 2;
}

message VirtualMachineProcessorStatistics {
	uint64 total_runtime_ns = 1 [(gogoproto.customname) = "TotalRuntimeNS"];
}

message VirtualMachineMemoryStatistics {
	uint64 working_set_bytes = 1;
	uint32 virtual_node_count = 2;
	VirtualMachineMemory vm_memory = 3;
}

message VirtualMachineMemory {
	int32 available_memory = 1;
	int32 available_memory_buffer = 2;
	uint64 reserved_memory = 3;
	uint64 assigned_memory = 4;
	bool slp_active = 5;
	bool balancing_enabled = 6;
	bool dm_operation_in_progress = 7;
}
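A rough sketch of how the container oneof above is typically consumed on the client side; the wrapper type names (Statistics_Windows, Statistics_Linux) assume the standard gogo-generated code in stats.pb.go and are not verified against this exact revision.

package main

import (
	"fmt"

	stats "github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/stats"
)

// describe reports which branch of the Statistics oneof is populated.
func describe(s *stats.Statistics) string {
	switch c := s.Container.(type) {
	case *stats.Statistics_Windows:
		return fmt.Sprintf("windows container stats sampled at %v", c.Windows.Timestamp)
	case *stats.Statistics_Linux:
		return fmt.Sprintf("linux (LCOW) cgroup metrics: %v", c.Linux)
	default:
		return "no container statistics set"
	}
}

func main() {
	fmt.Println(describe(&stats.Statistics{}))
}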
223
vendor/github.com/Microsoft/hcsshim/container.go
generated
vendored
@ -1,223 +0,0 @@
|
||||
package hcsshim
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/Microsoft/hcsshim/internal/hcs"
|
||||
"github.com/Microsoft/hcsshim/internal/mergemaps"
|
||||
"github.com/Microsoft/hcsshim/internal/schema1"
|
||||
)
|
||||
|
||||
// ContainerProperties holds the properties for a container and the processes running in that container
|
||||
type ContainerProperties = schema1.ContainerProperties
|
||||
|
||||
// MemoryStats holds the memory statistics for a container
|
||||
type MemoryStats = schema1.MemoryStats
|
||||
|
||||
// ProcessorStats holds the processor statistics for a container
|
||||
type ProcessorStats = schema1.ProcessorStats
|
||||
|
||||
// StorageStats holds the storage statistics for a container
|
||||
type StorageStats = schema1.StorageStats
|
||||
|
||||
// NetworkStats holds the network statistics for a container
|
||||
type NetworkStats = schema1.NetworkStats
|
||||
|
||||
// Statistics is the structure returned by a statistics call on a container
|
||||
type Statistics = schema1.Statistics
|
||||
|
||||
// ProcessList is the structure of an item returned by a ProcessList call on a container
|
||||
type ProcessListItem = schema1.ProcessListItem
|
||||
|
||||
// MappedVirtualDiskController is the structure of an item returned by a MappedVirtualDiskList call on a container
|
||||
type MappedVirtualDiskController = schema1.MappedVirtualDiskController
|
||||
|
||||
// Type of Request Support in ModifySystem
|
||||
type RequestType = schema1.RequestType
|
||||
|
||||
// Type of Resource Support in ModifySystem
|
||||
type ResourceType = schema1.ResourceType
|
||||
|
||||
// RequestType const
|
||||
const (
|
||||
Add = schema1.Add
|
||||
Remove = schema1.Remove
|
||||
Network = schema1.Network
|
||||
)
|
||||
|
||||
// ResourceModificationRequestResponse is the structure used to send request to the container to modify the system
|
||||
// Supported resource types are Network and Request Types are Add/Remove
|
||||
type ResourceModificationRequestResponse = schema1.ResourceModificationRequestResponse
|
||||
|
||||
type container struct {
|
||||
system *hcs.System
|
||||
waitOnce sync.Once
|
||||
waitErr error
|
||||
waitCh chan struct{}
|
||||
}
|
||||
|
||||
// createComputeSystemAdditionalJSON is read from the environment at initialisation
|
||||
// time. It allows an environment variable to define additional JSON which
|
||||
// is merged in the CreateComputeSystem call to HCS.
|
||||
var createContainerAdditionalJSON []byte
|
||||
|
||||
func init() {
|
||||
createContainerAdditionalJSON = ([]byte)(os.Getenv("HCSSHIM_CREATECONTAINER_ADDITIONALJSON"))
|
||||
}
|
||||
|
||||
// CreateContainer creates a new container with the given configuration but does not start it.
|
||||
func CreateContainer(id string, c *ContainerConfig) (Container, error) {
|
||||
fullConfig, err := mergemaps.MergeJSON(c, createContainerAdditionalJSON)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to merge additional JSON '%s': %s", createContainerAdditionalJSON, err)
|
||||
}
|
||||
|
||||
system, err := hcs.CreateComputeSystem(context.Background(), id, fullConfig)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &container{system: system}, err
|
||||
}
|
||||
|
||||
// OpenContainer opens an existing container by ID.
|
||||
func OpenContainer(id string) (Container, error) {
|
||||
system, err := hcs.OpenComputeSystem(context.Background(), id)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &container{system: system}, err
|
||||
}
|
||||
|
||||
// GetContainers gets a list of the containers on the system that match the query
|
||||
func GetContainers(q ComputeSystemQuery) ([]ContainerProperties, error) {
|
||||
return hcs.GetComputeSystems(context.Background(), q)
|
||||
}
|
||||
|
||||
// Start synchronously starts the container.
|
||||
func (container *container) Start() error {
|
||||
return convertSystemError(container.system.Start(context.Background()), container)
|
||||
}
|
||||
|
||||
// Shutdown requests a container shutdown, but it may not actually be shutdown until Wait() succeeds.
|
||||
func (container *container) Shutdown() error {
|
||||
err := container.system.Shutdown(context.Background())
|
||||
if err != nil {
|
||||
return convertSystemError(err, container)
|
||||
}
|
||||
return &ContainerError{Container: container, Err: ErrVmcomputeOperationPending, Operation: "hcsshim::ComputeSystem::Shutdown"}
|
||||
}
|
||||
|
||||
// Terminate requests a container terminate, but it may not actually be terminated until Wait() succeeds.
|
||||
func (container *container) Terminate() error {
|
||||
err := container.system.Terminate(context.Background())
|
||||
if err != nil {
|
||||
return convertSystemError(err, container)
|
||||
}
|
||||
return &ContainerError{Container: container, Err: ErrVmcomputeOperationPending, Operation: "hcsshim::ComputeSystem::Terminate"}
|
||||
}
|
||||
|
||||
// Waits synchronously waits for the container to shutdown or terminate.
|
||||
func (container *container) Wait() error {
|
||||
err := container.system.Wait()
|
||||
if err == nil {
|
||||
err = container.system.ExitError()
|
||||
}
|
||||
return convertSystemError(err, container)
|
||||
}
|
||||
|
||||
// WaitTimeout synchronously waits for the container to terminate or the duration to elapse. It
|
||||
// returns false if timeout occurs.
|
||||
func (container *container) WaitTimeout(timeout time.Duration) error {
|
||||
container.waitOnce.Do(func() {
|
||||
container.waitCh = make(chan struct{})
|
||||
go func() {
|
||||
container.waitErr = container.Wait()
|
||||
close(container.waitCh)
|
||||
}()
|
||||
})
|
||||
t := time.NewTimer(timeout)
|
||||
defer t.Stop()
|
||||
select {
|
||||
case <-t.C:
|
||||
return &ContainerError{Container: container, Err: ErrTimeout, Operation: "hcsshim::ComputeSystem::Wait"}
|
||||
case <-container.waitCh:
|
||||
return container.waitErr
|
||||
}
|
||||
}
|
||||
|
||||
// Pause pauses the execution of a container.
|
||||
func (container *container) Pause() error {
|
||||
return convertSystemError(container.system.Pause(context.Background()), container)
|
||||
}
|
||||
|
||||
// Resume resumes the execution of a container.
|
||||
func (container *container) Resume() error {
|
||||
return convertSystemError(container.system.Resume(context.Background()), container)
|
||||
}
|
||||
|
||||
// HasPendingUpdates returns true if the container has updates pending to install
|
||||
func (container *container) HasPendingUpdates() (bool, error) {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// Statistics returns statistics for the container. This is a legacy v1 call
|
||||
func (container *container) Statistics() (Statistics, error) {
|
||||
properties, err := container.system.Properties(context.Background(), schema1.PropertyTypeStatistics)
|
||||
if err != nil {
|
||||
return Statistics{}, convertSystemError(err, container)
|
||||
}
|
||||
|
||||
return properties.Statistics, nil
|
||||
}
|
||||
|
||||
// ProcessList returns an array of ProcessListItems for the container. This is a legacy v1 call
|
||||
func (container *container) ProcessList() ([]ProcessListItem, error) {
|
||||
properties, err := container.system.Properties(context.Background(), schema1.PropertyTypeProcessList)
|
||||
if err != nil {
|
||||
return nil, convertSystemError(err, container)
|
||||
}
|
||||
|
||||
return properties.ProcessList, nil
|
||||
}
|
||||
|
||||
// This is a legacy v1 call
|
||||
func (container *container) MappedVirtualDisks() (map[int]MappedVirtualDiskController, error) {
|
||||
properties, err := container.system.Properties(context.Background(), schema1.PropertyTypeMappedVirtualDisk)
|
||||
if err != nil {
|
||||
return nil, convertSystemError(err, container)
|
||||
}
|
||||
|
||||
return properties.MappedVirtualDiskControllers, nil
|
||||
}
|
||||
|
||||
// CreateProcess launches a new process within the container.
|
||||
func (container *container) CreateProcess(c *ProcessConfig) (Process, error) {
|
||||
p, err := container.system.CreateProcess(context.Background(), c)
|
||||
if err != nil {
|
||||
return nil, convertSystemError(err, container)
|
||||
}
|
||||
return &process{p: p.(*hcs.Process)}, nil
|
||||
}
|
||||
|
||||
// OpenProcess gets an interface to an existing process within the container.
|
||||
func (container *container) OpenProcess(pid int) (Process, error) {
|
||||
p, err := container.system.OpenProcess(context.Background(), pid)
|
||||
if err != nil {
|
||||
return nil, convertSystemError(err, container)
|
||||
}
|
||||
return &process{p: p}, nil
|
||||
}
|
||||
|
||||
// Close cleans up any state associated with the container but does not terminate or wait for it.
|
||||
func (container *container) Close() error {
|
||||
return convertSystemError(container.system.Close(), container)
|
||||
}
|
||||
|
||||
// Modify the System
|
||||
func (container *container) Modify(config *ResourceModificationRequestResponse) error {
|
||||
return convertSystemError(container.system.Modify(context.Background(), config), container)
|
||||
}
|
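A hedged usage sketch of the legacy v1 surface removed above (OpenContainer, Statistics, Shutdown, WaitTimeout). The container ID is a placeholder, and the program only builds on Windows, where package hcsshim is supported.

// +build windows

package main

import (
	"fmt"
	"time"

	"github.com/Microsoft/hcsshim"
)

func main() {
	// "my-container" is a placeholder ID; the compute system must already exist.
	c, err := hcsshim.OpenContainer("my-container")
	if err != nil {
		fmt.Println("open failed:", err)
		return
	}
	defer c.Close()

	// Statistics issues a legacy v1 properties query against HCS.
	if stats, err := c.Statistics(); err == nil {
		fmt.Printf("stats: %+v\n", stats)
	}

	// Shutdown completes asynchronously; a pending error means "wait for it".
	if err := c.Shutdown(); err != nil && !hcsshim.IsPending(err) {
		fmt.Println("shutdown failed:", err)
		return
	}
	if err := c.WaitTimeout(30 * time.Second); err != nil {
		fmt.Println("wait failed:", err)
	}
}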
257
vendor/github.com/Microsoft/hcsshim/errors.go
generated
vendored
@ -1,257 +0,0 @@
|
||||
package hcsshim
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"syscall"
|
||||
|
||||
"github.com/Microsoft/hcsshim/internal/hns"
|
||||
|
||||
"github.com/Microsoft/hcsshim/internal/hcs"
|
||||
"github.com/Microsoft/hcsshim/internal/hcserror"
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrComputeSystemDoesNotExist is an error encountered when the container being operated on no longer exists = hcs.exist
|
||||
ErrComputeSystemDoesNotExist = hcs.ErrComputeSystemDoesNotExist
|
||||
|
||||
// ErrElementNotFound is an error encountered when the object being referenced does not exist
|
||||
ErrElementNotFound = hcs.ErrElementNotFound
|
||||
|
||||
// ErrElementNotFound is an error encountered when the object being referenced does not exist
|
||||
ErrNotSupported = hcs.ErrNotSupported
|
||||
|
||||
// ErrInvalidData is an error encountered when the request being sent to hcs is invalid/unsupported
|
||||
// decimal -2147024883 / hex 0x8007000d
|
||||
ErrInvalidData = hcs.ErrInvalidData
|
||||
|
||||
// ErrHandleClose is an error encountered when the handle generating the notification being waited on has been closed
|
||||
ErrHandleClose = hcs.ErrHandleClose
|
||||
|
||||
// ErrAlreadyClosed is an error encountered when using a handle that has been closed by the Close method
|
||||
ErrAlreadyClosed = hcs.ErrAlreadyClosed
|
||||
|
||||
// ErrInvalidNotificationType is an error encountered when an invalid notification type is used
|
||||
ErrInvalidNotificationType = hcs.ErrInvalidNotificationType
|
||||
|
||||
// ErrInvalidProcessState is an error encountered when the process is not in a valid state for the requested operation
|
||||
ErrInvalidProcessState = hcs.ErrInvalidProcessState
|
||||
|
||||
// ErrTimeout is an error encountered when waiting on a notification times out
|
||||
ErrTimeout = hcs.ErrTimeout
|
||||
|
||||
// ErrUnexpectedContainerExit is the error encountered when a container exits while waiting for
|
||||
// a different expected notification
|
||||
ErrUnexpectedContainerExit = hcs.ErrUnexpectedContainerExit
|
||||
|
||||
// ErrUnexpectedProcessAbort is the error encountered when communication with the compute service
|
||||
// is lost while waiting for a notification
|
||||
ErrUnexpectedProcessAbort = hcs.ErrUnexpectedProcessAbort
|
||||
|
||||
// ErrUnexpectedValue is an error encountered when hcs returns an invalid value
|
||||
ErrUnexpectedValue = hcs.ErrUnexpectedValue
|
||||
|
||||
// ErrVmcomputeAlreadyStopped is an error encountered when a shutdown or terminate request is made on a stopped container
|
||||
ErrVmcomputeAlreadyStopped = hcs.ErrVmcomputeAlreadyStopped
|
||||
|
||||
// ErrVmcomputeOperationPending is an error encountered when the operation is being completed asynchronously
|
||||
ErrVmcomputeOperationPending = hcs.ErrVmcomputeOperationPending
|
||||
|
||||
// ErrVmcomputeOperationInvalidState is an error encountered when the compute system is not in a valid state for the requested operation
|
||||
ErrVmcomputeOperationInvalidState = hcs.ErrVmcomputeOperationInvalidState
|
||||
|
||||
// ErrProcNotFound is an error encountered when the the process cannot be found
|
||||
ErrProcNotFound = hcs.ErrProcNotFound
|
||||
|
||||
// ErrVmcomputeOperationAccessIsDenied is an error which can be encountered when enumerating compute systems in RS1/RS2
|
||||
// builds when the underlying silo might be in the process of terminating. HCS was fixed in RS3.
|
||||
ErrVmcomputeOperationAccessIsDenied = hcs.ErrVmcomputeOperationAccessIsDenied
|
||||
|
||||
// ErrVmcomputeInvalidJSON is an error encountered when the compute system does not support/understand the messages sent by management
|
||||
ErrVmcomputeInvalidJSON = hcs.ErrVmcomputeInvalidJSON
|
||||
|
||||
// ErrVmcomputeUnknownMessage is an error encountered guest compute system doesn't support the message
|
||||
ErrVmcomputeUnknownMessage = hcs.ErrVmcomputeUnknownMessage
|
||||
|
||||
// ErrNotSupported is an error encountered when hcs doesn't support the request
|
||||
ErrPlatformNotSupported = hcs.ErrPlatformNotSupported
|
||||
)
|
||||
|
||||
type EndpointNotFoundError = hns.EndpointNotFoundError
|
||||
type NetworkNotFoundError = hns.NetworkNotFoundError
|
||||
|
||||
// ProcessError is an error encountered in HCS during an operation on a Process object
|
||||
type ProcessError struct {
|
||||
Process *process
|
||||
Operation string
|
||||
ExtraInfo string
|
||||
Err error
|
||||
Events []hcs.ErrorEvent
|
||||
}
|
||||
|
||||
// ContainerError is an error encountered in HCS during an operation on a Container object
|
||||
type ContainerError struct {
|
||||
Container *container
|
||||
Operation string
|
||||
ExtraInfo string
|
||||
Err error
|
||||
Events []hcs.ErrorEvent
|
||||
}
|
||||
|
||||
func (e *ContainerError) Error() string {
|
||||
if e == nil {
|
||||
return "<nil>"
|
||||
}
|
||||
|
||||
if e.Container == nil {
|
||||
return "unexpected nil container for error: " + e.Err.Error()
|
||||
}
|
||||
|
||||
s := "container " + e.Container.system.ID()
|
||||
|
||||
if e.Operation != "" {
|
||||
s += " encountered an error during " + e.Operation
|
||||
}
|
||||
|
||||
switch e.Err.(type) {
|
||||
case nil:
|
||||
break
|
||||
case syscall.Errno:
|
||||
s += fmt.Sprintf(": failure in a Windows system call: %s (0x%x)", e.Err, hcserror.Win32FromError(e.Err))
|
||||
default:
|
||||
s += fmt.Sprintf(": %s", e.Err.Error())
|
||||
}
|
||||
|
||||
for _, ev := range e.Events {
|
||||
s += "\n" + ev.String()
|
||||
}
|
||||
|
||||
if e.ExtraInfo != "" {
|
||||
s += " extra info: " + e.ExtraInfo
|
||||
}
|
||||
|
||||
return s
|
||||
}
|
||||
|
||||
func makeContainerError(container *container, operation string, extraInfo string, err error) error {
|
||||
// Don't double wrap errors
|
||||
if _, ok := err.(*ContainerError); ok {
|
||||
return err
|
||||
}
|
||||
containerError := &ContainerError{Container: container, Operation: operation, ExtraInfo: extraInfo, Err: err}
|
||||
return containerError
|
||||
}
|
||||
|
||||
func (e *ProcessError) Error() string {
|
||||
if e == nil {
|
||||
return "<nil>"
|
||||
}
|
||||
|
||||
if e.Process == nil {
|
||||
return "Unexpected nil process for error: " + e.Err.Error()
|
||||
}
|
||||
|
||||
s := fmt.Sprintf("process %d in container %s", e.Process.p.Pid(), e.Process.p.SystemID())
|
||||
if e.Operation != "" {
|
||||
s += " encountered an error during " + e.Operation
|
||||
}
|
||||
|
||||
switch e.Err.(type) {
|
||||
case nil:
|
||||
break
|
||||
case syscall.Errno:
|
||||
s += fmt.Sprintf(": failure in a Windows system call: %s (0x%x)", e.Err, hcserror.Win32FromError(e.Err))
|
||||
default:
|
||||
s += fmt.Sprintf(": %s", e.Err.Error())
|
||||
}
|
||||
|
||||
for _, ev := range e.Events {
|
||||
s += "\n" + ev.String()
|
||||
}
|
||||
|
||||
return s
|
||||
}
|
||||
|
||||
func makeProcessError(process *process, operation string, extraInfo string, err error) error {
|
||||
// Don't double wrap errors
|
||||
if _, ok := err.(*ProcessError); ok {
|
||||
return err
|
||||
}
|
||||
processError := &ProcessError{Process: process, Operation: operation, ExtraInfo: extraInfo, Err: err}
|
||||
return processError
|
||||
}
|
||||
|
||||
// IsNotExist checks if an error is caused by the Container or Process not existing.
|
||||
// Note: Currently, ErrElementNotFound can mean that a Process has either
|
||||
// already exited, or does not exist. Both IsAlreadyStopped and IsNotExist
|
||||
// will currently return true when the error is ErrElementNotFound or ErrProcNotFound.
|
||||
func IsNotExist(err error) bool {
|
||||
if _, ok := err.(EndpointNotFoundError); ok {
|
||||
return true
|
||||
}
|
||||
if _, ok := err.(NetworkNotFoundError); ok {
|
||||
return true
|
||||
}
|
||||
return hcs.IsNotExist(getInnerError(err))
|
||||
}
|
||||
|
||||
// IsAlreadyClosed checks if an error is caused by the Container or Process having been
|
||||
// already closed by a call to the Close() method.
|
||||
func IsAlreadyClosed(err error) bool {
|
||||
return hcs.IsAlreadyClosed(getInnerError(err))
|
||||
}
|
||||
|
||||
// IsPending returns a boolean indicating whether the error is that
|
||||
// the requested operation is being completed in the background.
|
||||
func IsPending(err error) bool {
|
||||
return hcs.IsPending(getInnerError(err))
|
||||
}
|
||||
|
||||
// IsTimeout returns a boolean indicating whether the error is caused by
|
||||
// a timeout waiting for the operation to complete.
|
||||
func IsTimeout(err error) bool {
|
||||
return hcs.IsTimeout(getInnerError(err))
|
||||
}
|
||||
|
||||
// IsAlreadyStopped returns a boolean indicating whether the error is caused by
|
||||
// a Container or Process being already stopped.
|
||||
// Note: Currently, ErrElementNotFound can mean that a Process has either
|
||||
// already exited, or does not exist. Both IsAlreadyStopped and IsNotExist
|
||||
// will currently return true when the error is ErrElementNotFound or ErrProcNotFound.
|
||||
func IsAlreadyStopped(err error) bool {
|
||||
return hcs.IsAlreadyStopped(getInnerError(err))
|
||||
}
|
||||
|
||||
// IsNotSupported returns a boolean indicating whether the error is caused by
|
||||
// unsupported platform requests
|
||||
// Note: Currently Unsupported platform requests can be mean either
|
||||
// ErrVmcomputeInvalidJSON, ErrInvalidData, ErrNotSupported or ErrVmcomputeUnknownMessage
|
||||
// is thrown from the Platform
|
||||
func IsNotSupported(err error) bool {
|
||||
return hcs.IsNotSupported(getInnerError(err))
|
||||
}
|
||||
|
||||
func getInnerError(err error) error {
|
||||
switch pe := err.(type) {
|
||||
case nil:
|
||||
return nil
|
||||
case *ContainerError:
|
||||
err = pe.Err
|
||||
case *ProcessError:
|
||||
err = pe.Err
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func convertSystemError(err error, c *container) error {
|
||||
if serr, ok := err.(*hcs.SystemError); ok {
|
||||
return &ContainerError{Container: c, Operation: serr.Op, ExtraInfo: serr.Extra, Err: serr.Err, Events: serr.Events}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func convertProcessError(err error, p *process) error {
|
||||
if perr, ok := err.(*hcs.ProcessError); ok {
|
||||
return &ProcessError{Process: p, Operation: perr.Op, Err: perr.Err, Events: perr.Events}
|
||||
}
|
||||
return err
|
||||
}
|
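The classification helpers above (IsNotExist, IsPending, IsNotSupported, and friends) are what callers use to branch on HCS failures; a minimal, Windows-only sketch with a placeholder container ID:

// +build windows

package main

import (
	"fmt"

	"github.com/Microsoft/hcsshim"
)

func main() {
	// "web-01" is a placeholder ID used only for illustration.
	c, err := hcsshim.OpenContainer("web-01")
	if err != nil {
		switch {
		case hcsshim.IsNotExist(err):
			fmt.Println("container does not exist")
		case hcsshim.IsNotSupported(err):
			fmt.Println("request not supported on this platform")
		default:
			fmt.Println("open failed:", err)
		}
		return
	}
	defer c.Close()

	// Terminate completes asynchronously; a pending error is expected here.
	if err := c.Terminate(); err != nil && !hcsshim.IsPending(err) {
		fmt.Println("terminate failed:", err)
	}
}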
1263
vendor/github.com/Microsoft/hcsshim/ext4/internal/compactext4/compact.go
generated
vendored
File diff suppressed because it is too large
411
vendor/github.com/Microsoft/hcsshim/ext4/internal/format/format.go
generated
vendored
@ -1,411 +0,0 @@
|
||||
package format
|
||||
|
||||
type SuperBlock struct {
|
||||
InodesCount uint32
|
||||
BlocksCountLow uint32
|
||||
RootBlocksCountLow uint32
|
||||
FreeBlocksCountLow uint32
|
||||
FreeInodesCount uint32
|
||||
FirstDataBlock uint32
|
||||
LogBlockSize uint32
|
||||
LogClusterSize uint32
|
||||
BlocksPerGroup uint32
|
||||
ClustersPerGroup uint32
|
||||
InodesPerGroup uint32
|
||||
Mtime uint32
|
||||
Wtime uint32
|
||||
MountCount uint16
|
||||
MaxMountCount uint16
|
||||
Magic uint16
|
||||
State uint16
|
||||
Errors uint16
|
||||
MinorRevisionLevel uint16
|
||||
LastCheck uint32
|
||||
CheckInterval uint32
|
||||
CreatorOS uint32
|
||||
RevisionLevel uint32
|
||||
DefaultReservedUid uint16
|
||||
DefaultReservedGid uint16
|
||||
FirstInode uint32
|
||||
InodeSize uint16
|
||||
BlockGroupNr uint16
|
||||
FeatureCompat CompatFeature
|
||||
FeatureIncompat IncompatFeature
|
||||
FeatureRoCompat RoCompatFeature
|
||||
UUID [16]uint8
|
||||
VolumeName [16]byte
|
||||
LastMounted [64]byte
|
||||
AlgorithmUsageBitmap uint32
|
||||
PreallocBlocks uint8
|
||||
PreallocDirBlocks uint8
|
||||
ReservedGdtBlocks uint16
|
||||
JournalUUID [16]uint8
|
||||
JournalInum uint32
|
||||
JournalDev uint32
|
||||
LastOrphan uint32
|
||||
HashSeed [4]uint32
|
||||
DefHashVersion uint8
|
||||
JournalBackupType uint8
|
||||
DescSize uint16
|
||||
DefaultMountOpts uint32
|
||||
FirstMetaBg uint32
|
||||
MkfsTime uint32
|
||||
JournalBlocks [17]uint32
|
||||
BlocksCountHigh uint32
|
||||
RBlocksCountHigh uint32
|
||||
FreeBlocksCountHigh uint32
|
||||
MinExtraIsize uint16
|
||||
WantExtraIsize uint16
|
||||
Flags uint32
|
||||
RaidStride uint16
|
||||
MmpInterval uint16
|
||||
MmpBlock uint64
|
||||
RaidStripeWidth uint32
|
||||
LogGroupsPerFlex uint8
|
||||
ChecksumType uint8
|
||||
ReservedPad uint16
|
||||
KbytesWritten uint64
|
||||
SnapshotInum uint32
|
||||
SnapshotID uint32
|
||||
SnapshotRBlocksCount uint64
|
||||
SnapshotList uint32
|
||||
ErrorCount uint32
|
||||
FirstErrorTime uint32
|
||||
FirstErrorInode uint32
|
||||
FirstErrorBlock uint64
|
||||
FirstErrorFunc [32]uint8
|
||||
FirstErrorLine uint32
|
||||
LastErrorTime uint32
|
||||
LastErrorInode uint32
|
||||
LastErrorLine uint32
|
||||
LastErrorBlock uint64
|
||||
LastErrorFunc [32]uint8
|
||||
MountOpts [64]uint8
|
||||
UserQuotaInum uint32
|
||||
GroupQuotaInum uint32
|
||||
OverheadBlocks uint32
|
||||
BackupBgs [2]uint32
|
||||
EncryptAlgos [4]uint8
|
||||
EncryptPwSalt [16]uint8
|
||||
LpfInode uint32
|
||||
ProjectQuotaInum uint32
|
||||
ChecksumSeed uint32
|
||||
WtimeHigh uint8
|
||||
MtimeHigh uint8
|
||||
MkfsTimeHigh uint8
|
||||
LastcheckHigh uint8
|
||||
FirstErrorTimeHigh uint8
|
||||
LastErrorTimeHigh uint8
|
||||
Pad [2]uint8
|
||||
Reserved [96]uint32
|
||||
Checksum uint32
|
||||
}
|
||||
|
||||
const SuperBlockMagic uint16 = 0xef53
|
||||
|
||||
type CompatFeature uint32
|
||||
type IncompatFeature uint32
|
||||
type RoCompatFeature uint32
|
||||
|
||||
const (
|
||||
CompatDirPrealloc CompatFeature = 0x1
|
||||
CompatImagicInodes CompatFeature = 0x2
|
||||
CompatHasJournal CompatFeature = 0x4
|
||||
CompatExtAttr CompatFeature = 0x8
|
||||
CompatResizeInode CompatFeature = 0x10
|
||||
CompatDirIndex CompatFeature = 0x20
|
||||
CompatLazyBg CompatFeature = 0x40
|
||||
CompatExcludeInode CompatFeature = 0x80
|
||||
CompatExcludeBitmap CompatFeature = 0x100
|
||||
CompatSparseSuper2 CompatFeature = 0x200
|
||||
|
||||
IncompatCompression IncompatFeature = 0x1
|
||||
IncompatFiletype IncompatFeature = 0x2
|
||||
IncompatRecover IncompatFeature = 0x4
|
||||
IncompatJournalDev IncompatFeature = 0x8
|
||||
IncompatMetaBg IncompatFeature = 0x10
|
||||
IncompatExtents IncompatFeature = 0x40
|
||||
Incompat_64Bit IncompatFeature = 0x80
|
||||
IncompatMmp IncompatFeature = 0x100
|
||||
IncompatFlexBg IncompatFeature = 0x200
|
||||
IncompatEaInode IncompatFeature = 0x400
|
||||
IncompatDirdata IncompatFeature = 0x1000
|
||||
IncompatCsumSeed IncompatFeature = 0x2000
|
||||
IncompatLargedir IncompatFeature = 0x4000
|
||||
IncompatInlineData IncompatFeature = 0x8000
|
||||
IncompatEncrypt IncompatFeature = 0x10000
|
||||
|
||||
RoCompatSparseSuper RoCompatFeature = 0x1
|
||||
RoCompatLargeFile RoCompatFeature = 0x2
|
||||
RoCompatBtreeDir RoCompatFeature = 0x4
|
||||
RoCompatHugeFile RoCompatFeature = 0x8
|
||||
RoCompatGdtCsum RoCompatFeature = 0x10
|
||||
RoCompatDirNlink RoCompatFeature = 0x20
|
||||
RoCompatExtraIsize RoCompatFeature = 0x40
|
||||
RoCompatHasSnapshot RoCompatFeature = 0x80
|
||||
RoCompatQuota RoCompatFeature = 0x100
|
||||
RoCompatBigalloc RoCompatFeature = 0x200
|
||||
RoCompatMetadataCsum RoCompatFeature = 0x400
|
||||
RoCompatReplica RoCompatFeature = 0x800
|
||||
RoCompatReadonly RoCompatFeature = 0x1000
|
||||
RoCompatProject RoCompatFeature = 0x2000
|
||||
)
|
||||
|
||||
type BlockGroupFlag uint16
|
||||
|
||||
const (
|
||||
BlockGroupInodeUninit BlockGroupFlag = 0x1
|
||||
BlockGroupBlockUninit BlockGroupFlag = 0x2
|
||||
BlockGroupInodeZeroed BlockGroupFlag = 0x4
|
||||
)
|
||||
|
||||
type GroupDescriptor struct {
|
||||
BlockBitmapLow uint32
|
||||
InodeBitmapLow uint32
|
||||
InodeTableLow uint32
|
||||
FreeBlocksCountLow uint16
|
||||
FreeInodesCountLow uint16
|
||||
UsedDirsCountLow uint16
|
||||
Flags BlockGroupFlag
|
||||
ExcludeBitmapLow uint32
|
||||
BlockBitmapCsumLow uint16
|
||||
InodeBitmapCsumLow uint16
|
||||
ItableUnusedLow uint16
|
||||
Checksum uint16
|
||||
}
|
||||
|
||||
type GroupDescriptor64 struct {
|
||||
GroupDescriptor
|
||||
BlockBitmapHigh uint32
|
||||
InodeBitmapHigh uint32
|
||||
InodeTableHigh uint32
|
||||
FreeBlocksCountHigh uint16
|
||||
FreeInodesCountHigh uint16
|
||||
UsedDirsCountHigh uint16
|
||||
ItableUnusedHigh uint16
|
||||
ExcludeBitmapHigh uint32
|
||||
BlockBitmapCsumHigh uint16
|
||||
InodeBitmapCsumHigh uint16
|
||||
Reserved uint32
|
||||
}
|
||||
|
||||
const (
|
||||
S_IXOTH = 0x1
|
||||
S_IWOTH = 0x2
|
||||
S_IROTH = 0x4
|
||||
S_IXGRP = 0x8
|
||||
S_IWGRP = 0x10
|
||||
S_IRGRP = 0x20
|
||||
S_IXUSR = 0x40
|
||||
S_IWUSR = 0x80
|
||||
S_IRUSR = 0x100
|
||||
S_ISVTX = 0x200
|
||||
S_ISGID = 0x400
|
||||
S_ISUID = 0x800
|
||||
S_IFIFO = 0x1000
|
||||
S_IFCHR = 0x2000
|
||||
S_IFDIR = 0x4000
|
||||
S_IFBLK = 0x6000
|
||||
S_IFREG = 0x8000
|
||||
S_IFLNK = 0xA000
|
||||
S_IFSOCK = 0xC000
|
||||
|
||||
TypeMask uint16 = 0xF000
|
||||
)
|
||||
|
||||
type InodeNumber uint32
|
||||
|
||||
const (
|
||||
InodeRoot = 2
|
||||
)
|
||||
|
||||
type Inode struct {
|
||||
Mode uint16
|
||||
Uid uint16
|
||||
SizeLow uint32
|
||||
Atime uint32
|
||||
Ctime uint32
|
||||
Mtime uint32
|
||||
Dtime uint32
|
||||
Gid uint16
|
||||
LinksCount uint16
|
||||
BlocksLow uint32
|
||||
Flags InodeFlag
|
||||
Version uint32
|
||||
Block [60]byte
|
||||
Generation uint32
|
||||
XattrBlockLow uint32
|
||||
SizeHigh uint32
|
||||
ObsoleteFragmentAddr uint32
|
||||
BlocksHigh uint16
|
||||
XattrBlockHigh uint16
|
||||
UidHigh uint16
|
||||
GidHigh uint16
|
||||
ChecksumLow uint16
|
||||
Reserved uint16
|
||||
ExtraIsize uint16
|
||||
ChecksumHigh uint16
|
||||
CtimeExtra uint32
|
||||
MtimeExtra uint32
|
||||
AtimeExtra uint32
|
||||
Crtime uint32
|
||||
CrtimeExtra uint32
|
||||
VersionHigh uint32
|
||||
Projid uint32
|
||||
}
|
||||
|
||||
type InodeFlag uint32
|
||||
|
||||
const (
|
||||
InodeFlagSecRm InodeFlag = 0x1
|
||||
InodeFlagUnRm InodeFlag = 0x2
|
||||
InodeFlagCompressed InodeFlag = 0x4
|
||||
InodeFlagSync InodeFlag = 0x8
|
||||
InodeFlagImmutable InodeFlag = 0x10
|
||||
InodeFlagAppend InodeFlag = 0x20
|
||||
InodeFlagNoDump InodeFlag = 0x40
|
||||
InodeFlagNoAtime InodeFlag = 0x80
|
||||
InodeFlagDirtyCompressed InodeFlag = 0x100
|
||||
InodeFlagCompressedClusters InodeFlag = 0x200
|
||||
InodeFlagNoCompress InodeFlag = 0x400
|
||||
InodeFlagEncrypted InodeFlag = 0x800
|
||||
InodeFlagHashedIndex InodeFlag = 0x1000
|
||||
InodeFlagMagic InodeFlag = 0x2000
|
||||
InodeFlagJournalData InodeFlag = 0x4000
|
||||
InodeFlagNoTail InodeFlag = 0x8000
|
||||
InodeFlagDirSync InodeFlag = 0x10000
|
||||
InodeFlagTopDir InodeFlag = 0x20000
|
||||
InodeFlagHugeFile InodeFlag = 0x40000
|
||||
InodeFlagExtents InodeFlag = 0x80000
|
||||
InodeFlagEaInode InodeFlag = 0x200000
|
||||
InodeFlagEOFBlocks InodeFlag = 0x400000
|
||||
InodeFlagSnapfile InodeFlag = 0x01000000
|
||||
InodeFlagSnapfileDeleted InodeFlag = 0x04000000
|
||||
InodeFlagSnapfileShrunk InodeFlag = 0x08000000
|
||||
InodeFlagInlineData InodeFlag = 0x10000000
|
||||
InodeFlagProjectIDInherit InodeFlag = 0x20000000
|
||||
InodeFlagReserved InodeFlag = 0x80000000
|
||||
)
|
||||
|
||||
const (
|
||||
MaxLinks = 65000
|
||||
)
|
||||
|
||||
type ExtentHeader struct {
|
||||
Magic uint16
|
||||
Entries uint16
|
||||
Max uint16
|
||||
Depth uint16
|
||||
Generation uint32
|
||||
}
|
||||
|
||||
const ExtentHeaderMagic uint16 = 0xf30a
|
||||
|
||||
type ExtentIndexNode struct {
|
||||
Block uint32
|
||||
LeafLow uint32
|
||||
LeafHigh uint16
|
||||
Unused uint16
|
||||
}
|
||||
|
||||
type ExtentLeafNode struct {
|
||||
Block uint32
|
||||
Length uint16
|
||||
StartHigh uint16
|
||||
StartLow uint32
|
||||
}
|
||||
|
||||
type ExtentTail struct {
|
||||
Checksum uint32
|
||||
}
|
||||
|
||||
type DirectoryEntry struct {
|
||||
Inode InodeNumber
|
||||
RecordLength uint16
|
||||
NameLength uint8
|
||||
FileType FileType
|
||||
//Name []byte
|
||||
}
|
||||
|
||||
type FileType uint8
|
||||
|
||||
const (
|
||||
FileTypeUnknown FileType = 0x0
|
||||
FileTypeRegular FileType = 0x1
|
||||
FileTypeDirectory FileType = 0x2
|
||||
FileTypeCharacter FileType = 0x3
|
||||
FileTypeBlock FileType = 0x4
|
||||
FileTypeFIFO FileType = 0x5
|
||||
FileTypeSocket FileType = 0x6
|
||||
FileTypeSymbolicLink FileType = 0x7
|
||||
)
|
||||
|
||||
type DirectoryEntryTail struct {
|
||||
ReservedZero1 uint32
|
||||
RecordLength uint16
|
||||
ReservedZero2 uint8
|
||||
FileType uint8
|
||||
Checksum uint32
|
||||
}
|
||||
|
||||
type DirectoryTreeRoot struct {
|
||||
Dot DirectoryEntry
|
||||
DotName [4]byte
|
||||
DotDot DirectoryEntry
|
||||
DotDotName [4]byte
|
||||
ReservedZero uint32
|
||||
HashVersion uint8
|
||||
InfoLength uint8
|
||||
IndirectLevels uint8
|
||||
UnusedFlags uint8
|
||||
Limit uint16
|
||||
Count uint16
|
||||
Block uint32
|
||||
//Entries []DirectoryTreeEntry
|
||||
}
|
||||
|
||||
type DirectoryTreeNode struct {
|
||||
FakeInode uint32
|
||||
FakeRecordLength uint16
|
||||
NameLength uint8
|
||||
FileType uint8
|
||||
Limit uint16
|
||||
Count uint16
|
||||
Block uint32
|
||||
//Entries []DirectoryTreeEntry
|
||||
}
|
||||
|
||||
type DirectoryTreeEntry struct {
|
||||
Hash uint32
|
||||
Block uint32
|
||||
}
|
||||
|
||||
type DirectoryTreeTail struct {
|
||||
Reserved uint32
|
||||
Checksum uint32
|
||||
}
|
||||
|
||||
type XAttrInodeBodyHeader struct {
|
||||
Magic uint32
|
||||
}
|
||||
|
||||
type XAttrHeader struct {
|
||||
Magic uint32
|
||||
ReferenceCount uint32
|
||||
Blocks uint32
|
||||
Hash uint32
|
||||
Checksum uint32
|
||||
Reserved [3]uint32
|
||||
}
|
||||
|
||||
const XAttrHeaderMagic uint32 = 0xea020000
|
||||
|
||||
type XAttrEntry struct {
|
||||
NameLength uint8
|
||||
NameIndex uint8
|
||||
ValueOffset uint16
|
||||
ValueInum uint32
|
||||
ValueSize uint32
|
||||
Hash uint32
|
||||
//Name []byte
|
||||
}
|
174
vendor/github.com/Microsoft/hcsshim/ext4/tar2ext4/tar2ext4.go
generated
vendored
@ -1,174 +0,0 @@
package tar2ext4

import (
	"archive/tar"
	"bufio"
	"encoding/binary"
	"io"
	"path"
	"strings"

	"github.com/Microsoft/hcsshim/ext4/internal/compactext4"
)

type params struct {
	convertWhiteout bool
	appendVhdFooter bool
	ext4opts        []compactext4.Option
}

// Option is the type for optional parameters to Convert.
type Option func(*params)

// ConvertWhiteout instructs the converter to convert OCI-style whiteouts
// (beginning with .wh.) to overlay-style whiteouts.
func ConvertWhiteout(p *params) {
	p.convertWhiteout = true
}

// AppendVhdFooter instructs the converter to add a fixed VHD footer to the
// file.
func AppendVhdFooter(p *params) {
	p.appendVhdFooter = true
}

// InlineData instructs the converter to write small files into the inode
// structures directly. This creates smaller images but currently is not
// compatible with DAX.
func InlineData(p *params) {
	p.ext4opts = append(p.ext4opts, compactext4.InlineData)
}

// MaximumDiskSize instructs the writer to limit the disk size to the specified
// value. This also reserves enough metadata space for the specified disk size.
// If not provided, then 16GB is the default.
func MaximumDiskSize(size int64) Option {
	return func(p *params) {
		p.ext4opts = append(p.ext4opts, compactext4.MaximumDiskSize(size))
	}
}

const (
	whiteoutPrefix = ".wh."
	opaqueWhiteout = ".wh..wh..opq"
)

// Convert writes a compact ext4 file system image that contains the files in the
// input tar stream.
func Convert(r io.Reader, w io.ReadWriteSeeker, options ...Option) error {
	var p params
	for _, opt := range options {
		opt(&p)
	}
	t := tar.NewReader(bufio.NewReader(r))
	fs := compactext4.NewWriter(w, p.ext4opts...)
	for {
		hdr, err := t.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			return err
		}

		if p.convertWhiteout {
			dir, name := path.Split(hdr.Name)
			if strings.HasPrefix(name, whiteoutPrefix) {
				if name == opaqueWhiteout {
					// Update the directory with the appropriate xattr.
					f, err := fs.Stat(dir)
					if err != nil {
						return err
					}
					f.Xattrs["trusted.overlay.opaque"] = []byte("y")
					err = fs.Create(dir, f)
					if err != nil {
						return err
					}
				} else {
					// Create an overlay-style whiteout.
					f := &compactext4.File{
						Mode:     compactext4.S_IFCHR,
						Devmajor: 0,
						Devminor: 0,
					}
					err = fs.Create(path.Join(dir, name[len(whiteoutPrefix):]), f)
					if err != nil {
						return err
					}
				}

				continue
			}
		}

		if hdr.Typeflag == tar.TypeLink {
			err = fs.Link(hdr.Linkname, hdr.Name)
			if err != nil {
				return err
			}
		} else {
			f := &compactext4.File{
				Mode:     uint16(hdr.Mode),
				Atime:    hdr.AccessTime,
				Mtime:    hdr.ModTime,
				Ctime:    hdr.ChangeTime,
				Crtime:   hdr.ModTime,
				Size:     hdr.Size,
				Uid:      uint32(hdr.Uid),
				Gid:      uint32(hdr.Gid),
				Linkname: hdr.Linkname,
				Devmajor: uint32(hdr.Devmajor),
				Devminor: uint32(hdr.Devminor),
				Xattrs:   make(map[string][]byte),
			}
			for key, value := range hdr.PAXRecords {
				const xattrPrefix = "SCHILY.xattr."
				if strings.HasPrefix(key, xattrPrefix) {
					f.Xattrs[key[len(xattrPrefix):]] = []byte(value)
				}
			}

			var typ uint16
			switch hdr.Typeflag {
			case tar.TypeReg, tar.TypeRegA:
				typ = compactext4.S_IFREG
			case tar.TypeSymlink:
				typ = compactext4.S_IFLNK
			case tar.TypeChar:
				typ = compactext4.S_IFCHR
			case tar.TypeBlock:
				typ = compactext4.S_IFBLK
			case tar.TypeDir:
				typ = compactext4.S_IFDIR
			case tar.TypeFifo:
				typ = compactext4.S_IFIFO
			}
			f.Mode &= ^compactext4.TypeMask
			f.Mode |= typ
			err = fs.Create(hdr.Name, f)
			if err != nil {
				return err
			}
			_, err = io.Copy(fs, t)
			if err != nil {
				return err
			}
		}
	}
	err := fs.Close()
	if err != nil {
		return err
	}
	if p.appendVhdFooter {
		size, err := w.Seek(0, io.SeekEnd)
		if err != nil {
			return err
		}
		err = binary.Write(w, binary.BigEndian, makeFixedVHDFooter(size))
		if err != nil {
			return err
		}
	}
	return nil
}
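Convert above accepts any tar stream and any io.ReadWriteSeeker, so a typical caller looks like the sketch below; layer.tar and layer.vhd are hypothetical paths used only for illustration.

package main

import (
	"log"
	"os"

	"github.com/Microsoft/hcsshim/ext4/tar2ext4"
)

func main() {
	// layer.tar and layer.vhd are placeholder paths for this sketch.
	in, err := os.Open("layer.tar")
	if err != nil {
		log.Fatal(err)
	}
	defer in.Close()

	out, err := os.Create("layer.vhd")
	if err != nil {
		log.Fatal(err)
	}
	defer out.Close()

	// Convert OCI whiteouts to overlay whiteouts and finish with a VHD footer.
	if err := tar2ext4.Convert(in, out, tar2ext4.ConvertWhiteout, tar2ext4.AppendVhdFooter); err != nil {
		log.Fatal(err)
	}
}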
76
vendor/github.com/Microsoft/hcsshim/ext4/tar2ext4/vhdfooter.go
generated
vendored
@ -1,76 +0,0 @@
package tar2ext4

import (
	"bytes"
	"crypto/rand"
	"encoding/binary"
)

// Constants for the VHD footer
const (
	cookieMagic            = "conectix"
	featureMask            = 0x2
	fileFormatVersionMagic = 0x00010000
	fixedDataOffset        = -1
	creatorVersionMagic    = 0x000a0000
	diskTypeFixed          = 2
)

type vhdFooter struct {
	Cookie             [8]byte
	Features           uint32
	FileFormatVersion  uint32
	DataOffset         int64
	TimeStamp          uint32
	CreatorApplication [4]byte
	CreatorVersion     uint32
	CreatorHostOS      [4]byte
	OriginalSize       int64
	CurrentSize        int64
	DiskGeometry       uint32
	DiskType           uint32
	Checksum           uint32
	UniqueID           [16]uint8
	SavedState         uint8
	Reserved           [427]uint8
}

func makeFixedVHDFooter(size int64) *vhdFooter {
	footer := &vhdFooter{
		Features:          featureMask,
		FileFormatVersion: fileFormatVersionMagic,
		DataOffset:        fixedDataOffset,
		CreatorVersion:    creatorVersionMagic,
		OriginalSize:      size,
		CurrentSize:       size,
		DiskType:          diskTypeFixed,
		UniqueID:          generateUUID(),
	}
	copy(footer.Cookie[:], cookieMagic)
	footer.Checksum = calculateCheckSum(footer)
	return footer
}

func calculateCheckSum(footer *vhdFooter) uint32 {
	oldchk := footer.Checksum
	footer.Checksum = 0

	buf := &bytes.Buffer{}
	binary.Write(buf, binary.BigEndian, footer)

	var chk uint32
	bufBytes := buf.Bytes()
	for i := 0; i < len(bufBytes); i++ {
		chk += uint32(bufBytes[i])
	}
	footer.Checksum = oldchk
	return uint32(^chk)
}

func generateUUID() [16]byte {
	res := [16]byte{}
	if _, err := rand.Read(res[:]); err != nil {
		panic(err)
	}
	return res
}
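calculateCheckSum above implements the standard VHD footer checksum: the one's complement of the byte-wise sum of the encoded footer with the Checksum field zeroed. A small self-contained sketch of the same arithmetic over an arbitrary byte slice:

package main

import "fmt"

// vhdChecksum mirrors calculateCheckSum: sum every byte of the encoded
// footer (with its Checksum field already zeroed) and return the one's
// complement of that sum.
func vhdChecksum(encodedFooter []byte) uint32 {
	var sum uint32
	for _, b := range encodedFooter {
		sum += uint32(b)
	}
	return ^sum
}

func main() {
	// Toy input, not a real 512-byte footer.
	fmt.Printf("0x%08x\n", vhdChecksum([]byte{0x01, 0x02, 0x03}))
}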
35
vendor/github.com/Microsoft/hcsshim/go.mod
generated
vendored
@ -1,35 +0,0 @@
module github.com/Microsoft/hcsshim

go 1.13

require (
	github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5
	github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f
	github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1
	github.com/containerd/containerd v1.3.2
	github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc // indirect
	github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448 // indirect
	github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3
	github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de
	github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd
	github.com/gogo/protobuf v1.3.1
	github.com/golang/protobuf v1.3.2 // indirect
	github.com/kr/pretty v0.1.0 // indirect
	github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2 // indirect
	github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f // indirect
	github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700
	github.com/pkg/errors v0.8.1
	github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7 // indirect
	github.com/sirupsen/logrus v1.4.2
	github.com/stretchr/testify v1.4.0 // indirect
	github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5
	go.opencensus.io v0.22.0
	golang.org/x/net v0.0.0-20191004110552-13f9640d40b9 // indirect
	golang.org/x/sync v0.0.0-20190423024810-112230192c58
	golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3
	google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873 // indirect
	google.golang.org/grpc v1.23.1
	gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 // indirect
	gopkg.in/yaml.v2 v2.2.8 // indirect
	gotest.tools v2.2.0+incompatible // indirect
)
223
vendor/github.com/Microsoft/hcsshim/hcn/hcn.go
generated
vendored
@ -1,223 +0,0 @@
|
||||
// Package hcn is a shim for the Host Compute Networking (HCN) service, which manages networking for Windows Server
|
||||
// containers and Hyper-V containers. Previous to RS5, HCN was referred to as Host Networking Service (HNS).
|
||||
package hcn
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"syscall"
|
||||
|
||||
"github.com/Microsoft/go-winio/pkg/guid"
|
||||
)
|
||||
|
||||
//go:generate go run ../mksyscall_windows.go -output zsyscall_windows.go hcn.go
|
||||
|
||||
/// HNS V1 API
|
||||
|
||||
//sys SetCurrentThreadCompartmentId(compartmentId uint32) (hr error) = iphlpapi.SetCurrentThreadCompartmentId
|
||||
//sys _hnsCall(method string, path string, object string, response **uint16) (hr error) = vmcompute.HNSCall?
|
||||
|
||||
/// HCN V2 API
|
||||
|
||||
// Network
|
||||
//sys hcnEnumerateNetworks(query string, networks **uint16, result **uint16) (hr error) = computenetwork.HcnEnumerateNetworks?
|
||||
//sys hcnCreateNetwork(id *_guid, settings string, network *hcnNetwork, result **uint16) (hr error) = computenetwork.HcnCreateNetwork?
|
||||
//sys hcnOpenNetwork(id *_guid, network *hcnNetwork, result **uint16) (hr error) = computenetwork.HcnOpenNetwork?
|
||||
//sys hcnModifyNetwork(network hcnNetwork, settings string, result **uint16) (hr error) = computenetwork.HcnModifyNetwork?
|
||||
//sys hcnQueryNetworkProperties(network hcnNetwork, query string, properties **uint16, result **uint16) (hr error) = computenetwork.HcnQueryNetworkProperties?
|
||||
//sys hcnDeleteNetwork(id *_guid, result **uint16) (hr error) = computenetwork.HcnDeleteNetwork?
|
||||
//sys hcnCloseNetwork(network hcnNetwork) (hr error) = computenetwork.HcnCloseNetwork?
|
||||
|
||||
// Endpoint
|
||||
//sys hcnEnumerateEndpoints(query string, endpoints **uint16, result **uint16) (hr error) = computenetwork.HcnEnumerateEndpoints?
|
||||
//sys hcnCreateEndpoint(network hcnNetwork, id *_guid, settings string, endpoint *hcnEndpoint, result **uint16) (hr error) = computenetwork.HcnCreateEndpoint?
|
||||
//sys hcnOpenEndpoint(id *_guid, endpoint *hcnEndpoint, result **uint16) (hr error) = computenetwork.HcnOpenEndpoint?
|
||||
//sys hcnModifyEndpoint(endpoint hcnEndpoint, settings string, result **uint16) (hr error) = computenetwork.HcnModifyEndpoint?
|
||||
//sys hcnQueryEndpointProperties(endpoint hcnEndpoint, query string, properties **uint16, result **uint16) (hr error) = computenetwork.HcnQueryEndpointProperties?
|
||||
//sys hcnDeleteEndpoint(id *_guid, result **uint16) (hr error) = computenetwork.HcnDeleteEndpoint?
|
||||
//sys hcnCloseEndpoint(endpoint hcnEndpoint) (hr error) = computenetwork.HcnCloseEndpoint?
|
||||
|
||||
// Namespace
|
||||
//sys hcnEnumerateNamespaces(query string, namespaces **uint16, result **uint16) (hr error) = computenetwork.HcnEnumerateNamespaces?
|
||||
//sys hcnCreateNamespace(id *_guid, settings string, namespace *hcnNamespace, result **uint16) (hr error) = computenetwork.HcnCreateNamespace?
|
||||
//sys hcnOpenNamespace(id *_guid, namespace *hcnNamespace, result **uint16) (hr error) = computenetwork.HcnOpenNamespace?
|
||||
//sys hcnModifyNamespace(namespace hcnNamespace, settings string, result **uint16) (hr error) = computenetwork.HcnModifyNamespace?
|
||||
//sys hcnQueryNamespaceProperties(namespace hcnNamespace, query string, properties **uint16, result **uint16) (hr error) = computenetwork.HcnQueryNamespaceProperties?
|
||||
//sys hcnDeleteNamespace(id *_guid, result **uint16) (hr error) = computenetwork.HcnDeleteNamespace?
|
||||
//sys hcnCloseNamespace(namespace hcnNamespace) (hr error) = computenetwork.HcnCloseNamespace?
|
||||
|
||||
// LoadBalancer
|
||||
//sys hcnEnumerateLoadBalancers(query string, loadBalancers **uint16, result **uint16) (hr error) = computenetwork.HcnEnumerateLoadBalancers?
|
||||
//sys hcnCreateLoadBalancer(id *_guid, settings string, loadBalancer *hcnLoadBalancer, result **uint16) (hr error) = computenetwork.HcnCreateLoadBalancer?
|
||||
//sys hcnOpenLoadBalancer(id *_guid, loadBalancer *hcnLoadBalancer, result **uint16) (hr error) = computenetwork.HcnOpenLoadBalancer?
|
||||
//sys hcnModifyLoadBalancer(loadBalancer hcnLoadBalancer, settings string, result **uint16) (hr error) = computenetwork.HcnModifyLoadBalancer?
|
||||
//sys hcnQueryLoadBalancerProperties(loadBalancer hcnLoadBalancer, query string, properties **uint16, result **uint16) (hr error) = computenetwork.HcnQueryLoadBalancerProperties?
|
||||
//sys hcnDeleteLoadBalancer(id *_guid, result **uint16) (hr error) = computenetwork.HcnDeleteLoadBalancer?
|
||||
//sys hcnCloseLoadBalancer(loadBalancer hcnLoadBalancer) (hr error) = computenetwork.HcnCloseLoadBalancer?
|
||||
|
||||
// SDN Routes
|
||||
//sys hcnEnumerateRoutes(query string, routes **uint16, result **uint16) (hr error) = computenetwork.HcnEnumerateSdnRoutes?
|
||||
//sys hcnCreateRoute(id *_guid, settings string, route *hcnRoute, result **uint16) (hr error) = computenetwork.HcnCreateSdnRoute?
|
||||
//sys hcnOpenRoute(id *_guid, route *hcnRoute, result **uint16) (hr error) = computenetwork.HcnOpenSdnRoute?
|
||||
//sys hcnModifyRoute(route hcnRoute, settings string, result **uint16) (hr error) = computenetwork.HcnModifySdnRoute?
|
||||
//sys hcnQueryRouteProperties(route hcnRoute, query string, properties **uint16, result **uint16) (hr error) = computenetwork.HcnQuerySdnRouteProperties?
|
||||
//sys hcnDeleteRoute(id *_guid, result **uint16) (hr error) = computenetwork.HcnDeleteSdnRoute?
|
||||
//sys hcnCloseRoute(route hcnRoute) (hr error) = computenetwork.HcnCloseSdnRoute?
|
||||
|
||||
// Service
|
||||
//sys hcnOpenService(service *hcnService, result **uint16) (hr error) = computenetwork.HcnOpenService?
|
||||
//sys hcnRegisterServiceCallback(service hcnService, callback int32, context int32, callbackHandle *hcnCallbackHandle) (hr error) = computenetwork.HcnRegisterServiceCallback?
|
||||
//sys hcnUnregisterServiceCallback(callbackHandle hcnCallbackHandle) (hr error) = computenetwork.HcnUnregisterServiceCallback?
|
||||
//sys hcnCloseService(service hcnService) (hr error) = computenetwork.HcnCloseService?
|
||||
|
||||
type _guid = guid.GUID
|
||||
|
||||
type hcnNetwork syscall.Handle
|
||||
type hcnEndpoint syscall.Handle
|
||||
type hcnNamespace syscall.Handle
|
||||
type hcnLoadBalancer syscall.Handle
|
||||
type hcnRoute syscall.Handle
|
||||
type hcnService syscall.Handle
|
||||
type hcnCallbackHandle syscall.Handle
|
||||
|
||||
// SchemaVersion for HCN Objects/Queries.
|
||||
type SchemaVersion = Version // hcnglobals.go
|
||||
|
||||
// HostComputeQueryFlags are passed in to a HostComputeQuery to determine which
|
||||
// properties of an object are returned.
|
||||
type HostComputeQueryFlags uint32
|
||||
|
||||
var (
|
||||
// HostComputeQueryFlagsNone returns an object with the standard properties.
|
||||
HostComputeQueryFlagsNone HostComputeQueryFlags
|
||||
// HostComputeQueryFlagsDetailed returns an object with all properties.
|
||||
HostComputeQueryFlagsDetailed HostComputeQueryFlags = 1
|
||||
)
|
||||
|
||||
// HostComputeQuery is the format for HCN queries.
|
||||
type HostComputeQuery struct {
|
||||
SchemaVersion SchemaVersion `json:""`
|
||||
Flags HostComputeQueryFlags `json:",omitempty"`
|
||||
Filter string `json:",omitempty"`
|
||||
}
|
||||
|
||||
// defaultQuery generates HCN Query.
|
||||
// Passed into get/enumerate calls to filter results.
|
||||
func defaultQuery() HostComputeQuery {
|
||||
query := HostComputeQuery{
|
||||
SchemaVersion: SchemaVersion{
|
||||
Major: 2,
|
||||
Minor: 0,
|
||||
},
|
||||
Flags: HostComputeQueryFlagsNone,
|
||||
}
|
||||
return query
|
||||
}
|
||||
|
||||
func defaultQueryJson() string {
|
||||
query := defaultQuery()
|
||||
queryJson, err := json.Marshal(query)
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
return string(queryJson)
|
||||
}
|
||||
|
||||
// PlatformDoesNotSupportError happens when users are attempting to use a newer shim on an older OS
|
||||
func platformDoesNotSupportError(featureName string) error {
|
||||
return fmt.Errorf("Platform does not support feature %s", featureName)
|
||||
}
|
||||
|
||||
// V2ApiSupported returns an error if the HCN version does not support the V2 Apis.
|
||||
func V2ApiSupported() error {
|
||||
supported := GetSupportedFeatures()
|
||||
if supported.Api.V2 {
|
||||
return nil
|
||||
}
|
||||
return platformDoesNotSupportError("V2 Api/Schema")
|
||||
}
|
||||
|
||||
func V2SchemaVersion() SchemaVersion {
|
||||
return SchemaVersion{
|
||||
Major: 2,
|
||||
Minor: 0,
|
||||
}
|
||||
}
|
||||
|
||||
// RemoteSubnetSupported returns an error if the HCN version does not support Remote Subnet policies.
|
||||
func RemoteSubnetSupported() error {
|
||||
supported := GetSupportedFeatures()
|
||||
if supported.RemoteSubnet {
|
||||
return nil
|
||||
}
|
||||
return platformDoesNotSupportError("Remote Subnet")
|
||||
}
|
||||
|
||||
// HostRouteSupported returns an error if the HCN version does not support Host Route policies.
|
||||
func HostRouteSupported() error {
|
||||
supported := GetSupportedFeatures()
|
||||
if supported.HostRoute {
|
||||
return nil
|
||||
}
|
||||
return platformDoesNotSupportError("Host Route")
|
||||
}
|
||||
|
||||
// DSRSupported returns an error if the HCN version does not support Direct Server Return.
|
||||
func DSRSupported() error {
|
||||
supported := GetSupportedFeatures()
|
||||
if supported.DSR {
|
||||
return nil
|
||||
}
|
||||
return platformDoesNotSupportError("Direct Server Return (DSR)")
|
||||
}
|
||||
|
||||
// Slash32EndpointPrefixesSupported returns an error if the HCN version does not support configuring endpoints with /32 prefixes.
|
||||
func Slash32EndpointPrefixesSupported() error {
|
||||
supported := GetSupportedFeatures()
|
||||
if supported.Slash32EndpointPrefixes {
|
||||
return nil
|
||||
}
|
||||
return platformDoesNotSupportError("Slash 32 Endpoint prefixes")
|
||||
}
|
||||
|
||||
// AclSupportForProtocol252Supported returns an error if the HCN version does not support HNS ACL Policies to support protocol 252 for VXLAN.
|
||||
func AclSupportForProtocol252Supported() error {
|
||||
supported := GetSupportedFeatures()
|
||||
if supported.AclSupportForProtocol252 {
|
||||
return nil
|
||||
}
|
||||
return platformDoesNotSupportError("HNS ACL Policies to support protocol 252 for VXLAN")
|
||||
}
|
||||
|
||||
// SessionAffinitySupported returns an error if the HCN version does not support Session Affinity.
|
||||
func SessionAffinitySupported() error {
|
||||
supported := GetSupportedFeatures()
|
||||
if supported.SessionAffinity {
|
||||
return nil
|
||||
}
|
||||
return platformDoesNotSupportError("Session Affinity")
|
||||
}
|
||||
|
||||
// IPv6DualStackSupported returns an error if the HCN version does not support IPv6DualStack.
|
||||
func IPv6DualStackSupported() error {
|
||||
supported := GetSupportedFeatures()
|
||||
if supported.IPv6DualStack {
|
||||
return nil
|
||||
}
|
||||
return platformDoesNotSupportError("IPv6 DualStack")
|
||||
}
|
||||
|
||||
// RequestType are the different operations performed to settings.
|
||||
// Used to update the settings of Endpoint/Namespace objects.
|
||||
type RequestType string
|
||||
|
||||
var (
|
||||
// RequestTypeAdd adds the provided settings object.
|
||||
RequestTypeAdd RequestType = "Add"
|
||||
// RequestTypeRemove removes the provided settings object.
|
||||
RequestTypeRemove RequestType = "Remove"
|
||||
// RequestTypeUpdate replaces settings with the ones provided.
|
||||
RequestTypeUpdate RequestType = "Update"
|
||||
// RequestTypeRefresh refreshes the settings provided.
|
||||
RequestTypeRefresh RequestType = "Refresh"
|
||||
)
|
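The Supported helpers above are intended to gate use of newer HCN capabilities before issuing V2 calls; a short, hedged, Windows-only sketch:

// +build windows

package main

import (
	"fmt"

	"github.com/Microsoft/hcsshim/hcn"
)

func main() {
	// Fall back to the V1 HNS path when the V2 API is unavailable.
	if err := hcn.V2ApiSupported(); err != nil {
		fmt.Println("HCN V2 API not available:", err)
		return
	}
	if err := hcn.RemoteSubnetSupported(); err != nil {
		fmt.Println("remote subnet policies not supported:", err)
	}
	fmt.Println("using HCN schema", hcn.V2SchemaVersion())
}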
380
vendor/github.com/Microsoft/hcsshim/hcn/hcnendpoint.go
generated
vendored
@ -1,380 +0,0 @@
|
||||
package hcn

import (
    "encoding/json"
    "errors"

    "github.com/Microsoft/go-winio/pkg/guid"
    "github.com/Microsoft/hcsshim/internal/interop"
    "github.com/sirupsen/logrus"
)

// IpConfig is associated with an endpoint
type IpConfig struct {
    IpAddress    string `json:",omitempty"`
    PrefixLength uint8  `json:",omitempty"`
}

// EndpointFlags are special settings on an endpoint.
type EndpointFlags uint32

var (
    // EndpointFlagsNone is the default.
    EndpointFlagsNone EndpointFlags
    // EndpointFlagsRemoteEndpoint means that an endpoint is on another host.
    EndpointFlagsRemoteEndpoint EndpointFlags = 1
)

// HostComputeEndpoint represents a network endpoint
type HostComputeEndpoint struct {
    Id                   string           `json:"ID,omitempty"`
    Name                 string           `json:",omitempty"`
    HostComputeNetwork   string           `json:",omitempty"` // GUID
    HostComputeNamespace string           `json:",omitempty"` // GUID
    Policies             []EndpointPolicy `json:",omitempty"`
    IpConfigurations     []IpConfig       `json:",omitempty"`
    Dns                  Dns              `json:",omitempty"`
    Routes               []Route          `json:",omitempty"`
    MacAddress           string           `json:",omitempty"`
    Flags                EndpointFlags    `json:",omitempty"`
    SchemaVersion        SchemaVersion    `json:",omitempty"`
}

// EndpointResourceType are the two different Endpoint settings resources.
type EndpointResourceType string

var (
    // EndpointResourceTypePolicy is for Endpoint Policies. Ex: ACL, NAT
    EndpointResourceTypePolicy EndpointResourceType = "Policy"
    // EndpointResourceTypePort is for Endpoint Port settings.
    EndpointResourceTypePort EndpointResourceType = "Port"
)

// ModifyEndpointSettingRequest is the structure used to send a request to modify an endpoint.
// Used to update policy/port on an endpoint.
type ModifyEndpointSettingRequest struct {
    ResourceType EndpointResourceType `json:",omitempty"` // Policy, Port
    RequestType  RequestType          `json:",omitempty"` // Add, Remove, Update, Refresh
    Settings     json.RawMessage      `json:",omitempty"`
}

// PolicyEndpointRequest is the settings payload used to apply policies to an endpoint.
type PolicyEndpointRequest struct {
    Policies []EndpointPolicy `json:",omitempty"`
}

func getEndpoint(endpointGuid guid.GUID, query string) (*HostComputeEndpoint, error) {
    // Open endpoint.
    var (
        endpointHandle   hcnEndpoint
        resultBuffer     *uint16
        propertiesBuffer *uint16
    )
    hr := hcnOpenEndpoint(&endpointGuid, &endpointHandle, &resultBuffer)
    if err := checkForErrors("hcnOpenEndpoint", hr, resultBuffer); err != nil {
        return nil, err
    }
    // Query endpoint.
    hr = hcnQueryEndpointProperties(endpointHandle, query, &propertiesBuffer, &resultBuffer)
    if err := checkForErrors("hcnQueryEndpointProperties", hr, resultBuffer); err != nil {
        return nil, err
    }
    properties := interop.ConvertAndFreeCoTaskMemString(propertiesBuffer)
    // Close endpoint.
    hr = hcnCloseEndpoint(endpointHandle)
    if err := checkForErrors("hcnCloseEndpoint", hr, nil); err != nil {
        return nil, err
    }
    // Convert output to HostComputeEndpoint
    var outputEndpoint HostComputeEndpoint
    if err := json.Unmarshal([]byte(properties), &outputEndpoint); err != nil {
        return nil, err
    }
    return &outputEndpoint, nil
}

func enumerateEndpoints(query string) ([]HostComputeEndpoint, error) {
    // Enumerate all Endpoint Guids
    var (
        resultBuffer   *uint16
        endpointBuffer *uint16
    )
    hr := hcnEnumerateEndpoints(query, &endpointBuffer, &resultBuffer)
    if err := checkForErrors("hcnEnumerateEndpoints", hr, resultBuffer); err != nil {
        return nil, err
    }

    endpoints := interop.ConvertAndFreeCoTaskMemString(endpointBuffer)
    var endpointIds []guid.GUID
    err := json.Unmarshal([]byte(endpoints), &endpointIds)
    if err != nil {
        return nil, err
    }

    var outputEndpoints []HostComputeEndpoint
    for _, endpointGuid := range endpointIds {
        endpoint, err := getEndpoint(endpointGuid, query)
        if err != nil {
            return nil, err
        }
        outputEndpoints = append(outputEndpoints, *endpoint)
    }
    return outputEndpoints, nil
}

func createEndpoint(networkId string, endpointSettings string) (*HostComputeEndpoint, error) {
    networkGuid, err := guid.FromString(networkId)
    if err != nil {
        return nil, errInvalidNetworkID
    }
    // Open network.
    var networkHandle hcnNetwork
    var resultBuffer *uint16
    hr := hcnOpenNetwork(&networkGuid, &networkHandle, &resultBuffer)
    if err := checkForErrors("hcnOpenNetwork", hr, resultBuffer); err != nil {
        return nil, err
    }
    // Create endpoint.
    endpointId := guid.GUID{}
    var endpointHandle hcnEndpoint
    hr = hcnCreateEndpoint(networkHandle, &endpointId, endpointSettings, &endpointHandle, &resultBuffer)
    if err := checkForErrors("hcnCreateEndpoint", hr, resultBuffer); err != nil {
        return nil, err
    }
    // Query endpoint.
    hcnQuery := defaultQuery()
    query, err := json.Marshal(hcnQuery)
    if err != nil {
        return nil, err
    }
    var propertiesBuffer *uint16
    hr = hcnQueryEndpointProperties(endpointHandle, string(query), &propertiesBuffer, &resultBuffer)
    if err := checkForErrors("hcnQueryEndpointProperties", hr, resultBuffer); err != nil {
        return nil, err
    }
    properties := interop.ConvertAndFreeCoTaskMemString(propertiesBuffer)
    // Close endpoint.
    hr = hcnCloseEndpoint(endpointHandle)
    if err := checkForErrors("hcnCloseEndpoint", hr, nil); err != nil {
        return nil, err
    }
    // Close network.
    hr = hcnCloseNetwork(networkHandle)
    if err := checkForErrors("hcnCloseNetwork", hr, nil); err != nil {
        return nil, err
    }
    // Convert output to HostComputeEndpoint
    var outputEndpoint HostComputeEndpoint
    if err := json.Unmarshal([]byte(properties), &outputEndpoint); err != nil {
        return nil, err
    }
    return &outputEndpoint, nil
}

func modifyEndpoint(endpointId string, settings string) (*HostComputeEndpoint, error) {
    endpointGuid, err := guid.FromString(endpointId)
    if err != nil {
        return nil, errInvalidEndpointID
    }
    // Open endpoint
    var (
        endpointHandle   hcnEndpoint
        resultBuffer     *uint16
        propertiesBuffer *uint16
    )
    hr := hcnOpenEndpoint(&endpointGuid, &endpointHandle, &resultBuffer)
    if err := checkForErrors("hcnOpenEndpoint", hr, resultBuffer); err != nil {
        return nil, err
    }
    // Modify endpoint
    hr = hcnModifyEndpoint(endpointHandle, settings, &resultBuffer)
    if err := checkForErrors("hcnModifyEndpoint", hr, resultBuffer); err != nil {
        return nil, err
    }
    // Query endpoint.
    hcnQuery := defaultQuery()
    query, err := json.Marshal(hcnQuery)
    if err != nil {
        return nil, err
    }
    hr = hcnQueryEndpointProperties(endpointHandle, string(query), &propertiesBuffer, &resultBuffer)
    if err := checkForErrors("hcnQueryEndpointProperties", hr, resultBuffer); err != nil {
        return nil, err
    }
    properties := interop.ConvertAndFreeCoTaskMemString(propertiesBuffer)
    // Close endpoint.
    hr = hcnCloseEndpoint(endpointHandle)
    if err := checkForErrors("hcnCloseEndpoint", hr, nil); err != nil {
        return nil, err
    }
    // Convert output to HostComputeEndpoint
    var outputEndpoint HostComputeEndpoint
    if err := json.Unmarshal([]byte(properties), &outputEndpoint); err != nil {
        return nil, err
    }
    return &outputEndpoint, nil
}

func deleteEndpoint(endpointId string) error {
    endpointGuid, err := guid.FromString(endpointId)
    if err != nil {
        return errInvalidEndpointID
    }
    var resultBuffer *uint16
    hr := hcnDeleteEndpoint(&endpointGuid, &resultBuffer)
    if err := checkForErrors("hcnDeleteEndpoint", hr, resultBuffer); err != nil {
        return err
    }
    return nil
}

// ListEndpoints makes a call to list all available endpoints.
func ListEndpoints() ([]HostComputeEndpoint, error) {
    hcnQuery := defaultQuery()
    endpoints, err := ListEndpointsQuery(hcnQuery)
    if err != nil {
        return nil, err
    }
    return endpoints, nil
}

// ListEndpointsQuery makes a call to query the list of available endpoints.
func ListEndpointsQuery(query HostComputeQuery) ([]HostComputeEndpoint, error) {
    queryJson, err := json.Marshal(query)
    if err != nil {
        return nil, err
    }

    endpoints, err := enumerateEndpoints(string(queryJson))
    if err != nil {
        return nil, err
    }
    return endpoints, nil
}

// ListEndpointsOfNetwork queries the list of endpoints on a network.
func ListEndpointsOfNetwork(networkId string) ([]HostComputeEndpoint, error) {
    hcnQuery := defaultQuery()
    // TODO: Once query can convert schema, change to {HostComputeNetwork:networkId}
    mapA := map[string]string{"VirtualNetwork": networkId}
    filter, err := json.Marshal(mapA)
    if err != nil {
        return nil, err
    }
    hcnQuery.Filter = string(filter)

    return ListEndpointsQuery(hcnQuery)
}

// GetEndpointByID returns an endpoint specified by Id
func GetEndpointByID(endpointId string) (*HostComputeEndpoint, error) {
    hcnQuery := defaultQuery()
    mapA := map[string]string{"ID": endpointId}
    filter, err := json.Marshal(mapA)
    if err != nil {
        return nil, err
    }
    hcnQuery.Filter = string(filter)

    endpoints, err := ListEndpointsQuery(hcnQuery)
    if err != nil {
        return nil, err
    }
    if len(endpoints) == 0 {
        return nil, EndpointNotFoundError{EndpointID: endpointId}
    }
    return &endpoints[0], err
}

// GetEndpointByName returns an endpoint specified by Name
func GetEndpointByName(endpointName string) (*HostComputeEndpoint, error) {
    hcnQuery := defaultQuery()
    mapA := map[string]string{"Name": endpointName}
    filter, err := json.Marshal(mapA)
    if err != nil {
        return nil, err
    }
    hcnQuery.Filter = string(filter)

    endpoints, err := ListEndpointsQuery(hcnQuery)
    if err != nil {
        return nil, err
    }
    if len(endpoints) == 0 {
        return nil, EndpointNotFoundError{EndpointName: endpointName}
    }
    return &endpoints[0], err
}

// Create Endpoint.
func (endpoint *HostComputeEndpoint) Create() (*HostComputeEndpoint, error) {
    logrus.Debugf("hcn::HostComputeEndpoint::Create id=%s", endpoint.Id)

    if endpoint.HostComputeNamespace != "" {
        return nil, errors.New("endpoint create error, endpoint json HostComputeNamespace is read only and should not be set")
    }

    jsonString, err := json.Marshal(endpoint)
    if err != nil {
        return nil, err
    }

    logrus.Debugf("hcn::HostComputeEndpoint::Create JSON: %s", jsonString)
    endpoint, hcnErr := createEndpoint(endpoint.HostComputeNetwork, string(jsonString))
    if hcnErr != nil {
        return nil, hcnErr
    }
    return endpoint, nil
}

// Delete Endpoint.
func (endpoint *HostComputeEndpoint) Delete() error {
    logrus.Debugf("hcn::HostComputeEndpoint::Delete id=%s", endpoint.Id)

    if err := deleteEndpoint(endpoint.Id); err != nil {
        return err
    }
    return nil
}

// ModifyEndpointSettings updates the Port/Policy of an Endpoint.
func ModifyEndpointSettings(endpointId string, request *ModifyEndpointSettingRequest) error {
    logrus.Debugf("hcn::HostComputeEndpoint::ModifyEndpointSettings id=%s", endpointId)

    endpointSettingsRequest, err := json.Marshal(request)
    if err != nil {
        return err
    }

    _, err = modifyEndpoint(endpointId, string(endpointSettingsRequest))
    if err != nil {
        return err
    }
    return nil
}

// ApplyPolicy applies a Policy (ex: ACL) on the Endpoint.
func (endpoint *HostComputeEndpoint) ApplyPolicy(requestType RequestType, endpointPolicy PolicyEndpointRequest) error {
    logrus.Debugf("hcn::HostComputeEndpoint::ApplyPolicy id=%s", endpoint.Id)

    settingsJson, err := json.Marshal(endpointPolicy)
    if err != nil {
        return err
    }
    requestMessage := &ModifyEndpointSettingRequest{
        ResourceType: EndpointResourceTypePolicy,
        RequestType:  requestType,
        Settings:     settingsJson,
    }

    return ModifyEndpointSettings(endpoint.Id, requestMessage)
}

// NamespaceAttach modifies a Namespace to add an endpoint.
func (endpoint *HostComputeEndpoint) NamespaceAttach(namespaceId string) error {
    return AddNamespaceEndpoint(namespaceId, endpoint.Id)
}

// NamespaceDetach modifies a Namespace to remove an endpoint.
func (endpoint *HostComputeEndpoint) NamespaceDetach(namespaceId string) error {
    return RemoveNamespaceEndpoint(namespaceId, endpoint.Id)
}
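For context on the vendored API, a minimal sketch of the endpoint lifecycle these helpers expose; the network and namespace GUIDs are placeholders, SchemaVersion is assumed to expose Major/Minor like Version, and this only runs on a Windows host with the HCN service:

package main

import (
    "log"

    "github.com/Microsoft/hcsshim/hcn"
)

func main() {
    // Placeholder GUIDs; in practice these come from an existing
    // HostComputeNetwork and HostComputeNamespace.
    networkID := "00000000-0000-0000-0000-000000000000"
    namespaceID := "11111111-1111-1111-1111-111111111111"

    endpoint := &hcn.HostComputeEndpoint{
        Name:               "example-endpoint",
        HostComputeNetwork: networkID,
        // Assumption: SchemaVersion carries Major/Minor, matching the V2 schema.
        SchemaVersion: hcn.SchemaVersion{Major: 2, Minor: 0},
    }

    created, err := endpoint.Create()
    if err != nil {
        log.Fatalf("create endpoint: %v", err)
    }

    // Attach the endpoint to a namespace after creation; HostComputeNamespace
    // is read-only on the create request and must not be set above.
    if err := created.NamespaceAttach(namespaceID); err != nil {
        log.Printf("attach endpoint to namespace: %v", err)
    }

    // Clean up.
    if err := created.Delete(); err != nil {
        log.Printf("delete endpoint: %v", err)
    }
}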
164
vendor/github.com/Microsoft/hcsshim/hcn/hcnerrors.go
generated
vendored
@ -1,164 +0,0 @@
// Package hcn is a shim for the Host Compute Networking (HCN) service, which manages networking for Windows Server
// containers and Hyper-V containers. Prior to RS5, HCN was referred to as the Host Networking Service (HNS).
package hcn

import (
    "errors"
    "fmt"

    "github.com/Microsoft/hcsshim/internal/hcs"
    "github.com/Microsoft/hcsshim/internal/hcserror"
    "github.com/Microsoft/hcsshim/internal/interop"
    "github.com/sirupsen/logrus"
)

var (
    errInvalidNetworkID      = errors.New("invalid network ID")
    errInvalidEndpointID     = errors.New("invalid endpoint ID")
    errInvalidNamespaceID    = errors.New("invalid namespace ID")
    errInvalidLoadBalancerID = errors.New("invalid load balancer ID")
    errInvalidRouteID        = errors.New("invalid route ID")
)

func checkForErrors(methodName string, hr error, resultBuffer *uint16) error {
    errorFound := false

    if hr != nil {
        errorFound = true
    }

    result := ""
    if resultBuffer != nil {
        result = interop.ConvertAndFreeCoTaskMemString(resultBuffer)
        if result != "" {
            errorFound = true
        }
    }

    if errorFound {
        returnError := new(hr, methodName, result)
        logrus.Debugf(returnError.Error()) // HCN errors logged for debugging.
        return returnError
    }

    return nil
}

// ErrorCode is the numeric error code returned by HCN calls.
type ErrorCode uint32

// For common errors, define the error as it is in Windows, so we can quickly determine it later
const (
    ERROR_NOT_FOUND                     = 0x490
    HCN_E_PORT_ALREADY_EXISTS ErrorCode = 0x803b0013
)

// HcnError wraps an HcsError together with the HCN-specific error code.
type HcnError struct {
    *hcserror.HcsError
    code ErrorCode
}

func (e *HcnError) Error() string {
    return e.HcsError.Error()
}

// CheckErrorWithCode reports whether err is an HcnError carrying the given code.
func CheckErrorWithCode(err error, code ErrorCode) bool {
    hcnError, ok := err.(*HcnError)
    if ok {
        return hcnError.code == code
    }
    return false
}

// IsElementNotFoundError reports whether err is an HCN "element not found" error.
func IsElementNotFoundError(err error) bool {
    return CheckErrorWithCode(err, ERROR_NOT_FOUND)
}

// IsPortAlreadyExistsError reports whether err indicates that the port already exists.
func IsPortAlreadyExistsError(err error) bool {
    return CheckErrorWithCode(err, HCN_E_PORT_ALREADY_EXISTS)
}

func new(hr error, title string, rest string) error {
    err := &HcnError{}
    hcsError := hcserror.New(hr, title, rest)
    err.HcsError = hcsError.(*hcserror.HcsError)
    err.code = ErrorCode(hcserror.Win32FromError(hr))
    return err
}

//
// Note that the errors below are not returned by HCN itself;
// we keep them separate because they are shim usage errors.
//

// NetworkNotFoundError results from a failed search for a network by Id or Name
type NetworkNotFoundError struct {
    NetworkName string
    NetworkID   string
}

func (e NetworkNotFoundError) Error() string {
    if e.NetworkName != "" {
        return fmt.Sprintf("Network name %q not found", e.NetworkName)
    }
    return fmt.Sprintf("Network ID %q not found", e.NetworkID)
}

// EndpointNotFoundError results from a failed search for an endpoint by Id or Name
type EndpointNotFoundError struct {
    EndpointName string
    EndpointID   string
}

func (e EndpointNotFoundError) Error() string {
    if e.EndpointName != "" {
        return fmt.Sprintf("Endpoint name %q not found", e.EndpointName)
    }
    return fmt.Sprintf("Endpoint ID %q not found", e.EndpointID)
}

// NamespaceNotFoundError results from a failed search for a namespace by Id
type NamespaceNotFoundError struct {
    NamespaceID string
}

func (e NamespaceNotFoundError) Error() string {
    return fmt.Sprintf("Namespace ID %q not found", e.NamespaceID)
}

// LoadBalancerNotFoundError results from a failed search for a loadbalancer by Id
type LoadBalancerNotFoundError struct {
    LoadBalancerId string
}

func (e LoadBalancerNotFoundError) Error() string {
    return fmt.Sprintf("LoadBalancer %q not found", e.LoadBalancerId)
}

// RouteNotFoundError results from a failed search for a route by Id
type RouteNotFoundError struct {
    RouteId string
}

func (e RouteNotFoundError) Error() string {
    return fmt.Sprintf("SDN Route %q not found", e.RouteId)
}

// IsNotFoundError returns a boolean indicating whether the error was caused by
// a resource not being found.
func IsNotFoundError(err error) bool {
    switch pe := err.(type) {
    case NetworkNotFoundError:
        return true
    case EndpointNotFoundError:
        return true
    case NamespaceNotFoundError:
        return true
    case LoadBalancerNotFoundError:
        return true
    case RouteNotFoundError:
        return true
    case *hcserror.HcsError:
        return pe.Err == hcs.ErrElementNotFound
    }
    return false
}
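A short sketch of how callers typically distinguish these shim-level not-found errors from other HCN failures; the endpoint name is a placeholder:

package main

import (
    "log"

    "github.com/Microsoft/hcsshim/hcn"
)

func main() {
    endpoint, err := hcn.GetEndpointByName("example-endpoint")
    if err != nil {
        if hcn.IsNotFoundError(err) {
            // Shim-level not-found error (EndpointNotFoundError); treat the endpoint as absent.
            log.Printf("endpoint does not exist: %v", err)
            return
        }
        // Anything else is an HCN/HCS failure worth surfacing.
        log.Fatalf("querying endpoint failed: %v", err)
    }
    log.Printf("found endpoint %s (%s)", endpoint.Name, endpoint.Id)
}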
112
vendor/github.com/Microsoft/hcsshim/hcn/hcnglobals.go
generated
vendored
@ -1,112 +0,0 @@
package hcn

import (
    "encoding/json"
    "fmt"
    "math"

    "github.com/Microsoft/hcsshim/internal/hcserror"
    "github.com/Microsoft/hcsshim/internal/interop"
    "github.com/sirupsen/logrus"
)

// Globals are all global properties of the HCN Service.
type Globals struct {
    Version Version `json:"Version"`
}

// Version is the HCN Service version.
type Version struct {
    Major int `json:"Major"`
    Minor int `json:"Minor"`
}

// VersionRange is a closed range of HCN Service versions.
type VersionRange struct {
    MinVersion Version
    MaxVersion Version
}

// VersionRanges is a set of version ranges; a version is supported if it falls within any of them.
type VersionRanges []VersionRange

var (
    // HNSVersion1803 added ACL functionality.
    HNSVersion1803 = VersionRanges{VersionRange{MinVersion: Version{Major: 7, Minor: 2}, MaxVersion: Version{Major: math.MaxInt32, Minor: math.MaxInt32}}}
    // V2ApiSupport allows the use of V2 Api calls and V2 Schema.
    V2ApiSupport = VersionRanges{VersionRange{MinVersion: Version{Major: 9, Minor: 2}, MaxVersion: Version{Major: math.MaxInt32, Minor: math.MaxInt32}}}
    // RemoteSubnetVersion allows Remote Subnet policies on Overlay networks.
    RemoteSubnetVersion = VersionRanges{VersionRange{MinVersion: Version{Major: 9, Minor: 2}, MaxVersion: Version{Major: math.MaxInt32, Minor: math.MaxInt32}}}
    // HostRouteVersion allows Host Route policies for local-container-to-local-host communication on Overlay networks.
    HostRouteVersion = VersionRanges{VersionRange{MinVersion: Version{Major: 9, Minor: 2}, MaxVersion: Version{Major: math.MaxInt32, Minor: math.MaxInt32}}}
    // DSRVersion: HNS 10.2 allows Direct Server Return for loadbalancing.
    DSRVersion = VersionRanges{VersionRange{MinVersion: Version{Major: 10, Minor: 2}, MaxVersion: Version{Major: math.MaxInt32, Minor: math.MaxInt32}}}
    // Slash32EndpointPrefixesVersion: HNS 9.3 up to (but not including) 10.0, and 10.4+, support configuring endpoints with /32 prefixes.
    Slash32EndpointPrefixesVersion = VersionRanges{
        VersionRange{MinVersion: Version{Major: 9, Minor: 3}, MaxVersion: Version{Major: 9, Minor: math.MaxInt32}},
        VersionRange{MinVersion: Version{Major: 10, Minor: 4}, MaxVersion: Version{Major: math.MaxInt32, Minor: math.MaxInt32}},
    }
    // AclSupportForProtocol252Version: HNS 9.3 up to (but not including) 10.0, and 10.4+, allow HNS ACL Policies to support protocol 252 for VXLAN.
    AclSupportForProtocol252Version = VersionRanges{
        VersionRange{MinVersion: Version{Major: 9, Minor: 3}, MaxVersion: Version{Major: 9, Minor: math.MaxInt32}},
        VersionRange{MinVersion: Version{Major: 10, Minor: 4}, MaxVersion: Version{Major: math.MaxInt32, Minor: math.MaxInt32}},
    }
    // SessionAffinityVersion: HNS 12.0 allows session affinity for loadbalancing.
    SessionAffinityVersion = VersionRanges{VersionRange{MinVersion: Version{Major: 12, Minor: 0}, MaxVersion: Version{Major: math.MaxInt32, Minor: math.MaxInt32}}}
    // IPv6DualStackVersion: HNS 10.5 up to (but not including) 11, and 12.0+, support IPv6 dual stack.
    IPv6DualStackVersion = VersionRanges{
        VersionRange{MinVersion: Version{Major: 10, Minor: 5}, MaxVersion: Version{Major: 10, Minor: math.MaxInt32}},
        VersionRange{MinVersion: Version{Major: 12, Minor: 0}, MaxVersion: Version{Major: math.MaxInt32, Minor: math.MaxInt32}},
    }
)

// GetGlobals returns the global properties of the HCN Service.
func GetGlobals() (*Globals, error) {
    var version Version
    err := hnsCall("GET", "/globals/version", "", &version)
    if err != nil {
        return nil, err
    }

    globals := &Globals{
        Version: version,
    }

    return globals, nil
}

type hnsResponse struct {
    Success bool
    Error   string
    Output  json.RawMessage
}

func hnsCall(method, path, request string, returnResponse interface{}) error {
    var responseBuffer *uint16
    logrus.Debugf("[%s]=>[%s] Request : %s", method, path, request)

    err := _hnsCall(method, path, request, &responseBuffer)
    if err != nil {
        return hcserror.New(err, "hnsCall ", "")
    }
    response := interop.ConvertAndFreeCoTaskMemString(responseBuffer)

    hnsresponse := &hnsResponse{}
    if err = json.Unmarshal([]byte(response), &hnsresponse); err != nil {
        return err
    }

    if !hnsresponse.Success {
        return fmt.Errorf("HNS failed with error : %s", hnsresponse.Error)
    }

    if len(hnsresponse.Output) == 0 {
        return nil
    }

    logrus.Debugf("Network Response : %s", hnsresponse.Output)
    err = json.Unmarshal(hnsresponse.Output, returnResponse)
    if err != nil {
        return err
    }

    return nil
}
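Finally, a sketch of reading the service version and testing it against one of the ranges above; inRanges is an illustrative helper written for this example, not part of the vendored package:

package main

import (
    "log"

    "github.com/Microsoft/hcsshim/hcn"
)

// inRanges reports whether v falls inside any of the given version ranges,
// comparing (Major, Minor) pairs lexicographically.
func inRanges(v hcn.Version, ranges hcn.VersionRanges) bool {
    for _, r := range ranges {
        atLeastMin := v.Major > r.MinVersion.Major || (v.Major == r.MinVersion.Major && v.Minor >= r.MinVersion.Minor)
        atMostMax := v.Major < r.MaxVersion.Major || (v.Major == r.MaxVersion.Major && v.Minor <= r.MaxVersion.Minor)
        if atLeastMin && atMostMax {
            return true
        }
    }
    return false
}

func main() {
    globals, err := hcn.GetGlobals()
    if err != nil {
        log.Fatalf("querying HCN globals: %v", err)
    }
    log.Printf("HCN service version %d.%d", globals.Version.Major, globals.Version.Minor)

    if inRanges(globals.Version, hcn.V2ApiSupport) {
        log.Print("V2 API and schema are available")
    }
}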
Some files were not shown because too many files have changed in this diff.