Merge remote-tracking branch 'upstream/master' into test-cmd-what
This commit is contained in:
70
Godeps/Godeps.json
generated
70
Godeps/Godeps.json
generated
@@ -3,15 +3,17 @@
|
|||||||
"GoVersion": "go1.11",
|
"GoVersion": "go1.11",
|
||||||
"GodepVersion": "v80-k8s-r1",
|
"GodepVersion": "v80-k8s-r1",
|
||||||
"Packages": [
|
"Packages": [
|
||||||
"github.com/onsi/ginkgo/ginkgo",
|
"github.com/bazelbuild/bazel-gazelle/cmd/gazelle",
|
||||||
"github.com/jteeuwen/go-bindata/go-bindata",
|
"github.com/cespare/prettybench",
|
||||||
"github.com/client9/misspell/cmd/misspell",
|
"github.com/client9/misspell/cmd/misspell",
|
||||||
"github.com/cloudflare/cfssl/cmd/cfssl",
|
"github.com/cloudflare/cfssl/cmd/cfssl",
|
||||||
"github.com/cloudflare/cfssl/cmd/cfssljson",
|
"github.com/cloudflare/cfssl/cmd/cfssljson",
|
||||||
"github.com/bazelbuild/bazel-gazelle/cmd/gazelle",
|
"github.com/jstemmer/go-junit-report",
|
||||||
|
"github.com/jteeuwen/go-bindata/go-bindata",
|
||||||
|
"github.com/onsi/ginkgo/ginkgo",
|
||||||
|
"golang.org/x/lint/golint",
|
||||||
"k8s.io/kube-openapi/cmd/openapi-gen",
|
"k8s.io/kube-openapi/cmd/openapi-gen",
|
||||||
"k8s.io/repo-infra/kazel",
|
"k8s.io/repo-infra/kazel",
|
||||||
"golang.org/x/lint/golint",
|
|
||||||
"./..."
|
"./..."
|
||||||
],
|
],
|
||||||
"Deps": [
|
"Deps": [
|
||||||
@@ -488,6 +490,10 @@
|
|||||||
"Comment": "v3.5.0",
|
"Comment": "v3.5.0",
|
||||||
"Rev": "b38d23b8782a487059e8fc8773e9a5b228a77cb6"
|
"Rev": "b38d23b8782a487059e8fc8773e9a5b228a77cb6"
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "github.com/cespare/prettybench",
|
||||||
|
"Rev": "03b8cfe5406ce67a0b0da46f0c9e78b3d915a2c1"
|
||||||
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "github.com/chai2010/gettext-go/gettext",
|
"ImportPath": "github.com/chai2010/gettext-go/gettext",
|
||||||
"Rev": "c6fed771bfd517099caf0f7a961671fa8ed08723"
|
"Rev": "c6fed771bfd517099caf0f7a961671fa8ed08723"
|
||||||
@@ -2374,6 +2380,18 @@
|
|||||||
"Comment": "1.1.4",
|
"Comment": "1.1.4",
|
||||||
"Rev": "ab8a2e0c74be9d3be70b3184d9acc634935ded82"
|
"Rev": "ab8a2e0c74be9d3be70b3184d9acc634935ded82"
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "github.com/jstemmer/go-junit-report",
|
||||||
|
"Rev": "af01ea7f8024089b458d804d5cdf190f962a9a0c"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "github.com/jstemmer/go-junit-report/formatter",
|
||||||
|
"Rev": "af01ea7f8024089b458d804d5cdf190f962a9a0c"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "github.com/jstemmer/go-junit-report/parser",
|
||||||
|
"Rev": "af01ea7f8024089b458d804d5cdf190f962a9a0c"
|
||||||
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "github.com/jteeuwen/go-bindata",
|
"ImportPath": "github.com/jteeuwen/go-bindata",
|
||||||
"Comment": "v3.0.7-72-ga0ff2567cfb709",
|
"Comment": "v3.0.7-72-ga0ff2567cfb709",
|
||||||
@@ -3624,31 +3642,59 @@
|
|||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/tools/benchmark/parse",
|
"ImportPath": "golang.org/x/tools/benchmark/parse",
|
||||||
"Rev": "2382e3994d48b1d22acc2c86bcad0a2aff028e32"
|
"Rev": "7f7074d5bcfd282eb16bc382b0bb3da762461985"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/tools/container/intsets",
|
"ImportPath": "golang.org/x/tools/container/intsets",
|
||||||
"Rev": "2382e3994d48b1d22acc2c86bcad0a2aff028e32"
|
"Rev": "7f7074d5bcfd282eb16bc382b0bb3da762461985"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/tools/go/ast/astutil",
|
"ImportPath": "golang.org/x/tools/go/ast/astutil",
|
||||||
"Rev": "2382e3994d48b1d22acc2c86bcad0a2aff028e32"
|
"Rev": "7f7074d5bcfd282eb16bc382b0bb3da762461985"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/tools/go/gcexportdata",
|
"ImportPath": "golang.org/x/tools/go/gcexportdata",
|
||||||
"Rev": "2382e3994d48b1d22acc2c86bcad0a2aff028e32"
|
"Rev": "7f7074d5bcfd282eb16bc382b0bb3da762461985"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/tools/go/gcimporter15",
|
"ImportPath": "golang.org/x/tools/go/internal/cgo",
|
||||||
"Rev": "2382e3994d48b1d22acc2c86bcad0a2aff028e32"
|
"Rev": "7f7074d5bcfd282eb16bc382b0bb3da762461985"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "golang.org/x/tools/go/internal/gcimporter",
|
||||||
|
"Rev": "7f7074d5bcfd282eb16bc382b0bb3da762461985"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "golang.org/x/tools/go/internal/packagesdriver",
|
||||||
|
"Rev": "7f7074d5bcfd282eb16bc382b0bb3da762461985"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "golang.org/x/tools/go/packages",
|
||||||
|
"Rev": "7f7074d5bcfd282eb16bc382b0bb3da762461985"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/tools/go/vcs",
|
"ImportPath": "golang.org/x/tools/go/vcs",
|
||||||
"Rev": "2382e3994d48b1d22acc2c86bcad0a2aff028e32"
|
"Rev": "7f7074d5bcfd282eb16bc382b0bb3da762461985"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/tools/imports",
|
"ImportPath": "golang.org/x/tools/imports",
|
||||||
"Rev": "2382e3994d48b1d22acc2c86bcad0a2aff028e32"
|
"Rev": "7f7074d5bcfd282eb16bc382b0bb3da762461985"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "golang.org/x/tools/internal/fastwalk",
|
||||||
|
"Rev": "7f7074d5bcfd282eb16bc382b0bb3da762461985"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "golang.org/x/tools/internal/gopathwalk",
|
||||||
|
"Rev": "7f7074d5bcfd282eb16bc382b0bb3da762461985"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "golang.org/x/tools/internal/module",
|
||||||
|
"Rev": "7f7074d5bcfd282eb16bc382b0bb3da762461985"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "golang.org/x/tools/internal/semver",
|
||||||
|
"Rev": "7f7074d5bcfd282eb16bc382b0bb3da762461985"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "gonum.org/v1/gonum/blas",
|
"ImportPath": "gonum.org/v1/gonum/blas",
|
||||||
|
|||||||
361
Godeps/LICENSES
generated
361
Godeps/LICENSES
generated
@@ -15061,6 +15061,36 @@ THE SOFTWARE.
|
|||||||
================================================================================
|
================================================================================
|
||||||
|
|
||||||
|
|
||||||
|
================================================================================
|
||||||
|
= vendor/github.com/cespare/prettybench licensed under: =
|
||||||
|
|
||||||
|
Copyright (c) 2014 Caleb Spare
|
||||||
|
|
||||||
|
MIT License
|
||||||
|
|
||||||
|
Permission is hereby granted, free of charge, to any person obtaining
|
||||||
|
a copy of this software and associated documentation files (the
|
||||||
|
"Software"), to deal in the Software without restriction, including
|
||||||
|
without limitation the rights to use, copy, modify, merge, publish,
|
||||||
|
distribute, sublicense, and/or sell copies of the Software, and to
|
||||||
|
permit persons to whom the Software is furnished to do so, subject to
|
||||||
|
the following conditions:
|
||||||
|
|
||||||
|
The above copyright notice and this permission notice shall be
|
||||||
|
included in all copies or substantial portions of the Software.
|
||||||
|
|
||||||
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||||
|
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||||
|
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||||
|
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
|
||||||
|
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
||||||
|
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
|
||||||
|
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||||
|
|
||||||
|
= vendor/github.com/cespare/prettybench/LICENSE.txt 673caf450638c643cfede79f39c33087
|
||||||
|
================================================================================
|
||||||
|
|
||||||
|
|
||||||
================================================================================
|
================================================================================
|
||||||
= vendor/github.com/chai2010/gettext-go/gettext licensed under: =
|
= vendor/github.com/chai2010/gettext-go/gettext licensed under: =
|
||||||
|
|
||||||
@@ -73804,6 +73834,90 @@ SOFTWARE.
|
|||||||
================================================================================
|
================================================================================
|
||||||
|
|
||||||
|
|
||||||
|
================================================================================
|
||||||
|
= vendor/github.com/jstemmer/go-junit-report licensed under: =
|
||||||
|
|
||||||
|
Copyright (c) 2012 Joel Stemmer
|
||||||
|
|
||||||
|
Permission is hereby granted, free of charge, to any person obtaining
|
||||||
|
a copy of this software and associated documentation files (the
|
||||||
|
"Software"), to deal in the Software without restriction, including
|
||||||
|
without limitation the rights to use, copy, modify, merge, publish,
|
||||||
|
distribute, sublicense, and/or sell copies of the Software, and to
|
||||||
|
permit persons to whom the Software is furnished to do so, subject to
|
||||||
|
the following conditions:
|
||||||
|
|
||||||
|
The above copyright notice and this permission notice shall be
|
||||||
|
included in all copies or substantial portions of the Software.
|
||||||
|
|
||||||
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||||
|
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||||
|
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||||
|
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
|
||||||
|
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
||||||
|
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
|
||||||
|
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||||
|
|
||||||
|
= vendor/github.com/jstemmer/go-junit-report/LICENSE 5d54de929d5bc244d21d8d4d91a6ac64
|
||||||
|
================================================================================
|
||||||
|
|
||||||
|
|
||||||
|
================================================================================
|
||||||
|
= vendor/github.com/jstemmer/go-junit-report/formatter licensed under: =
|
||||||
|
|
||||||
|
Copyright (c) 2012 Joel Stemmer
|
||||||
|
|
||||||
|
Permission is hereby granted, free of charge, to any person obtaining
|
||||||
|
a copy of this software and associated documentation files (the
|
||||||
|
"Software"), to deal in the Software without restriction, including
|
||||||
|
without limitation the rights to use, copy, modify, merge, publish,
|
||||||
|
distribute, sublicense, and/or sell copies of the Software, and to
|
||||||
|
permit persons to whom the Software is furnished to do so, subject to
|
||||||
|
the following conditions:
|
||||||
|
|
||||||
|
The above copyright notice and this permission notice shall be
|
||||||
|
included in all copies or substantial portions of the Software.
|
||||||
|
|
||||||
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||||
|
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||||
|
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||||
|
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
|
||||||
|
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
||||||
|
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
|
||||||
|
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||||
|
|
||||||
|
= vendor/github.com/jstemmer/go-junit-report/LICENSE 5d54de929d5bc244d21d8d4d91a6ac64
|
||||||
|
================================================================================
|
||||||
|
|
||||||
|
|
||||||
|
================================================================================
|
||||||
|
= vendor/github.com/jstemmer/go-junit-report/parser licensed under: =
|
||||||
|
|
||||||
|
Copyright (c) 2012 Joel Stemmer
|
||||||
|
|
||||||
|
Permission is hereby granted, free of charge, to any person obtaining
|
||||||
|
a copy of this software and associated documentation files (the
|
||||||
|
"Software"), to deal in the Software without restriction, including
|
||||||
|
without limitation the rights to use, copy, modify, merge, publish,
|
||||||
|
distribute, sublicense, and/or sell copies of the Software, and to
|
||||||
|
permit persons to whom the Software is furnished to do so, subject to
|
||||||
|
the following conditions:
|
||||||
|
|
||||||
|
The above copyright notice and this permission notice shall be
|
||||||
|
included in all copies or substantial portions of the Software.
|
||||||
|
|
||||||
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||||
|
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||||
|
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||||
|
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
|
||||||
|
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
||||||
|
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
|
||||||
|
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||||
|
|
||||||
|
= vendor/github.com/jstemmer/go-junit-report/LICENSE 5d54de929d5bc244d21d8d4d91a6ac64
|
||||||
|
================================================================================
|
||||||
|
|
||||||
|
|
||||||
================================================================================
|
================================================================================
|
||||||
= vendor/github.com/jteeuwen/go-bindata licensed under: =
|
= vendor/github.com/jteeuwen/go-bindata licensed under: =
|
||||||
|
|
||||||
@@ -101687,7 +101801,112 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|||||||
|
|
||||||
|
|
||||||
================================================================================
|
================================================================================
|
||||||
= vendor/golang.org/x/tools/go/gcimporter15 licensed under: =
|
= vendor/golang.org/x/tools/go/internal/cgo licensed under: =
|
||||||
|
|
||||||
|
Copyright (c) 2009 The Go Authors. All rights reserved.
|
||||||
|
|
||||||
|
Redistribution and use in source and binary forms, with or without
|
||||||
|
modification, are permitted provided that the following conditions are
|
||||||
|
met:
|
||||||
|
|
||||||
|
* Redistributions of source code must retain the above copyright
|
||||||
|
notice, this list of conditions and the following disclaimer.
|
||||||
|
* Redistributions in binary form must reproduce the above
|
||||||
|
copyright notice, this list of conditions and the following disclaimer
|
||||||
|
in the documentation and/or other materials provided with the
|
||||||
|
distribution.
|
||||||
|
* Neither the name of Google Inc. nor the names of its
|
||||||
|
contributors may be used to endorse or promote products derived from
|
||||||
|
this software without specific prior written permission.
|
||||||
|
|
||||||
|
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
|
||||||
|
= vendor/golang.org/x/tools/LICENSE 5d4950ecb7b26d2c5e4e7b4e0dd74707
|
||||||
|
================================================================================
|
||||||
|
|
||||||
|
|
||||||
|
================================================================================
|
||||||
|
= vendor/golang.org/x/tools/go/internal/gcimporter licensed under: =
|
||||||
|
|
||||||
|
Copyright (c) 2009 The Go Authors. All rights reserved.
|
||||||
|
|
||||||
|
Redistribution and use in source and binary forms, with or without
|
||||||
|
modification, are permitted provided that the following conditions are
|
||||||
|
met:
|
||||||
|
|
||||||
|
* Redistributions of source code must retain the above copyright
|
||||||
|
notice, this list of conditions and the following disclaimer.
|
||||||
|
* Redistributions in binary form must reproduce the above
|
||||||
|
copyright notice, this list of conditions and the following disclaimer
|
||||||
|
in the documentation and/or other materials provided with the
|
||||||
|
distribution.
|
||||||
|
* Neither the name of Google Inc. nor the names of its
|
||||||
|
contributors may be used to endorse or promote products derived from
|
||||||
|
this software without specific prior written permission.
|
||||||
|
|
||||||
|
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
|
||||||
|
= vendor/golang.org/x/tools/LICENSE 5d4950ecb7b26d2c5e4e7b4e0dd74707
|
||||||
|
================================================================================
|
||||||
|
|
||||||
|
|
||||||
|
================================================================================
|
||||||
|
= vendor/golang.org/x/tools/go/internal/packagesdriver licensed under: =
|
||||||
|
|
||||||
|
Copyright (c) 2009 The Go Authors. All rights reserved.
|
||||||
|
|
||||||
|
Redistribution and use in source and binary forms, with or without
|
||||||
|
modification, are permitted provided that the following conditions are
|
||||||
|
met:
|
||||||
|
|
||||||
|
* Redistributions of source code must retain the above copyright
|
||||||
|
notice, this list of conditions and the following disclaimer.
|
||||||
|
* Redistributions in binary form must reproduce the above
|
||||||
|
copyright notice, this list of conditions and the following disclaimer
|
||||||
|
in the documentation and/or other materials provided with the
|
||||||
|
distribution.
|
||||||
|
* Neither the name of Google Inc. nor the names of its
|
||||||
|
contributors may be used to endorse or promote products derived from
|
||||||
|
this software without specific prior written permission.
|
||||||
|
|
||||||
|
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
|
||||||
|
= vendor/golang.org/x/tools/LICENSE 5d4950ecb7b26d2c5e4e7b4e0dd74707
|
||||||
|
================================================================================
|
||||||
|
|
||||||
|
|
||||||
|
================================================================================
|
||||||
|
= vendor/golang.org/x/tools/go/packages licensed under: =
|
||||||
|
|
||||||
Copyright (c) 2009 The Go Authors. All rights reserved.
|
Copyright (c) 2009 The Go Authors. All rights reserved.
|
||||||
|
|
||||||
@@ -101791,6 +102010,146 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|||||||
================================================================================
|
================================================================================
|
||||||
|
|
||||||
|
|
||||||
|
================================================================================
|
||||||
|
= vendor/golang.org/x/tools/internal/fastwalk licensed under: =
|
||||||
|
|
||||||
|
Copyright (c) 2009 The Go Authors. All rights reserved.
|
||||||
|
|
||||||
|
Redistribution and use in source and binary forms, with or without
|
||||||
|
modification, are permitted provided that the following conditions are
|
||||||
|
met:
|
||||||
|
|
||||||
|
* Redistributions of source code must retain the above copyright
|
||||||
|
notice, this list of conditions and the following disclaimer.
|
||||||
|
* Redistributions in binary form must reproduce the above
|
||||||
|
copyright notice, this list of conditions and the following disclaimer
|
||||||
|
in the documentation and/or other materials provided with the
|
||||||
|
distribution.
|
||||||
|
* Neither the name of Google Inc. nor the names of its
|
||||||
|
contributors may be used to endorse or promote products derived from
|
||||||
|
this software without specific prior written permission.
|
||||||
|
|
||||||
|
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
|
||||||
|
= vendor/golang.org/x/tools/LICENSE 5d4950ecb7b26d2c5e4e7b4e0dd74707
|
||||||
|
================================================================================
|
||||||
|
|
||||||
|
|
||||||
|
================================================================================
|
||||||
|
= vendor/golang.org/x/tools/internal/gopathwalk licensed under: =
|
||||||
|
|
||||||
|
Copyright (c) 2009 The Go Authors. All rights reserved.
|
||||||
|
|
||||||
|
Redistribution and use in source and binary forms, with or without
|
||||||
|
modification, are permitted provided that the following conditions are
|
||||||
|
met:
|
||||||
|
|
||||||
|
* Redistributions of source code must retain the above copyright
|
||||||
|
notice, this list of conditions and the following disclaimer.
|
||||||
|
* Redistributions in binary form must reproduce the above
|
||||||
|
copyright notice, this list of conditions and the following disclaimer
|
||||||
|
in the documentation and/or other materials provided with the
|
||||||
|
distribution.
|
||||||
|
* Neither the name of Google Inc. nor the names of its
|
||||||
|
contributors may be used to endorse or promote products derived from
|
||||||
|
this software without specific prior written permission.
|
||||||
|
|
||||||
|
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
|
||||||
|
= vendor/golang.org/x/tools/LICENSE 5d4950ecb7b26d2c5e4e7b4e0dd74707
|
||||||
|
================================================================================
|
||||||
|
|
||||||
|
|
||||||
|
================================================================================
|
||||||
|
= vendor/golang.org/x/tools/internal/module licensed under: =
|
||||||
|
|
||||||
|
Copyright (c) 2009 The Go Authors. All rights reserved.
|
||||||
|
|
||||||
|
Redistribution and use in source and binary forms, with or without
|
||||||
|
modification, are permitted provided that the following conditions are
|
||||||
|
met:
|
||||||
|
|
||||||
|
* Redistributions of source code must retain the above copyright
|
||||||
|
notice, this list of conditions and the following disclaimer.
|
||||||
|
* Redistributions in binary form must reproduce the above
|
||||||
|
copyright notice, this list of conditions and the following disclaimer
|
||||||
|
in the documentation and/or other materials provided with the
|
||||||
|
distribution.
|
||||||
|
* Neither the name of Google Inc. nor the names of its
|
||||||
|
contributors may be used to endorse or promote products derived from
|
||||||
|
this software without specific prior written permission.
|
||||||
|
|
||||||
|
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
|
||||||
|
= vendor/golang.org/x/tools/LICENSE 5d4950ecb7b26d2c5e4e7b4e0dd74707
|
||||||
|
================================================================================
|
||||||
|
|
||||||
|
|
||||||
|
================================================================================
|
||||||
|
= vendor/golang.org/x/tools/internal/semver licensed under: =
|
||||||
|
|
||||||
|
Copyright (c) 2009 The Go Authors. All rights reserved.
|
||||||
|
|
||||||
|
Redistribution and use in source and binary forms, with or without
|
||||||
|
modification, are permitted provided that the following conditions are
|
||||||
|
met:
|
||||||
|
|
||||||
|
* Redistributions of source code must retain the above copyright
|
||||||
|
notice, this list of conditions and the following disclaimer.
|
||||||
|
* Redistributions in binary form must reproduce the above
|
||||||
|
copyright notice, this list of conditions and the following disclaimer
|
||||||
|
in the documentation and/or other materials provided with the
|
||||||
|
distribution.
|
||||||
|
* Neither the name of Google Inc. nor the names of its
|
||||||
|
contributors may be used to endorse or promote products derived from
|
||||||
|
this software without specific prior written permission.
|
||||||
|
|
||||||
|
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
|
||||||
|
= vendor/golang.org/x/tools/LICENSE 5d4950ecb7b26d2c5e4e7b4e0dd74707
|
||||||
|
================================================================================
|
||||||
|
|
||||||
|
|
||||||
================================================================================
|
================================================================================
|
||||||
= vendor/gonum.org/v1/gonum/blas licensed under: =
|
= vendor/gonum.org/v1/gonum/blas licensed under: =
|
||||||
|
|
||||||
|
|||||||
@@ -1,14 +1,7 @@
|
|||||||
approvers:
|
approvers:
|
||||||
- castrojo
|
|
||||||
- chuckbutler
|
|
||||||
- marcoceppi
|
|
||||||
- mbruzek
|
|
||||||
- Cynerva
|
- Cynerva
|
||||||
- ktsakalozos
|
- ktsakalozos
|
||||||
reviewers:
|
reviewers:
|
||||||
- chuckbutler
|
|
||||||
- marcoceppi
|
|
||||||
- mbruzek
|
|
||||||
- thockin
|
- thockin
|
||||||
- mikedanese
|
- mikedanese
|
||||||
- eparis
|
- eparis
|
||||||
|
|||||||
@@ -176,6 +176,7 @@
|
|||||||
./test/cmd/core.sh
|
./test/cmd/core.sh
|
||||||
./test/cmd/crd.sh
|
./test/cmd/crd.sh
|
||||||
./test/cmd/create.sh
|
./test/cmd/create.sh
|
||||||
|
./test/cmd/delete.sh
|
||||||
./test/cmd/diff.sh
|
./test/cmd/diff.sh
|
||||||
./test/cmd/discovery.sh
|
./test/cmd/discovery.sh
|
||||||
./test/cmd/generic-resources.sh
|
./test/cmd/generic-resources.sh
|
||||||
|
|||||||
@@ -55,15 +55,17 @@ fi
|
|||||||
# Some things we want in godeps aren't code dependencies, so ./...
|
# Some things we want in godeps aren't code dependencies, so ./...
|
||||||
# won't pick them up.
|
# won't pick them up.
|
||||||
REQUIRED_BINS=(
|
REQUIRED_BINS=(
|
||||||
"github.com/onsi/ginkgo/ginkgo"
|
"github.com/bazelbuild/bazel-gazelle/cmd/gazelle"
|
||||||
"github.com/jteeuwen/go-bindata/go-bindata"
|
"github.com/cespare/prettybench"
|
||||||
"github.com/client9/misspell/cmd/misspell"
|
"github.com/client9/misspell/cmd/misspell"
|
||||||
"github.com/cloudflare/cfssl/cmd/cfssl"
|
"github.com/cloudflare/cfssl/cmd/cfssl"
|
||||||
"github.com/cloudflare/cfssl/cmd/cfssljson"
|
"github.com/cloudflare/cfssl/cmd/cfssljson"
|
||||||
"github.com/bazelbuild/bazel-gazelle/cmd/gazelle"
|
"github.com/jstemmer/go-junit-report"
|
||||||
|
"github.com/jteeuwen/go-bindata/go-bindata"
|
||||||
|
"github.com/onsi/ginkgo/ginkgo"
|
||||||
|
"golang.org/x/lint/golint"
|
||||||
"k8s.io/kube-openapi/cmd/openapi-gen"
|
"k8s.io/kube-openapi/cmd/openapi-gen"
|
||||||
"k8s.io/repo-infra/kazel"
|
"k8s.io/repo-infra/kazel"
|
||||||
"golang.org/x/lint/golint"
|
|
||||||
"./..."
|
"./..."
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|||||||
@@ -33,7 +33,7 @@ retry() {
|
|||||||
|
|
||||||
export PATH=${GOPATH}/bin:${PWD}/third_party/etcd:/usr/local/go/bin:${PATH}
|
export PATH=${GOPATH}/bin:${PWD}/third_party/etcd:/usr/local/go/bin:${PATH}
|
||||||
|
|
||||||
retry go get github.com/cespare/prettybench
|
go install k8s.io/kubernetes/vendor/github.com/cespare/prettybench
|
||||||
|
|
||||||
# Disable the Go race detector.
|
# Disable the Go race detector.
|
||||||
export KUBE_RACE=" "
|
export KUBE_RACE=" "
|
||||||
|
|||||||
@@ -32,7 +32,7 @@ export PATH=${GOPATH}/bin:${HOME}/third_party/etcd:/usr/local/go/bin:${PATH}
|
|||||||
|
|
||||||
# Install a few things needed by unit and /integration tests.
|
# Install a few things needed by unit and /integration tests.
|
||||||
command -v etcd &>/dev/null || ./hack/install-etcd.sh
|
command -v etcd &>/dev/null || ./hack/install-etcd.sh
|
||||||
go get -u github.com/jstemmer/go-junit-report
|
go install k8s.io/kubernetes/vendor/github.com/jstemmer/go-junit-report
|
||||||
|
|
||||||
# Enable the Go race detector.
|
# Enable the Go race detector.
|
||||||
export KUBE_RACE=-race
|
export KUBE_RACE=-race
|
||||||
|
|||||||
@@ -33,7 +33,7 @@ retry() {
|
|||||||
|
|
||||||
export PATH=${GOPATH}/bin:${PWD}/third_party/etcd:/usr/local/go/bin:${PATH}
|
export PATH=${GOPATH}/bin:${PWD}/third_party/etcd:/usr/local/go/bin:${PATH}
|
||||||
|
|
||||||
retry go get github.com/jstemmer/go-junit-report
|
go install k8s.io/kubernetes/vendor/github.com/jstemmer/go-junit-report
|
||||||
|
|
||||||
# Enable the Go race detector.
|
# Enable the Go race detector.
|
||||||
export KUBE_RACE=-race
|
export KUBE_RACE=-race
|
||||||
|
|||||||
@@ -4725,12 +4725,8 @@ func ValidateResourceRequirements(requirements *core.ResourceRequirements, fldPa
|
|||||||
allErrs = append(allErrs, ValidateResourceQuantityValue(string(resourceName), quantity, fldPath)...)
|
allErrs = append(allErrs, ValidateResourceQuantityValue(string(resourceName), quantity, fldPath)...)
|
||||||
|
|
||||||
if helper.IsHugePageResourceName(resourceName) {
|
if helper.IsHugePageResourceName(resourceName) {
|
||||||
if !utilfeature.DefaultFeatureGate.Enabled(features.HugePages) {
|
|
||||||
allErrs = append(allErrs, field.Forbidden(limPath, fmt.Sprintf("%s field disabled by feature-gate for ResourceRequirements", resourceName)))
|
|
||||||
} else {
|
|
||||||
limContainsHugePages = true
|
limContainsHugePages = true
|
||||||
}
|
}
|
||||||
}
|
|
||||||
|
|
||||||
if supportedQoSComputeResources.Has(string(resourceName)) {
|
if supportedQoSComputeResources.Has(string(resourceName)) {
|
||||||
limContainsCpuOrMemory = true
|
limContainsCpuOrMemory = true
|
||||||
|
|||||||
@@ -53,8 +53,6 @@ const (
|
|||||||
VolDir = "kubevols"
|
VolDir = "kubevols"
|
||||||
RoundTripperDefaultCount = 3
|
RoundTripperDefaultCount = 3
|
||||||
DummyVMPrefixName = "vsphere-k8s"
|
DummyVMPrefixName = "vsphere-k8s"
|
||||||
MacOuiVC = "00:50:56"
|
|
||||||
MacOuiEsx = "00:0c:29"
|
|
||||||
CleanUpDummyVMRoutineInterval = 5
|
CleanUpDummyVMRoutineInterval = 5
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -533,6 +531,15 @@ func (vs *VSphere) Instances() (cloudprovider.Instances, bool) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func getLocalIP() ([]v1.NodeAddress, error) {
|
func getLocalIP() ([]v1.NodeAddress, error) {
|
||||||
|
// hashtable with VMware-allocated OUIs for MAC filtering
|
||||||
|
// List of official OUIs: http://standards-oui.ieee.org/oui.txt
|
||||||
|
vmwareOUI := map[string]bool{
|
||||||
|
"00:05:69": true,
|
||||||
|
"00:0c:29": true,
|
||||||
|
"00:1c:14": true,
|
||||||
|
"00:50:56": true,
|
||||||
|
}
|
||||||
|
|
||||||
addrs := []v1.NodeAddress{}
|
addrs := []v1.NodeAddress{}
|
||||||
ifaces, err := net.Interfaces()
|
ifaces, err := net.Interfaces()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -548,9 +555,12 @@ func getLocalIP() ([]v1.NodeAddress, error) {
|
|||||||
if ipnet, ok := addr.(*net.IPNet); ok && !ipnet.IP.IsLoopback() {
|
if ipnet, ok := addr.(*net.IPNet); ok && !ipnet.IP.IsLoopback() {
|
||||||
if ipnet.IP.To4() != nil {
|
if ipnet.IP.To4() != nil {
|
||||||
// Filter external IP by MAC address OUIs from vCenter and from ESX
|
// Filter external IP by MAC address OUIs from vCenter and from ESX
|
||||||
var addressType v1.NodeAddressType
|
vmMACAddr := strings.ToLower(i.HardwareAddr.String())
|
||||||
if strings.HasPrefix(i.HardwareAddr.String(), MacOuiVC) ||
|
// Making sure that the MAC address is long enough
|
||||||
strings.HasPrefix(i.HardwareAddr.String(), MacOuiEsx) {
|
if len(vmMACAddr) < 17 {
|
||||||
|
return addrs, fmt.Errorf("MAC address %q is invalid", vmMACAddr)
|
||||||
|
}
|
||||||
|
if vmwareOUI[vmMACAddr[:8]] {
|
||||||
nodehelpers.AddToNodeAddresses(&addrs,
|
nodehelpers.AddToNodeAddresses(&addrs,
|
||||||
v1.NodeAddress{
|
v1.NodeAddress{
|
||||||
Type: v1.NodeExternalIP,
|
Type: v1.NodeExternalIP,
|
||||||
@@ -561,8 +571,10 @@ func getLocalIP() ([]v1.NodeAddress, error) {
|
|||||||
Address: ipnet.IP.String(),
|
Address: ipnet.IP.String(),
|
||||||
},
|
},
|
||||||
)
|
)
|
||||||
|
klog.V(4).Infof("Detected local IP address as %q", ipnet.IP.String())
|
||||||
|
} else {
|
||||||
|
klog.Warningf("Failed to patch IP as MAC address %q does not belong to a VMware platform", vmMACAddr)
|
||||||
}
|
}
|
||||||
klog.V(4).Infof("Find local IP address %v and set type to %v", ipnet.IP.String(), addressType)
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -11,6 +11,7 @@ go_library(
|
|||||||
srcs = [
|
srcs = [
|
||||||
"doc.go",
|
"doc.go",
|
||||||
"endpoints_controller.go",
|
"endpoints_controller.go",
|
||||||
|
"trigger_time_tracker.go",
|
||||||
],
|
],
|
||||||
importpath = "k8s.io/kubernetes/pkg/controller/endpoint",
|
importpath = "k8s.io/kubernetes/pkg/controller/endpoint",
|
||||||
deps = [
|
deps = [
|
||||||
@@ -39,7 +40,10 @@ go_library(
|
|||||||
|
|
||||||
go_test(
|
go_test(
|
||||||
name = "go_default_test",
|
name = "go_default_test",
|
||||||
srcs = ["endpoints_controller_test.go"],
|
srcs = [
|
||||||
|
"endpoints_controller_test.go",
|
||||||
|
"trigger_time_tracker_test.go",
|
||||||
|
],
|
||||||
embed = [":go_default_library"],
|
embed = [":go_default_library"],
|
||||||
deps = [
|
deps = [
|
||||||
"//pkg/api/testapi:go_default_library",
|
"//pkg/api/testapi:go_default_library",
|
||||||
|
|||||||
@@ -101,6 +101,8 @@ func NewEndpointController(podInformer coreinformers.PodInformer, serviceInforme
|
|||||||
e.endpointsLister = endpointsInformer.Lister()
|
e.endpointsLister = endpointsInformer.Lister()
|
||||||
e.endpointsSynced = endpointsInformer.Informer().HasSynced
|
e.endpointsSynced = endpointsInformer.Informer().HasSynced
|
||||||
|
|
||||||
|
e.triggerTimeTracker = NewTriggerTimeTracker()
|
||||||
|
|
||||||
return e
|
return e
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -138,6 +140,10 @@ type EndpointController struct {
|
|||||||
|
|
||||||
// workerLoopPeriod is the time between worker runs. The workers process the queue of service and pod changes.
|
// workerLoopPeriod is the time between worker runs. The workers process the queue of service and pod changes.
|
||||||
workerLoopPeriod time.Duration
|
workerLoopPeriod time.Duration
|
||||||
|
|
||||||
|
// triggerTimeTracker is an util used to compute and export the EndpointsLastChangeTriggerTime
|
||||||
|
// annotation.
|
||||||
|
triggerTimeTracker *TriggerTimeTracker
|
||||||
}
|
}
|
||||||
|
|
||||||
// Run will not return until stopCh is closed. workers determines how many
|
// Run will not return until stopCh is closed. workers determines how many
|
||||||
@@ -399,6 +405,7 @@ func (e *EndpointController) syncService(key string) error {
|
|||||||
if err != nil && !errors.IsNotFound(err) {
|
if err != nil && !errors.IsNotFound(err) {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
e.triggerTimeTracker.DeleteEndpoints(namespace, name)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -427,6 +434,12 @@ func (e *EndpointController) syncService(key string) error {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// We call ComputeEndpointsLastChangeTriggerTime here to make sure that the state of the trigger
|
||||||
|
// time tracker gets updated even if the sync turns out to be no-op and we don't update the
|
||||||
|
// endpoints object.
|
||||||
|
endpointsLastChangeTriggerTime := e.triggerTimeTracker.
|
||||||
|
ComputeEndpointsLastChangeTriggerTime(namespace, name, service, pods)
|
||||||
|
|
||||||
subsets := []v1.EndpointSubset{}
|
subsets := []v1.EndpointSubset{}
|
||||||
var totalReadyEps int
|
var totalReadyEps int
|
||||||
var totalNotReadyEps int
|
var totalNotReadyEps int
|
||||||
@@ -506,6 +519,11 @@ func (e *EndpointController) syncService(key string) error {
|
|||||||
newEndpoints.Annotations = make(map[string]string)
|
newEndpoints.Annotations = make(map[string]string)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if !endpointsLastChangeTriggerTime.IsZero() {
|
||||||
|
newEndpoints.Annotations[v1.EndpointsLastChangeTriggerTime] =
|
||||||
|
endpointsLastChangeTriggerTime.Format(time.RFC3339Nano)
|
||||||
|
}
|
||||||
|
|
||||||
klog.V(4).Infof("Update endpoints for %v/%v, ready: %d not ready: %d", service.Namespace, service.Name, totalReadyEps, totalNotReadyEps)
|
klog.V(4).Infof("Update endpoints for %v/%v, ready: %d not ready: %d", service.Namespace, service.Name, totalReadyEps, totalNotReadyEps)
|
||||||
if createEndpoints {
|
if createEndpoints {
|
||||||
// No previous endpoints, create them
|
// No previous endpoints, create them
|
||||||
|
|||||||
@@ -44,6 +44,9 @@ import (
|
|||||||
var alwaysReady = func() bool { return true }
|
var alwaysReady = func() bool { return true }
|
||||||
var neverReady = func() bool { return false }
|
var neverReady = func() bool { return false }
|
||||||
var emptyNodeName string
|
var emptyNodeName string
|
||||||
|
var triggerTime = time.Date(2018, 01, 01, 0, 0, 0, 0, time.UTC)
|
||||||
|
var triggerTimeString = triggerTime.Format(time.RFC3339Nano)
|
||||||
|
var oldTriggerTimeString = triggerTime.Add(-time.Hour).Format(time.RFC3339Nano)
|
||||||
|
|
||||||
func addPods(store cache.Store, namespace string, nPods int, nPorts int, nNotReady int) {
|
func addPods(store cache.Store, namespace string, nPods int, nPorts int, nNotReady int) {
|
||||||
for i := 0; i < nPods+nNotReady; i++ {
|
for i := 0; i < nPods+nNotReady; i++ {
|
||||||
@@ -1175,3 +1178,94 @@ func TestDetermineNeededServiceUpdates(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestLastTriggerChangeTimeAnnotation(t *testing.T) {
|
||||||
|
ns := "other"
|
||||||
|
testServer, endpointsHandler := makeTestServer(t, ns)
|
||||||
|
defer testServer.Close()
|
||||||
|
endpoints := newController(testServer.URL)
|
||||||
|
endpoints.endpointsStore.Add(&v1.Endpoints{
|
||||||
|
ObjectMeta: metav1.ObjectMeta{
|
||||||
|
Name: "foo",
|
||||||
|
Namespace: ns,
|
||||||
|
ResourceVersion: "1",
|
||||||
|
},
|
||||||
|
Subsets: []v1.EndpointSubset{{
|
||||||
|
Addresses: []v1.EndpointAddress{{IP: "6.7.8.9", NodeName: &emptyNodeName}},
|
||||||
|
Ports: []v1.EndpointPort{{Port: 1000, Protocol: "TCP"}},
|
||||||
|
}},
|
||||||
|
})
|
||||||
|
addPods(endpoints.podStore, ns, 1, 1, 0)
|
||||||
|
endpoints.serviceStore.Add(&v1.Service{
|
||||||
|
ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: ns, CreationTimestamp: metav1.NewTime(triggerTime)},
|
||||||
|
Spec: v1.ServiceSpec{
|
||||||
|
Selector: map[string]string{},
|
||||||
|
Ports: []v1.ServicePort{{Port: 80, TargetPort: intstr.FromInt(8080), Protocol: "TCP"}},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
endpoints.syncService(ns + "/foo")
|
||||||
|
|
||||||
|
endpointsHandler.ValidateRequestCount(t, 1)
|
||||||
|
data := runtime.EncodeOrDie(testapi.Default.Codec(), &v1.Endpoints{
|
||||||
|
ObjectMeta: metav1.ObjectMeta{
|
||||||
|
Name: "foo",
|
||||||
|
Namespace: ns,
|
||||||
|
ResourceVersion: "1",
|
||||||
|
Annotations: map[string]string{
|
||||||
|
v1.EndpointsLastChangeTriggerTime: triggerTimeString,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Subsets: []v1.EndpointSubset{{
|
||||||
|
Addresses: []v1.EndpointAddress{{IP: "1.2.3.4", NodeName: &emptyNodeName, TargetRef: &v1.ObjectReference{Kind: "Pod", Name: "pod0", Namespace: ns}}},
|
||||||
|
Ports: []v1.EndpointPort{{Port: 8080, Protocol: "TCP"}},
|
||||||
|
}},
|
||||||
|
})
|
||||||
|
endpointsHandler.ValidateRequest(t, testapi.Default.ResourcePath("endpoints", ns, "foo"), "PUT", &data)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestLastTriggerChangeTimeAnnotation_AnnotationOverridden(t *testing.T) {
|
||||||
|
ns := "other"
|
||||||
|
testServer, endpointsHandler := makeTestServer(t, ns)
|
||||||
|
defer testServer.Close()
|
||||||
|
endpoints := newController(testServer.URL)
|
||||||
|
endpoints.endpointsStore.Add(&v1.Endpoints{
|
||||||
|
ObjectMeta: metav1.ObjectMeta{
|
||||||
|
Name: "foo",
|
||||||
|
Namespace: ns,
|
||||||
|
ResourceVersion: "1",
|
||||||
|
Annotations: map[string]string{
|
||||||
|
v1.EndpointsLastChangeTriggerTime: oldTriggerTimeString,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Subsets: []v1.EndpointSubset{{
|
||||||
|
Addresses: []v1.EndpointAddress{{IP: "6.7.8.9", NodeName: &emptyNodeName}},
|
||||||
|
Ports: []v1.EndpointPort{{Port: 1000, Protocol: "TCP"}},
|
||||||
|
}},
|
||||||
|
})
|
||||||
|
addPods(endpoints.podStore, ns, 1, 1, 0)
|
||||||
|
endpoints.serviceStore.Add(&v1.Service{
|
||||||
|
ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: ns, CreationTimestamp: metav1.NewTime(triggerTime)},
|
||||||
|
Spec: v1.ServiceSpec{
|
||||||
|
Selector: map[string]string{},
|
||||||
|
Ports: []v1.ServicePort{{Port: 80, TargetPort: intstr.FromInt(8080), Protocol: "TCP"}},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
endpoints.syncService(ns + "/foo")
|
||||||
|
|
||||||
|
endpointsHandler.ValidateRequestCount(t, 1)
|
||||||
|
data := runtime.EncodeOrDie(testapi.Default.Codec(), &v1.Endpoints{
|
||||||
|
ObjectMeta: metav1.ObjectMeta{
|
||||||
|
Name: "foo",
|
||||||
|
Namespace: ns,
|
||||||
|
ResourceVersion: "1",
|
||||||
|
Annotations: map[string]string{
|
||||||
|
v1.EndpointsLastChangeTriggerTime: triggerTimeString,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Subsets: []v1.EndpointSubset{{
|
||||||
|
Addresses: []v1.EndpointAddress{{IP: "1.2.3.4", NodeName: &emptyNodeName, TargetRef: &v1.ObjectReference{Kind: "Pod", Name: "pod0", Namespace: ns}}},
|
||||||
|
Ports: []v1.EndpointPort{{Port: 8080, Protocol: "TCP"}},
|
||||||
|
}},
|
||||||
|
})
|
||||||
|
endpointsHandler.ValidateRequest(t, testapi.Default.ResourcePath("endpoints", ns, "foo"), "PUT", &data)
|
||||||
|
}
|
||||||
|
|||||||
163
pkg/controller/endpoint/trigger_time_tracker.go
Normal file
163
pkg/controller/endpoint/trigger_time_tracker.go
Normal file
@@ -0,0 +1,163 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2019 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package endpoint
|
||||||
|
|
||||||
|
import (
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"k8s.io/api/core/v1"
|
||||||
|
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TriggerTimeTracker is a util used to compute the EndpointsLastChangeTriggerTime annotation which
|
||||||
|
// is exported in the endpoints controller's sync function.
|
||||||
|
// See the documentation of the EndpointsLastChangeTriggerTime annotation for more details.
|
||||||
|
//
|
||||||
|
// Please note that this util may compute a wrong EndpointsLastChangeTriggerTime if a same object
|
||||||
|
// changes multiple times between two consecutive syncs. We're aware of this limitation but we
|
||||||
|
// decided to accept it, as fixing it would require a major rewrite of the endpoints controller and
|
||||||
|
// Informer framework. Such situations, i.e. frequent updates of the same object in a single sync
|
||||||
|
// period, should be relatively rare and therefore this util should provide a good approximation of
|
||||||
|
// the EndpointsLastChangeTriggerTime.
|
||||||
|
// TODO(mm4tt): Implement a more robust mechanism that is not subject to the above limitations.
|
||||||
|
type TriggerTimeTracker struct {
|
||||||
|
// endpointsStates is a map, indexed by Endpoints object key, storing the last known Endpoints
|
||||||
|
// object state observed during the most recent call of the ComputeEndpointsLastChangeTriggerTime
|
||||||
|
// function.
|
||||||
|
endpointsStates map[endpointsKey]endpointsState
|
||||||
|
|
||||||
|
// mutex guarding the endpointsStates map.
|
||||||
|
mutex sync.Mutex
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewTriggerTimeTracker creates a new instance of the TriggerTimeTracker.
|
||||||
|
func NewTriggerTimeTracker() *TriggerTimeTracker {
|
||||||
|
return &TriggerTimeTracker{
|
||||||
|
endpointsStates: make(map[endpointsKey]endpointsState),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// endpointsKey is a key uniquely identifying an Endpoints object.
|
||||||
|
type endpointsKey struct {
|
||||||
|
// namespace, name composing a namespaced name - an unique identifier of every Endpoints object.
|
||||||
|
namespace, name string
|
||||||
|
}
|
||||||
|
|
||||||
|
// endpointsState represents a state of an Endpoints object that is known to this util.
|
||||||
|
type endpointsState struct {
|
||||||
|
// lastServiceTriggerTime is a service trigger time observed most recently.
|
||||||
|
lastServiceTriggerTime time.Time
|
||||||
|
// lastPodTriggerTimes is a map (Pod name -> time) storing the pod trigger times that were
|
||||||
|
// observed during the most recent call of the ComputeEndpointsLastChangeTriggerTime function.
|
||||||
|
lastPodTriggerTimes map[string]time.Time
|
||||||
|
}
|
||||||
|
|
||||||
|
// ComputeEndpointsLastChangeTriggerTime updates the state of the Endpoints object being synced
|
||||||
|
// and returns the time that should be exported as the EndpointsLastChangeTriggerTime annotation.
|
||||||
|
//
|
||||||
|
// If the method returns a 'zero' time the EndpointsLastChangeTriggerTime annotation shouldn't be
|
||||||
|
// exported.
|
||||||
|
//
|
||||||
|
// Please note that this function may compute a wrong EndpointsLastChangeTriggerTime value if the
|
||||||
|
// same object (pod/service) changes multiple times between two consecutive syncs.
|
||||||
|
//
|
||||||
|
// Important: This method is go-routing safe but only when called for different keys. The method
|
||||||
|
// shouldn't be called concurrently for the same key! This contract is fulfilled in the current
|
||||||
|
// implementation of the endpoints controller.
|
||||||
|
func (t *TriggerTimeTracker) ComputeEndpointsLastChangeTriggerTime(
|
||||||
|
namespace, name string, service *v1.Service, pods []*v1.Pod) time.Time {
|
||||||
|
|
||||||
|
key := endpointsKey{namespace: namespace, name: name}
|
||||||
|
// As there won't be any concurrent calls for the same key, we need to guard access only to the
|
||||||
|
// endpointsStates map.
|
||||||
|
t.mutex.Lock()
|
||||||
|
state, wasKnown := t.endpointsStates[key]
|
||||||
|
t.mutex.Unlock()
|
||||||
|
|
||||||
|
// Update the state before returning.
|
||||||
|
defer func() {
|
||||||
|
t.mutex.Lock()
|
||||||
|
t.endpointsStates[key] = state
|
||||||
|
t.mutex.Unlock()
|
||||||
|
}()
|
||||||
|
|
||||||
|
// minChangedTriggerTime is the min trigger time of all trigger times that have changed since the
|
||||||
|
// last sync.
|
||||||
|
var minChangedTriggerTime time.Time
|
||||||
|
// TODO(mm4tt): If memory allocation / GC performance impact of recreating map in every call
|
||||||
|
// turns out to be too expensive, we should consider rewriting this to reuse the existing map.
|
||||||
|
podTriggerTimes := make(map[string]time.Time)
|
||||||
|
for _, pod := range pods {
|
||||||
|
if podTriggerTime := getPodTriggerTime(pod); !podTriggerTime.IsZero() {
|
||||||
|
podTriggerTimes[pod.Name] = podTriggerTime
|
||||||
|
if podTriggerTime.After(state.lastPodTriggerTimes[pod.Name]) {
|
||||||
|
// Pod trigger time has changed since the last sync, update minChangedTriggerTime.
|
||||||
|
minChangedTriggerTime = min(minChangedTriggerTime, podTriggerTime)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
serviceTriggerTime := getServiceTriggerTime(service)
|
||||||
|
if serviceTriggerTime.After(state.lastServiceTriggerTime) {
|
||||||
|
// Service trigger time has changed since the last sync, update minChangedTriggerTime.
|
||||||
|
minChangedTriggerTime = min(minChangedTriggerTime, serviceTriggerTime)
|
||||||
|
}
|
||||||
|
|
||||||
|
state.lastPodTriggerTimes = podTriggerTimes
|
||||||
|
state.lastServiceTriggerTime = serviceTriggerTime
|
||||||
|
|
||||||
|
if !wasKnown {
|
||||||
|
// New Endpoints object / new Service, use Service creationTimestamp.
|
||||||
|
return service.CreationTimestamp.Time
|
||||||
|
} else {
|
||||||
|
// Regular update of the Endpoints object, return min of changed trigger times.
|
||||||
|
return minChangedTriggerTime
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeleteEndpoints deletes endpoints state stored in this util.
|
||||||
|
func (t *TriggerTimeTracker) DeleteEndpoints(namespace, name string) {
|
||||||
|
key := endpointsKey{namespace: namespace, name: name}
|
||||||
|
t.mutex.Lock()
|
||||||
|
defer t.mutex.Unlock()
|
||||||
|
delete(t.endpointsStates, key)
|
||||||
|
}
|
||||||
|
|
||||||
|
// getPodTriggerTime returns the time of the pod change (trigger) that resulted or will result in
|
||||||
|
// the endpoints object change.
|
||||||
|
func getPodTriggerTime(pod *v1.Pod) (triggerTime time.Time) {
|
||||||
|
if readyCondition := podutil.GetPodReadyCondition(pod.Status); readyCondition != nil {
|
||||||
|
triggerTime = readyCondition.LastTransitionTime.Time
|
||||||
|
}
|
||||||
|
// TODO(mm4tt): Implement missing cases: deletionTime set, pod label change
|
||||||
|
return triggerTime
|
||||||
|
}
|
||||||
|
|
||||||
|
// getServiceTriggerTime returns the time of the service change (trigger) that resulted or will
|
||||||
|
// result in the endpoints object change.
|
||||||
|
func getServiceTriggerTime(service *v1.Service) (triggerTime time.Time) {
|
||||||
|
// TODO(mm4tt): Ideally we should look at service.LastUpdateTime, but such thing doesn't exist.
|
||||||
|
return service.CreationTimestamp.Time
|
||||||
|
}
|
||||||
|
|
||||||
|
// min returns minimum of the currentMin and newValue or newValue if the currentMin is not set.
|
||||||
|
func min(currentMin, newValue time.Time) time.Time {
|
||||||
|
if currentMin.IsZero() || newValue.Before(currentMin) {
|
||||||
|
return newValue
|
||||||
|
}
|
||||||
|
return currentMin
|
||||||
|
}
|
||||||
204
pkg/controller/endpoint/trigger_time_tracker_test.go
Normal file
204
pkg/controller/endpoint/trigger_time_tracker_test.go
Normal file
@@ -0,0 +1,204 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2019 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package endpoint
|
||||||
|
|
||||||
|
import (
|
||||||
|
"runtime"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"k8s.io/api/core/v1"
|
||||||
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
|
"k8s.io/apimachinery/pkg/util/intstr"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
t0 = time.Date(2019, 01, 01, 0, 0, 0, 0, time.UTC)
|
||||||
|
t1 = t0.Add(time.Second)
|
||||||
|
t2 = t1.Add(time.Second)
|
||||||
|
t3 = t2.Add(time.Second)
|
||||||
|
t4 = t3.Add(time.Second)
|
||||||
|
t5 = t4.Add(time.Second)
|
||||||
|
|
||||||
|
ns = "ns1"
|
||||||
|
name = "my-service"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestNewService_NoPods(t *testing.T) {
|
||||||
|
tester := newTester(t)
|
||||||
|
|
||||||
|
service := createService(ns, name, t2)
|
||||||
|
tester.whenComputeEndpointsLastChangeTriggerTime(ns, name, service).expect(t2)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestNewService_ExistingPods(t *testing.T) {
|
||||||
|
tester := newTester(t)
|
||||||
|
|
||||||
|
service := createService(ns, name, t3)
|
||||||
|
pod1 := createPod(ns, "pod1", t0)
|
||||||
|
pod2 := createPod(ns, "pod2", t1)
|
||||||
|
pod3 := createPod(ns, "pod3", t5)
|
||||||
|
tester.whenComputeEndpointsLastChangeTriggerTime(ns, name, service, pod1, pod2, pod3).
|
||||||
|
// Pods were created before service, but trigger time is the time when service was created.
|
||||||
|
expect(t3)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestPodsAdded(t *testing.T) {
|
||||||
|
tester := newTester(t)
|
||||||
|
|
||||||
|
service := createService(ns, name, t0)
|
||||||
|
tester.whenComputeEndpointsLastChangeTriggerTime(ns, name, service).expect(t0)
|
||||||
|
|
||||||
|
pod1 := createPod(ns, "pod1", t2)
|
||||||
|
pod2 := createPod(ns, "pod2", t1)
|
||||||
|
tester.whenComputeEndpointsLastChangeTriggerTime(ns, name, service, pod1, pod2).expect(t1)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestPodsUpdated(t *testing.T) {
|
||||||
|
tester := newTester(t)
|
||||||
|
|
||||||
|
service := createService(ns, name, t0)
|
||||||
|
pod1 := createPod(ns, "pod1", t1)
|
||||||
|
pod2 := createPod(ns, "pod2", t2)
|
||||||
|
pod3 := createPod(ns, "pod3", t3)
|
||||||
|
tester.whenComputeEndpointsLastChangeTriggerTime(ns, name, service, pod1, pod2, pod3).expect(t0)
|
||||||
|
|
||||||
|
pod1 = createPod(ns, "pod1", t5)
|
||||||
|
pod2 = createPod(ns, "pod2", t4)
|
||||||
|
// pod3 doesn't change.
|
||||||
|
tester.whenComputeEndpointsLastChangeTriggerTime(ns, name, service, pod1, pod2, pod3).expect(t4)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestPodsUpdated_NoOp(t *testing.T) {
|
||||||
|
tester := newTester(t)
|
||||||
|
|
||||||
|
service := createService(ns, name, t0)
|
||||||
|
pod1 := createPod(ns, "pod1", t1)
|
||||||
|
pod2 := createPod(ns, "pod2", t2)
|
||||||
|
pod3 := createPod(ns, "pod3", t3)
|
||||||
|
tester.whenComputeEndpointsLastChangeTriggerTime(ns, name, service, pod1, pod2, pod3).expect(t0)
|
||||||
|
|
||||||
|
// Nothing has changed.
|
||||||
|
tester.whenComputeEndpointsLastChangeTriggerTime(ns, name, service, pod1, pod2, pod3).expectNil()
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestPodDeletedThenAdded(t *testing.T) {
|
||||||
|
tester := newTester(t)
|
||||||
|
|
||||||
|
service := createService(ns, name, t0)
|
||||||
|
pod1 := createPod(ns, "pod1", t1)
|
||||||
|
pod2 := createPod(ns, "pod2", t2)
|
||||||
|
tester.whenComputeEndpointsLastChangeTriggerTime(ns, name, service, pod1, pod2).expect(t0)
|
||||||
|
|
||||||
|
tester.whenComputeEndpointsLastChangeTriggerTime(ns, name, service, pod1).expectNil()
|
||||||
|
|
||||||
|
pod2 = createPod(ns, "pod2", t4)
|
||||||
|
tester.whenComputeEndpointsLastChangeTriggerTime(ns, name, service, pod1, pod2).expect(t4)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestServiceDeletedThenAdded(t *testing.T) {
|
||||||
|
tester := newTester(t)
|
||||||
|
|
||||||
|
service := createService(ns, name, t0)
|
||||||
|
pod1 := createPod(ns, "pod1", t1)
|
||||||
|
pod2 := createPod(ns, "pod2", t2)
|
||||||
|
tester.whenComputeEndpointsLastChangeTriggerTime(ns, name, service, pod1, pod2).expect(t0)
|
||||||
|
|
||||||
|
tester.DeleteEndpoints(ns, name)
|
||||||
|
|
||||||
|
service = createService(ns, name, t3)
|
||||||
|
tester.whenComputeEndpointsLastChangeTriggerTime(ns, name, service, pod1, pod2).expect(t3)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestServiceUpdated_NoPodChange(t *testing.T) {
|
||||||
|
tester := newTester(t)
|
||||||
|
|
||||||
|
service := createService(ns, name, t0)
|
||||||
|
pod1 := createPod(ns, "pod1", t1)
|
||||||
|
pod2 := createPod(ns, "pod2", t2)
|
||||||
|
tester.whenComputeEndpointsLastChangeTriggerTime(ns, name, service, pod1, pod2).expect(t0)
|
||||||
|
|
||||||
|
// service's ports have changed.
|
||||||
|
service.Spec = v1.ServiceSpec{
|
||||||
|
Selector: map[string]string{},
|
||||||
|
Ports: []v1.ServicePort{{Port: 80, TargetPort: intstr.FromInt(8080), Protocol: "TCP"}},
|
||||||
|
}
|
||||||
|
|
||||||
|
// Currently we're not able to calculate trigger time for service updates, hence the returned
|
||||||
|
// value is a nil time.
|
||||||
|
tester.whenComputeEndpointsLastChangeTriggerTime(ns, name, service, pod1, pod2).expectNil()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ------- Test Utils -------
|
||||||
|
|
||||||
|
type tester struct {
|
||||||
|
*TriggerTimeTracker
|
||||||
|
t *testing.T
|
||||||
|
}
|
||||||
|
|
||||||
|
func newTester(t *testing.T) *tester {
|
||||||
|
return &tester{NewTriggerTimeTracker(), t}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *tester) whenComputeEndpointsLastChangeTriggerTime(
|
||||||
|
namespace, name string, service *v1.Service, pods ...*v1.Pod) subject {
|
||||||
|
return subject{t.ComputeEndpointsLastChangeTriggerTime(namespace, name, service, pods), t.t}
|
||||||
|
}
|
||||||
|
|
||||||
|
type subject struct {
|
||||||
|
got time.Time
|
||||||
|
t *testing.T
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s subject) expect(expected time.Time) {
|
||||||
|
s.doExpect(expected)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s subject) expectNil() {
|
||||||
|
s.doExpect(time.Time{})
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s subject) doExpect(expected time.Time) {
|
||||||
|
if s.got != expected {
|
||||||
|
_, fn, line, _ := runtime.Caller(2)
|
||||||
|
s.t.Errorf("Wrong trigger time in %s:%d expected %s, got %s", fn, line, expected, s.got)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func createPod(namespace, name string, readyTime time.Time) *v1.Pod {
|
||||||
|
return &v1.Pod{
|
||||||
|
ObjectMeta: metav1.ObjectMeta{Namespace: namespace, Name: name},
|
||||||
|
Status: v1.PodStatus{Conditions: []v1.PodCondition{
|
||||||
|
{
|
||||||
|
Type: v1.PodReady,
|
||||||
|
Status: v1.ConditionTrue,
|
||||||
|
LastTransitionTime: metav1.NewTime(readyTime),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func createService(namespace, name string, creationTime time.Time) *v1.Service {
|
||||||
|
return &v1.Service{
|
||||||
|
ObjectMeta: metav1.ObjectMeta{
|
||||||
|
Namespace: namespace,
|
||||||
|
Name: name,
|
||||||
|
CreationTimestamp: metav1.NewTime(creationTime),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -17,6 +17,7 @@ go_library(
|
|||||||
"scheduler_binder.go",
|
"scheduler_binder.go",
|
||||||
"scheduler_binder_cache.go",
|
"scheduler_binder_cache.go",
|
||||||
"scheduler_binder_fake.go",
|
"scheduler_binder_fake.go",
|
||||||
|
"util.go",
|
||||||
"volume_host.go",
|
"volume_host.go",
|
||||||
],
|
],
|
||||||
importpath = "k8s.io/kubernetes/pkg/controller/volume/persistentvolume",
|
importpath = "k8s.io/kubernetes/pkg/controller/volume/persistentvolume",
|
||||||
|
|||||||
@@ -185,7 +185,7 @@ func findMatchingVolume(
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if isVolumeBoundToClaim(volume, claim) {
|
if IsVolumeBoundToClaim(volume, claim) {
|
||||||
// this claim and volume are pre-bound; return
|
// this claim and volume are pre-bound; return
|
||||||
// the volume if the size request is satisfied,
|
// the volume if the size request is satisfied,
|
||||||
// otherwise continue searching for a match
|
// otherwise continue searching for a match
|
||||||
|
|||||||
@@ -294,24 +294,6 @@ func (ctrl *PersistentVolumeController) isDelayBindingProvisioning(claim *v1.Per
|
|||||||
return ok
|
return ok
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ctrl *PersistentVolumeController) isDelayBindingMode(claim *v1.PersistentVolumeClaim) (bool, error) {
|
|
||||||
className := v1helper.GetPersistentVolumeClaimClass(claim)
|
|
||||||
if className == "" {
|
|
||||||
return false, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
class, err := ctrl.classLister.Get(className)
|
|
||||||
if err != nil {
|
|
||||||
return false, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
if class.VolumeBindingMode == nil {
|
|
||||||
return false, fmt.Errorf("VolumeBindingMode not set for StorageClass %q", className)
|
|
||||||
}
|
|
||||||
|
|
||||||
return *class.VolumeBindingMode == storage.VolumeBindingWaitForFirstConsumer, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// shouldDelayBinding returns true if binding of claim should be delayed, false otherwise.
|
// shouldDelayBinding returns true if binding of claim should be delayed, false otherwise.
|
||||||
// If binding of claim should be delayed, only claims pbound by scheduler
|
// If binding of claim should be delayed, only claims pbound by scheduler
|
||||||
func (ctrl *PersistentVolumeController) shouldDelayBinding(claim *v1.PersistentVolumeClaim) (bool, error) {
|
func (ctrl *PersistentVolumeController) shouldDelayBinding(claim *v1.PersistentVolumeClaim) (bool, error) {
|
||||||
@@ -321,7 +303,7 @@ func (ctrl *PersistentVolumeController) shouldDelayBinding(claim *v1.PersistentV
|
|||||||
}
|
}
|
||||||
|
|
||||||
// If claim is in delay binding mode.
|
// If claim is in delay binding mode.
|
||||||
return ctrl.isDelayBindingMode(claim)
|
return IsDelayBindingMode(claim, ctrl.classLister)
|
||||||
}
|
}
|
||||||
|
|
||||||
// syncUnboundClaim is the main controller method to decide what to do with an
|
// syncUnboundClaim is the main controller method to decide what to do with an
|
||||||
@@ -419,7 +401,7 @@ func (ctrl *PersistentVolumeController) syncUnboundClaim(claim *v1.PersistentVol
|
|||||||
}
|
}
|
||||||
// OBSERVATION: pvc is "Bound", pv is "Bound"
|
// OBSERVATION: pvc is "Bound", pv is "Bound"
|
||||||
return nil
|
return nil
|
||||||
} else if isVolumeBoundToClaim(volume, claim) {
|
} else if IsVolumeBoundToClaim(volume, claim) {
|
||||||
// User asked for a PV that is claimed by this PVC
|
// User asked for a PV that is claimed by this PVC
|
||||||
// OBSERVATION: pvc is "Pending", pv is "Bound"
|
// OBSERVATION: pvc is "Pending", pv is "Bound"
|
||||||
klog.V(4).Infof("synchronizing unbound PersistentVolumeClaim[%s]: volume already bound, finishing the binding", claimToClaimKey(claim))
|
klog.V(4).Infof("synchronizing unbound PersistentVolumeClaim[%s]: volume already bound, finishing the binding", claimToClaimKey(claim))
|
||||||
@@ -863,7 +845,7 @@ func (ctrl *PersistentVolumeController) updateVolumePhaseWithEvent(volume *v1.Pe
|
|||||||
func (ctrl *PersistentVolumeController) bindVolumeToClaim(volume *v1.PersistentVolume, claim *v1.PersistentVolumeClaim) (*v1.PersistentVolume, error) {
|
func (ctrl *PersistentVolumeController) bindVolumeToClaim(volume *v1.PersistentVolume, claim *v1.PersistentVolumeClaim) (*v1.PersistentVolume, error) {
|
||||||
klog.V(4).Infof("updating PersistentVolume[%s]: binding to %q", volume.Name, claimToClaimKey(claim))
|
klog.V(4).Infof("updating PersistentVolume[%s]: binding to %q", volume.Name, claimToClaimKey(claim))
|
||||||
|
|
||||||
volumeClone, dirty, err := ctrl.getBindVolumeToClaim(volume, claim)
|
volumeClone, dirty, err := GetBindVolumeToClaim(volume, claim)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@@ -897,43 +879,6 @@ func (ctrl *PersistentVolumeController) updateBindVolumeToClaim(volumeClone *v1.
|
|||||||
return newVol, nil
|
return newVol, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Get new PV object only, no API or cache update
|
|
||||||
func (ctrl *PersistentVolumeController) getBindVolumeToClaim(volume *v1.PersistentVolume, claim *v1.PersistentVolumeClaim) (*v1.PersistentVolume, bool, error) {
|
|
||||||
dirty := false
|
|
||||||
|
|
||||||
// Check if the volume was already bound (either by user or by controller)
|
|
||||||
shouldSetBoundByController := false
|
|
||||||
if !isVolumeBoundToClaim(volume, claim) {
|
|
||||||
shouldSetBoundByController = true
|
|
||||||
}
|
|
||||||
|
|
||||||
// The volume from method args can be pointing to watcher cache. We must not
|
|
||||||
// modify these, therefore create a copy.
|
|
||||||
volumeClone := volume.DeepCopy()
|
|
||||||
|
|
||||||
// Bind the volume to the claim if it is not bound yet
|
|
||||||
if volume.Spec.ClaimRef == nil ||
|
|
||||||
volume.Spec.ClaimRef.Name != claim.Name ||
|
|
||||||
volume.Spec.ClaimRef.Namespace != claim.Namespace ||
|
|
||||||
volume.Spec.ClaimRef.UID != claim.UID {
|
|
||||||
|
|
||||||
claimRef, err := ref.GetReference(scheme.Scheme, claim)
|
|
||||||
if err != nil {
|
|
||||||
return nil, false, fmt.Errorf("Unexpected error getting claim reference: %v", err)
|
|
||||||
}
|
|
||||||
volumeClone.Spec.ClaimRef = claimRef
|
|
||||||
dirty = true
|
|
||||||
}
|
|
||||||
|
|
||||||
// Set annBoundByController if it is not set yet
|
|
||||||
if shouldSetBoundByController && !metav1.HasAnnotation(volumeClone.ObjectMeta, annBoundByController) {
|
|
||||||
metav1.SetMetaDataAnnotation(&volumeClone.ObjectMeta, annBoundByController, "yes")
|
|
||||||
dirty = true
|
|
||||||
}
|
|
||||||
|
|
||||||
return volumeClone, dirty, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// bindClaimToVolume modifies the given claim to be bound to a volume and
|
// bindClaimToVolume modifies the given claim to be bound to a volume and
|
||||||
// saves it to API server. The volume is not modified in this method!
|
// saves it to API server. The volume is not modified in this method!
|
||||||
func (ctrl *PersistentVolumeController) bindClaimToVolume(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) (*v1.PersistentVolumeClaim, error) {
|
func (ctrl *PersistentVolumeController) bindClaimToVolume(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) (*v1.PersistentVolumeClaim, error) {
|
||||||
|
|||||||
@@ -467,22 +467,6 @@ func getVolumeStatusForLogging(volume *v1.PersistentVolume) string {
|
|||||||
return fmt.Sprintf("phase: %s, bound to: %q, boundByController: %v", volume.Status.Phase, claimName, boundByController)
|
return fmt.Sprintf("phase: %s, bound to: %q, boundByController: %v", volume.Status.Phase, claimName, boundByController)
|
||||||
}
|
}
|
||||||
|
|
||||||
// isVolumeBoundToClaim returns true, if given volume is pre-bound or bound
|
|
||||||
// to specific claim. Both claim.Name and claim.Namespace must be equal.
|
|
||||||
// If claim.UID is present in volume.Spec.ClaimRef, it must be equal too.
|
|
||||||
func isVolumeBoundToClaim(volume *v1.PersistentVolume, claim *v1.PersistentVolumeClaim) bool {
|
|
||||||
if volume.Spec.ClaimRef == nil {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
if claim.Name != volume.Spec.ClaimRef.Name || claim.Namespace != volume.Spec.ClaimRef.Namespace {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
if volume.Spec.ClaimRef.UID != "" && claim.UID != volume.Spec.ClaimRef.UID {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// storeObjectUpdate updates given cache with a new object version from Informer
|
// storeObjectUpdate updates given cache with a new object version from Informer
|
||||||
// callback (i.e. with events from etcd) or with an object modified by the
|
// callback (i.e. with events from etcd) or with an object modified by the
|
||||||
// controller itself. Returns "true", if the cache was updated, false if the
|
// controller itself. Returns "true", if the cache was updated, false if the
|
||||||
|
|||||||
@@ -29,6 +29,7 @@ import (
|
|||||||
coreinformers "k8s.io/client-go/informers/core/v1"
|
coreinformers "k8s.io/client-go/informers/core/v1"
|
||||||
storageinformers "k8s.io/client-go/informers/storage/v1"
|
storageinformers "k8s.io/client-go/informers/storage/v1"
|
||||||
clientset "k8s.io/client-go/kubernetes"
|
clientset "k8s.io/client-go/kubernetes"
|
||||||
|
storagelisters "k8s.io/client-go/listers/storage/v1"
|
||||||
"k8s.io/klog"
|
"k8s.io/klog"
|
||||||
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
|
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
|
||||||
volumeutil "k8s.io/kubernetes/pkg/volume/util"
|
volumeutil "k8s.io/kubernetes/pkg/volume/util"
|
||||||
@@ -97,7 +98,8 @@ type SchedulerVolumeBinder interface {
|
|||||||
}
|
}
|
||||||
|
|
||||||
type volumeBinder struct {
|
type volumeBinder struct {
|
||||||
ctrl *PersistentVolumeController
|
kubeClient clientset.Interface
|
||||||
|
classLister storagelisters.StorageClassLister
|
||||||
|
|
||||||
nodeInformer coreinformers.NodeInformer
|
nodeInformer coreinformers.NodeInformer
|
||||||
pvcCache PVCAssumeCache
|
pvcCache PVCAssumeCache
|
||||||
@@ -120,14 +122,9 @@ func NewVolumeBinder(
|
|||||||
storageClassInformer storageinformers.StorageClassInformer,
|
storageClassInformer storageinformers.StorageClassInformer,
|
||||||
bindTimeout time.Duration) SchedulerVolumeBinder {
|
bindTimeout time.Duration) SchedulerVolumeBinder {
|
||||||
|
|
||||||
// TODO: find better way...
|
b := &volumeBinder{
|
||||||
ctrl := &PersistentVolumeController{
|
|
||||||
kubeClient: kubeClient,
|
kubeClient: kubeClient,
|
||||||
classLister: storageClassInformer.Lister(),
|
classLister: storageClassInformer.Lister(),
|
||||||
}
|
|
||||||
|
|
||||||
b := &volumeBinder{
|
|
||||||
ctrl: ctrl,
|
|
||||||
nodeInformer: nodeInformer,
|
nodeInformer: nodeInformer,
|
||||||
pvcCache: NewPVCAssumeCache(pvcInformer.Informer()),
|
pvcCache: NewPVCAssumeCache(pvcInformer.Informer()),
|
||||||
pvCache: NewPVAssumeCache(pvInformer.Informer()),
|
pvCache: NewPVAssumeCache(pvInformer.Informer()),
|
||||||
@@ -291,8 +288,8 @@ func (b *volumeBinder) AssumePodVolumes(assumedPod *v1.Pod, nodeName string) (al
|
|||||||
// Assume PV
|
// Assume PV
|
||||||
newBindings := []*bindingInfo{}
|
newBindings := []*bindingInfo{}
|
||||||
for _, binding := range claimsToBind {
|
for _, binding := range claimsToBind {
|
||||||
newPV, dirty, err := b.ctrl.getBindVolumeToClaim(binding.pv, binding.pvc)
|
newPV, dirty, err := GetBindVolumeToClaim(binding.pv, binding.pvc)
|
||||||
klog.V(5).Infof("AssumePodVolumes: getBindVolumeToClaim for pod %q, PV %q, PVC %q. newPV %p, dirty %v, err: %v",
|
klog.V(5).Infof("AssumePodVolumes: GetBindVolumeToClaim for pod %q, PV %q, PVC %q. newPV %p, dirty %v, err: %v",
|
||||||
podName,
|
podName,
|
||||||
binding.pv.Name,
|
binding.pv.Name,
|
||||||
binding.pvc.Name,
|
binding.pvc.Name,
|
||||||
@@ -411,9 +408,13 @@ func (b *volumeBinder) bindAPIUpdate(podName string, bindings []*bindingInfo, cl
|
|||||||
for _, binding = range bindings {
|
for _, binding = range bindings {
|
||||||
klog.V(5).Infof("bindAPIUpdate: Pod %q, binding PV %q to PVC %q", podName, binding.pv.Name, binding.pvc.Name)
|
klog.V(5).Infof("bindAPIUpdate: Pod %q, binding PV %q to PVC %q", podName, binding.pv.Name, binding.pvc.Name)
|
||||||
// TODO: does it hurt if we make an api call and nothing needs to be updated?
|
// TODO: does it hurt if we make an api call and nothing needs to be updated?
|
||||||
if newPV, err := b.ctrl.updateBindVolumeToClaim(binding.pv, binding.pvc, false); err != nil {
|
claimKey := claimToClaimKey(binding.pvc)
|
||||||
|
klog.V(2).Infof("claim %q bound to volume %q", claimKey, binding.pv.Name)
|
||||||
|
if newPV, err := b.kubeClient.CoreV1().PersistentVolumes().Update(binding.pv); err != nil {
|
||||||
|
klog.V(4).Infof("updating PersistentVolume[%s]: binding to %q failed: %v", binding.pv.Name, claimKey, err)
|
||||||
return err
|
return err
|
||||||
} else {
|
} else {
|
||||||
|
klog.V(4).Infof("updating PersistentVolume[%s]: bound to %q", binding.pv.Name, claimKey)
|
||||||
// Save updated object from apiserver for later checking.
|
// Save updated object from apiserver for later checking.
|
||||||
binding.pv = newPV
|
binding.pv = newPV
|
||||||
}
|
}
|
||||||
@@ -424,7 +425,7 @@ func (b *volumeBinder) bindAPIUpdate(podName string, bindings []*bindingInfo, cl
|
|||||||
// PV controller is expect to signal back by removing related annotations if actual provisioning fails
|
// PV controller is expect to signal back by removing related annotations if actual provisioning fails
|
||||||
for i, claim = range claimsToProvision {
|
for i, claim = range claimsToProvision {
|
||||||
klog.V(5).Infof("bindAPIUpdate: Pod %q, PVC %q", podName, getPVCName(claim))
|
klog.V(5).Infof("bindAPIUpdate: Pod %q, PVC %q", podName, getPVCName(claim))
|
||||||
if newClaim, err := b.ctrl.kubeClient.CoreV1().PersistentVolumeClaims(claim.Namespace).Update(claim); err != nil {
|
if newClaim, err := b.kubeClient.CoreV1().PersistentVolumeClaims(claim.Namespace).Update(claim); err != nil {
|
||||||
return err
|
return err
|
||||||
} else {
|
} else {
|
||||||
// Save updated object from apiserver for later checking.
|
// Save updated object from apiserver for later checking.
|
||||||
@@ -627,7 +628,7 @@ func (b *volumeBinder) getPodVolumes(pod *v1.Pod) (boundClaims []*v1.PersistentV
|
|||||||
if volumeBound {
|
if volumeBound {
|
||||||
boundClaims = append(boundClaims, pvc)
|
boundClaims = append(boundClaims, pvc)
|
||||||
} else {
|
} else {
|
||||||
delayBindingMode, err := b.ctrl.isDelayBindingMode(pvc)
|
delayBindingMode, err := IsDelayBindingMode(pvc, b.classLister)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, nil, err
|
return nil, nil, nil, err
|
||||||
}
|
}
|
||||||
@@ -726,7 +727,7 @@ func (b *volumeBinder) checkVolumeProvisions(pod *v1.Pod, claimsToProvision []*v
|
|||||||
return false, nil, fmt.Errorf("no class for claim %q", pvcName)
|
return false, nil, fmt.Errorf("no class for claim %q", pvcName)
|
||||||
}
|
}
|
||||||
|
|
||||||
class, err := b.ctrl.classLister.Get(className)
|
class, err := b.classLister.Get(className)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return false, nil, fmt.Errorf("failed to find storage class %q", className)
|
return false, nil, fmt.Errorf("failed to find storage class %q", className)
|
||||||
}
|
}
|
||||||
|
|||||||
103
pkg/controller/volume/persistentvolume/util.go
Normal file
103
pkg/controller/volume/persistentvolume/util.go
Normal file
@@ -0,0 +1,103 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2019 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package persistentvolume
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"k8s.io/api/core/v1"
|
||||||
|
storage "k8s.io/api/storage/v1"
|
||||||
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
|
"k8s.io/client-go/kubernetes/scheme"
|
||||||
|
storagelisters "k8s.io/client-go/listers/storage/v1"
|
||||||
|
"k8s.io/client-go/tools/reference"
|
||||||
|
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
|
||||||
|
)
|
||||||
|
|
||||||
|
// IsDelayBindingMode checks if claim is in delay binding mode.
|
||||||
|
func IsDelayBindingMode(claim *v1.PersistentVolumeClaim, classLister storagelisters.StorageClassLister) (bool, error) {
|
||||||
|
className := v1helper.GetPersistentVolumeClaimClass(claim)
|
||||||
|
if className == "" {
|
||||||
|
return false, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
class, err := classLister.Get(className)
|
||||||
|
if err != nil {
|
||||||
|
return false, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if class.VolumeBindingMode == nil {
|
||||||
|
return false, fmt.Errorf("VolumeBindingMode not set for StorageClass %q", className)
|
||||||
|
}
|
||||||
|
|
||||||
|
return *class.VolumeBindingMode == storage.VolumeBindingWaitForFirstConsumer, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetBindVolumeToClaim returns a new volume which is bound to given claim. In
|
||||||
|
// addition, it returns a bool which indicates whether we made modification on
|
||||||
|
// original volume.
|
||||||
|
func GetBindVolumeToClaim(volume *v1.PersistentVolume, claim *v1.PersistentVolumeClaim) (*v1.PersistentVolume, bool, error) {
|
||||||
|
dirty := false
|
||||||
|
|
||||||
|
// Check if the volume was already bound (either by user or by controller)
|
||||||
|
shouldSetBoundByController := false
|
||||||
|
if !IsVolumeBoundToClaim(volume, claim) {
|
||||||
|
shouldSetBoundByController = true
|
||||||
|
}
|
||||||
|
|
||||||
|
// The volume from method args can be pointing to watcher cache. We must not
|
||||||
|
// modify these, therefore create a copy.
|
||||||
|
volumeClone := volume.DeepCopy()
|
||||||
|
|
||||||
|
// Bind the volume to the claim if it is not bound yet
|
||||||
|
if volume.Spec.ClaimRef == nil ||
|
||||||
|
volume.Spec.ClaimRef.Name != claim.Name ||
|
||||||
|
volume.Spec.ClaimRef.Namespace != claim.Namespace ||
|
||||||
|
volume.Spec.ClaimRef.UID != claim.UID {
|
||||||
|
|
||||||
|
claimRef, err := reference.GetReference(scheme.Scheme, claim)
|
||||||
|
if err != nil {
|
||||||
|
return nil, false, fmt.Errorf("Unexpected error getting claim reference: %v", err)
|
||||||
|
}
|
||||||
|
volumeClone.Spec.ClaimRef = claimRef
|
||||||
|
dirty = true
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set annBoundByController if it is not set yet
|
||||||
|
if shouldSetBoundByController && !metav1.HasAnnotation(volumeClone.ObjectMeta, annBoundByController) {
|
||||||
|
metav1.SetMetaDataAnnotation(&volumeClone.ObjectMeta, annBoundByController, "yes")
|
||||||
|
dirty = true
|
||||||
|
}
|
||||||
|
|
||||||
|
return volumeClone, dirty, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsVolumeBoundToClaim returns true, if given volume is pre-bound or bound
|
||||||
|
// to specific claim. Both claim.Name and claim.Namespace must be equal.
|
||||||
|
// If claim.UID is present in volume.Spec.ClaimRef, it must be equal too.
|
||||||
|
func IsVolumeBoundToClaim(volume *v1.PersistentVolume, claim *v1.PersistentVolumeClaim) bool {
|
||||||
|
if volume.Spec.ClaimRef == nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if claim.Name != volume.Spec.ClaimRef.Name || claim.Namespace != volume.Spec.ClaimRef.Namespace {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if volume.Spec.ClaimRef.UID != "" && claim.UID != volume.Spec.ClaimRef.UID {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
@@ -96,6 +96,7 @@ type DeleteOptions struct {
|
|||||||
LabelSelector string
|
LabelSelector string
|
||||||
FieldSelector string
|
FieldSelector string
|
||||||
DeleteAll bool
|
DeleteAll bool
|
||||||
|
DeleteAllNamespaces bool
|
||||||
IgnoreNotFound bool
|
IgnoreNotFound bool
|
||||||
Cascade bool
|
Cascade bool
|
||||||
DeleteNow bool
|
DeleteNow bool
|
||||||
@@ -170,6 +171,7 @@ func (o *DeleteOptions) Complete(f cmdutil.Factory, args []string, cmd *cobra.Co
|
|||||||
LabelSelectorParam(o.LabelSelector).
|
LabelSelectorParam(o.LabelSelector).
|
||||||
FieldSelectorParam(o.FieldSelector).
|
FieldSelectorParam(o.FieldSelector).
|
||||||
SelectAllParam(o.DeleteAll).
|
SelectAllParam(o.DeleteAll).
|
||||||
|
AllNamespaces(o.DeleteAllNamespaces).
|
||||||
ResourceTypeOrNameArgs(false, args...).RequireObject(false).
|
ResourceTypeOrNameArgs(false, args...).RequireObject(false).
|
||||||
Flatten().
|
Flatten().
|
||||||
Do()
|
Do()
|
||||||
|
|||||||
@@ -33,6 +33,7 @@ type DeleteFlags struct {
|
|||||||
FieldSelector *string
|
FieldSelector *string
|
||||||
|
|
||||||
All *bool
|
All *bool
|
||||||
|
AllNamespaces *bool
|
||||||
Cascade *bool
|
Cascade *bool
|
||||||
Force *bool
|
Force *bool
|
||||||
GracePeriod *int
|
GracePeriod *int
|
||||||
@@ -68,6 +69,9 @@ func (f *DeleteFlags) ToOptions(dynamicClient dynamic.Interface, streams generic
|
|||||||
if f.All != nil {
|
if f.All != nil {
|
||||||
options.DeleteAll = *f.All
|
options.DeleteAll = *f.All
|
||||||
}
|
}
|
||||||
|
if f.AllNamespaces != nil {
|
||||||
|
options.DeleteAllNamespaces = *f.AllNamespaces
|
||||||
|
}
|
||||||
if f.Cascade != nil {
|
if f.Cascade != nil {
|
||||||
options.Cascade = *f.Cascade
|
options.Cascade = *f.Cascade
|
||||||
}
|
}
|
||||||
@@ -104,6 +108,9 @@ func (f *DeleteFlags) AddFlags(cmd *cobra.Command) {
|
|||||||
if f.All != nil {
|
if f.All != nil {
|
||||||
cmd.Flags().BoolVar(f.All, "all", *f.All, "Delete all resources, including uninitialized ones, in the namespace of the specified resource types.")
|
cmd.Flags().BoolVar(f.All, "all", *f.All, "Delete all resources, including uninitialized ones, in the namespace of the specified resource types.")
|
||||||
}
|
}
|
||||||
|
if f.AllNamespaces != nil {
|
||||||
|
cmd.Flags().BoolVarP(f.AllNamespaces, "all-namespaces", "A", *f.AllNamespaces, "If present, list the requested object(s) across all namespaces. Namespace in current context is ignored even if specified with --namespace.")
|
||||||
|
}
|
||||||
if f.Force != nil {
|
if f.Force != nil {
|
||||||
cmd.Flags().BoolVar(f.Force, "force", *f.Force, "Only used when grace-period=0. If true, immediately remove resources from API and bypass graceful deletion. Note that immediate deletion of some resources may result in inconsistency or data loss and requires confirmation.")
|
cmd.Flags().BoolVar(f.Force, "force", *f.Force, "Only used when grace-period=0. If true, immediately remove resources from API and bypass graceful deletion. Note that immediate deletion of some resources may result in inconsistency or data loss and requires confirmation.")
|
||||||
}
|
}
|
||||||
@@ -137,6 +144,7 @@ func NewDeleteCommandFlags(usage string) *DeleteFlags {
|
|||||||
|
|
||||||
// setup command defaults
|
// setup command defaults
|
||||||
all := false
|
all := false
|
||||||
|
allNamespaces := false
|
||||||
force := false
|
force := false
|
||||||
ignoreNotFound := false
|
ignoreNotFound := false
|
||||||
now := false
|
now := false
|
||||||
@@ -158,6 +166,7 @@ func NewDeleteCommandFlags(usage string) *DeleteFlags {
|
|||||||
GracePeriod: &gracePeriod,
|
GracePeriod: &gracePeriod,
|
||||||
|
|
||||||
All: &all,
|
All: &all,
|
||||||
|
AllNamespaces: &allNamespaces,
|
||||||
Force: &force,
|
Force: &force,
|
||||||
IgnoreNotFound: &ignoreNotFound,
|
IgnoreNotFound: &ignoreNotFound,
|
||||||
Now: &now,
|
Now: &now,
|
||||||
|
|||||||
@@ -23,6 +23,7 @@ go_library(
|
|||||||
"//vendor/k8s.io/kube-openapi/pkg/util/proto:go_default_library",
|
"//vendor/k8s.io/kube-openapi/pkg/util/proto:go_default_library",
|
||||||
"//vendor/sigs.k8s.io/structured-merge-diff/fieldpath:go_default_library",
|
"//vendor/sigs.k8s.io/structured-merge-diff/fieldpath:go_default_library",
|
||||||
"//vendor/sigs.k8s.io/structured-merge-diff/merge:go_default_library",
|
"//vendor/sigs.k8s.io/structured-merge-diff/merge:go_default_library",
|
||||||
|
"//vendor/sigs.k8s.io/structured-merge-diff/schema:go_default_library",
|
||||||
"//vendor/sigs.k8s.io/structured-merge-diff/typed:go_default_library",
|
"//vendor/sigs.k8s.io/structured-merge-diff/typed:go_default_library",
|
||||||
"//vendor/sigs.k8s.io/structured-merge-diff/value:go_default_library",
|
"//vendor/sigs.k8s.io/structured-merge-diff/value:go_default_library",
|
||||||
"//vendor/sigs.k8s.io/yaml:go_default_library",
|
"//vendor/sigs.k8s.io/yaml:go_default_library",
|
||||||
|
|||||||
@@ -22,6 +22,7 @@ import (
|
|||||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||||
"k8s.io/kube-openapi/pkg/schemaconv"
|
"k8s.io/kube-openapi/pkg/schemaconv"
|
||||||
"k8s.io/kube-openapi/pkg/util/proto"
|
"k8s.io/kube-openapi/pkg/util/proto"
|
||||||
|
smdschema "sigs.k8s.io/structured-merge-diff/schema"
|
||||||
"sigs.k8s.io/structured-merge-diff/typed"
|
"sigs.k8s.io/structured-merge-diff/typed"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -48,6 +49,7 @@ func newGVKParser(models proto.Models) (*gvkParser, error) {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("failed to convert models to schema: %v", err)
|
return nil, fmt.Errorf("failed to convert models to schema: %v", err)
|
||||||
}
|
}
|
||||||
|
typeSchema = makeRawExtensionUntyped(typeSchema)
|
||||||
parser := gvkParser{
|
parser := gvkParser{
|
||||||
gvks: map[schema.GroupVersionKind]string{},
|
gvks: map[schema.GroupVersionKind]string{},
|
||||||
}
|
}
|
||||||
@@ -114,3 +116,20 @@ func parseGroupVersionKind(s proto.Schema) []schema.GroupVersionKind {
|
|||||||
|
|
||||||
return gvkListResult
|
return gvkListResult
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// makeRawExtensionUntyped explicitly sets RawExtension's type in the schema to Untyped atomic
|
||||||
|
// TODO: remove this once kube-openapi is updated to include
|
||||||
|
// https://github.com/kubernetes/kube-openapi/pull/133
|
||||||
|
func makeRawExtensionUntyped(s *smdschema.Schema) *smdschema.Schema {
|
||||||
|
s2 := &smdschema.Schema{}
|
||||||
|
for _, t := range s.Types {
|
||||||
|
t2 := t
|
||||||
|
if t2.Name == "io.k8s.apimachinery.pkg.runtime.RawExtension" {
|
||||||
|
t2.Atom = smdschema.Atom{
|
||||||
|
Untyped: &smdschema.Untyped{},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
s2.Types = append(s2.Types, t2)
|
||||||
|
}
|
||||||
|
return s2
|
||||||
|
}
|
||||||
|
|||||||
40
staging/src/k8s.io/code-generator/Godeps/Godeps.json
generated
40
staging/src/k8s.io/code-generator/Godeps/Godeps.json
generated
@@ -112,11 +112,47 @@
|
|||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/tools/go/ast/astutil",
|
"ImportPath": "golang.org/x/tools/go/ast/astutil",
|
||||||
"Rev": "2382e3994d48b1d22acc2c86bcad0a2aff028e32"
|
"Rev": "7f7074d5bcfd282eb16bc382b0bb3da762461985"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "golang.org/x/tools/go/gcexportdata",
|
||||||
|
"Rev": "7f7074d5bcfd282eb16bc382b0bb3da762461985"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "golang.org/x/tools/go/internal/cgo",
|
||||||
|
"Rev": "7f7074d5bcfd282eb16bc382b0bb3da762461985"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "golang.org/x/tools/go/internal/gcimporter",
|
||||||
|
"Rev": "7f7074d5bcfd282eb16bc382b0bb3da762461985"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "golang.org/x/tools/go/internal/packagesdriver",
|
||||||
|
"Rev": "7f7074d5bcfd282eb16bc382b0bb3da762461985"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "golang.org/x/tools/go/packages",
|
||||||
|
"Rev": "7f7074d5bcfd282eb16bc382b0bb3da762461985"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/tools/imports",
|
"ImportPath": "golang.org/x/tools/imports",
|
||||||
"Rev": "2382e3994d48b1d22acc2c86bcad0a2aff028e32"
|
"Rev": "7f7074d5bcfd282eb16bc382b0bb3da762461985"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "golang.org/x/tools/internal/fastwalk",
|
||||||
|
"Rev": "7f7074d5bcfd282eb16bc382b0bb3da762461985"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "golang.org/x/tools/internal/gopathwalk",
|
||||||
|
"Rev": "7f7074d5bcfd282eb16bc382b0bb3da762461985"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "golang.org/x/tools/internal/module",
|
||||||
|
"Rev": "7f7074d5bcfd282eb16bc382b0bb3da762461985"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "golang.org/x/tools/internal/semver",
|
||||||
|
"Rev": "7f7074d5bcfd282eb16bc382b0bb3da762461985"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "k8s.io/gengo/args",
|
"ImportPath": "k8s.io/gengo/args",
|
||||||
|
|||||||
@@ -40,6 +40,11 @@ func NewAWSElasticBlockStoreCSITranslator() InTreePlugin {
|
|||||||
return &awsElasticBlockStoreCSITranslator{}
|
return &awsElasticBlockStoreCSITranslator{}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// TranslateInTreeStorageClassParametersToCSI translates InTree EBS storage class parameters to CSI storage class
|
||||||
|
func (t *awsElasticBlockStoreCSITranslator) TranslateInTreeStorageClassParametersToCSI(scParameters map[string]string) (map[string]string, error) {
|
||||||
|
return scParameters, nil
|
||||||
|
}
|
||||||
|
|
||||||
// TranslateInTreePVToCSI takes a PV with AWSElasticBlockStore set from in-tree
|
// TranslateInTreePVToCSI takes a PV with AWSElasticBlockStore set from in-tree
|
||||||
// and converts the AWSElasticBlockStore source to a CSIPersistentVolumeSource
|
// and converts the AWSElasticBlockStore source to a CSIPersistentVolumeSource
|
||||||
func (t *awsElasticBlockStoreCSITranslator) TranslateInTreePVToCSI(pv *v1.PersistentVolume) (*v1.PersistentVolume, error) {
|
func (t *awsElasticBlockStoreCSITranslator) TranslateInTreePVToCSI(pv *v1.PersistentVolume) (*v1.PersistentVolume, error) {
|
||||||
|
|||||||
@@ -58,6 +58,11 @@ func NewGCEPersistentDiskCSITranslator() InTreePlugin {
|
|||||||
return &gcePersistentDiskCSITranslator{}
|
return &gcePersistentDiskCSITranslator{}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// TranslateInTreeStorageClassParametersToCSI translates InTree GCE storage class parameters to CSI storage class
|
||||||
|
func (g *gcePersistentDiskCSITranslator) TranslateInTreeStorageClassParametersToCSI(scParameters map[string]string) (map[string]string, error) {
|
||||||
|
return scParameters, nil
|
||||||
|
}
|
||||||
|
|
||||||
// TranslateInTreePVToCSI takes a PV with GCEPersistentDisk set from in-tree
|
// TranslateInTreePVToCSI takes a PV with GCEPersistentDisk set from in-tree
|
||||||
// and converts the GCEPersistentDisk source to a CSIPersistentVolumeSource
|
// and converts the GCEPersistentDisk source to a CSIPersistentVolumeSource
|
||||||
func (g *gcePersistentDiskCSITranslator) TranslateInTreePVToCSI(pv *v1.PersistentVolume) (*v1.PersistentVolume, error) {
|
func (g *gcePersistentDiskCSITranslator) TranslateInTreePVToCSI(pv *v1.PersistentVolume) (*v1.PersistentVolume, error) {
|
||||||
|
|||||||
@@ -20,6 +20,11 @@ import "k8s.io/api/core/v1"
|
|||||||
|
|
||||||
// InTreePlugin handles translations between CSI and in-tree sources in a PV
|
// InTreePlugin handles translations between CSI and in-tree sources in a PV
|
||||||
type InTreePlugin interface {
|
type InTreePlugin interface {
|
||||||
|
|
||||||
|
// TranslateInTreeStorageClassParametersToCSI takes in-tree storage class
|
||||||
|
// parameters and translates them to a set of parameters consumable by CSI plugin
|
||||||
|
TranslateInTreeStorageClassParametersToCSI(scParameters map[string]string) (map[string]string, error)
|
||||||
|
|
||||||
// TranslateInTreePVToCSI takes a persistent volume and will translate
|
// TranslateInTreePVToCSI takes a persistent volume and will translate
|
||||||
// the in-tree source to a CSI Source. The input persistent volume can be modified
|
// the in-tree source to a CSI Source. The input persistent volume can be modified
|
||||||
TranslateInTreePVToCSI(pv *v1.PersistentVolume) (*v1.PersistentVolume, error)
|
TranslateInTreePVToCSI(pv *v1.PersistentVolume) (*v1.PersistentVolume, error)
|
||||||
|
|||||||
@@ -38,6 +38,11 @@ func NewOpenStackCinderCSITranslator() InTreePlugin {
|
|||||||
return &osCinderCSITranslator{}
|
return &osCinderCSITranslator{}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// TranslateInTreeStorageClassParametersToCSI translates InTree Cinder storage class parameters to CSI storage class
|
||||||
|
func (t *osCinderCSITranslator) TranslateInTreeStorageClassParametersToCSI(scParameters map[string]string) (map[string]string, error) {
|
||||||
|
return scParameters, nil
|
||||||
|
}
|
||||||
|
|
||||||
// TranslateInTreePVToCSI takes a PV with Cinder set from in-tree
|
// TranslateInTreePVToCSI takes a PV with Cinder set from in-tree
|
||||||
// and converts the Cinder source to a CSIPersistentVolumeSource
|
// and converts the Cinder source to a CSIPersistentVolumeSource
|
||||||
func (t *osCinderCSITranslator) TranslateInTreePVToCSI(pv *v1.PersistentVolume) (*v1.PersistentVolume, error) {
|
func (t *osCinderCSITranslator) TranslateInTreePVToCSI(pv *v1.PersistentVolume) (*v1.PersistentVolume, error) {
|
||||||
|
|||||||
@@ -31,6 +31,17 @@ var (
|
|||||||
}
|
}
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// TranslateInTreeStorageClassParametersToCSI takes in-tree storage class
|
||||||
|
// parameters and translates them to a set of parameters consumable by CSI plugin
|
||||||
|
func TranslateInTreeStorageClassParametersToCSI(inTreePluginName string, scParameters map[string]string) (map[string]string, error) {
|
||||||
|
for _, curPlugin := range inTreePlugins {
|
||||||
|
if inTreePluginName == curPlugin.GetInTreePluginName() {
|
||||||
|
return curPlugin.TranslateInTreeStorageClassParametersToCSI(scParameters)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil, fmt.Errorf("could not find in-tree storage class parameter translation logic for %#v", inTreePluginName)
|
||||||
|
}
|
||||||
|
|
||||||
// TranslateInTreePVToCSI takes a persistent volume and will translate
|
// TranslateInTreePVToCSI takes a persistent volume and will translate
|
||||||
// the in-tree source to a CSI Source if the translation logic
|
// the in-tree source to a CSI Source if the translation logic
|
||||||
// has been implemented. The input persistent volume will not
|
// has been implemented. The input persistent volume will not
|
||||||
|
|||||||
45
test/cmd/delete.sh
Executable file
45
test/cmd/delete.sh
Executable file
@@ -0,0 +1,45 @@
|
|||||||
|
#!/usr/bin/env bash
|
||||||
|
|
||||||
|
# Copyright 2018 The Kubernetes Authors.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
|
||||||
|
set -o errexit
|
||||||
|
set -o nounset
|
||||||
|
set -o pipefail
|
||||||
|
|
||||||
|
# Runs tests related to kubectl delete --all-namespaces.
|
||||||
|
run_kubectl_delete_allnamespaces_tests() {
|
||||||
|
set -o nounset
|
||||||
|
set -o errexit
|
||||||
|
|
||||||
|
ns_one="namespace-$(date +%s)-${RANDOM}"
|
||||||
|
ns_two="namespace-$(date +%s)-${RANDOM}"
|
||||||
|
kubectl create namespace "${ns_one}"
|
||||||
|
kubectl create namespace "${ns_two}"
|
||||||
|
|
||||||
|
kubectl create configmap "one" --namespace="${ns_one}"
|
||||||
|
kubectl create configmap "two" --namespace="${ns_two}"
|
||||||
|
kubectl label configmap "one" --namespace="${ns_one}" deletetest=true
|
||||||
|
kubectl label configmap "two" --namespace="${ns_two}" deletetest=true
|
||||||
|
kubectl delete configmap -l deletetest=true --all-namespaces
|
||||||
|
|
||||||
|
# no configmaps should be in either of those namespaces
|
||||||
|
kubectl config set-context "${CONTEXT}" --namespace="${ns_one}"
|
||||||
|
kube::test::get_object_assert configmap "{{range.items}}{{$id_field}}:{{end}}" ''
|
||||||
|
kubectl config set-context "${CONTEXT}" --namespace="${ns_two}"
|
||||||
|
kube::test::get_object_assert configmap "{{range.items}}{{$id_field}}:{{end}}" ''
|
||||||
|
|
||||||
|
set +o nounset
|
||||||
|
set +o errexit
|
||||||
|
}
|
||||||
@@ -35,6 +35,7 @@ source "${KUBE_ROOT}/test/cmd/certificate.sh"
|
|||||||
source "${KUBE_ROOT}/test/cmd/core.sh"
|
source "${KUBE_ROOT}/test/cmd/core.sh"
|
||||||
source "${KUBE_ROOT}/test/cmd/crd.sh"
|
source "${KUBE_ROOT}/test/cmd/crd.sh"
|
||||||
source "${KUBE_ROOT}/test/cmd/create.sh"
|
source "${KUBE_ROOT}/test/cmd/create.sh"
|
||||||
|
source "${KUBE_ROOT}/test/cmd/delete.sh"
|
||||||
source "${KUBE_ROOT}/test/cmd/diff.sh"
|
source "${KUBE_ROOT}/test/cmd/diff.sh"
|
||||||
source "${KUBE_ROOT}/test/cmd/discovery.sh"
|
source "${KUBE_ROOT}/test/cmd/discovery.sh"
|
||||||
source "${KUBE_ROOT}/test/cmd/generic-resources.sh"
|
source "${KUBE_ROOT}/test/cmd/generic-resources.sh"
|
||||||
@@ -499,6 +500,13 @@ runTests() {
|
|||||||
record_command run_create_secret_tests
|
record_command run_create_secret_tests
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
######################
|
||||||
|
# Delete #
|
||||||
|
######################
|
||||||
|
if kube::test::if_supports_resource "${configmaps}" ; then
|
||||||
|
record_command run_kubectl_delete_allnamespaces_tests
|
||||||
|
fi
|
||||||
|
|
||||||
##################
|
##################
|
||||||
# Global timeout #
|
# Global timeout #
|
||||||
##################
|
##################
|
||||||
|
|||||||
11
vendor/BUILD
vendored
11
vendor/BUILD
vendored
@@ -69,6 +69,7 @@ filegroup(
|
|||||||
"//vendor/github.com/bazelbuild/buildtools/tables:all-srcs",
|
"//vendor/github.com/bazelbuild/buildtools/tables:all-srcs",
|
||||||
"//vendor/github.com/beorn7/perks/quantile:all-srcs",
|
"//vendor/github.com/beorn7/perks/quantile:all-srcs",
|
||||||
"//vendor/github.com/blang/semver:all-srcs",
|
"//vendor/github.com/blang/semver:all-srcs",
|
||||||
|
"//vendor/github.com/cespare/prettybench:all-srcs",
|
||||||
"//vendor/github.com/chai2010/gettext-go/gettext:all-srcs",
|
"//vendor/github.com/chai2010/gettext-go/gettext:all-srcs",
|
||||||
"//vendor/github.com/client9/misspell:all-srcs",
|
"//vendor/github.com/client9/misspell:all-srcs",
|
||||||
"//vendor/github.com/cloudflare/cfssl/api:all-srcs",
|
"//vendor/github.com/cloudflare/cfssl/api:all-srcs",
|
||||||
@@ -285,6 +286,7 @@ filegroup(
|
|||||||
"//vendor/github.com/jmoiron/sqlx:all-srcs",
|
"//vendor/github.com/jmoiron/sqlx:all-srcs",
|
||||||
"//vendor/github.com/jonboulle/clockwork:all-srcs",
|
"//vendor/github.com/jonboulle/clockwork:all-srcs",
|
||||||
"//vendor/github.com/json-iterator/go:all-srcs",
|
"//vendor/github.com/json-iterator/go:all-srcs",
|
||||||
|
"//vendor/github.com/jstemmer/go-junit-report:all-srcs",
|
||||||
"//vendor/github.com/jteeuwen/go-bindata:all-srcs",
|
"//vendor/github.com/jteeuwen/go-bindata:all-srcs",
|
||||||
"//vendor/github.com/kardianos/osext:all-srcs",
|
"//vendor/github.com/kardianos/osext:all-srcs",
|
||||||
"//vendor/github.com/karrick/godirwalk:all-srcs",
|
"//vendor/github.com/karrick/godirwalk:all-srcs",
|
||||||
@@ -419,9 +421,16 @@ filegroup(
|
|||||||
"//vendor/golang.org/x/tools/container/intsets:all-srcs",
|
"//vendor/golang.org/x/tools/container/intsets:all-srcs",
|
||||||
"//vendor/golang.org/x/tools/go/ast/astutil:all-srcs",
|
"//vendor/golang.org/x/tools/go/ast/astutil:all-srcs",
|
||||||
"//vendor/golang.org/x/tools/go/gcexportdata:all-srcs",
|
"//vendor/golang.org/x/tools/go/gcexportdata:all-srcs",
|
||||||
"//vendor/golang.org/x/tools/go/gcimporter15:all-srcs",
|
"//vendor/golang.org/x/tools/go/internal/cgo:all-srcs",
|
||||||
|
"//vendor/golang.org/x/tools/go/internal/gcimporter:all-srcs",
|
||||||
|
"//vendor/golang.org/x/tools/go/internal/packagesdriver:all-srcs",
|
||||||
|
"//vendor/golang.org/x/tools/go/packages:all-srcs",
|
||||||
"//vendor/golang.org/x/tools/go/vcs:all-srcs",
|
"//vendor/golang.org/x/tools/go/vcs:all-srcs",
|
||||||
"//vendor/golang.org/x/tools/imports:all-srcs",
|
"//vendor/golang.org/x/tools/imports:all-srcs",
|
||||||
|
"//vendor/golang.org/x/tools/internal/fastwalk:all-srcs",
|
||||||
|
"//vendor/golang.org/x/tools/internal/gopathwalk:all-srcs",
|
||||||
|
"//vendor/golang.org/x/tools/internal/module:all-srcs",
|
||||||
|
"//vendor/golang.org/x/tools/internal/semver:all-srcs",
|
||||||
"//vendor/gonum.org/v1/gonum/blas:all-srcs",
|
"//vendor/gonum.org/v1/gonum/blas:all-srcs",
|
||||||
"//vendor/gonum.org/v1/gonum/floats:all-srcs",
|
"//vendor/gonum.org/v1/gonum/floats:all-srcs",
|
||||||
"//vendor/gonum.org/v1/gonum/graph:all-srcs",
|
"//vendor/gonum.org/v1/gonum/graph:all-srcs",
|
||||||
|
|||||||
1
vendor/github.com/cespare/prettybench/.gitignore
generated
vendored
Normal file
1
vendor/github.com/cespare/prettybench/.gitignore
generated
vendored
Normal file
@@ -0,0 +1 @@
|
|||||||
|
/prettybench
|
||||||
30
vendor/github.com/cespare/prettybench/BUILD
generated
vendored
Normal file
30
vendor/github.com/cespare/prettybench/BUILD
generated
vendored
Normal file
@@ -0,0 +1,30 @@
|
|||||||
|
load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library")
|
||||||
|
|
||||||
|
go_library(
|
||||||
|
name = "go_default_library",
|
||||||
|
srcs = ["prettybench.go"],
|
||||||
|
importmap = "k8s.io/kubernetes/vendor/github.com/cespare/prettybench",
|
||||||
|
importpath = "github.com/cespare/prettybench",
|
||||||
|
visibility = ["//visibility:private"],
|
||||||
|
deps = ["//vendor/golang.org/x/tools/benchmark/parse:go_default_library"],
|
||||||
|
)
|
||||||
|
|
||||||
|
go_binary(
|
||||||
|
name = "prettybench",
|
||||||
|
embed = [":go_default_library"],
|
||||||
|
visibility = ["//visibility:public"],
|
||||||
|
)
|
||||||
|
|
||||||
|
filegroup(
|
||||||
|
name = "package-srcs",
|
||||||
|
srcs = glob(["**"]),
|
||||||
|
tags = ["automanaged"],
|
||||||
|
visibility = ["//visibility:private"],
|
||||||
|
)
|
||||||
|
|
||||||
|
filegroup(
|
||||||
|
name = "all-srcs",
|
||||||
|
srcs = [":package-srcs"],
|
||||||
|
tags = ["automanaged"],
|
||||||
|
visibility = ["//visibility:public"],
|
||||||
|
)
|
||||||
22
vendor/github.com/cespare/prettybench/LICENSE.txt
generated
vendored
Normal file
22
vendor/github.com/cespare/prettybench/LICENSE.txt
generated
vendored
Normal file
@@ -0,0 +1,22 @@
|
|||||||
|
Copyright (c) 2014 Caleb Spare
|
||||||
|
|
||||||
|
MIT License
|
||||||
|
|
||||||
|
Permission is hereby granted, free of charge, to any person obtaining
|
||||||
|
a copy of this software and associated documentation files (the
|
||||||
|
"Software"), to deal in the Software without restriction, including
|
||||||
|
without limitation the rights to use, copy, modify, merge, publish,
|
||||||
|
distribute, sublicense, and/or sell copies of the Software, and to
|
||||||
|
permit persons to whom the Software is furnished to do so, subject to
|
||||||
|
the following conditions:
|
||||||
|
|
||||||
|
The above copyright notice and this permission notice shall be
|
||||||
|
included in all copies or substantial portions of the Software.
|
||||||
|
|
||||||
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||||
|
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||||
|
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||||
|
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
|
||||||
|
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
||||||
|
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
|
||||||
|
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||||
36
vendor/github.com/cespare/prettybench/README.md
generated
vendored
Normal file
36
vendor/github.com/cespare/prettybench/README.md
generated
vendored
Normal file
@@ -0,0 +1,36 @@
|
|||||||
|
# Prettybench
|
||||||
|
|
||||||
|
A tool for transforming `go test`'s benchmark output a bit to make it nicer for humans.
|
||||||
|
|
||||||
|
## Problem
|
||||||
|
|
||||||
|
Go benchmarks are great, particularly when used in concert with benchcmp. But the output can be a bit hard to
|
||||||
|
read:
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
## Solution
|
||||||
|
|
||||||
|
$ go get github.com/cespare/prettybench
|
||||||
|
$ go test -bench=. | prettybench
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
* Column headers
|
||||||
|
* Columns are aligned
|
||||||
|
* Time output is adjusted to convenient units
|
||||||
|
|
||||||
|
## Notes
|
||||||
|
|
||||||
|
* Right now the units for the time are chosen based on the smallest value in the column.
|
||||||
|
* Prettybench has to buffer all the rows of output before it can print them (for column formatting), so you
|
||||||
|
won't see intermediate progress. If you want to see that too, you could tee your output so that you see the
|
||||||
|
unmodified version as well. If you do this, you'll want to use the prettybench's `-no-passthrough` flag so
|
||||||
|
it doesn't print all the other lines (because then they'd be printed twice):
|
||||||
|
|
||||||
|
$ go test -bench=. | tee >(prettybench -no-passthrough)
|
||||||
|
|
||||||
|
## To Do (maybe)
|
||||||
|
|
||||||
|
* Handle benchcmp output
|
||||||
|
* Change the units for non-time columns as well (these are generally OK though).
|
||||||
198
vendor/github.com/cespare/prettybench/prettybench.go
generated
vendored
Normal file
198
vendor/github.com/cespare/prettybench/prettybench.go
generated
vendored
Normal file
@@ -0,0 +1,198 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"bytes"
|
||||||
|
"errors"
|
||||||
|
"flag"
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"regexp"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
bench "golang.org/x/tools/benchmark/parse"
|
||||||
|
)
|
||||||
|
|
||||||
|
var noPassthrough = flag.Bool("no-passthrough", false, "Don't print non-benchmark lines")
|
||||||
|
|
||||||
|
type BenchOutputGroup struct {
|
||||||
|
Lines []*bench.Benchmark
|
||||||
|
// Columns which are in use
|
||||||
|
Measured int
|
||||||
|
}
|
||||||
|
|
||||||
|
type Table struct {
|
||||||
|
MaxLengths []int
|
||||||
|
Cells [][]string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (g *BenchOutputGroup) String() string {
|
||||||
|
if len(g.Lines) == 0 {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
columnNames := []string{"benchmark", "iter", "time/iter"}
|
||||||
|
if (g.Measured & bench.MBPerS) > 0 {
|
||||||
|
columnNames = append(columnNames, "throughput")
|
||||||
|
}
|
||||||
|
if (g.Measured & bench.AllocedBytesPerOp) > 0 {
|
||||||
|
columnNames = append(columnNames, "bytes alloc")
|
||||||
|
}
|
||||||
|
if (g.Measured & bench.AllocsPerOp) > 0 {
|
||||||
|
columnNames = append(columnNames, "allocs")
|
||||||
|
}
|
||||||
|
table := &Table{Cells: [][]string{columnNames}}
|
||||||
|
|
||||||
|
var underlines []string
|
||||||
|
for _, name := range columnNames {
|
||||||
|
underlines = append(underlines, strings.Repeat("-", len(name)))
|
||||||
|
}
|
||||||
|
table.Cells = append(table.Cells, underlines)
|
||||||
|
timeFormatFunc := g.TimeFormatFunc()
|
||||||
|
|
||||||
|
for _, line := range g.Lines {
|
||||||
|
row := []string{line.Name, FormatIterations(line.N), timeFormatFunc(line.NsPerOp)}
|
||||||
|
if (g.Measured & bench.MBPerS) > 0 {
|
||||||
|
row = append(row, FormatMegaBytesPerSecond(line))
|
||||||
|
}
|
||||||
|
if (g.Measured & bench.AllocedBytesPerOp) > 0 {
|
||||||
|
row = append(row, FormatBytesAllocPerOp(line))
|
||||||
|
}
|
||||||
|
if (g.Measured & bench.AllocsPerOp) > 0 {
|
||||||
|
row = append(row, FormatAllocsPerOp(line))
|
||||||
|
}
|
||||||
|
table.Cells = append(table.Cells, row)
|
||||||
|
}
|
||||||
|
for i := range columnNames {
|
||||||
|
maxLength := 0
|
||||||
|
for _, row := range table.Cells {
|
||||||
|
if len(row[i]) > maxLength {
|
||||||
|
maxLength = len(row[i])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
table.MaxLengths = append(table.MaxLengths, maxLength)
|
||||||
|
}
|
||||||
|
var buf bytes.Buffer
|
||||||
|
for _, row := range table.Cells {
|
||||||
|
for i, cell := range row {
|
||||||
|
var format string
|
||||||
|
switch i {
|
||||||
|
case 0:
|
||||||
|
format = "%%-%ds "
|
||||||
|
case len(row) - 1:
|
||||||
|
format = "%%%ds"
|
||||||
|
default:
|
||||||
|
format = "%%%ds "
|
||||||
|
}
|
||||||
|
fmt.Fprintf(&buf, fmt.Sprintf(format, table.MaxLengths[i]), cell)
|
||||||
|
}
|
||||||
|
fmt.Fprint(&buf, "\n")
|
||||||
|
}
|
||||||
|
return buf.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
func FormatIterations(iter int) string {
|
||||||
|
return strconv.FormatInt(int64(iter), 10)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (g *BenchOutputGroup) TimeFormatFunc() func(float64) string {
|
||||||
|
// Find the smallest time
|
||||||
|
smallest := g.Lines[0].NsPerOp
|
||||||
|
for _, line := range g.Lines[1:] {
|
||||||
|
if line.NsPerOp < smallest {
|
||||||
|
smallest = line.NsPerOp
|
||||||
|
}
|
||||||
|
}
|
||||||
|
switch {
|
||||||
|
case smallest < float64(10000*time.Nanosecond):
|
||||||
|
return func(ns float64) string {
|
||||||
|
return fmt.Sprintf("%.2f ns/op", ns)
|
||||||
|
}
|
||||||
|
case smallest < float64(time.Millisecond):
|
||||||
|
return func(ns float64) string {
|
||||||
|
return fmt.Sprintf("%.2f μs/op", ns/1000)
|
||||||
|
}
|
||||||
|
case smallest < float64(10*time.Second):
|
||||||
|
return func(ns float64) string {
|
||||||
|
return fmt.Sprintf("%.2f ms/op", (ns / 1e6))
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
return func(ns float64) string {
|
||||||
|
return fmt.Sprintf("%.2f s/op", ns/1e9)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func FormatMegaBytesPerSecond(l *bench.Benchmark) string {
|
||||||
|
if (l.Measured & bench.MBPerS) == 0 {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
return fmt.Sprintf("%.2f MB/s", l.MBPerS)
|
||||||
|
}
|
||||||
|
|
||||||
|
func FormatBytesAllocPerOp(l *bench.Benchmark) string {
|
||||||
|
if (l.Measured & bench.AllocedBytesPerOp) == 0 {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
return fmt.Sprintf("%d B/op", l.AllocedBytesPerOp)
|
||||||
|
}
|
||||||
|
|
||||||
|
func FormatAllocsPerOp(l *bench.Benchmark) string {
|
||||||
|
if (l.Measured & bench.AllocsPerOp) == 0 {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
return fmt.Sprintf("%d allocs/op", l.AllocsPerOp)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (g *BenchOutputGroup) AddLine(line *bench.Benchmark) {
|
||||||
|
g.Lines = append(g.Lines, line)
|
||||||
|
g.Measured |= line.Measured
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
benchLineMatcher = regexp.MustCompile(`^Benchmark.*\t.*\d+`)
|
||||||
|
okLineMatcher = regexp.MustCompile(`^ok\s`)
|
||||||
|
notBenchLineErr = errors.New("Not a bench line")
|
||||||
|
)
|
||||||
|
|
||||||
|
func ParseLine(line string) (*bench.Benchmark, error) {
|
||||||
|
if !benchLineMatcher.MatchString(line) {
|
||||||
|
return nil, notBenchLineErr
|
||||||
|
}
|
||||||
|
fields := strings.Split(line, "\t")
|
||||||
|
if len(fields) < 3 {
|
||||||
|
return nil, notBenchLineErr
|
||||||
|
}
|
||||||
|
|
||||||
|
return bench.ParseLine(line)
|
||||||
|
}
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
flag.Parse()
|
||||||
|
currentBenchmark := &BenchOutputGroup{}
|
||||||
|
scanner := bufio.NewScanner(os.Stdin)
|
||||||
|
for scanner.Scan() {
|
||||||
|
text := scanner.Text()
|
||||||
|
line, err := ParseLine(text)
|
||||||
|
switch err {
|
||||||
|
case notBenchLineErr:
|
||||||
|
if okLineMatcher.MatchString(text) {
|
||||||
|
fmt.Print(currentBenchmark)
|
||||||
|
currentBenchmark = &BenchOutputGroup{}
|
||||||
|
}
|
||||||
|
if !*noPassthrough {
|
||||||
|
fmt.Println(text)
|
||||||
|
}
|
||||||
|
case nil:
|
||||||
|
currentBenchmark.AddLine(line)
|
||||||
|
default:
|
||||||
|
fmt.Fprintln(os.Stderr, "prettybench unrecognized line:")
|
||||||
|
fmt.Println(text)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if err := scanner.Err(); err != nil {
|
||||||
|
fmt.Println(err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
}
|
||||||
1
vendor/github.com/jstemmer/go-junit-report/.gitignore
generated
vendored
Normal file
1
vendor/github.com/jstemmer/go-junit-report/.gitignore
generated
vendored
Normal file
@@ -0,0 +1 @@
|
|||||||
|
go-junit-report
|
||||||
13
vendor/github.com/jstemmer/go-junit-report/.travis.yml
generated
vendored
Normal file
13
vendor/github.com/jstemmer/go-junit-report/.travis.yml
generated
vendored
Normal file
@@ -0,0 +1,13 @@
|
|||||||
|
language: go
|
||||||
|
|
||||||
|
go:
|
||||||
|
- tip
|
||||||
|
- "1.10.x"
|
||||||
|
- "1.9.x"
|
||||||
|
- "1.8.x"
|
||||||
|
- "1.7.x"
|
||||||
|
- "1.6.x"
|
||||||
|
- "1.5.x"
|
||||||
|
- "1.4.x"
|
||||||
|
- "1.3.x"
|
||||||
|
- "1.2.x"
|
||||||
37
vendor/github.com/jstemmer/go-junit-report/BUILD
generated
vendored
Normal file
37
vendor/github.com/jstemmer/go-junit-report/BUILD
generated
vendored
Normal file
@@ -0,0 +1,37 @@
|
|||||||
|
load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library")
|
||||||
|
|
||||||
|
go_library(
|
||||||
|
name = "go_default_library",
|
||||||
|
srcs = ["go-junit-report.go"],
|
||||||
|
importmap = "k8s.io/kubernetes/vendor/github.com/jstemmer/go-junit-report",
|
||||||
|
importpath = "github.com/jstemmer/go-junit-report",
|
||||||
|
visibility = ["//visibility:private"],
|
||||||
|
deps = [
|
||||||
|
"//vendor/github.com/jstemmer/go-junit-report/formatter:go_default_library",
|
||||||
|
"//vendor/github.com/jstemmer/go-junit-report/parser:go_default_library",
|
||||||
|
],
|
||||||
|
)
|
||||||
|
|
||||||
|
go_binary(
|
||||||
|
name = "go-junit-report",
|
||||||
|
embed = [":go_default_library"],
|
||||||
|
visibility = ["//visibility:public"],
|
||||||
|
)
|
||||||
|
|
||||||
|
filegroup(
|
||||||
|
name = "package-srcs",
|
||||||
|
srcs = glob(["**"]),
|
||||||
|
tags = ["automanaged"],
|
||||||
|
visibility = ["//visibility:private"],
|
||||||
|
)
|
||||||
|
|
||||||
|
filegroup(
|
||||||
|
name = "all-srcs",
|
||||||
|
srcs = [
|
||||||
|
":package-srcs",
|
||||||
|
"//vendor/github.com/jstemmer/go-junit-report/formatter:all-srcs",
|
||||||
|
"//vendor/github.com/jstemmer/go-junit-report/parser:all-srcs",
|
||||||
|
],
|
||||||
|
tags = ["automanaged"],
|
||||||
|
visibility = ["//visibility:public"],
|
||||||
|
)
|
||||||
20
vendor/github.com/jstemmer/go-junit-report/LICENSE
generated
vendored
Normal file
20
vendor/github.com/jstemmer/go-junit-report/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,20 @@
|
|||||||
|
Copyright (c) 2012 Joel Stemmer
|
||||||
|
|
||||||
|
Permission is hereby granted, free of charge, to any person obtaining
|
||||||
|
a copy of this software and associated documentation files (the
|
||||||
|
"Software"), to deal in the Software without restriction, including
|
||||||
|
without limitation the rights to use, copy, modify, merge, publish,
|
||||||
|
distribute, sublicense, and/or sell copies of the Software, and to
|
||||||
|
permit persons to whom the Software is furnished to do so, subject to
|
||||||
|
the following conditions:
|
||||||
|
|
||||||
|
The above copyright notice and this permission notice shall be
|
||||||
|
included in all copies or substantial portions of the Software.
|
||||||
|
|
||||||
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||||
|
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||||
|
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||||
|
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
|
||||||
|
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
||||||
|
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
|
||||||
|
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||||
46
vendor/github.com/jstemmer/go-junit-report/README.md
generated
vendored
Normal file
46
vendor/github.com/jstemmer/go-junit-report/README.md
generated
vendored
Normal file
@@ -0,0 +1,46 @@
|
|||||||
|
# go-junit-report
|
||||||
|
|
||||||
|
Converts `go test` output to an xml report, suitable for applications that
|
||||||
|
expect junit xml reports (e.g. [Jenkins](http://jenkins-ci.org)).
|
||||||
|
|
||||||
|
[![Build Status][travis-badge]][travis-link]
|
||||||
|
[![Report Card][report-badge]][report-link]
|
||||||
|
|
||||||
|
## Installation
|
||||||
|
|
||||||
|
Go version 1.1 or higher is required. Install or update using the `go get`
|
||||||
|
command:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
go get -u github.com/jstemmer/go-junit-report
|
||||||
|
```
|
||||||
|
|
||||||
|
## Contribution
|
||||||
|
|
||||||
|
Create an Issue and discuss the fix or feature, then fork the package.
|
||||||
|
Clone to github.com/jstemmer/go-junit-report. This is necessary because go import uses this path.
|
||||||
|
Fix or implement feature. Test and then commit change.
|
||||||
|
Specify #Issue and describe change in the commit message.
|
||||||
|
Create Pull Request. It can be merged by owner or administrator then.
|
||||||
|
|
||||||
|
## Run Tests
|
||||||
|
go test
|
||||||
|
|
||||||
|
## Usage
|
||||||
|
|
||||||
|
go-junit-report reads the `go test` verbose output from standard in and writes
|
||||||
|
junit compatible XML to standard out.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
go test -v 2>&1 | go-junit-report > report.xml
|
||||||
|
```
|
||||||
|
|
||||||
|
Note that it also can parse benchmark output with `-bench` flag:
|
||||||
|
```bash
|
||||||
|
go test -v -bench . -count 5 2>&1 | go-junit-report > report.xml
|
||||||
|
```
|
||||||
|
|
||||||
|
[travis-badge]: https://travis-ci.org/jstemmer/go-junit-report.svg
|
||||||
|
[travis-link]: https://travis-ci.org/jstemmer/go-junit-report
|
||||||
|
[report-badge]: https://goreportcard.com/badge/github.com/jstemmer/go-junit-report
|
||||||
|
[report-link]: https://goreportcard.com/report/github.com/jstemmer/go-junit-report
|
||||||
24
vendor/github.com/jstemmer/go-junit-report/formatter/BUILD
generated
vendored
Normal file
24
vendor/github.com/jstemmer/go-junit-report/formatter/BUILD
generated
vendored
Normal file
@@ -0,0 +1,24 @@
|
|||||||
|
load("@io_bazel_rules_go//go:def.bzl", "go_library")
|
||||||
|
|
||||||
|
go_library(
|
||||||
|
name = "go_default_library",
|
||||||
|
srcs = ["formatter.go"],
|
||||||
|
importmap = "k8s.io/kubernetes/vendor/github.com/jstemmer/go-junit-report/formatter",
|
||||||
|
importpath = "github.com/jstemmer/go-junit-report/formatter",
|
||||||
|
visibility = ["//visibility:public"],
|
||||||
|
deps = ["//vendor/github.com/jstemmer/go-junit-report/parser:go_default_library"],
|
||||||
|
)
|
||||||
|
|
||||||
|
filegroup(
|
||||||
|
name = "package-srcs",
|
||||||
|
srcs = glob(["**"]),
|
||||||
|
tags = ["automanaged"],
|
||||||
|
visibility = ["//visibility:private"],
|
||||||
|
)
|
||||||
|
|
||||||
|
filegroup(
|
||||||
|
name = "all-srcs",
|
||||||
|
srcs = [":package-srcs"],
|
||||||
|
tags = ["automanaged"],
|
||||||
|
visibility = ["//visibility:public"],
|
||||||
|
)
|
||||||
182
vendor/github.com/jstemmer/go-junit-report/formatter/formatter.go
generated
vendored
Normal file
182
vendor/github.com/jstemmer/go-junit-report/formatter/formatter.go
generated
vendored
Normal file
@@ -0,0 +1,182 @@
|
|||||||
|
package formatter
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"encoding/xml"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"runtime"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/jstemmer/go-junit-report/parser"
|
||||||
|
)
|
||||||
|
|
||||||
|
// JUnitTestSuites is a collection of JUnit test suites.
|
||||||
|
type JUnitTestSuites struct {
|
||||||
|
XMLName xml.Name `xml:"testsuites"`
|
||||||
|
Suites []JUnitTestSuite
|
||||||
|
}
|
||||||
|
|
||||||
|
// JUnitTestSuite is a single JUnit test suite which may contain many
|
||||||
|
// testcases.
|
||||||
|
type JUnitTestSuite struct {
|
||||||
|
XMLName xml.Name `xml:"testsuite"`
|
||||||
|
Tests int `xml:"tests,attr"`
|
||||||
|
Failures int `xml:"failures,attr"`
|
||||||
|
Time string `xml:"time,attr"`
|
||||||
|
Name string `xml:"name,attr"`
|
||||||
|
Properties []JUnitProperty `xml:"properties>property,omitempty"`
|
||||||
|
TestCases []JUnitTestCase
|
||||||
|
}
|
||||||
|
|
||||||
|
// JUnitTestCase is a single test case with its result.
|
||||||
|
type JUnitTestCase struct {
|
||||||
|
XMLName xml.Name `xml:"testcase"`
|
||||||
|
Classname string `xml:"classname,attr"`
|
||||||
|
Name string `xml:"name,attr"`
|
||||||
|
Time string `xml:"time,attr"`
|
||||||
|
SkipMessage *JUnitSkipMessage `xml:"skipped,omitempty"`
|
||||||
|
Failure *JUnitFailure `xml:"failure,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// JUnitSkipMessage contains the reason why a testcase was skipped.
|
||||||
|
type JUnitSkipMessage struct {
|
||||||
|
Message string `xml:"message,attr"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// JUnitProperty represents a key/value pair used to define properties.
|
||||||
|
type JUnitProperty struct {
|
||||||
|
Name string `xml:"name,attr"`
|
||||||
|
Value string `xml:"value,attr"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// JUnitFailure contains data related to a failed test.
|
||||||
|
type JUnitFailure struct {
|
||||||
|
Message string `xml:"message,attr"`
|
||||||
|
Type string `xml:"type,attr"`
|
||||||
|
Contents string `xml:",chardata"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// JUnitReportXML writes a JUnit xml representation of the given report to w
|
||||||
|
// in the format described at http://windyroad.org/dl/Open%20Source/JUnit.xsd
|
||||||
|
func JUnitReportXML(report *parser.Report, noXMLHeader bool, goVersion string, w io.Writer) error {
|
||||||
|
suites := JUnitTestSuites{}
|
||||||
|
|
||||||
|
// convert Report to JUnit test suites
|
||||||
|
for _, pkg := range report.Packages {
|
||||||
|
pkg.Benchmarks = mergeBenchmarks(pkg.Benchmarks)
|
||||||
|
ts := JUnitTestSuite{
|
||||||
|
Tests: len(pkg.Tests) + len(pkg.Benchmarks),
|
||||||
|
Failures: 0,
|
||||||
|
Time: formatTime(pkg.Duration),
|
||||||
|
Name: pkg.Name,
|
||||||
|
Properties: []JUnitProperty{},
|
||||||
|
TestCases: []JUnitTestCase{},
|
||||||
|
}
|
||||||
|
|
||||||
|
classname := pkg.Name
|
||||||
|
if idx := strings.LastIndex(classname, "/"); idx > -1 && idx < len(pkg.Name) {
|
||||||
|
classname = pkg.Name[idx+1:]
|
||||||
|
}
|
||||||
|
|
||||||
|
// properties
|
||||||
|
if goVersion == "" {
|
||||||
|
// if goVersion was not specified as a flag, fall back to version reported by runtime
|
||||||
|
goVersion = runtime.Version()
|
||||||
|
}
|
||||||
|
ts.Properties = append(ts.Properties, JUnitProperty{"go.version", goVersion})
|
||||||
|
if pkg.CoveragePct != "" {
|
||||||
|
ts.Properties = append(ts.Properties, JUnitProperty{"coverage.statements.pct", pkg.CoveragePct})
|
||||||
|
}
|
||||||
|
|
||||||
|
// individual test cases
|
||||||
|
for _, test := range pkg.Tests {
|
||||||
|
testCase := JUnitTestCase{
|
||||||
|
Classname: classname,
|
||||||
|
Name: test.Name,
|
||||||
|
Time: formatTime(test.Duration),
|
||||||
|
Failure: nil,
|
||||||
|
}
|
||||||
|
|
||||||
|
if test.Result == parser.FAIL {
|
||||||
|
ts.Failures++
|
||||||
|
testCase.Failure = &JUnitFailure{
|
||||||
|
Message: "Failed",
|
||||||
|
Type: "",
|
||||||
|
Contents: strings.Join(test.Output, "\n"),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if test.Result == parser.SKIP {
|
||||||
|
testCase.SkipMessage = &JUnitSkipMessage{strings.Join(test.Output, "\n")}
|
||||||
|
}
|
||||||
|
|
||||||
|
ts.TestCases = append(ts.TestCases, testCase)
|
||||||
|
}
|
||||||
|
|
||||||
|
// individual benchmarks
|
||||||
|
for _, benchmark := range pkg.Benchmarks {
|
||||||
|
benchmarkCase := JUnitTestCase{
|
||||||
|
Classname: classname,
|
||||||
|
Name: benchmark.Name,
|
||||||
|
Time: formatBenchmarkTime(benchmark.Duration),
|
||||||
|
}
|
||||||
|
|
||||||
|
ts.TestCases = append(ts.TestCases, benchmarkCase)
|
||||||
|
}
|
||||||
|
|
||||||
|
suites.Suites = append(suites.Suites, ts)
|
||||||
|
}
|
||||||
|
|
||||||
|
// to xml
|
||||||
|
bytes, err := xml.MarshalIndent(suites, "", "\t")
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
writer := bufio.NewWriter(w)
|
||||||
|
|
||||||
|
if !noXMLHeader {
|
||||||
|
writer.WriteString(xml.Header)
|
||||||
|
}
|
||||||
|
|
||||||
|
writer.Write(bytes)
|
||||||
|
writer.WriteByte('\n')
|
||||||
|
writer.Flush()
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func mergeBenchmarks(benchmarks []*parser.Benchmark) []*parser.Benchmark {
|
||||||
|
var merged []*parser.Benchmark
|
||||||
|
benchmap := make(map[string][]*parser.Benchmark)
|
||||||
|
for _, bm := range benchmarks {
|
||||||
|
if _, ok := benchmap[bm.Name]; !ok {
|
||||||
|
merged = append(merged, &parser.Benchmark{Name: bm.Name})
|
||||||
|
}
|
||||||
|
benchmap[bm.Name] = append(benchmap[bm.Name], bm)
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, bm := range merged {
|
||||||
|
for _, b := range benchmap[bm.Name] {
|
||||||
|
bm.Allocs += b.Allocs
|
||||||
|
bm.Bytes += b.Bytes
|
||||||
|
bm.Duration += b.Duration
|
||||||
|
}
|
||||||
|
n := len(benchmap[bm.Name])
|
||||||
|
bm.Allocs /= n
|
||||||
|
bm.Bytes /= n
|
||||||
|
bm.Duration /= time.Duration(n)
|
||||||
|
}
|
||||||
|
|
||||||
|
return merged
|
||||||
|
}
|
||||||
|
|
||||||
|
func formatTime(d time.Duration) string {
|
||||||
|
return fmt.Sprintf("%.3f", d.Seconds())
|
||||||
|
}
|
||||||
|
|
||||||
|
func formatBenchmarkTime(d time.Duration) string {
|
||||||
|
return fmt.Sprintf("%.9f", d.Seconds())
|
||||||
|
}
|
||||||
51
vendor/github.com/jstemmer/go-junit-report/go-junit-report.go
generated
vendored
Normal file
51
vendor/github.com/jstemmer/go-junit-report/go-junit-report.go
generated
vendored
Normal file
@@ -0,0 +1,51 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"flag"
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
|
||||||
|
"github.com/jstemmer/go-junit-report/formatter"
|
||||||
|
"github.com/jstemmer/go-junit-report/parser"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
noXMLHeader bool
|
||||||
|
packageName string
|
||||||
|
goVersionFlag string
|
||||||
|
setExitCode bool
|
||||||
|
)
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
flag.BoolVar(&noXMLHeader, "no-xml-header", false, "do not print xml header")
|
||||||
|
flag.StringVar(&packageName, "package-name", "", "specify a package name (compiled test have no package name in output)")
|
||||||
|
flag.StringVar(&goVersionFlag, "go-version", "", "specify the value to use for the go.version property in the generated XML")
|
||||||
|
flag.BoolVar(&setExitCode, "set-exit-code", false, "set exit code to 1 if tests failed")
|
||||||
|
}
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
flag.Parse()
|
||||||
|
|
||||||
|
if flag.NArg() != 0 {
|
||||||
|
fmt.Println("go-junit-report does not accept positional arguments")
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Read input
|
||||||
|
report, err := parser.Parse(os.Stdin, packageName)
|
||||||
|
if err != nil {
|
||||||
|
fmt.Printf("Error reading input: %s\n", err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Write xml
|
||||||
|
err = formatter.JUnitReportXML(report, noXMLHeader, goVersionFlag, os.Stdout)
|
||||||
|
if err != nil {
|
||||||
|
fmt.Printf("Error writing XML: %s\n", err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
if setExitCode && report.Failures() > 0 {
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
}
|
||||||
23
vendor/github.com/jstemmer/go-junit-report/parser/BUILD
generated
vendored
Normal file
23
vendor/github.com/jstemmer/go-junit-report/parser/BUILD
generated
vendored
Normal file
@@ -0,0 +1,23 @@
|
|||||||
|
load("@io_bazel_rules_go//go:def.bzl", "go_library")
|
||||||
|
|
||||||
|
go_library(
|
||||||
|
name = "go_default_library",
|
||||||
|
srcs = ["parser.go"],
|
||||||
|
importmap = "k8s.io/kubernetes/vendor/github.com/jstemmer/go-junit-report/parser",
|
||||||
|
importpath = "github.com/jstemmer/go-junit-report/parser",
|
||||||
|
visibility = ["//visibility:public"],
|
||||||
|
)
|
||||||
|
|
||||||
|
filegroup(
|
||||||
|
name = "package-srcs",
|
||||||
|
srcs = glob(["**"]),
|
||||||
|
tags = ["automanaged"],
|
||||||
|
visibility = ["//visibility:private"],
|
||||||
|
)
|
||||||
|
|
||||||
|
filegroup(
|
||||||
|
name = "all-srcs",
|
||||||
|
srcs = [":package-srcs"],
|
||||||
|
tags = ["automanaged"],
|
||||||
|
visibility = ["//visibility:public"],
|
||||||
|
)
|
||||||
303
vendor/github.com/jstemmer/go-junit-report/parser/parser.go
generated
vendored
Normal file
303
vendor/github.com/jstemmer/go-junit-report/parser/parser.go
generated
vendored
Normal file
@@ -0,0 +1,303 @@
|
|||||||
|
package parser
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"io"
|
||||||
|
"regexp"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Result represents a test result.
|
||||||
|
type Result int
|
||||||
|
|
||||||
|
// Test result constants
|
||||||
|
const (
|
||||||
|
PASS Result = iota
|
||||||
|
FAIL
|
||||||
|
SKIP
|
||||||
|
)
|
||||||
|
|
||||||
|
// Report is a collection of package tests.
|
||||||
|
type Report struct {
|
||||||
|
Packages []Package
|
||||||
|
}
|
||||||
|
|
||||||
|
// Package contains the test results of a single package.
|
||||||
|
type Package struct {
|
||||||
|
Name string
|
||||||
|
Duration time.Duration
|
||||||
|
Tests []*Test
|
||||||
|
Benchmarks []*Benchmark
|
||||||
|
CoveragePct string
|
||||||
|
|
||||||
|
// Time is deprecated, use Duration instead.
|
||||||
|
Time int // in milliseconds
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test contains the results of a single test.
|
||||||
|
type Test struct {
|
||||||
|
Name string
|
||||||
|
Duration time.Duration
|
||||||
|
Result Result
|
||||||
|
Output []string
|
||||||
|
|
||||||
|
SubtestIndent string
|
||||||
|
|
||||||
|
// Time is deprecated, use Duration instead.
|
||||||
|
Time int // in milliseconds
|
||||||
|
}
|
||||||
|
|
||||||
|
// Benchmark contains the results of a single benchmark.
|
||||||
|
type Benchmark struct {
|
||||||
|
Name string
|
||||||
|
Duration time.Duration
|
||||||
|
// number of B/op
|
||||||
|
Bytes int
|
||||||
|
// number of allocs/op
|
||||||
|
Allocs int
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
regexStatus = regexp.MustCompile(`--- (PASS|FAIL|SKIP): (.+) \((\d+\.\d+)(?: seconds|s)\)`)
|
||||||
|
regexIndent = regexp.MustCompile(`^([ \t]+)---`)
|
||||||
|
regexCoverage = regexp.MustCompile(`^coverage:\s+(\d+\.\d+)%\s+of\s+statements(?:\sin\s.+)?$`)
|
||||||
|
regexResult = regexp.MustCompile(`^(ok|FAIL)\s+([^ ]+)\s+(?:(\d+\.\d+)s|\(cached\)|(\[\w+ failed]))(?:\s+coverage:\s+(\d+\.\d+)%\sof\sstatements(?:\sin\s.+)?)?$`)
|
||||||
|
// regexBenchmark captures 3-5 groups: benchmark name, number of times ran, ns/op (with or without decimal), B/op (optional), and allocs/op (optional).
|
||||||
|
regexBenchmark = regexp.MustCompile(`^(Benchmark[^ -]+)(?:-\d+\s+|\s+)(\d+)\s+(\d+|\d+\.\d+)\sns/op(?:\s+(\d+)\sB/op)?(?:\s+(\d+)\sallocs/op)?`)
|
||||||
|
regexOutput = regexp.MustCompile(`( )*\t(.*)`)
|
||||||
|
regexSummary = regexp.MustCompile(`^(PASS|FAIL|SKIP)$`)
|
||||||
|
)
|
||||||
|
|
||||||
|
// Parse parses go test output from reader r and returns a report with the
|
||||||
|
// results. An optional pkgName can be given, which is used in case a package
|
||||||
|
// result line is missing.
|
||||||
|
func Parse(r io.Reader, pkgName string) (*Report, error) {
|
||||||
|
reader := bufio.NewReader(r)
|
||||||
|
|
||||||
|
report := &Report{make([]Package, 0)}
|
||||||
|
|
||||||
|
// keep track of tests we find
|
||||||
|
var tests []*Test
|
||||||
|
|
||||||
|
// keep track of benchmarks we find
|
||||||
|
var benchmarks []*Benchmark
|
||||||
|
|
||||||
|
// sum of tests' time, use this if current test has no result line (when it is compiled test)
|
||||||
|
var testsTime time.Duration
|
||||||
|
|
||||||
|
// current test
|
||||||
|
var cur string
|
||||||
|
|
||||||
|
// keep track if we've already seen a summary for the current test
|
||||||
|
var seenSummary bool
|
||||||
|
|
||||||
|
// coverage percentage report for current package
|
||||||
|
var coveragePct string
|
||||||
|
|
||||||
|
// stores mapping between package name and output of build failures
|
||||||
|
var packageCaptures = map[string][]string{}
|
||||||
|
|
||||||
|
// the name of the package which it's build failure output is being captured
|
||||||
|
var capturedPackage string
|
||||||
|
|
||||||
|
// capture any non-test output
|
||||||
|
var buffers = map[string][]string{}
|
||||||
|
|
||||||
|
// parse lines
|
||||||
|
for {
|
||||||
|
l, _, err := reader.ReadLine()
|
||||||
|
if err != nil && err == io.EOF {
|
||||||
|
break
|
||||||
|
} else if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
line := string(l)
|
||||||
|
|
||||||
|
if strings.HasPrefix(line, "=== RUN ") {
|
||||||
|
// new test
|
||||||
|
cur = strings.TrimSpace(line[8:])
|
||||||
|
tests = append(tests, &Test{
|
||||||
|
Name: cur,
|
||||||
|
Result: FAIL,
|
||||||
|
Output: make([]string, 0),
|
||||||
|
})
|
||||||
|
|
||||||
|
// clear the current build package, so output lines won't be added to that build
|
||||||
|
capturedPackage = ""
|
||||||
|
seenSummary = false
|
||||||
|
} else if matches := regexBenchmark.FindStringSubmatch(line); len(matches) == 6 {
|
||||||
|
bytes, _ := strconv.Atoi(matches[4])
|
||||||
|
allocs, _ := strconv.Atoi(matches[5])
|
||||||
|
|
||||||
|
benchmarks = append(benchmarks, &Benchmark{
|
||||||
|
Name: matches[1],
|
||||||
|
Duration: parseNanoseconds(matches[3]),
|
||||||
|
Bytes: bytes,
|
||||||
|
Allocs: allocs,
|
||||||
|
})
|
||||||
|
} else if strings.HasPrefix(line, "=== PAUSE ") {
|
||||||
|
continue
|
||||||
|
} else if strings.HasPrefix(line, "=== CONT ") {
|
||||||
|
cur = strings.TrimSpace(line[8:])
|
||||||
|
continue
|
||||||
|
} else if matches := regexResult.FindStringSubmatch(line); len(matches) == 6 {
|
||||||
|
if matches[5] != "" {
|
||||||
|
coveragePct = matches[5]
|
||||||
|
}
|
||||||
|
if strings.HasSuffix(matches[4], "failed]") {
|
||||||
|
// the build of the package failed, inject a dummy test into the package
|
||||||
|
// which indicate about the failure and contain the failure description.
|
||||||
|
tests = append(tests, &Test{
|
||||||
|
Name: matches[4],
|
||||||
|
Result: FAIL,
|
||||||
|
Output: packageCaptures[matches[2]],
|
||||||
|
})
|
||||||
|
} else if matches[1] == "FAIL" && len(tests) == 0 && len(buffers[cur]) > 0 {
|
||||||
|
// This package didn't have any tests, but it failed with some
|
||||||
|
// output. Create a dummy test with the output.
|
||||||
|
tests = append(tests, &Test{
|
||||||
|
Name: "Failure",
|
||||||
|
Result: FAIL,
|
||||||
|
Output: buffers[cur],
|
||||||
|
})
|
||||||
|
buffers[cur] = buffers[cur][0:0]
|
||||||
|
}
|
||||||
|
|
||||||
|
// all tests in this package are finished
|
||||||
|
report.Packages = append(report.Packages, Package{
|
||||||
|
Name: matches[2],
|
||||||
|
Duration: parseSeconds(matches[3]),
|
||||||
|
Tests: tests,
|
||||||
|
Benchmarks: benchmarks,
|
||||||
|
CoveragePct: coveragePct,
|
||||||
|
|
||||||
|
Time: int(parseSeconds(matches[3]) / time.Millisecond), // deprecated
|
||||||
|
})
|
||||||
|
|
||||||
|
buffers[cur] = buffers[cur][0:0]
|
||||||
|
tests = make([]*Test, 0)
|
||||||
|
benchmarks = make([]*Benchmark, 0)
|
||||||
|
coveragePct = ""
|
||||||
|
cur = ""
|
||||||
|
testsTime = 0
|
||||||
|
} else if matches := regexStatus.FindStringSubmatch(line); len(matches) == 4 {
|
||||||
|
cur = matches[2]
|
||||||
|
test := findTest(tests, cur)
|
||||||
|
if test == nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// test status
|
||||||
|
if matches[1] == "PASS" {
|
||||||
|
test.Result = PASS
|
||||||
|
} else if matches[1] == "SKIP" {
|
||||||
|
test.Result = SKIP
|
||||||
|
} else {
|
||||||
|
test.Result = FAIL
|
||||||
|
}
|
||||||
|
|
||||||
|
if matches := regexIndent.FindStringSubmatch(line); len(matches) == 2 {
|
||||||
|
test.SubtestIndent = matches[1]
|
||||||
|
}
|
||||||
|
|
||||||
|
test.Output = buffers[cur]
|
||||||
|
|
||||||
|
test.Name = matches[2]
|
||||||
|
test.Duration = parseSeconds(matches[3])
|
||||||
|
testsTime += test.Duration
|
||||||
|
|
||||||
|
test.Time = int(test.Duration / time.Millisecond) // deprecated
|
||||||
|
} else if matches := regexCoverage.FindStringSubmatch(line); len(matches) == 2 {
|
||||||
|
coveragePct = matches[1]
|
||||||
|
} else if matches := regexOutput.FindStringSubmatch(line); capturedPackage == "" && len(matches) == 3 {
|
||||||
|
// Sub-tests start with one or more series of 4-space indents, followed by a hard tab,
|
||||||
|
// followed by the test output
|
||||||
|
// Top-level tests start with a hard tab.
|
||||||
|
test := findTest(tests, cur)
|
||||||
|
if test == nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
test.Output = append(test.Output, matches[2])
|
||||||
|
} else if strings.HasPrefix(line, "# ") {
|
||||||
|
// indicates a capture of build output of a package. set the current build package.
|
||||||
|
capturedPackage = line[2:]
|
||||||
|
} else if capturedPackage != "" {
|
||||||
|
// current line is build failure capture for the current built package
|
||||||
|
packageCaptures[capturedPackage] = append(packageCaptures[capturedPackage], line)
|
||||||
|
} else if regexSummary.MatchString(line) {
|
||||||
|
// don't store any output after the summary
|
||||||
|
seenSummary = true
|
||||||
|
} else if !seenSummary {
|
||||||
|
// buffer anything else that we didn't recognize
|
||||||
|
buffers[cur] = append(buffers[cur], line)
|
||||||
|
|
||||||
|
// if we have a current test, also append to its output
|
||||||
|
test := findTest(tests, cur)
|
||||||
|
if test != nil {
|
||||||
|
if strings.HasPrefix(line, test.SubtestIndent+" ") {
|
||||||
|
test.Output = append(test.Output, strings.TrimPrefix(line, test.SubtestIndent+" "))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(tests) > 0 {
|
||||||
|
// no result line found
|
||||||
|
report.Packages = append(report.Packages, Package{
|
||||||
|
Name: pkgName,
|
||||||
|
Duration: testsTime,
|
||||||
|
Time: int(testsTime / time.Millisecond),
|
||||||
|
Tests: tests,
|
||||||
|
Benchmarks: benchmarks,
|
||||||
|
CoveragePct: coveragePct,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
return report, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func parseSeconds(t string) time.Duration {
|
||||||
|
if t == "" {
|
||||||
|
return time.Duration(0)
|
||||||
|
}
|
||||||
|
// ignore error
|
||||||
|
d, _ := time.ParseDuration(t + "s")
|
||||||
|
return d
|
||||||
|
}
|
||||||
|
|
||||||
|
func parseNanoseconds(t string) time.Duration {
|
||||||
|
// note: if input < 1 ns precision, result will be 0s.
|
||||||
|
if t == "" {
|
||||||
|
return time.Duration(0)
|
||||||
|
}
|
||||||
|
// ignore error
|
||||||
|
d, _ := time.ParseDuration(t + "ns")
|
||||||
|
return d
|
||||||
|
}
|
||||||
|
|
||||||
|
func findTest(tests []*Test, name string) *Test {
|
||||||
|
for i := len(tests) - 1; i >= 0; i-- {
|
||||||
|
if tests[i].Name == name {
|
||||||
|
return tests[i]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Failures counts the number of failed tests in this report
|
||||||
|
func (r *Report) Failures() int {
|
||||||
|
count := 0
|
||||||
|
|
||||||
|
for _, p := range r.Packages {
|
||||||
|
for _, t := range p.Tests {
|
||||||
|
if t.Result == FAIL {
|
||||||
|
count++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return count
|
||||||
|
}
|
||||||
468
vendor/golang.org/x/tools/container/intsets/sparse.go
generated
vendored
468
vendor/golang.org/x/tools/container/intsets/sparse.go
generated
vendored
@@ -21,10 +21,6 @@ package intsets
|
|||||||
// The space usage would be proportional to Max(), not Len(), and the
|
// The space usage would be proportional to Max(), not Len(), and the
|
||||||
// implementation would be based upon big.Int.
|
// implementation would be based upon big.Int.
|
||||||
//
|
//
|
||||||
// TODO(adonovan): experiment with making the root block indirect (nil
|
|
||||||
// iff IsEmpty). This would reduce the memory usage when empty and
|
|
||||||
// might simplify the aliasing invariants.
|
|
||||||
//
|
|
||||||
// TODO(adonovan): opt: make UnionWith and Difference faster.
|
// TODO(adonovan): opt: make UnionWith and Difference faster.
|
||||||
// These are the hot-spots for go/pointer.
|
// These are the hot-spots for go/pointer.
|
||||||
|
|
||||||
@@ -45,9 +41,10 @@ type Sparse struct {
|
|||||||
// An uninitialized Sparse represents an empty set.
|
// An uninitialized Sparse represents an empty set.
|
||||||
// An empty set may also be represented by
|
// An empty set may also be represented by
|
||||||
// root.next == root.prev == &root.
|
// root.next == root.prev == &root.
|
||||||
// In a non-empty set, root.next points to the first block and
|
//
|
||||||
// root.prev to the last.
|
// The root is always the block with the smallest offset.
|
||||||
// root.offset and root.bits are unused.
|
// It can be empty, but only if it is the only block; in that case, offset is
|
||||||
|
// MaxInt (which is not a valid offset).
|
||||||
root block
|
root block
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -144,7 +141,6 @@ func (b *block) len() int {
|
|||||||
|
|
||||||
// max returns the maximum element of the block.
|
// max returns the maximum element of the block.
|
||||||
// The block must not be empty.
|
// The block must not be empty.
|
||||||
//
|
|
||||||
func (b *block) max() int {
|
func (b *block) max() int {
|
||||||
bi := b.offset + bitsPerBlock
|
bi := b.offset + bitsPerBlock
|
||||||
// Decrement bi by number of high zeros in last.bits.
|
// Decrement bi by number of high zeros in last.bits.
|
||||||
@@ -161,7 +157,6 @@ func (b *block) max() int {
|
|||||||
// and also removes it if take is set.
|
// and also removes it if take is set.
|
||||||
// The block must not be initially empty.
|
// The block must not be initially empty.
|
||||||
// NB: may leave the block empty.
|
// NB: may leave the block empty.
|
||||||
//
|
|
||||||
func (b *block) min(take bool) int {
|
func (b *block) min(take bool) int {
|
||||||
for i, w := range b.bits {
|
for i, w := range b.bits {
|
||||||
if w != 0 {
|
if w != 0 {
|
||||||
@@ -175,6 +170,26 @@ func (b *block) min(take bool) int {
|
|||||||
panic("BUG: empty block")
|
panic("BUG: empty block")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// lowerBound returns the smallest element of the block that is greater than or
|
||||||
|
// equal to the element corresponding to the ith bit. If there is no such
|
||||||
|
// element, the second return value is false.
|
||||||
|
func (b *block) lowerBound(i uint) (int, bool) {
|
||||||
|
w := i / bitsPerWord
|
||||||
|
bit := i % bitsPerWord
|
||||||
|
|
||||||
|
if val := b.bits[w] >> bit; val != 0 {
|
||||||
|
return b.offset + int(i) + ntz(val), true
|
||||||
|
}
|
||||||
|
|
||||||
|
for w++; w < wordsPerBlock; w++ {
|
||||||
|
if val := b.bits[w]; val != 0 {
|
||||||
|
return b.offset + int(w*bitsPerWord) + ntz(val), true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return 0, false
|
||||||
|
}
|
||||||
|
|
||||||
// forEach calls f for each element of block b.
|
// forEach calls f for each element of block b.
|
||||||
// f must not mutate b's enclosing Sparse.
|
// f must not mutate b's enclosing Sparse.
|
||||||
func (b *block) forEach(f func(int)) {
|
func (b *block) forEach(f func(int)) {
|
||||||
@@ -204,14 +219,20 @@ func offsetAndBitIndex(x int) (int, uint) {
|
|||||||
|
|
||||||
// -- Sparse --------------------------------------------------------------
|
// -- Sparse --------------------------------------------------------------
|
||||||
|
|
||||||
// start returns the root's next block, which is the root block
|
// none is a shared, empty, sentinel block that indicates the end of a block
|
||||||
// (if s.IsEmpty()) or the first true block otherwise.
|
// list.
|
||||||
// start has the side effect of ensuring that s is properly
|
var none block
|
||||||
// initialized.
|
|
||||||
//
|
// Dummy type used to generate an implicit panic. This must be defined at the
|
||||||
func (s *Sparse) start() *block {
|
// package level; if it is defined inside a function, it prevents the inlining
|
||||||
|
// of that function.
|
||||||
|
type to_copy_a_sparse_you_must_call_its_Copy_method struct{}
|
||||||
|
|
||||||
|
// init ensures s is properly initialized.
|
||||||
|
func (s *Sparse) init() {
|
||||||
root := &s.root
|
root := &s.root
|
||||||
if root.next == nil {
|
if root.next == nil {
|
||||||
|
root.offset = MaxInt
|
||||||
root.next = root
|
root.next = root
|
||||||
root.prev = root
|
root.prev = root
|
||||||
} else if root.next.prev != root {
|
} else if root.next.prev != root {
|
||||||
@@ -219,21 +240,45 @@ func (s *Sparse) start() *block {
|
|||||||
// new Sparse y shares the old linked list, but iteration
|
// new Sparse y shares the old linked list, but iteration
|
||||||
// on y will never encounter &y.root so it goes into a
|
// on y will never encounter &y.root so it goes into a
|
||||||
// loop. Fail fast before this occurs.
|
// loop. Fail fast before this occurs.
|
||||||
panic("A Sparse has been copied without (*Sparse).Copy()")
|
// We don't want to call panic here because it prevents the
|
||||||
|
// inlining of this function.
|
||||||
|
_ = (interface{}(nil)).(to_copy_a_sparse_you_must_call_its_Copy_method)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return root.next
|
func (s *Sparse) first() *block {
|
||||||
|
s.init()
|
||||||
|
if s.root.offset == MaxInt {
|
||||||
|
return &none
|
||||||
|
}
|
||||||
|
return &s.root
|
||||||
|
}
|
||||||
|
|
||||||
|
// next returns the next block in the list, or end if b is the last block.
|
||||||
|
func (s *Sparse) next(b *block) *block {
|
||||||
|
if b.next == &s.root {
|
||||||
|
return &none
|
||||||
|
}
|
||||||
|
return b.next
|
||||||
|
}
|
||||||
|
|
||||||
|
// prev returns the previous block in the list, or end if b is the first block.
|
||||||
|
func (s *Sparse) prev(b *block) *block {
|
||||||
|
if b.prev == &s.root {
|
||||||
|
return &none
|
||||||
|
}
|
||||||
|
return b.prev
|
||||||
}
|
}
|
||||||
|
|
||||||
// IsEmpty reports whether the set s is empty.
|
// IsEmpty reports whether the set s is empty.
|
||||||
func (s *Sparse) IsEmpty() bool {
|
func (s *Sparse) IsEmpty() bool {
|
||||||
return s.start() == &s.root
|
return s.root.next == nil || s.root.offset == MaxInt
|
||||||
}
|
}
|
||||||
|
|
||||||
// Len returns the number of elements in the set s.
|
// Len returns the number of elements in the set s.
|
||||||
func (s *Sparse) Len() int {
|
func (s *Sparse) Len() int {
|
||||||
var l int
|
var l int
|
||||||
for b := s.start(); b != &s.root; b = b.next {
|
for b := s.first(); b != &none; b = s.next(b) {
|
||||||
l += b.len()
|
l += b.len()
|
||||||
}
|
}
|
||||||
return l
|
return l
|
||||||
@@ -252,19 +297,34 @@ func (s *Sparse) Min() int {
|
|||||||
if s.IsEmpty() {
|
if s.IsEmpty() {
|
||||||
return MaxInt
|
return MaxInt
|
||||||
}
|
}
|
||||||
return s.root.next.min(false)
|
return s.root.min(false)
|
||||||
|
}
|
||||||
|
|
||||||
|
// LowerBound returns the smallest element >= x, or MaxInt if there is no such
|
||||||
|
// element.
|
||||||
|
func (s *Sparse) LowerBound(x int) int {
|
||||||
|
offset, i := offsetAndBitIndex(x)
|
||||||
|
for b := s.first(); b != &none; b = s.next(b) {
|
||||||
|
if b.offset > offset {
|
||||||
|
return b.min(false)
|
||||||
|
}
|
||||||
|
if b.offset == offset {
|
||||||
|
if y, ok := b.lowerBound(i); ok {
|
||||||
|
return y
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return MaxInt
|
||||||
}
|
}
|
||||||
|
|
||||||
// block returns the block that would contain offset,
|
// block returns the block that would contain offset,
|
||||||
// or nil if s contains no such block.
|
// or nil if s contains no such block.
|
||||||
//
|
// Precondition: offset is a multiple of bitsPerBlock.
|
||||||
func (s *Sparse) block(offset int) *block {
|
func (s *Sparse) block(offset int) *block {
|
||||||
b := s.start()
|
for b := s.first(); b != &none && b.offset <= offset; b = s.next(b) {
|
||||||
for b != &s.root && b.offset <= offset {
|
|
||||||
if b.offset == offset {
|
if b.offset == offset {
|
||||||
return b
|
return b
|
||||||
}
|
}
|
||||||
b = b.next
|
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@@ -272,26 +332,49 @@ func (s *Sparse) block(offset int) *block {
|
|||||||
// Insert adds x to the set s, and reports whether the set grew.
|
// Insert adds x to the set s, and reports whether the set grew.
|
||||||
func (s *Sparse) Insert(x int) bool {
|
func (s *Sparse) Insert(x int) bool {
|
||||||
offset, i := offsetAndBitIndex(x)
|
offset, i := offsetAndBitIndex(x)
|
||||||
b := s.start()
|
|
||||||
for b != &s.root && b.offset <= offset {
|
b := s.first()
|
||||||
|
for ; b != &none && b.offset <= offset; b = s.next(b) {
|
||||||
if b.offset == offset {
|
if b.offset == offset {
|
||||||
return b.insert(i)
|
return b.insert(i)
|
||||||
}
|
}
|
||||||
b = b.next
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Insert new block before b.
|
// Insert new block before b.
|
||||||
new := &block{offset: offset}
|
new := s.insertBlockBefore(b)
|
||||||
new.next = b
|
new.offset = offset
|
||||||
new.prev = b.prev
|
|
||||||
new.prev.next = new
|
|
||||||
new.next.prev = new
|
|
||||||
return new.insert(i)
|
return new.insert(i)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *Sparse) removeBlock(b *block) {
|
// removeBlock removes a block and returns the block that followed it (or end if
|
||||||
|
// it was the last block).
|
||||||
|
func (s *Sparse) removeBlock(b *block) *block {
|
||||||
|
if b != &s.root {
|
||||||
b.prev.next = b.next
|
b.prev.next = b.next
|
||||||
b.next.prev = b.prev
|
b.next.prev = b.prev
|
||||||
|
if b.next == &s.root {
|
||||||
|
return &none
|
||||||
|
}
|
||||||
|
return b.next
|
||||||
|
}
|
||||||
|
|
||||||
|
first := s.root.next
|
||||||
|
if first == &s.root {
|
||||||
|
// This was the only block.
|
||||||
|
s.Clear()
|
||||||
|
return &none
|
||||||
|
}
|
||||||
|
s.root.offset = first.offset
|
||||||
|
s.root.bits = first.bits
|
||||||
|
if first.next == &s.root {
|
||||||
|
// Single block remaining.
|
||||||
|
s.root.next = &s.root
|
||||||
|
s.root.prev = &s.root
|
||||||
|
} else {
|
||||||
|
s.root.next = first.next
|
||||||
|
first.next.prev = &s.root
|
||||||
|
}
|
||||||
|
return &s.root
|
||||||
}
|
}
|
||||||
|
|
||||||
// Remove removes x from the set s, and reports whether the set shrank.
|
// Remove removes x from the set s, and reports whether the set shrank.
|
||||||
@@ -311,8 +394,11 @@ func (s *Sparse) Remove(x int) bool {
|
|||||||
|
|
||||||
// Clear removes all elements from the set s.
|
// Clear removes all elements from the set s.
|
||||||
func (s *Sparse) Clear() {
|
func (s *Sparse) Clear() {
|
||||||
s.root.next = &s.root
|
s.root = block{
|
||||||
s.root.prev = &s.root
|
offset: MaxInt,
|
||||||
|
next: &s.root,
|
||||||
|
prev: &s.root,
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// If set s is non-empty, TakeMin sets *p to the minimum element of
|
// If set s is non-empty, TakeMin sets *p to the minimum element of
|
||||||
@@ -325,13 +411,12 @@ func (s *Sparse) Clear() {
|
|||||||
// for worklist.TakeMin(&x) { use(x) }
|
// for worklist.TakeMin(&x) { use(x) }
|
||||||
//
|
//
|
||||||
func (s *Sparse) TakeMin(p *int) bool {
|
func (s *Sparse) TakeMin(p *int) bool {
|
||||||
head := s.start()
|
if s.IsEmpty() {
|
||||||
if head == &s.root {
|
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
*p = head.min(true)
|
*p = s.root.min(true)
|
||||||
if head.empty() {
|
if s.root.empty() {
|
||||||
s.removeBlock(head)
|
s.removeBlock(&s.root)
|
||||||
}
|
}
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
@@ -352,7 +437,7 @@ func (s *Sparse) Has(x int) bool {
|
|||||||
// natural control flow with continue/break/return.
|
// natural control flow with continue/break/return.
|
||||||
//
|
//
|
||||||
func (s *Sparse) forEach(f func(int)) {
|
func (s *Sparse) forEach(f func(int)) {
|
||||||
for b := s.start(); b != &s.root; b = b.next {
|
for b := s.first(); b != &none; b = s.next(b) {
|
||||||
b.forEach(f)
|
b.forEach(f)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -363,22 +448,51 @@ func (s *Sparse) Copy(x *Sparse) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
xb := x.start()
|
xb := x.first()
|
||||||
sb := s.start()
|
sb := s.first()
|
||||||
for xb != &x.root {
|
for xb != &none {
|
||||||
if sb == &s.root {
|
if sb == &none {
|
||||||
sb = s.insertBlockBefore(sb)
|
sb = s.insertBlockBefore(sb)
|
||||||
}
|
}
|
||||||
sb.offset = xb.offset
|
sb.offset = xb.offset
|
||||||
sb.bits = xb.bits
|
sb.bits = xb.bits
|
||||||
xb = xb.next
|
xb = x.next(xb)
|
||||||
sb = sb.next
|
sb = s.next(sb)
|
||||||
}
|
}
|
||||||
s.discardTail(sb)
|
s.discardTail(sb)
|
||||||
}
|
}
|
||||||
|
|
||||||
// insertBlockBefore returns a new block, inserting it before next.
|
// insertBlockBefore returns a new block, inserting it before next.
|
||||||
|
// If next is the root, the root is replaced. If next is end, the block is
|
||||||
|
// inserted at the end.
|
||||||
func (s *Sparse) insertBlockBefore(next *block) *block {
|
func (s *Sparse) insertBlockBefore(next *block) *block {
|
||||||
|
if s.IsEmpty() {
|
||||||
|
if next != &none {
|
||||||
|
panic("BUG: passed block with empty set")
|
||||||
|
}
|
||||||
|
return &s.root
|
||||||
|
}
|
||||||
|
|
||||||
|
if next == &s.root {
|
||||||
|
// Special case: we need to create a new block that will become the root
|
||||||
|
// block.The old root block becomes the second block.
|
||||||
|
second := s.root
|
||||||
|
s.root = block{
|
||||||
|
next: &second,
|
||||||
|
}
|
||||||
|
if second.next == &s.root {
|
||||||
|
s.root.prev = &second
|
||||||
|
} else {
|
||||||
|
s.root.prev = second.prev
|
||||||
|
second.next.prev = &second
|
||||||
|
second.prev = &s.root
|
||||||
|
}
|
||||||
|
return &s.root
|
||||||
|
}
|
||||||
|
if next == &none {
|
||||||
|
// Insert before root.
|
||||||
|
next = &s.root
|
||||||
|
}
|
||||||
b := new(block)
|
b := new(block)
|
||||||
b.next = next
|
b.next = next
|
||||||
b.prev = next.prev
|
b.prev = next.prev
|
||||||
@@ -389,11 +503,15 @@ func (s *Sparse) insertBlockBefore(next *block) *block {
|
|||||||
|
|
||||||
// discardTail removes block b and all its successors from s.
|
// discardTail removes block b and all its successors from s.
|
||||||
func (s *Sparse) discardTail(b *block) {
|
func (s *Sparse) discardTail(b *block) {
|
||||||
if b != &s.root {
|
if b != &none {
|
||||||
|
if b == &s.root {
|
||||||
|
s.Clear()
|
||||||
|
} else {
|
||||||
b.prev.next = &s.root
|
b.prev.next = &s.root
|
||||||
s.root.prev = b.prev
|
s.root.prev = b.prev
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// IntersectionWith sets s to the intersection s ∩ x.
|
// IntersectionWith sets s to the intersection s ∩ x.
|
||||||
func (s *Sparse) IntersectionWith(x *Sparse) {
|
func (s *Sparse) IntersectionWith(x *Sparse) {
|
||||||
@@ -401,16 +519,15 @@ func (s *Sparse) IntersectionWith(x *Sparse) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
xb := x.start()
|
xb := x.first()
|
||||||
sb := s.start()
|
sb := s.first()
|
||||||
for xb != &x.root && sb != &s.root {
|
for xb != &none && sb != &none {
|
||||||
switch {
|
switch {
|
||||||
case xb.offset < sb.offset:
|
case xb.offset < sb.offset:
|
||||||
xb = xb.next
|
xb = x.next(xb)
|
||||||
|
|
||||||
case xb.offset > sb.offset:
|
case xb.offset > sb.offset:
|
||||||
sb = sb.next
|
sb = s.removeBlock(sb)
|
||||||
s.removeBlock(sb.prev)
|
|
||||||
|
|
||||||
default:
|
default:
|
||||||
var sum word
|
var sum word
|
||||||
@@ -420,12 +537,12 @@ func (s *Sparse) IntersectionWith(x *Sparse) {
|
|||||||
sum |= r
|
sum |= r
|
||||||
}
|
}
|
||||||
if sum != 0 {
|
if sum != 0 {
|
||||||
sb = sb.next
|
sb = s.next(sb)
|
||||||
} else {
|
} else {
|
||||||
// sb will be overwritten or removed
|
// sb will be overwritten or removed
|
||||||
}
|
}
|
||||||
|
|
||||||
xb = xb.next
|
xb = x.next(xb)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -446,20 +563,20 @@ func (s *Sparse) Intersection(x, y *Sparse) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
xb := x.start()
|
xb := x.first()
|
||||||
yb := y.start()
|
yb := y.first()
|
||||||
sb := s.start()
|
sb := s.first()
|
||||||
for xb != &x.root && yb != &y.root {
|
for xb != &none && yb != &none {
|
||||||
switch {
|
switch {
|
||||||
case xb.offset < yb.offset:
|
case xb.offset < yb.offset:
|
||||||
xb = xb.next
|
xb = x.next(xb)
|
||||||
continue
|
continue
|
||||||
case xb.offset > yb.offset:
|
case xb.offset > yb.offset:
|
||||||
yb = yb.next
|
yb = y.next(yb)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
if sb == &s.root {
|
if sb == &none {
|
||||||
sb = s.insertBlockBefore(sb)
|
sb = s.insertBlockBefore(sb)
|
||||||
}
|
}
|
||||||
sb.offset = xb.offset
|
sb.offset = xb.offset
|
||||||
@@ -471,13 +588,13 @@ func (s *Sparse) Intersection(x, y *Sparse) {
|
|||||||
sum |= r
|
sum |= r
|
||||||
}
|
}
|
||||||
if sum != 0 {
|
if sum != 0 {
|
||||||
sb = sb.next
|
sb = s.next(sb)
|
||||||
} else {
|
} else {
|
||||||
// sb will be overwritten or removed
|
// sb will be overwritten or removed
|
||||||
}
|
}
|
||||||
|
|
||||||
xb = xb.next
|
xb = x.next(xb)
|
||||||
yb = yb.next
|
yb = y.next(yb)
|
||||||
}
|
}
|
||||||
|
|
||||||
s.discardTail(sb)
|
s.discardTail(sb)
|
||||||
@@ -485,22 +602,22 @@ func (s *Sparse) Intersection(x, y *Sparse) {
|
|||||||
|
|
||||||
// Intersects reports whether s ∩ x ≠ ∅.
|
// Intersects reports whether s ∩ x ≠ ∅.
|
||||||
func (s *Sparse) Intersects(x *Sparse) bool {
|
func (s *Sparse) Intersects(x *Sparse) bool {
|
||||||
sb := s.start()
|
sb := s.first()
|
||||||
xb := x.start()
|
xb := x.first()
|
||||||
for sb != &s.root && xb != &x.root {
|
for sb != &none && xb != &none {
|
||||||
switch {
|
switch {
|
||||||
case xb.offset < sb.offset:
|
case xb.offset < sb.offset:
|
||||||
xb = xb.next
|
xb = x.next(xb)
|
||||||
case xb.offset > sb.offset:
|
case xb.offset > sb.offset:
|
||||||
sb = sb.next
|
sb = s.next(sb)
|
||||||
default:
|
default:
|
||||||
for i := range sb.bits {
|
for i := range sb.bits {
|
||||||
if sb.bits[i]&xb.bits[i] != 0 {
|
if sb.bits[i]&xb.bits[i] != 0 {
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
sb = sb.next
|
sb = s.next(sb)
|
||||||
xb = xb.next
|
xb = x.next(xb)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return false
|
return false
|
||||||
@@ -513,26 +630,26 @@ func (s *Sparse) UnionWith(x *Sparse) bool {
|
|||||||
}
|
}
|
||||||
|
|
||||||
var changed bool
|
var changed bool
|
||||||
xb := x.start()
|
xb := x.first()
|
||||||
sb := s.start()
|
sb := s.first()
|
||||||
for xb != &x.root {
|
for xb != &none {
|
||||||
if sb != &s.root && sb.offset == xb.offset {
|
if sb != &none && sb.offset == xb.offset {
|
||||||
for i := range xb.bits {
|
for i := range xb.bits {
|
||||||
if sb.bits[i] != xb.bits[i] {
|
if sb.bits[i] != xb.bits[i] {
|
||||||
sb.bits[i] |= xb.bits[i]
|
sb.bits[i] |= xb.bits[i]
|
||||||
changed = true
|
changed = true
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
xb = xb.next
|
xb = x.next(xb)
|
||||||
} else if sb == &s.root || sb.offset > xb.offset {
|
} else if sb == &none || sb.offset > xb.offset {
|
||||||
sb = s.insertBlockBefore(sb)
|
sb = s.insertBlockBefore(sb)
|
||||||
sb.offset = xb.offset
|
sb.offset = xb.offset
|
||||||
sb.bits = xb.bits
|
sb.bits = xb.bits
|
||||||
changed = true
|
changed = true
|
||||||
|
|
||||||
xb = xb.next
|
xb = x.next(xb)
|
||||||
}
|
}
|
||||||
sb = sb.next
|
sb = s.next(sb)
|
||||||
}
|
}
|
||||||
return changed
|
return changed
|
||||||
}
|
}
|
||||||
@@ -551,33 +668,33 @@ func (s *Sparse) Union(x, y *Sparse) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
xb := x.start()
|
xb := x.first()
|
||||||
yb := y.start()
|
yb := y.first()
|
||||||
sb := s.start()
|
sb := s.first()
|
||||||
for xb != &x.root || yb != &y.root {
|
for xb != &none || yb != &none {
|
||||||
if sb == &s.root {
|
if sb == &none {
|
||||||
sb = s.insertBlockBefore(sb)
|
sb = s.insertBlockBefore(sb)
|
||||||
}
|
}
|
||||||
switch {
|
switch {
|
||||||
case yb == &y.root || (xb != &x.root && xb.offset < yb.offset):
|
case yb == &none || (xb != &none && xb.offset < yb.offset):
|
||||||
sb.offset = xb.offset
|
sb.offset = xb.offset
|
||||||
sb.bits = xb.bits
|
sb.bits = xb.bits
|
||||||
xb = xb.next
|
xb = x.next(xb)
|
||||||
|
|
||||||
case xb == &x.root || (yb != &y.root && yb.offset < xb.offset):
|
case xb == &none || (yb != &none && yb.offset < xb.offset):
|
||||||
sb.offset = yb.offset
|
sb.offset = yb.offset
|
||||||
sb.bits = yb.bits
|
sb.bits = yb.bits
|
||||||
yb = yb.next
|
yb = y.next(yb)
|
||||||
|
|
||||||
default:
|
default:
|
||||||
sb.offset = xb.offset
|
sb.offset = xb.offset
|
||||||
for i := range xb.bits {
|
for i := range xb.bits {
|
||||||
sb.bits[i] = xb.bits[i] | yb.bits[i]
|
sb.bits[i] = xb.bits[i] | yb.bits[i]
|
||||||
}
|
}
|
||||||
xb = xb.next
|
xb = x.next(xb)
|
||||||
yb = yb.next
|
yb = y.next(yb)
|
||||||
}
|
}
|
||||||
sb = sb.next
|
sb = s.next(sb)
|
||||||
}
|
}
|
||||||
|
|
||||||
s.discardTail(sb)
|
s.discardTail(sb)
|
||||||
@@ -590,15 +707,15 @@ func (s *Sparse) DifferenceWith(x *Sparse) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
xb := x.start()
|
xb := x.first()
|
||||||
sb := s.start()
|
sb := s.first()
|
||||||
for xb != &x.root && sb != &s.root {
|
for xb != &none && sb != &none {
|
||||||
switch {
|
switch {
|
||||||
case xb.offset > sb.offset:
|
case xb.offset > sb.offset:
|
||||||
sb = sb.next
|
sb = s.next(sb)
|
||||||
|
|
||||||
case xb.offset < sb.offset:
|
case xb.offset < sb.offset:
|
||||||
xb = xb.next
|
xb = x.next(xb)
|
||||||
|
|
||||||
default:
|
default:
|
||||||
var sum word
|
var sum word
|
||||||
@@ -607,12 +724,12 @@ func (s *Sparse) DifferenceWith(x *Sparse) {
|
|||||||
sb.bits[i] = r
|
sb.bits[i] = r
|
||||||
sum |= r
|
sum |= r
|
||||||
}
|
}
|
||||||
sb = sb.next
|
|
||||||
xb = xb.next
|
|
||||||
|
|
||||||
if sum == 0 {
|
if sum == 0 {
|
||||||
s.removeBlock(sb.prev)
|
sb = s.removeBlock(sb)
|
||||||
|
} else {
|
||||||
|
sb = s.next(sb)
|
||||||
}
|
}
|
||||||
|
xb = x.next(xb)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -633,27 +750,27 @@ func (s *Sparse) Difference(x, y *Sparse) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
xb := x.start()
|
xb := x.first()
|
||||||
yb := y.start()
|
yb := y.first()
|
||||||
sb := s.start()
|
sb := s.first()
|
||||||
for xb != &x.root && yb != &y.root {
|
for xb != &none && yb != &none {
|
||||||
if xb.offset > yb.offset {
|
if xb.offset > yb.offset {
|
||||||
// y has block, x has none
|
// y has block, x has &none
|
||||||
yb = yb.next
|
yb = y.next(yb)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
if sb == &s.root {
|
if sb == &none {
|
||||||
sb = s.insertBlockBefore(sb)
|
sb = s.insertBlockBefore(sb)
|
||||||
}
|
}
|
||||||
sb.offset = xb.offset
|
sb.offset = xb.offset
|
||||||
|
|
||||||
switch {
|
switch {
|
||||||
case xb.offset < yb.offset:
|
case xb.offset < yb.offset:
|
||||||
// x has block, y has none
|
// x has block, y has &none
|
||||||
sb.bits = xb.bits
|
sb.bits = xb.bits
|
||||||
|
|
||||||
sb = sb.next
|
sb = s.next(sb)
|
||||||
|
|
||||||
default:
|
default:
|
||||||
// x and y have corresponding blocks
|
// x and y have corresponding blocks
|
||||||
@@ -664,25 +781,25 @@ func (s *Sparse) Difference(x, y *Sparse) {
|
|||||||
sum |= r
|
sum |= r
|
||||||
}
|
}
|
||||||
if sum != 0 {
|
if sum != 0 {
|
||||||
sb = sb.next
|
sb = s.next(sb)
|
||||||
} else {
|
} else {
|
||||||
// sb will be overwritten or removed
|
// sb will be overwritten or removed
|
||||||
}
|
}
|
||||||
|
|
||||||
yb = yb.next
|
yb = y.next(yb)
|
||||||
}
|
}
|
||||||
xb = xb.next
|
xb = x.next(xb)
|
||||||
}
|
}
|
||||||
|
|
||||||
for xb != &x.root {
|
for xb != &none {
|
||||||
if sb == &s.root {
|
if sb == &none {
|
||||||
sb = s.insertBlockBefore(sb)
|
sb = s.insertBlockBefore(sb)
|
||||||
}
|
}
|
||||||
sb.offset = xb.offset
|
sb.offset = xb.offset
|
||||||
sb.bits = xb.bits
|
sb.bits = xb.bits
|
||||||
sb = sb.next
|
sb = s.next(sb)
|
||||||
|
|
||||||
xb = xb.next
|
xb = x.next(xb)
|
||||||
}
|
}
|
||||||
|
|
||||||
s.discardTail(sb)
|
s.discardTail(sb)
|
||||||
@@ -695,17 +812,17 @@ func (s *Sparse) SymmetricDifferenceWith(x *Sparse) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
sb := s.start()
|
sb := s.first()
|
||||||
xb := x.start()
|
xb := x.first()
|
||||||
for xb != &x.root && sb != &s.root {
|
for xb != &none && sb != &none {
|
||||||
switch {
|
switch {
|
||||||
case sb.offset < xb.offset:
|
case sb.offset < xb.offset:
|
||||||
sb = sb.next
|
sb = s.next(sb)
|
||||||
case xb.offset < sb.offset:
|
case xb.offset < sb.offset:
|
||||||
nb := s.insertBlockBefore(sb)
|
nb := s.insertBlockBefore(sb)
|
||||||
nb.offset = xb.offset
|
nb.offset = xb.offset
|
||||||
nb.bits = xb.bits
|
nb.bits = xb.bits
|
||||||
xb = xb.next
|
xb = x.next(xb)
|
||||||
default:
|
default:
|
||||||
var sum word
|
var sum word
|
||||||
for i := range sb.bits {
|
for i := range sb.bits {
|
||||||
@@ -713,20 +830,21 @@ func (s *Sparse) SymmetricDifferenceWith(x *Sparse) {
|
|||||||
sb.bits[i] = r
|
sb.bits[i] = r
|
||||||
sum |= r
|
sum |= r
|
||||||
}
|
}
|
||||||
sb = sb.next
|
|
||||||
xb = xb.next
|
|
||||||
if sum == 0 {
|
if sum == 0 {
|
||||||
s.removeBlock(sb.prev)
|
sb = s.removeBlock(sb)
|
||||||
|
} else {
|
||||||
|
sb = s.next(sb)
|
||||||
}
|
}
|
||||||
|
xb = x.next(xb)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
for xb != &x.root { // append the tail of x to s
|
for xb != &none { // append the tail of x to s
|
||||||
sb = s.insertBlockBefore(sb)
|
sb = s.insertBlockBefore(sb)
|
||||||
sb.offset = xb.offset
|
sb.offset = xb.offset
|
||||||
sb.bits = xb.bits
|
sb.bits = xb.bits
|
||||||
sb = sb.next
|
sb = s.next(sb)
|
||||||
xb = xb.next
|
xb = x.next(xb)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -744,24 +862,24 @@ func (s *Sparse) SymmetricDifference(x, y *Sparse) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
sb := s.start()
|
sb := s.first()
|
||||||
xb := x.start()
|
xb := x.first()
|
||||||
yb := y.start()
|
yb := y.first()
|
||||||
for xb != &x.root && yb != &y.root {
|
for xb != &none && yb != &none {
|
||||||
if sb == &s.root {
|
if sb == &none {
|
||||||
sb = s.insertBlockBefore(sb)
|
sb = s.insertBlockBefore(sb)
|
||||||
}
|
}
|
||||||
switch {
|
switch {
|
||||||
case yb.offset < xb.offset:
|
case yb.offset < xb.offset:
|
||||||
sb.offset = yb.offset
|
sb.offset = yb.offset
|
||||||
sb.bits = yb.bits
|
sb.bits = yb.bits
|
||||||
sb = sb.next
|
sb = s.next(sb)
|
||||||
yb = yb.next
|
yb = y.next(yb)
|
||||||
case xb.offset < yb.offset:
|
case xb.offset < yb.offset:
|
||||||
sb.offset = xb.offset
|
sb.offset = xb.offset
|
||||||
sb.bits = xb.bits
|
sb.bits = xb.bits
|
||||||
sb = sb.next
|
sb = s.next(sb)
|
||||||
xb = xb.next
|
xb = x.next(xb)
|
||||||
default:
|
default:
|
||||||
var sum word
|
var sum word
|
||||||
for i := range sb.bits {
|
for i := range sb.bits {
|
||||||
@@ -771,31 +889,31 @@ func (s *Sparse) SymmetricDifference(x, y *Sparse) {
|
|||||||
}
|
}
|
||||||
if sum != 0 {
|
if sum != 0 {
|
||||||
sb.offset = xb.offset
|
sb.offset = xb.offset
|
||||||
sb = sb.next
|
sb = s.next(sb)
|
||||||
}
|
}
|
||||||
xb = xb.next
|
xb = x.next(xb)
|
||||||
yb = yb.next
|
yb = y.next(yb)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
for xb != &x.root { // append the tail of x to s
|
for xb != &none { // append the tail of x to s
|
||||||
if sb == &s.root {
|
if sb == &none {
|
||||||
sb = s.insertBlockBefore(sb)
|
sb = s.insertBlockBefore(sb)
|
||||||
}
|
}
|
||||||
sb.offset = xb.offset
|
sb.offset = xb.offset
|
||||||
sb.bits = xb.bits
|
sb.bits = xb.bits
|
||||||
sb = sb.next
|
sb = s.next(sb)
|
||||||
xb = xb.next
|
xb = x.next(xb)
|
||||||
}
|
}
|
||||||
|
|
||||||
for yb != &y.root { // append the tail of y to s
|
for yb != &none { // append the tail of y to s
|
||||||
if sb == &s.root {
|
if sb == &none {
|
||||||
sb = s.insertBlockBefore(sb)
|
sb = s.insertBlockBefore(sb)
|
||||||
}
|
}
|
||||||
sb.offset = yb.offset
|
sb.offset = yb.offset
|
||||||
sb.bits = yb.bits
|
sb.bits = yb.bits
|
||||||
sb = sb.next
|
sb = s.next(sb)
|
||||||
yb = yb.next
|
yb = y.next(yb)
|
||||||
}
|
}
|
||||||
|
|
||||||
s.discardTail(sb)
|
s.discardTail(sb)
|
||||||
@@ -807,22 +925,22 @@ func (s *Sparse) SubsetOf(x *Sparse) bool {
|
|||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
sb := s.start()
|
sb := s.first()
|
||||||
xb := x.start()
|
xb := x.first()
|
||||||
for sb != &s.root {
|
for sb != &none {
|
||||||
switch {
|
switch {
|
||||||
case xb == &x.root || xb.offset > sb.offset:
|
case xb == &none || xb.offset > sb.offset:
|
||||||
return false
|
return false
|
||||||
case xb.offset < sb.offset:
|
case xb.offset < sb.offset:
|
||||||
xb = xb.next
|
xb = x.next(xb)
|
||||||
default:
|
default:
|
||||||
for i := range sb.bits {
|
for i := range sb.bits {
|
||||||
if sb.bits[i]&^xb.bits[i] != 0 {
|
if sb.bits[i]&^xb.bits[i] != 0 {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
sb = sb.next
|
sb = s.next(sb)
|
||||||
xb = xb.next
|
xb = x.next(xb)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return true
|
return true
|
||||||
@@ -833,13 +951,13 @@ func (s *Sparse) Equals(t *Sparse) bool {
|
|||||||
if s == t {
|
if s == t {
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
sb := s.start()
|
sb := s.first()
|
||||||
tb := t.start()
|
tb := t.first()
|
||||||
for {
|
for {
|
||||||
switch {
|
switch {
|
||||||
case sb == &s.root && tb == &t.root:
|
case sb == &none && tb == &none:
|
||||||
return true
|
return true
|
||||||
case sb == &s.root || tb == &t.root:
|
case sb == &none || tb == &none:
|
||||||
return false
|
return false
|
||||||
case sb.offset != tb.offset:
|
case sb.offset != tb.offset:
|
||||||
return false
|
return false
|
||||||
@@ -847,8 +965,8 @@ func (s *Sparse) Equals(t *Sparse) bool {
|
|||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
sb = sb.next
|
sb = s.next(sb)
|
||||||
tb = tb.next
|
tb = t.next(tb)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -913,7 +1031,7 @@ func (s *Sparse) BitString() string {
|
|||||||
//
|
//
|
||||||
func (s *Sparse) GoString() string {
|
func (s *Sparse) GoString() string {
|
||||||
var buf bytes.Buffer
|
var buf bytes.Buffer
|
||||||
for b := s.start(); b != &s.root; b = b.next {
|
for b := s.first(); b != &none; b = s.next(b) {
|
||||||
fmt.Fprintf(&buf, "block %p {offset=%d next=%p prev=%p",
|
fmt.Fprintf(&buf, "block %p {offset=%d next=%p prev=%p",
|
||||||
b, b.offset, b.next, b.prev)
|
b, b.offset, b.next, b.prev)
|
||||||
for _, w := range b.bits {
|
for _, w := range b.bits {
|
||||||
@@ -937,13 +1055,18 @@ func (s *Sparse) AppendTo(slice []int) []int {
|
|||||||
|
|
||||||
// check returns an error if the representation invariants of s are violated.
|
// check returns an error if the representation invariants of s are violated.
|
||||||
func (s *Sparse) check() error {
|
func (s *Sparse) check() error {
|
||||||
if !s.root.empty() {
|
s.init()
|
||||||
return fmt.Errorf("non-empty root block")
|
if s.root.empty() {
|
||||||
|
// An empty set must have only the root block with offset MaxInt.
|
||||||
|
if s.root.next != &s.root {
|
||||||
|
return fmt.Errorf("multiple blocks with empty root block")
|
||||||
}
|
}
|
||||||
if s.root.offset != 0 {
|
if s.root.offset != MaxInt {
|
||||||
return fmt.Errorf("root block has non-zero offset %d", s.root.offset)
|
return fmt.Errorf("empty set has offset %d, should be MaxInt", s.root.offset)
|
||||||
}
|
}
|
||||||
for b := s.start(); b != &s.root; b = b.next {
|
return nil
|
||||||
|
}
|
||||||
|
for b := s.first(); ; b = s.next(b) {
|
||||||
if b.offset%bitsPerBlock != 0 {
|
if b.offset%bitsPerBlock != 0 {
|
||||||
return fmt.Errorf("bad offset modulo: %d", b.offset)
|
return fmt.Errorf("bad offset modulo: %d", b.offset)
|
||||||
}
|
}
|
||||||
@@ -956,11 +1079,12 @@ func (s *Sparse) check() error {
|
|||||||
if b.next.prev != b {
|
if b.next.prev != b {
|
||||||
return fmt.Errorf("bad next.prev link")
|
return fmt.Errorf("bad next.prev link")
|
||||||
}
|
}
|
||||||
if b.prev != &s.root {
|
if b.next == &s.root {
|
||||||
if b.offset <= b.prev.offset {
|
break
|
||||||
return fmt.Errorf("bad offset order: b.offset=%d, prev.offset=%d",
|
|
||||||
b.offset, b.prev.offset)
|
|
||||||
}
|
}
|
||||||
|
if b.offset >= b.next.offset {
|
||||||
|
return fmt.Errorf("bad offset order: b.offset=%d, b.next.offset=%d",
|
||||||
|
b.offset, b.next.offset)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
|
|||||||
1
vendor/golang.org/x/tools/go/ast/astutil/BUILD
generated
vendored
1
vendor/golang.org/x/tools/go/ast/astutil/BUILD
generated
vendored
@@ -5,6 +5,7 @@ go_library(
|
|||||||
srcs = [
|
srcs = [
|
||||||
"enclosing.go",
|
"enclosing.go",
|
||||||
"imports.go",
|
"imports.go",
|
||||||
|
"rewrite.go",
|
||||||
"util.go",
|
"util.go",
|
||||||
],
|
],
|
||||||
importmap = "k8s.io/kubernetes/vendor/golang.org/x/tools/go/ast/astutil",
|
importmap = "k8s.io/kubernetes/vendor/golang.org/x/tools/go/ast/astutil",
|
||||||
|
|||||||
85
vendor/golang.org/x/tools/go/ast/astutil/imports.go
generated
vendored
85
vendor/golang.org/x/tools/go/ast/astutil/imports.go
generated
vendored
@@ -14,26 +14,26 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
// AddImport adds the import path to the file f, if absent.
|
// AddImport adds the import path to the file f, if absent.
|
||||||
func AddImport(fset *token.FileSet, f *ast.File, ipath string) (added bool) {
|
func AddImport(fset *token.FileSet, f *ast.File, path string) (added bool) {
|
||||||
return AddNamedImport(fset, f, "", ipath)
|
return AddNamedImport(fset, f, "", path)
|
||||||
}
|
}
|
||||||
|
|
||||||
// AddNamedImport adds the import path to the file f, if absent.
|
// AddNamedImport adds the import with the given name and path to the file f, if absent.
|
||||||
// If name is not empty, it is used to rename the import.
|
// If name is not empty, it is used to rename the import.
|
||||||
//
|
//
|
||||||
// For example, calling
|
// For example, calling
|
||||||
// AddNamedImport(fset, f, "pathpkg", "path")
|
// AddNamedImport(fset, f, "pathpkg", "path")
|
||||||
// adds
|
// adds
|
||||||
// import pathpkg "path"
|
// import pathpkg "path"
|
||||||
func AddNamedImport(fset *token.FileSet, f *ast.File, name, ipath string) (added bool) {
|
func AddNamedImport(fset *token.FileSet, f *ast.File, name, path string) (added bool) {
|
||||||
if imports(f, ipath) {
|
if imports(f, name, path) {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
newImport := &ast.ImportSpec{
|
newImport := &ast.ImportSpec{
|
||||||
Path: &ast.BasicLit{
|
Path: &ast.BasicLit{
|
||||||
Kind: token.STRING,
|
Kind: token.STRING,
|
||||||
Value: strconv.Quote(ipath),
|
Value: strconv.Quote(path),
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
if name != "" {
|
if name != "" {
|
||||||
@@ -43,12 +43,14 @@ func AddNamedImport(fset *token.FileSet, f *ast.File, name, ipath string) (added
|
|||||||
// Find an import decl to add to.
|
// Find an import decl to add to.
|
||||||
// The goal is to find an existing import
|
// The goal is to find an existing import
|
||||||
// whose import path has the longest shared
|
// whose import path has the longest shared
|
||||||
// prefix with ipath.
|
// prefix with path.
|
||||||
var (
|
var (
|
||||||
bestMatch = -1 // length of longest shared prefix
|
bestMatch = -1 // length of longest shared prefix
|
||||||
lastImport = -1 // index in f.Decls of the file's final import decl
|
lastImport = -1 // index in f.Decls of the file's final import decl
|
||||||
impDecl *ast.GenDecl // import decl containing the best match
|
impDecl *ast.GenDecl // import decl containing the best match
|
||||||
impIndex = -1 // spec index in impDecl containing the best match
|
impIndex = -1 // spec index in impDecl containing the best match
|
||||||
|
|
||||||
|
isThirdPartyPath = isThirdParty(path)
|
||||||
)
|
)
|
||||||
for i, decl := range f.Decls {
|
for i, decl := range f.Decls {
|
||||||
gen, ok := decl.(*ast.GenDecl)
|
gen, ok := decl.(*ast.GenDecl)
|
||||||
@@ -65,15 +67,27 @@ func AddNamedImport(fset *token.FileSet, f *ast.File, name, ipath string) (added
|
|||||||
impDecl = gen
|
impDecl = gen
|
||||||
}
|
}
|
||||||
|
|
||||||
// Compute longest shared prefix with imports in this group.
|
// Compute longest shared prefix with imports in this group and find best
|
||||||
|
// matched import spec.
|
||||||
|
// 1. Always prefer import spec with longest shared prefix.
|
||||||
|
// 2. While match length is 0,
|
||||||
|
// - for stdlib package: prefer first import spec.
|
||||||
|
// - for third party package: prefer first third party import spec.
|
||||||
|
// We cannot use last import spec as best match for third party package
|
||||||
|
// because grouped imports are usually placed last by goimports -local
|
||||||
|
// flag.
|
||||||
|
// See issue #19190.
|
||||||
|
seenAnyThirdParty := false
|
||||||
for j, spec := range gen.Specs {
|
for j, spec := range gen.Specs {
|
||||||
impspec := spec.(*ast.ImportSpec)
|
impspec := spec.(*ast.ImportSpec)
|
||||||
n := matchLen(importPath(impspec), ipath)
|
p := importPath(impspec)
|
||||||
if n > bestMatch {
|
n := matchLen(p, path)
|
||||||
|
if n > bestMatch || (bestMatch == 0 && !seenAnyThirdParty && isThirdPartyPath) {
|
||||||
bestMatch = n
|
bestMatch = n
|
||||||
impDecl = gen
|
impDecl = gen
|
||||||
impIndex = j
|
impIndex = j
|
||||||
}
|
}
|
||||||
|
seenAnyThirdParty = seenAnyThirdParty || isThirdParty(p)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -87,8 +101,8 @@ func AddNamedImport(fset *token.FileSet, f *ast.File, name, ipath string) (added
|
|||||||
impDecl.TokPos = f.Decls[lastImport].End()
|
impDecl.TokPos = f.Decls[lastImport].End()
|
||||||
} else {
|
} else {
|
||||||
// There are no existing imports.
|
// There are no existing imports.
|
||||||
// Our new import goes after the package declaration and after
|
// Our new import, preceded by a blank line, goes after the package declaration
|
||||||
// the comment, if any, that starts on the same line as the
|
// and after the comment, if any, that starts on the same line as the
|
||||||
// package declaration.
|
// package declaration.
|
||||||
impDecl.TokPos = f.Package
|
impDecl.TokPos = f.Package
|
||||||
|
|
||||||
@@ -98,7 +112,8 @@ func AddNamedImport(fset *token.FileSet, f *ast.File, name, ipath string) (added
|
|||||||
if file.Line(c.Pos()) > pkgLine {
|
if file.Line(c.Pos()) > pkgLine {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
impDecl.TokPos = c.End()
|
// +2 for a blank line
|
||||||
|
impDecl.TokPos = c.End() + 2
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
f.Decls = append(f.Decls, nil)
|
f.Decls = append(f.Decls, nil)
|
||||||
@@ -175,12 +190,20 @@ func AddNamedImport(fset *token.FileSet, f *ast.File, name, ipath string) (added
|
|||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func isThirdParty(importPath string) bool {
|
||||||
|
// Third party package import path usually contains "." (".com", ".org", ...)
|
||||||
|
// This logic is taken from golang.org/x/tools/imports package.
|
||||||
|
return strings.Contains(importPath, ".")
|
||||||
|
}
|
||||||
|
|
||||||
// DeleteImport deletes the import path from the file f, if present.
|
// DeleteImport deletes the import path from the file f, if present.
|
||||||
|
// If there are duplicate import declarations, all matching ones are deleted.
|
||||||
func DeleteImport(fset *token.FileSet, f *ast.File, path string) (deleted bool) {
|
func DeleteImport(fset *token.FileSet, f *ast.File, path string) (deleted bool) {
|
||||||
return DeleteNamedImport(fset, f, "", path)
|
return DeleteNamedImport(fset, f, "", path)
|
||||||
}
|
}
|
||||||
|
|
||||||
// DeleteNamedImport deletes the import with the given name and path from the file f, if present.
|
// DeleteNamedImport deletes the import with the given name and path from the file f, if present.
|
||||||
|
// If there are duplicate import declarations, all matching ones are deleted.
|
||||||
func DeleteNamedImport(fset *token.FileSet, f *ast.File, name, path string) (deleted bool) {
|
func DeleteNamedImport(fset *token.FileSet, f *ast.File, name, path string) (deleted bool) {
|
||||||
var delspecs []*ast.ImportSpec
|
var delspecs []*ast.ImportSpec
|
||||||
var delcomments []*ast.CommentGroup
|
var delcomments []*ast.CommentGroup
|
||||||
@@ -195,13 +218,7 @@ func DeleteNamedImport(fset *token.FileSet, f *ast.File, name, path string) (del
|
|||||||
for j := 0; j < len(gen.Specs); j++ {
|
for j := 0; j < len(gen.Specs); j++ {
|
||||||
spec := gen.Specs[j]
|
spec := gen.Specs[j]
|
||||||
impspec := spec.(*ast.ImportSpec)
|
impspec := spec.(*ast.ImportSpec)
|
||||||
if impspec.Name == nil && name != "" {
|
if importName(impspec) != name || importPath(impspec) != path {
|
||||||
continue
|
|
||||||
}
|
|
||||||
if impspec.Name != nil && impspec.Name.Name != name {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if importPath(impspec) != path {
|
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -262,7 +279,7 @@ func DeleteNamedImport(fset *token.FileSet, f *ast.File, name, path string) (del
|
|||||||
// There was a blank line immediately preceding the deleted import,
|
// There was a blank line immediately preceding the deleted import,
|
||||||
// so there's no need to close the hole.
|
// so there's no need to close the hole.
|
||||||
// Do nothing.
|
// Do nothing.
|
||||||
} else {
|
} else if line != fset.File(gen.Rparen).LineCount() {
|
||||||
// There was no blank line. Close the hole.
|
// There was no blank line. Close the hole.
|
||||||
fset.File(gen.Rparen).MergeLine(line)
|
fset.File(gen.Rparen).MergeLine(line)
|
||||||
}
|
}
|
||||||
@@ -362,9 +379,14 @@ func (fn visitFn) Visit(node ast.Node) ast.Visitor {
|
|||||||
return fn
|
return fn
|
||||||
}
|
}
|
||||||
|
|
||||||
// imports returns true if f imports path.
|
// imports reports whether f has an import with the specified name and path.
|
||||||
func imports(f *ast.File, path string) bool {
|
func imports(f *ast.File, name, path string) bool {
|
||||||
return importSpec(f, path) != nil
|
for _, s := range f.Imports {
|
||||||
|
if importName(s) == name && importPath(s) == path {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
// importSpec returns the import spec if f imports path,
|
// importSpec returns the import spec if f imports path,
|
||||||
@@ -378,15 +400,24 @@ func importSpec(f *ast.File, path string) *ast.ImportSpec {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// importName returns the name of s,
|
||||||
|
// or "" if the import is not named.
|
||||||
|
func importName(s *ast.ImportSpec) string {
|
||||||
|
if s.Name == nil {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
return s.Name.Name
|
||||||
|
}
|
||||||
|
|
||||||
// importPath returns the unquoted import path of s,
|
// importPath returns the unquoted import path of s,
|
||||||
// or "" if the path is not properly quoted.
|
// or "" if the path is not properly quoted.
|
||||||
func importPath(s *ast.ImportSpec) string {
|
func importPath(s *ast.ImportSpec) string {
|
||||||
t, err := strconv.Unquote(s.Path.Value)
|
t, err := strconv.Unquote(s.Path.Value)
|
||||||
if err == nil {
|
if err != nil {
|
||||||
return t
|
|
||||||
}
|
|
||||||
return ""
|
return ""
|
||||||
}
|
}
|
||||||
|
return t
|
||||||
|
}
|
||||||
|
|
||||||
// declImports reports whether gen contains an import of path.
|
// declImports reports whether gen contains an import of path.
|
||||||
func declImports(gen *ast.GenDecl, path string) bool {
|
func declImports(gen *ast.GenDecl, path string) bool {
|
||||||
|
|||||||
477
vendor/golang.org/x/tools/go/ast/astutil/rewrite.go
generated
vendored
Normal file
477
vendor/golang.org/x/tools/go/ast/astutil/rewrite.go
generated
vendored
Normal file
@@ -0,0 +1,477 @@
|
|||||||
|
// Copyright 2017 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package astutil
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"go/ast"
|
||||||
|
"reflect"
|
||||||
|
"sort"
|
||||||
|
)
|
||||||
|
|
||||||
|
// An ApplyFunc is invoked by Apply for each node n, even if n is nil,
|
||||||
|
// before and/or after the node's children, using a Cursor describing
|
||||||
|
// the current node and providing operations on it.
|
||||||
|
//
|
||||||
|
// The return value of ApplyFunc controls the syntax tree traversal.
|
||||||
|
// See Apply for details.
|
||||||
|
type ApplyFunc func(*Cursor) bool
|
||||||
|
|
||||||
|
// Apply traverses a syntax tree recursively, starting with root,
|
||||||
|
// and calling pre and post for each node as described below.
|
||||||
|
// Apply returns the syntax tree, possibly modified.
|
||||||
|
//
|
||||||
|
// If pre is not nil, it is called for each node before the node's
|
||||||
|
// children are traversed (pre-order). If pre returns false, no
|
||||||
|
// children are traversed, and post is not called for that node.
|
||||||
|
//
|
||||||
|
// If post is not nil, and a prior call of pre didn't return false,
|
||||||
|
// post is called for each node after its children are traversed
|
||||||
|
// (post-order). If post returns false, traversal is terminated and
|
||||||
|
// Apply returns immediately.
|
||||||
|
//
|
||||||
|
// Only fields that refer to AST nodes are considered children;
|
||||||
|
// i.e., token.Pos, Scopes, Objects, and fields of basic types
|
||||||
|
// (strings, etc.) are ignored.
|
||||||
|
//
|
||||||
|
// Children are traversed in the order in which they appear in the
|
||||||
|
// respective node's struct definition. A package's files are
|
||||||
|
// traversed in the filenames' alphabetical order.
|
||||||
|
//
|
||||||
|
func Apply(root ast.Node, pre, post ApplyFunc) (result ast.Node) {
|
||||||
|
parent := &struct{ ast.Node }{root}
|
||||||
|
defer func() {
|
||||||
|
if r := recover(); r != nil && r != abort {
|
||||||
|
panic(r)
|
||||||
|
}
|
||||||
|
result = parent.Node
|
||||||
|
}()
|
||||||
|
a := &application{pre: pre, post: post}
|
||||||
|
a.apply(parent, "Node", nil, root)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
var abort = new(int) // singleton, to signal termination of Apply
|
||||||
|
|
||||||
|
// A Cursor describes a node encountered during Apply.
|
||||||
|
// Information about the node and its parent is available
|
||||||
|
// from the Node, Parent, Name, and Index methods.
|
||||||
|
//
|
||||||
|
// If p is a variable of type and value of the current parent node
|
||||||
|
// c.Parent(), and f is the field identifier with name c.Name(),
|
||||||
|
// the following invariants hold:
|
||||||
|
//
|
||||||
|
// p.f == c.Node() if c.Index() < 0
|
||||||
|
// p.f[c.Index()] == c.Node() if c.Index() >= 0
|
||||||
|
//
|
||||||
|
// The methods Replace, Delete, InsertBefore, and InsertAfter
|
||||||
|
// can be used to change the AST without disrupting Apply.
|
||||||
|
type Cursor struct {
|
||||||
|
parent ast.Node
|
||||||
|
name string
|
||||||
|
iter *iterator // valid if non-nil
|
||||||
|
node ast.Node
|
||||||
|
}
|
||||||
|
|
||||||
|
// Node returns the current Node.
|
||||||
|
func (c *Cursor) Node() ast.Node { return c.node }
|
||||||
|
|
||||||
|
// Parent returns the parent of the current Node.
|
||||||
|
func (c *Cursor) Parent() ast.Node { return c.parent }
|
||||||
|
|
||||||
|
// Name returns the name of the parent Node field that contains the current Node.
|
||||||
|
// If the parent is a *ast.Package and the current Node is a *ast.File, Name returns
|
||||||
|
// the filename for the current Node.
|
||||||
|
func (c *Cursor) Name() string { return c.name }
|
||||||
|
|
||||||
|
// Index reports the index >= 0 of the current Node in the slice of Nodes that
|
||||||
|
// contains it, or a value < 0 if the current Node is not part of a slice.
|
||||||
|
// The index of the current node changes if InsertBefore is called while
|
||||||
|
// processing the current node.
|
||||||
|
func (c *Cursor) Index() int {
|
||||||
|
if c.iter != nil {
|
||||||
|
return c.iter.index
|
||||||
|
}
|
||||||
|
return -1
|
||||||
|
}
|
||||||
|
|
||||||
|
// field returns the current node's parent field value.
|
||||||
|
func (c *Cursor) field() reflect.Value {
|
||||||
|
return reflect.Indirect(reflect.ValueOf(c.parent)).FieldByName(c.name)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Replace replaces the current Node with n.
|
||||||
|
// The replacement node is not walked by Apply.
|
||||||
|
func (c *Cursor) Replace(n ast.Node) {
|
||||||
|
if _, ok := c.node.(*ast.File); ok {
|
||||||
|
file, ok := n.(*ast.File)
|
||||||
|
if !ok {
|
||||||
|
panic("attempt to replace *ast.File with non-*ast.File")
|
||||||
|
}
|
||||||
|
c.parent.(*ast.Package).Files[c.name] = file
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
v := c.field()
|
||||||
|
if i := c.Index(); i >= 0 {
|
||||||
|
v = v.Index(i)
|
||||||
|
}
|
||||||
|
v.Set(reflect.ValueOf(n))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Delete deletes the current Node from its containing slice.
|
||||||
|
// If the current Node is not part of a slice, Delete panics.
|
||||||
|
// As a special case, if the current node is a package file,
|
||||||
|
// Delete removes it from the package's Files map.
|
||||||
|
func (c *Cursor) Delete() {
|
||||||
|
if _, ok := c.node.(*ast.File); ok {
|
||||||
|
delete(c.parent.(*ast.Package).Files, c.name)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
i := c.Index()
|
||||||
|
if i < 0 {
|
||||||
|
panic("Delete node not contained in slice")
|
||||||
|
}
|
||||||
|
v := c.field()
|
||||||
|
l := v.Len()
|
||||||
|
reflect.Copy(v.Slice(i, l), v.Slice(i+1, l))
|
||||||
|
v.Index(l - 1).Set(reflect.Zero(v.Type().Elem()))
|
||||||
|
v.SetLen(l - 1)
|
||||||
|
c.iter.step--
|
||||||
|
}
|
||||||
|
|
||||||
|
// InsertAfter inserts n after the current Node in its containing slice.
|
||||||
|
// If the current Node is not part of a slice, InsertAfter panics.
|
||||||
|
// Apply does not walk n.
|
||||||
|
func (c *Cursor) InsertAfter(n ast.Node) {
|
||||||
|
i := c.Index()
|
||||||
|
if i < 0 {
|
||||||
|
panic("InsertAfter node not contained in slice")
|
||||||
|
}
|
||||||
|
v := c.field()
|
||||||
|
v.Set(reflect.Append(v, reflect.Zero(v.Type().Elem())))
|
||||||
|
l := v.Len()
|
||||||
|
reflect.Copy(v.Slice(i+2, l), v.Slice(i+1, l))
|
||||||
|
v.Index(i + 1).Set(reflect.ValueOf(n))
|
||||||
|
c.iter.step++
|
||||||
|
}
|
||||||
|
|
||||||
|
// InsertBefore inserts n before the current Node in its containing slice.
|
||||||
|
// If the current Node is not part of a slice, InsertBefore panics.
|
||||||
|
// Apply will not walk n.
|
||||||
|
func (c *Cursor) InsertBefore(n ast.Node) {
|
||||||
|
i := c.Index()
|
||||||
|
if i < 0 {
|
||||||
|
panic("InsertBefore node not contained in slice")
|
||||||
|
}
|
||||||
|
v := c.field()
|
||||||
|
v.Set(reflect.Append(v, reflect.Zero(v.Type().Elem())))
|
||||||
|
l := v.Len()
|
||||||
|
reflect.Copy(v.Slice(i+1, l), v.Slice(i, l))
|
||||||
|
v.Index(i).Set(reflect.ValueOf(n))
|
||||||
|
c.iter.index++
|
||||||
|
}
|
||||||
|
|
||||||
|
// application carries all the shared data so we can pass it around cheaply.
type application struct {
	pre, post ApplyFunc
	cursor    Cursor   // reused for every node to avoid a per-call heap allocation
	iter      iterator // reused for every slice traversal (see applyList)
}
|
||||||
|
|
||||||
|
// apply visits node n, which lives in parent's field called name (at slice
// position iter.index if iter is non-nil): it invokes the pre callback,
// walks n's children in declaration order, then invokes the post callback.
func (a *application) apply(parent ast.Node, name string, iter *iterator, n ast.Node) {
	// convert typed nil into untyped nil
	if v := reflect.ValueOf(n); v.Kind() == reflect.Ptr && v.IsNil() {
		n = nil
	}

	// avoid heap-allocating a new cursor for each apply call; reuse a.cursor instead
	saved := a.cursor
	a.cursor.parent = parent
	a.cursor.name = name
	a.cursor.iter = iter
	a.cursor.node = n

	// pre returning false means: skip this subtree (children and post).
	if a.pre != nil && !a.pre(&a.cursor) {
		a.cursor = saved
		return
	}

	// walk children
	// (the order of the cases matches the order of the corresponding node types in go/ast)
	switch n := n.(type) {
	case nil:
		// nothing to do

	// Comments and fields
	case *ast.Comment:
		// nothing to do

	case *ast.CommentGroup:
		if n != nil {
			a.applyList(n, "List")
		}

	case *ast.Field:
		a.apply(n, "Doc", nil, n.Doc)
		a.applyList(n, "Names")
		a.apply(n, "Type", nil, n.Type)
		a.apply(n, "Tag", nil, n.Tag)
		a.apply(n, "Comment", nil, n.Comment)

	case *ast.FieldList:
		a.applyList(n, "List")

	// Expressions
	case *ast.BadExpr, *ast.Ident, *ast.BasicLit:
		// nothing to do

	case *ast.Ellipsis:
		a.apply(n, "Elt", nil, n.Elt)

	case *ast.FuncLit:
		a.apply(n, "Type", nil, n.Type)
		a.apply(n, "Body", nil, n.Body)

	case *ast.CompositeLit:
		a.apply(n, "Type", nil, n.Type)
		a.applyList(n, "Elts")

	case *ast.ParenExpr:
		a.apply(n, "X", nil, n.X)

	case *ast.SelectorExpr:
		a.apply(n, "X", nil, n.X)
		a.apply(n, "Sel", nil, n.Sel)

	case *ast.IndexExpr:
		a.apply(n, "X", nil, n.X)
		a.apply(n, "Index", nil, n.Index)

	case *ast.SliceExpr:
		a.apply(n, "X", nil, n.X)
		a.apply(n, "Low", nil, n.Low)
		a.apply(n, "High", nil, n.High)
		a.apply(n, "Max", nil, n.Max)

	case *ast.TypeAssertExpr:
		a.apply(n, "X", nil, n.X)
		a.apply(n, "Type", nil, n.Type)

	case *ast.CallExpr:
		a.apply(n, "Fun", nil, n.Fun)
		a.applyList(n, "Args")

	case *ast.StarExpr:
		a.apply(n, "X", nil, n.X)

	case *ast.UnaryExpr:
		a.apply(n, "X", nil, n.X)

	case *ast.BinaryExpr:
		a.apply(n, "X", nil, n.X)
		a.apply(n, "Y", nil, n.Y)

	case *ast.KeyValueExpr:
		a.apply(n, "Key", nil, n.Key)
		a.apply(n, "Value", nil, n.Value)

	// Types
	case *ast.ArrayType:
		a.apply(n, "Len", nil, n.Len)
		a.apply(n, "Elt", nil, n.Elt)

	case *ast.StructType:
		a.apply(n, "Fields", nil, n.Fields)

	case *ast.FuncType:
		a.apply(n, "Params", nil, n.Params)
		a.apply(n, "Results", nil, n.Results)

	case *ast.InterfaceType:
		a.apply(n, "Methods", nil, n.Methods)

	case *ast.MapType:
		a.apply(n, "Key", nil, n.Key)
		a.apply(n, "Value", nil, n.Value)

	case *ast.ChanType:
		a.apply(n, "Value", nil, n.Value)

	// Statements
	case *ast.BadStmt:
		// nothing to do

	case *ast.DeclStmt:
		a.apply(n, "Decl", nil, n.Decl)

	case *ast.EmptyStmt:
		// nothing to do

	case *ast.LabeledStmt:
		a.apply(n, "Label", nil, n.Label)
		a.apply(n, "Stmt", nil, n.Stmt)

	case *ast.ExprStmt:
		a.apply(n, "X", nil, n.X)

	case *ast.SendStmt:
		a.apply(n, "Chan", nil, n.Chan)
		a.apply(n, "Value", nil, n.Value)

	case *ast.IncDecStmt:
		a.apply(n, "X", nil, n.X)

	case *ast.AssignStmt:
		a.applyList(n, "Lhs")
		a.applyList(n, "Rhs")

	case *ast.GoStmt:
		a.apply(n, "Call", nil, n.Call)

	case *ast.DeferStmt:
		a.apply(n, "Call", nil, n.Call)

	case *ast.ReturnStmt:
		a.applyList(n, "Results")

	case *ast.BranchStmt:
		a.apply(n, "Label", nil, n.Label)

	case *ast.BlockStmt:
		a.applyList(n, "List")

	case *ast.IfStmt:
		a.apply(n, "Init", nil, n.Init)
		a.apply(n, "Cond", nil, n.Cond)
		a.apply(n, "Body", nil, n.Body)
		a.apply(n, "Else", nil, n.Else)

	case *ast.CaseClause:
		a.applyList(n, "List")
		a.applyList(n, "Body")

	case *ast.SwitchStmt:
		a.apply(n, "Init", nil, n.Init)
		a.apply(n, "Tag", nil, n.Tag)
		a.apply(n, "Body", nil, n.Body)

	case *ast.TypeSwitchStmt:
		a.apply(n, "Init", nil, n.Init)
		a.apply(n, "Assign", nil, n.Assign)
		a.apply(n, "Body", nil, n.Body)

	case *ast.CommClause:
		a.apply(n, "Comm", nil, n.Comm)
		a.applyList(n, "Body")

	case *ast.SelectStmt:
		a.apply(n, "Body", nil, n.Body)

	case *ast.ForStmt:
		a.apply(n, "Init", nil, n.Init)
		a.apply(n, "Cond", nil, n.Cond)
		a.apply(n, "Post", nil, n.Post)
		a.apply(n, "Body", nil, n.Body)

	case *ast.RangeStmt:
		a.apply(n, "Key", nil, n.Key)
		a.apply(n, "Value", nil, n.Value)
		a.apply(n, "X", nil, n.X)
		a.apply(n, "Body", nil, n.Body)

	// Declarations
	case *ast.ImportSpec:
		a.apply(n, "Doc", nil, n.Doc)
		a.apply(n, "Name", nil, n.Name)
		a.apply(n, "Path", nil, n.Path)
		a.apply(n, "Comment", nil, n.Comment)

	case *ast.ValueSpec:
		a.apply(n, "Doc", nil, n.Doc)
		a.applyList(n, "Names")
		a.apply(n, "Type", nil, n.Type)
		a.applyList(n, "Values")
		a.apply(n, "Comment", nil, n.Comment)

	case *ast.TypeSpec:
		a.apply(n, "Doc", nil, n.Doc)
		a.apply(n, "Name", nil, n.Name)
		a.apply(n, "Type", nil, n.Type)
		a.apply(n, "Comment", nil, n.Comment)

	case *ast.BadDecl:
		// nothing to do

	case *ast.GenDecl:
		a.apply(n, "Doc", nil, n.Doc)
		a.applyList(n, "Specs")

	case *ast.FuncDecl:
		a.apply(n, "Doc", nil, n.Doc)
		a.apply(n, "Recv", nil, n.Recv)
		a.apply(n, "Name", nil, n.Name)
		a.apply(n, "Type", nil, n.Type)
		a.apply(n, "Body", nil, n.Body)

	// Files and packages
	case *ast.File:
		a.apply(n, "Doc", nil, n.Doc)
		a.apply(n, "Name", nil, n.Name)
		a.applyList(n, "Decls")
		// Don't walk n.Comments; they have either been walked already if
		// they are Doc comments, or they can be easily walked explicitly.

	case *ast.Package:
		// collect and sort names for reproducible behavior
		var names []string
		for name := range n.Files {
			names = append(names, name)
		}
		sort.Strings(names)
		for _, name := range names {
			a.apply(n, name, nil, n.Files[name])
		}

	default:
		panic(fmt.Sprintf("Apply: unexpected node type %T", n))
	}

	// post returning false aborts the entire traversal via the abort
	// sentinel panic, recovered in Apply.
	if a.post != nil && !a.post(&a.cursor) {
		panic(abort)
	}

	a.cursor = saved
}
|
||||||
|
|
||||||
|
// An iterator controls iteration over a slice of nodes.
type iterator struct {
	// index is the position of the current element; step is the amount to
	// advance afterwards (adjusted by the cursor's Delete/InsertAfter).
	index, step int
}
|
||||||
|
|
||||||
|
// applyList applies a.apply to every element of parent's slice field with
// the given name, honoring index/step adjustments made through the cursor.
func (a *application) applyList(parent ast.Node, name string) {
	// avoid heap-allocating a new iterator for each applyList call; reuse a.iter instead
	saved := a.iter
	a.iter.index = 0
	for {
		// must reload parent.name each time, since cursor modifications might change it
		v := reflect.Indirect(reflect.ValueOf(parent)).FieldByName(name)
		if a.iter.index >= v.Len() {
			break
		}

		// element x may be nil in a bad AST - be cautious
		var x ast.Node
		if e := v.Index(a.iter.index); e.IsValid() {
			x = e.Interface().(ast.Node)
		}

		// step defaults to 1; Delete/InsertAfter adjust it via the cursor.
		a.iter.step = 1
		a.apply(parent, name, &a.iter, x)
		a.iter.index += a.iter.step
	}
	a.iter = saved
}
|
||||||
2
vendor/golang.org/x/tools/go/gcexportdata/BUILD
generated
vendored
2
vendor/golang.org/x/tools/go/gcexportdata/BUILD
generated
vendored
@@ -9,7 +9,7 @@ go_library(
|
|||||||
importmap = "k8s.io/kubernetes/vendor/golang.org/x/tools/go/gcexportdata",
|
importmap = "k8s.io/kubernetes/vendor/golang.org/x/tools/go/gcexportdata",
|
||||||
importpath = "golang.org/x/tools/go/gcexportdata",
|
importpath = "golang.org/x/tools/go/gcexportdata",
|
||||||
visibility = ["//visibility:public"],
|
visibility = ["//visibility:public"],
|
||||||
deps = ["//vendor/golang.org/x/tools/go/gcimporter15:go_default_library"],
|
deps = ["//vendor/golang.org/x/tools/go/internal/gcimporter:go_default_library"],
|
||||||
)
|
)
|
||||||
|
|
||||||
filegroup(
|
filegroup(
|
||||||
|
|||||||
23
vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go
generated
vendored
23
vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go
generated
vendored
@@ -7,9 +7,6 @@
|
|||||||
// gc compiler. This package supports go1.7 export data format and all
|
// gc compiler. This package supports go1.7 export data format and all
|
||||||
// later versions.
|
// later versions.
|
||||||
//
|
//
|
||||||
// This package replaces the deprecated golang.org/x/tools/go/gcimporter15
|
|
||||||
// package, which will be deleted in October 2017.
|
|
||||||
//
|
|
||||||
// Although it might seem convenient for this package to live alongside
|
// Although it might seem convenient for this package to live alongside
|
||||||
// go/types in the standard library, this would cause version skew
|
// go/types in the standard library, this would cause version skew
|
||||||
// problems for developer tools that use it, since they must be able to
|
// problems for developer tools that use it, since they must be able to
|
||||||
@@ -19,7 +16,7 @@
|
|||||||
// time before the Go 1.8 release and rebuild and redeploy their
|
// time before the Go 1.8 release and rebuild and redeploy their
|
||||||
// developer tools, which will then be able to consume both Go 1.7 and
|
// developer tools, which will then be able to consume both Go 1.7 and
|
||||||
// Go 1.8 export data files, so they will work before and after the
|
// Go 1.8 export data files, so they will work before and after the
|
||||||
// Go update. (See discussion at https://github.com/golang/go/issues/15651.)
|
// Go update. (See discussion at https://golang.org/issue/15651.)
|
||||||
//
|
//
|
||||||
package gcexportdata
|
package gcexportdata
|
||||||
|
|
||||||
@@ -32,7 +29,7 @@ import (
|
|||||||
"io"
|
"io"
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
|
|
||||||
gcimporter "golang.org/x/tools/go/gcimporter15"
|
"golang.org/x/tools/go/internal/gcimporter"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Find returns the name of an object (.o) or archive (.a) file
|
// Find returns the name of an object (.o) or archive (.a) file
|
||||||
@@ -44,7 +41,7 @@ import (
|
|||||||
//
|
//
|
||||||
// Find also returns the package's resolved (canonical) import path,
|
// Find also returns the package's resolved (canonical) import path,
|
||||||
// reflecting the effects of srcDir and vendoring on importPath.
|
// reflecting the effects of srcDir and vendoring on importPath.
|
||||||
func Find(importPath string, srcDir string) (filename, path string) {
|
func Find(importPath, srcDir string) (filename, path string) {
|
||||||
return gcimporter.FindPkg(importPath, srcDir)
|
return gcimporter.FindPkg(importPath, srcDir)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -88,6 +85,14 @@ func Read(in io.Reader, fset *token.FileSet, imports map[string]*types.Package,
|
|||||||
return gcimporter.ImportData(imports, path, path, bytes.NewReader(data))
|
return gcimporter.ImportData(imports, path, path, bytes.NewReader(data))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// The indexed export format starts with an 'i'; the older
|
||||||
|
// binary export format starts with a 'c', 'd', or 'v'
|
||||||
|
// (from "version"). Select appropriate importer.
|
||||||
|
if len(data) > 0 && data[0] == 'i' {
|
||||||
|
_, pkg, err := gcimporter.IImportData(fset, imports, data[1:], path)
|
||||||
|
return pkg, err
|
||||||
|
}
|
||||||
|
|
||||||
_, pkg, err := gcimporter.BImportData(fset, imports, data, path)
|
_, pkg, err := gcimporter.BImportData(fset, imports, data, path)
|
||||||
return pkg, err
|
return pkg, err
|
||||||
}
|
}
|
||||||
@@ -95,6 +100,10 @@ func Read(in io.Reader, fset *token.FileSet, imports map[string]*types.Package,
|
|||||||
// Write writes encoded type information for the specified package to out.
|
// Write writes encoded type information for the specified package to out.
|
||||||
// The FileSet provides file position information for named objects.
|
// The FileSet provides file position information for named objects.
|
||||||
func Write(out io.Writer, fset *token.FileSet, pkg *types.Package) error {
|
func Write(out io.Writer, fset *token.FileSet, pkg *types.Package) error {
|
||||||
_, err := out.Write(gcimporter.BExportData(fset, pkg))
|
b, err := gcimporter.BExportData(fset, pkg)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
_, err = out.Write(b)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|||||||
99
vendor/golang.org/x/tools/go/gcexportdata/main.go
generated
vendored
Normal file
99
vendor/golang.org/x/tools/go/gcexportdata/main.go
generated
vendored
Normal file
@@ -0,0 +1,99 @@
|
|||||||
|
// Copyright 2017 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build ignore
|
||||||
|
|
||||||
|
// The gcexportdata command is a diagnostic tool that displays the
|
||||||
|
// contents of gc export data files.
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"flag"
|
||||||
|
"fmt"
|
||||||
|
"go/token"
|
||||||
|
"go/types"
|
||||||
|
"log"
|
||||||
|
"os"
|
||||||
|
|
||||||
|
"golang.org/x/tools/go/gcexportdata"
|
||||||
|
"golang.org/x/tools/go/types/typeutil"
|
||||||
|
)
|
||||||
|
|
||||||
|
// packageFlag optionally selects an indirectly mentioned package to print
// instead of the primary package in the export data file.
var packageFlag = flag.String("package", "", "alternative package to print")
|
||||||
|
|
||||||
|
// main reads the gc export data file named on the command line and prints
// every package-level declaration (and, for types, each method) it contains.
func main() {
	log.SetPrefix("gcexportdata: ")
	log.SetFlags(0)
	flag.Usage = func() {
		fmt.Fprintln(os.Stderr, "usage: gcexportdata [-package path] file.a")
	}
	flag.Parse()
	if flag.NArg() != 1 {
		flag.Usage()
		os.Exit(2)
	}
	filename := flag.Args()[0]

	f, err := os.Open(filename)
	if err != nil {
		log.Fatal(err)
	}

	r, err := gcexportdata.NewReader(f)
	if err != nil {
		log.Fatalf("%s: %s", filename, err)
	}

	// Decode the package.
	const primary = "<primary>"
	imports := make(map[string]*types.Package)
	fset := token.NewFileSet()
	pkg, err := gcexportdata.Read(r, fset, imports, primary)
	if err != nil {
		log.Fatalf("%s: %s", filename, err)
	}

	// Optionally select an indirectly mentioned package.
	if *packageFlag != "" {
		pkg = imports[*packageFlag]
		if pkg == nil {
			fmt.Fprintf(os.Stderr, "export data file %s does not mention %s; has:\n",
				filename, *packageFlag)
			// List the packages that are mentioned, to help the user.
			for p := range imports {
				if p != primary {
					fmt.Fprintf(os.Stderr, "\t%s\n", p)
				}
			}
			os.Exit(1)
		}
	}

	// Print all package-level declarations, including non-exported ones.
	fmt.Printf("package %s\n", pkg.Name())
	for _, imp := range pkg.Imports() {
		fmt.Printf("import %q\n", imp.Path())
	}
	// qual suppresses the qualifier for names in the package being printed.
	qual := func(p *types.Package) string {
		if pkg == p {
			return ""
		}
		return p.Name()
	}
	scope := pkg.Scope()
	for _, name := range scope.Names() {
		obj := scope.Lookup(name)
		fmt.Printf("%s: %s\n",
			fset.Position(obj.Pos()),
			types.ObjectString(obj, qual))

		// For types, print each method.
		if _, ok := obj.(*types.TypeName); ok {
			for _, method := range typeutil.IntuitiveMethodSet(obj.Type(), nil) {
				fmt.Printf("%s: %s\n",
					fset.Position(method.Obj().Pos()),
					types.SelectionString(method, qual))
			}
		}
	}
}
|
||||||
13
vendor/golang.org/x/tools/go/gcimporter15/isAlias18.go
generated
vendored
13
vendor/golang.org/x/tools/go/gcimporter15/isAlias18.go
generated
vendored
@@ -1,13 +0,0 @@
|
|||||||
// Copyright 2017 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// +build !go1.9
|
|
||||||
|
|
||||||
package gcimporter
|
|
||||||
|
|
||||||
import "go/types"
|
|
||||||
|
|
||||||
func isAlias(obj *types.TypeName) bool {
|
|
||||||
return false // there are no type aliases before Go 1.9
|
|
||||||
}
|
|
||||||
13
vendor/golang.org/x/tools/go/gcimporter15/isAlias19.go
generated
vendored
13
vendor/golang.org/x/tools/go/gcimporter15/isAlias19.go
generated
vendored
@@ -1,13 +0,0 @@
|
|||||||
// Copyright 2017 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// +build go1.9
|
|
||||||
|
|
||||||
package gcimporter
|
|
||||||
|
|
||||||
import "go/types"
|
|
||||||
|
|
||||||
func isAlias(obj *types.TypeName) bool {
|
|
||||||
return obj.IsAlias()
|
|
||||||
}
|
|
||||||
26
vendor/golang.org/x/tools/go/internal/cgo/BUILD
generated
vendored
Normal file
26
vendor/golang.org/x/tools/go/internal/cgo/BUILD
generated
vendored
Normal file
@@ -0,0 +1,26 @@
|
|||||||
|
load("@io_bazel_rules_go//go:def.bzl", "go_library")
|
||||||
|
|
||||||
|
go_library(
|
||||||
|
name = "go_default_library",
|
||||||
|
srcs = [
|
||||||
|
"cgo.go",
|
||||||
|
"cgo_pkgconfig.go",
|
||||||
|
],
|
||||||
|
importmap = "k8s.io/kubernetes/vendor/golang.org/x/tools/go/internal/cgo",
|
||||||
|
importpath = "golang.org/x/tools/go/internal/cgo",
|
||||||
|
visibility = ["//vendor/golang.org/x/tools/go:__subpackages__"],
|
||||||
|
)
|
||||||
|
|
||||||
|
filegroup(
|
||||||
|
name = "package-srcs",
|
||||||
|
srcs = glob(["**"]),
|
||||||
|
tags = ["automanaged"],
|
||||||
|
visibility = ["//visibility:private"],
|
||||||
|
)
|
||||||
|
|
||||||
|
filegroup(
|
||||||
|
name = "all-srcs",
|
||||||
|
srcs = [":package-srcs"],
|
||||||
|
tags = ["automanaged"],
|
||||||
|
visibility = ["//visibility:public"],
|
||||||
|
)
|
||||||
220
vendor/golang.org/x/tools/go/internal/cgo/cgo.go
generated
vendored
Normal file
220
vendor/golang.org/x/tools/go/internal/cgo/cgo.go
generated
vendored
Normal file
@@ -0,0 +1,220 @@
|
|||||||
|
// Copyright 2013 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package cgo
|
||||||
|
|
||||||
|
// This file handles cgo preprocessing of files containing `import "C"`.
|
||||||
|
//
|
||||||
|
// DESIGN
|
||||||
|
//
|
||||||
|
// The approach taken is to run the cgo processor on the package's
|
||||||
|
// CgoFiles and parse the output, faking the filenames of the
|
||||||
|
// resulting ASTs so that the synthetic file containing the C types is
|
||||||
|
// called "C" (e.g. "~/go/src/net/C") and the preprocessed files
|
||||||
|
// have their original names (e.g. "~/go/src/net/cgo_unix.go"),
|
||||||
|
// not the names of the actual temporary files.
|
||||||
|
//
|
||||||
|
// The advantage of this approach is its fidelity to 'go build'. The
|
||||||
|
// downside is that the token.Position.Offset for each AST node is
|
||||||
|
// incorrect, being an offset within the temporary file. Line numbers
|
||||||
|
// should still be correct because of the //line comments.
|
||||||
|
//
|
||||||
|
// The logic of this file is mostly plundered from the 'go build'
|
||||||
|
// tool, which also invokes the cgo preprocessor.
|
||||||
|
//
|
||||||
|
//
|
||||||
|
// REJECTED ALTERNATIVE
|
||||||
|
//
|
||||||
|
// An alternative approach that we explored is to extend go/types'
|
||||||
|
// Importer mechanism to provide the identity of the importing package
|
||||||
|
// so that each time `import "C"` appears it resolves to a different
|
||||||
|
// synthetic package containing just the objects needed in that case.
|
||||||
|
// The loader would invoke cgo but parse only the cgo_types.go file
|
||||||
|
// defining the package-level objects, discarding the other files
|
||||||
|
// resulting from preprocessing.
|
||||||
|
//
|
||||||
|
// The benefit of this approach would have been that source-level
|
||||||
|
// syntax information would correspond exactly to the original cgo
|
||||||
|
// file, with no preprocessing involved, making source tools like
|
||||||
|
// godoc, guru, and eg happy. However, the approach was rejected
|
||||||
|
// due to the additional complexity it would impose on go/types. (It
|
||||||
|
// made for a beautiful demo, though.)
|
||||||
|
//
|
||||||
|
// cgo files, despite their *.go extension, are not legal Go source
|
||||||
|
// files per the specification since they may refer to unexported
|
||||||
|
// members of package "C" such as C.int. Also, a function such as
|
||||||
|
// C.getpwent has in effect two types, one matching its C type and one
|
||||||
|
// which additionally returns (errno C.int). The cgo preprocessor
|
||||||
|
// uses name mangling to distinguish these two functions in the
|
||||||
|
// processed code, but go/types would need to duplicate this logic in
|
||||||
|
// its handling of function calls, analogous to the treatment of map
|
||||||
|
// lookups in which y=m[k] and y,ok=m[k] are both legal.
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"go/ast"
|
||||||
|
"go/build"
|
||||||
|
"go/parser"
|
||||||
|
"go/token"
|
||||||
|
"io/ioutil"
|
||||||
|
"log"
|
||||||
|
"os"
|
||||||
|
"os/exec"
|
||||||
|
"path/filepath"
|
||||||
|
"regexp"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ProcessFiles invokes the cgo preprocessor on bp.CgoFiles, parses
// the output and returns the resulting ASTs.
//
func ProcessFiles(bp *build.Package, fset *token.FileSet, DisplayPath func(path string) string, mode parser.Mode) ([]*ast.File, error) {
	// Preprocessed output goes into a throwaway temp directory.
	tmpdir, err := ioutil.TempDir("", strings.Replace(bp.ImportPath, "/", "_", -1)+"_C")
	if err != nil {
		return nil, err
	}
	defer os.RemoveAll(tmpdir)

	pkgdir := bp.Dir
	if DisplayPath != nil {
		pkgdir = DisplayPath(pkgdir)
	}

	cgoFiles, cgoDisplayFiles, err := Run(bp, pkgdir, tmpdir, false)
	if err != nil {
		return nil, err
	}
	var files []*ast.File
	for i := range cgoFiles {
		rd, err := os.Open(cgoFiles[i])
		if err != nil {
			return nil, err
		}
		// Parse the temporary file, but record positions under its
		// corresponding display (original) name.
		display := filepath.Join(bp.Dir, cgoDisplayFiles[i])
		f, err := parser.ParseFile(fset, display, rd, mode)
		rd.Close()
		if err != nil {
			return nil, err
		}
		files = append(files, f)
	}
	return files, nil
}
|
||||||
|
|
||||||
|
// cgoRe matches characters that must be rewritten to build the mangled
// output file names cgo produces (see the fn -> f rewrite in Run).
var cgoRe = regexp.MustCompile(`[/\\:]`)

// Run invokes the cgo preprocessor on bp.CgoFiles and returns two
// lists of files: the resulting processed files (in temporary
// directory tmpdir) and the corresponding names of the unprocessed files.
//
// Run is adapted from (*builder).cgo in
// $GOROOT/src/cmd/go/build.go, but these features are unsupported:
// Objective C, CGOPKGPATH, CGO_FLAGS.
//
// If useabs is set to true, absolute paths of the bp.CgoFiles will be passed in
// to the cgo preprocessor. This in turn will set the // line comments
// referring to those files to use absolute paths. This is needed for
// go/packages using the legacy go list support so it is able to find
// the original files.
func Run(bp *build.Package, pkgdir, tmpdir string, useabs bool) (files, displayFiles []string, err error) {
	cgoCPPFLAGS, _, _, _ := cflags(bp, true)
	_, cgoexeCFLAGS, _, _ := cflags(bp, false)

	if len(bp.CgoPkgConfig) > 0 {
		pcCFLAGS, err := pkgConfigFlags(bp)
		if err != nil {
			return nil, nil, err
		}
		cgoCPPFLAGS = append(cgoCPPFLAGS, pcCFLAGS...)
	}

	// Allows including _cgo_export.h from .[ch] files in the package.
	cgoCPPFLAGS = append(cgoCPPFLAGS, "-I", tmpdir)

	// _cgo_gotypes.go (displayed "C") contains the type definitions.
	files = append(files, filepath.Join(tmpdir, "_cgo_gotypes.go"))
	displayFiles = append(displayFiles, "C")
	for _, fn := range bp.CgoFiles {
		// "foo.cgo1.go" (displayed "foo.go") is the processed Go source.
		f := cgoRe.ReplaceAllString(fn[:len(fn)-len("go")], "_")
		files = append(files, filepath.Join(tmpdir, f+"cgo1.go"))
		displayFiles = append(displayFiles, fn)
	}

	var cgoflags []string
	if bp.Goroot && bp.ImportPath == "runtime/cgo" {
		cgoflags = append(cgoflags, "-import_runtime_cgo=false")
	}
	if bp.Goroot && bp.ImportPath == "runtime/race" || bp.ImportPath == "runtime/cgo" {
		cgoflags = append(cgoflags, "-import_syscall=false")
	}

	var cgoFiles []string = bp.CgoFiles
	if useabs {
		// Rewrite the input file names to absolute paths so that cgo's
		// //line comments refer to the originals by absolute path.
		cgoFiles = make([]string, len(bp.CgoFiles))
		for i := range cgoFiles {
			cgoFiles[i] = filepath.Join(pkgdir, bp.CgoFiles[i])
		}
	}

	args := stringList(
		"go", "tool", "cgo", "-objdir", tmpdir, cgoflags, "--",
		cgoCPPFLAGS, cgoexeCFLAGS, cgoFiles,
	)
	// Flip to true for debugging the exact cgo invocation.
	if false {
		log.Printf("Running cgo for package %q: %s (dir=%s)", bp.ImportPath, args, pkgdir)
	}
	cmd := exec.Command(args[0], args[1:]...)
	cmd.Dir = pkgdir
	cmd.Stdout = os.Stderr
	cmd.Stderr = os.Stderr
	if err := cmd.Run(); err != nil {
		return nil, nil, fmt.Errorf("cgo failed: %s: %s", args, err)
	}

	return files, displayFiles, nil
}
|
||||||
|
|
||||||
|
// -- unmodified from 'go build' ---------------------------------------
|
||||||
|
|
||||||
|
// Return the flags to use when invoking the C or C++ compilers, or cgo.
|
||||||
|
func cflags(p *build.Package, def bool) (cppflags, cflags, cxxflags, ldflags []string) {
|
||||||
|
var defaults string
|
||||||
|
if def {
|
||||||
|
defaults = "-g -O2"
|
||||||
|
}
|
||||||
|
|
||||||
|
cppflags = stringList(envList("CGO_CPPFLAGS", ""), p.CgoCPPFLAGS)
|
||||||
|
cflags = stringList(envList("CGO_CFLAGS", defaults), p.CgoCFLAGS)
|
||||||
|
cxxflags = stringList(envList("CGO_CXXFLAGS", defaults), p.CgoCXXFLAGS)
|
||||||
|
ldflags = stringList(envList("CGO_LDFLAGS", defaults), p.CgoLDFLAGS)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// envList returns the value of the given environment variable broken
|
||||||
|
// into fields, using the default value when the variable is empty.
|
||||||
|
func envList(key, def string) []string {
|
||||||
|
v := os.Getenv(key)
|
||||||
|
if v == "" {
|
||||||
|
v = def
|
||||||
|
}
|
||||||
|
return strings.Fields(v)
|
||||||
|
}
|
||||||
|
|
||||||
|
// stringList's arguments should be a sequence of string or []string values.
|
||||||
|
// stringList flattens them into a single []string.
|
||||||
|
func stringList(args ...interface{}) []string {
|
||||||
|
var x []string
|
||||||
|
for _, arg := range args {
|
||||||
|
switch arg := arg.(type) {
|
||||||
|
case []string:
|
||||||
|
x = append(x, arg...)
|
||||||
|
case string:
|
||||||
|
x = append(x, arg)
|
||||||
|
default:
|
||||||
|
panic("stringList: invalid argument")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return x
|
||||||
|
}
|
||||||
39
vendor/golang.org/x/tools/go/internal/cgo/cgo_pkgconfig.go
generated
vendored
Normal file
39
vendor/golang.org/x/tools/go/internal/cgo/cgo_pkgconfig.go
generated
vendored
Normal file
@@ -0,0 +1,39 @@
|
|||||||
|
// Copyright 2013 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package cgo
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"go/build"
|
||||||
|
"os/exec"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// pkgConfig runs pkg-config with the specified arguments and returns the flags it prints.
|
||||||
|
func pkgConfig(mode string, pkgs []string) (flags []string, err error) {
|
||||||
|
cmd := exec.Command("pkg-config", append([]string{mode}, pkgs...)...)
|
||||||
|
out, err := cmd.CombinedOutput()
|
||||||
|
if err != nil {
|
||||||
|
s := fmt.Sprintf("%s failed: %v", strings.Join(cmd.Args, " "), err)
|
||||||
|
if len(out) > 0 {
|
||||||
|
s = fmt.Sprintf("%s: %s", s, out)
|
||||||
|
}
|
||||||
|
return nil, errors.New(s)
|
||||||
|
}
|
||||||
|
if len(out) > 0 {
|
||||||
|
flags = strings.Fields(string(out))
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// pkgConfigFlags calls pkg-config if needed and returns the cflags
|
||||||
|
// needed to build the package.
|
||||||
|
func pkgConfigFlags(p *build.Package) (cflags []string, err error) {
|
||||||
|
if len(p.CgoPkgConfig) == 0 {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
return pkgConfig("--cflags", p.CgoPkgConfig)
|
||||||
|
}
|
||||||
@@ -7,12 +7,14 @@ go_library(
|
|||||||
"bimport.go",
|
"bimport.go",
|
||||||
"exportdata.go",
|
"exportdata.go",
|
||||||
"gcimporter.go",
|
"gcimporter.go",
|
||||||
"isAlias18.go",
|
"iexport.go",
|
||||||
"isAlias19.go",
|
"iimport.go",
|
||||||
|
"newInterface10.go",
|
||||||
|
"newInterface11.go",
|
||||||
],
|
],
|
||||||
importmap = "k8s.io/kubernetes/vendor/golang.org/x/tools/go/gcimporter15",
|
importmap = "k8s.io/kubernetes/vendor/golang.org/x/tools/go/internal/gcimporter",
|
||||||
importpath = "golang.org/x/tools/go/gcimporter15",
|
importpath = "golang.org/x/tools/go/internal/gcimporter",
|
||||||
visibility = ["//visibility:public"],
|
visibility = ["//vendor/golang.org/x/tools/go:__subpackages__"],
|
||||||
)
|
)
|
||||||
|
|
||||||
filegroup(
|
filegroup(
|
||||||
@@ -16,7 +16,6 @@ import (
|
|||||||
"go/constant"
|
"go/constant"
|
||||||
"go/token"
|
"go/token"
|
||||||
"go/types"
|
"go/types"
|
||||||
"log"
|
|
||||||
"math"
|
"math"
|
||||||
"math/big"
|
"math/big"
|
||||||
"sort"
|
"sort"
|
||||||
@@ -39,6 +38,11 @@ const debugFormat = false // default: false
|
|||||||
const trace = false // default: false
|
const trace = false // default: false
|
||||||
|
|
||||||
// Current export format version. Increase with each format change.
|
// Current export format version. Increase with each format change.
|
||||||
|
// Note: The latest binary (non-indexed) export format is at version 6.
|
||||||
|
// This exporter is still at level 4, but it doesn't matter since
|
||||||
|
// the binary importer can handle older versions just fine.
|
||||||
|
// 6: package height (CL 105038) -- NOT IMPLEMENTED HERE
|
||||||
|
// 5: improved position encoding efficiency (issue 20080, CL 41619) -- NOT IMPLEMEMTED HERE
|
||||||
// 4: type name objects support type aliases, uses aliasTag
|
// 4: type name objects support type aliases, uses aliasTag
|
||||||
// 3: Go1.8 encoding (same as version 2, aliasTag defined but never used)
|
// 3: Go1.8 encoding (same as version 2, aliasTag defined but never used)
|
||||||
// 2: removed unused bool in ODCL export (compiler only)
|
// 2: removed unused bool in ODCL export (compiler only)
|
||||||
@@ -76,9 +80,29 @@ type exporter struct {
|
|||||||
indent int // for trace
|
indent int // for trace
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// internalError represents an error generated inside this package.
|
||||||
|
type internalError string
|
||||||
|
|
||||||
|
func (e internalError) Error() string { return "gcimporter: " + string(e) }
|
||||||
|
|
||||||
|
func internalErrorf(format string, args ...interface{}) error {
|
||||||
|
return internalError(fmt.Sprintf(format, args...))
|
||||||
|
}
|
||||||
|
|
||||||
// BExportData returns binary export data for pkg.
|
// BExportData returns binary export data for pkg.
|
||||||
// If no file set is provided, position info will be missing.
|
// If no file set is provided, position info will be missing.
|
||||||
func BExportData(fset *token.FileSet, pkg *types.Package) []byte {
|
func BExportData(fset *token.FileSet, pkg *types.Package) (b []byte, err error) {
|
||||||
|
defer func() {
|
||||||
|
if e := recover(); e != nil {
|
||||||
|
if ierr, ok := e.(internalError); ok {
|
||||||
|
err = ierr
|
||||||
|
return
|
||||||
|
}
|
||||||
|
// Not an internal error; panic again.
|
||||||
|
panic(e)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
p := exporter{
|
p := exporter{
|
||||||
fset: fset,
|
fset: fset,
|
||||||
strIndex: map[string]int{"": 0}, // empty string is mapped to 0
|
strIndex: map[string]int{"": 0}, // empty string is mapped to 0
|
||||||
@@ -103,11 +127,11 @@ func BExportData(fset *token.FileSet, pkg *types.Package) []byte {
|
|||||||
// --- generic export data ---
|
// --- generic export data ---
|
||||||
|
|
||||||
// populate type map with predeclared "known" types
|
// populate type map with predeclared "known" types
|
||||||
for index, typ := range predeclared {
|
for index, typ := range predeclared() {
|
||||||
p.typIndex[typ] = index
|
p.typIndex[typ] = index
|
||||||
}
|
}
|
||||||
if len(p.typIndex) != len(predeclared) {
|
if len(p.typIndex) != len(predeclared()) {
|
||||||
log.Fatalf("gcimporter: duplicate entries in type map?")
|
return nil, internalError("duplicate entries in type map?")
|
||||||
}
|
}
|
||||||
|
|
||||||
// write package data
|
// write package data
|
||||||
@@ -145,12 +169,12 @@ func BExportData(fset *token.FileSet, pkg *types.Package) []byte {
|
|||||||
|
|
||||||
// --- end of export data ---
|
// --- end of export data ---
|
||||||
|
|
||||||
return p.out.Bytes()
|
return p.out.Bytes(), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *exporter) pkg(pkg *types.Package, emptypath bool) {
|
func (p *exporter) pkg(pkg *types.Package, emptypath bool) {
|
||||||
if pkg == nil {
|
if pkg == nil {
|
||||||
log.Fatalf("gcimporter: unexpected nil pkg")
|
panic(internalError("unexpected nil pkg"))
|
||||||
}
|
}
|
||||||
|
|
||||||
// if we saw the package before, write its index (>= 0)
|
// if we saw the package before, write its index (>= 0)
|
||||||
@@ -185,7 +209,7 @@ func (p *exporter) obj(obj types.Object) {
|
|||||||
p.value(obj.Val())
|
p.value(obj.Val())
|
||||||
|
|
||||||
case *types.TypeName:
|
case *types.TypeName:
|
||||||
if isAlias(obj) {
|
if obj.IsAlias() {
|
||||||
p.tag(aliasTag)
|
p.tag(aliasTag)
|
||||||
p.pos(obj)
|
p.pos(obj)
|
||||||
p.qualifiedName(obj)
|
p.qualifiedName(obj)
|
||||||
@@ -209,7 +233,7 @@ func (p *exporter) obj(obj types.Object) {
|
|||||||
p.paramList(sig.Results(), false)
|
p.paramList(sig.Results(), false)
|
||||||
|
|
||||||
default:
|
default:
|
||||||
log.Fatalf("gcimporter: unexpected object %v (%T)", obj, obj)
|
panic(internalErrorf("unexpected object %v (%T)", obj, obj))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -273,7 +297,7 @@ func (p *exporter) qualifiedName(obj types.Object) {
|
|||||||
|
|
||||||
func (p *exporter) typ(t types.Type) {
|
func (p *exporter) typ(t types.Type) {
|
||||||
if t == nil {
|
if t == nil {
|
||||||
log.Fatalf("gcimporter: nil type")
|
panic(internalError("nil type"))
|
||||||
}
|
}
|
||||||
|
|
||||||
// Possible optimization: Anonymous pointer types *T where
|
// Possible optimization: Anonymous pointer types *T where
|
||||||
@@ -356,7 +380,7 @@ func (p *exporter) typ(t types.Type) {
|
|||||||
p.typ(t.Elem())
|
p.typ(t.Elem())
|
||||||
|
|
||||||
default:
|
default:
|
||||||
log.Fatalf("gcimporter: unexpected type %T: %s", t, t)
|
panic(internalErrorf("unexpected type %T: %s", t, t))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -422,7 +446,7 @@ func (p *exporter) fieldList(t *types.Struct) {
|
|||||||
|
|
||||||
func (p *exporter) field(f *types.Var) {
|
func (p *exporter) field(f *types.Var) {
|
||||||
if !f.IsField() {
|
if !f.IsField() {
|
||||||
log.Fatalf("gcimporter: field expected")
|
panic(internalError("field expected"))
|
||||||
}
|
}
|
||||||
|
|
||||||
p.pos(f)
|
p.pos(f)
|
||||||
@@ -452,7 +476,7 @@ func (p *exporter) iface(t *types.Interface) {
|
|||||||
func (p *exporter) method(m *types.Func) {
|
func (p *exporter) method(m *types.Func) {
|
||||||
sig := m.Type().(*types.Signature)
|
sig := m.Type().(*types.Signature)
|
||||||
if sig.Recv() == nil {
|
if sig.Recv() == nil {
|
||||||
log.Fatalf("gcimporter: method expected")
|
panic(internalError("method expected"))
|
||||||
}
|
}
|
||||||
|
|
||||||
p.pos(m)
|
p.pos(m)
|
||||||
@@ -575,13 +599,13 @@ func (p *exporter) value(x constant.Value) {
|
|||||||
p.tag(unknownTag)
|
p.tag(unknownTag)
|
||||||
|
|
||||||
default:
|
default:
|
||||||
log.Fatalf("gcimporter: unexpected value %v (%T)", x, x)
|
panic(internalErrorf("unexpected value %v (%T)", x, x))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *exporter) float(x constant.Value) {
|
func (p *exporter) float(x constant.Value) {
|
||||||
if x.Kind() != constant.Float {
|
if x.Kind() != constant.Float {
|
||||||
log.Fatalf("gcimporter: unexpected constant %v, want float", x)
|
panic(internalErrorf("unexpected constant %v, want float", x))
|
||||||
}
|
}
|
||||||
// extract sign (there is no -0)
|
// extract sign (there is no -0)
|
||||||
sign := constant.Sign(x)
|
sign := constant.Sign(x)
|
||||||
@@ -616,7 +640,7 @@ func (p *exporter) float(x constant.Value) {
|
|||||||
m.SetMantExp(&m, int(m.MinPrec()))
|
m.SetMantExp(&m, int(m.MinPrec()))
|
||||||
mant, acc := m.Int(nil)
|
mant, acc := m.Int(nil)
|
||||||
if acc != big.Exact {
|
if acc != big.Exact {
|
||||||
log.Fatalf("gcimporter: internal error")
|
panic(internalError("internal error"))
|
||||||
}
|
}
|
||||||
|
|
||||||
p.int(sign)
|
p.int(sign)
|
||||||
@@ -653,7 +677,7 @@ func (p *exporter) bool(b bool) bool {
|
|||||||
|
|
||||||
func (p *exporter) index(marker byte, index int) {
|
func (p *exporter) index(marker byte, index int) {
|
||||||
if index < 0 {
|
if index < 0 {
|
||||||
log.Fatalf("gcimporter: invalid index < 0")
|
panic(internalError("invalid index < 0"))
|
||||||
}
|
}
|
||||||
if debugFormat {
|
if debugFormat {
|
||||||
p.marker('t')
|
p.marker('t')
|
||||||
@@ -666,7 +690,7 @@ func (p *exporter) index(marker byte, index int) {
|
|||||||
|
|
||||||
func (p *exporter) tag(tag int) {
|
func (p *exporter) tag(tag int) {
|
||||||
if tag >= 0 {
|
if tag >= 0 {
|
||||||
log.Fatalf("gcimporter: invalid tag >= 0")
|
panic(internalError("invalid tag >= 0"))
|
||||||
}
|
}
|
||||||
if debugFormat {
|
if debugFormat {
|
||||||
p.marker('t')
|
p.marker('t')
|
||||||
@@ -39,8 +39,7 @@ type importer struct {
|
|||||||
posInfoFormat bool
|
posInfoFormat bool
|
||||||
prevFile string
|
prevFile string
|
||||||
prevLine int
|
prevLine int
|
||||||
fset *token.FileSet
|
fake fakeFileSet
|
||||||
files map[string]*token.File
|
|
||||||
|
|
||||||
// debugging support
|
// debugging support
|
||||||
debugFormat bool
|
debugFormat bool
|
||||||
@@ -53,12 +52,16 @@ type importer struct {
|
|||||||
// compromised, an error is returned.
|
// compromised, an error is returned.
|
||||||
func BImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (_ int, pkg *types.Package, err error) {
|
func BImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (_ int, pkg *types.Package, err error) {
|
||||||
// catch panics and return them as errors
|
// catch panics and return them as errors
|
||||||
|
const currentVersion = 6
|
||||||
|
version := -1 // unknown version
|
||||||
defer func() {
|
defer func() {
|
||||||
if e := recover(); e != nil {
|
if e := recover(); e != nil {
|
||||||
// The package (filename) causing the problem is added to this
|
|
||||||
// error by a wrapper in the caller (Import in gcimporter.go).
|
|
||||||
// Return a (possibly nil or incomplete) package unchanged (see #16088).
|
// Return a (possibly nil or incomplete) package unchanged (see #16088).
|
||||||
err = fmt.Errorf("cannot import, possibly version skew (%v) - reinstall package", e)
|
if version > currentVersion {
|
||||||
|
err = fmt.Errorf("cannot import %q (%v), export data is newer version - update tool", path, e)
|
||||||
|
} else {
|
||||||
|
err = fmt.Errorf("cannot import %q (%v), possibly version skew - reinstall package", path, e)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
|
|
||||||
@@ -66,11 +69,13 @@ func BImportData(fset *token.FileSet, imports map[string]*types.Package, data []
|
|||||||
imports: imports,
|
imports: imports,
|
||||||
data: data,
|
data: data,
|
||||||
importpath: path,
|
importpath: path,
|
||||||
version: -1, // unknown version
|
version: version,
|
||||||
strList: []string{""}, // empty string is mapped to 0
|
strList: []string{""}, // empty string is mapped to 0
|
||||||
pathList: []string{""}, // empty string is mapped to 0
|
pathList: []string{""}, // empty string is mapped to 0
|
||||||
|
fake: fakeFileSet{
|
||||||
fset: fset,
|
fset: fset,
|
||||||
files: make(map[string]*token.File),
|
files: make(map[string]*token.File),
|
||||||
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
// read version info
|
// read version info
|
||||||
@@ -89,7 +94,7 @@ func BImportData(fset *token.FileSet, imports map[string]*types.Package, data []
|
|||||||
p.posInfoFormat = p.int() != 0
|
p.posInfoFormat = p.int() != 0
|
||||||
versionstr = p.string()
|
versionstr = p.string()
|
||||||
if versionstr == "v1" {
|
if versionstr == "v1" {
|
||||||
p.version = 0
|
version = 0
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
// Go1.8 extensible encoding
|
// Go1.8 extensible encoding
|
||||||
@@ -97,35 +102,36 @@ func BImportData(fset *token.FileSet, imports map[string]*types.Package, data []
|
|||||||
versionstr = p.rawStringln(b)
|
versionstr = p.rawStringln(b)
|
||||||
if s := strings.SplitN(versionstr, " ", 3); len(s) >= 2 && s[0] == "version" {
|
if s := strings.SplitN(versionstr, " ", 3); len(s) >= 2 && s[0] == "version" {
|
||||||
if v, err := strconv.Atoi(s[1]); err == nil && v > 0 {
|
if v, err := strconv.Atoi(s[1]); err == nil && v > 0 {
|
||||||
p.version = v
|
version = v
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
p.version = version
|
||||||
|
|
||||||
// read version specific flags - extend as necessary
|
// read version specific flags - extend as necessary
|
||||||
switch p.version {
|
switch p.version {
|
||||||
// case 6:
|
// case currentVersion:
|
||||||
// ...
|
// ...
|
||||||
// fallthrough
|
// fallthrough
|
||||||
case 5, 4, 3, 2, 1:
|
case currentVersion, 5, 4, 3, 2, 1:
|
||||||
p.debugFormat = p.rawStringln(p.rawByte()) == "debug"
|
p.debugFormat = p.rawStringln(p.rawByte()) == "debug"
|
||||||
p.trackAllTypes = p.int() != 0
|
p.trackAllTypes = p.int() != 0
|
||||||
p.posInfoFormat = p.int() != 0
|
p.posInfoFormat = p.int() != 0
|
||||||
case 0:
|
case 0:
|
||||||
// Go1.7 encoding format - nothing to do here
|
// Go1.7 encoding format - nothing to do here
|
||||||
default:
|
default:
|
||||||
errorf("unknown export format version %d (%q)", p.version, versionstr)
|
errorf("unknown bexport format version %d (%q)", p.version, versionstr)
|
||||||
}
|
}
|
||||||
|
|
||||||
// --- generic export data ---
|
// --- generic export data ---
|
||||||
|
|
||||||
// populate typList with predeclared "known" types
|
// populate typList with predeclared "known" types
|
||||||
p.typList = append(p.typList, predeclared...)
|
p.typList = append(p.typList, predeclared()...)
|
||||||
|
|
||||||
// read package data
|
// read package data
|
||||||
pkg = p.pkg()
|
pkg = p.pkg()
|
||||||
|
|
||||||
// read objects of phase 1 only (see cmd/compiler/internal/gc/bexport.go)
|
// read objects of phase 1 only (see cmd/compile/internal/gc/bexport.go)
|
||||||
objcount := 0
|
objcount := 0
|
||||||
for {
|
for {
|
||||||
tag := p.tagOrIndex()
|
tag := p.tagOrIndex()
|
||||||
@@ -184,6 +190,9 @@ func (p *importer) pkg() *types.Package {
|
|||||||
} else {
|
} else {
|
||||||
path = p.string()
|
path = p.string()
|
||||||
}
|
}
|
||||||
|
if p.version >= 6 {
|
||||||
|
p.int() // package height; unused by go/types
|
||||||
|
}
|
||||||
|
|
||||||
// we should never see an empty package name
|
// we should never see an empty package name
|
||||||
if name == "" {
|
if name == "" {
|
||||||
@@ -259,7 +268,7 @@ func (p *importer) obj(tag int) {
|
|||||||
case constTag:
|
case constTag:
|
||||||
pos := p.pos()
|
pos := p.pos()
|
||||||
pkg, name := p.qualifiedName()
|
pkg, name := p.qualifiedName()
|
||||||
typ := p.typ(nil)
|
typ := p.typ(nil, nil)
|
||||||
val := p.value()
|
val := p.value()
|
||||||
p.declare(types.NewConst(pos, pkg, name, typ, val))
|
p.declare(types.NewConst(pos, pkg, name, typ, val))
|
||||||
|
|
||||||
@@ -267,16 +276,16 @@ func (p *importer) obj(tag int) {
|
|||||||
// TODO(gri) verify type alias hookup is correct
|
// TODO(gri) verify type alias hookup is correct
|
||||||
pos := p.pos()
|
pos := p.pos()
|
||||||
pkg, name := p.qualifiedName()
|
pkg, name := p.qualifiedName()
|
||||||
typ := p.typ(nil)
|
typ := p.typ(nil, nil)
|
||||||
p.declare(types.NewTypeName(pos, pkg, name, typ))
|
p.declare(types.NewTypeName(pos, pkg, name, typ))
|
||||||
|
|
||||||
case typeTag:
|
case typeTag:
|
||||||
p.typ(nil)
|
p.typ(nil, nil)
|
||||||
|
|
||||||
case varTag:
|
case varTag:
|
||||||
pos := p.pos()
|
pos := p.pos()
|
||||||
pkg, name := p.qualifiedName()
|
pkg, name := p.qualifiedName()
|
||||||
typ := p.typ(nil)
|
typ := p.typ(nil, nil)
|
||||||
p.declare(types.NewVar(pos, pkg, name, typ))
|
p.declare(types.NewVar(pos, pkg, name, typ))
|
||||||
|
|
||||||
case funcTag:
|
case funcTag:
|
||||||
@@ -323,15 +332,23 @@ func (p *importer) pos() token.Pos {
|
|||||||
p.prevFile = file
|
p.prevFile = file
|
||||||
p.prevLine = line
|
p.prevLine = line
|
||||||
|
|
||||||
// Synthesize a token.Pos
|
return p.fake.pos(file, line)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Synthesize a token.Pos
|
||||||
|
type fakeFileSet struct {
|
||||||
|
fset *token.FileSet
|
||||||
|
files map[string]*token.File
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *fakeFileSet) pos(file string, line int) token.Pos {
|
||||||
// Since we don't know the set of needed file positions, we
|
// Since we don't know the set of needed file positions, we
|
||||||
// reserve maxlines positions per file.
|
// reserve maxlines positions per file.
|
||||||
const maxlines = 64 * 1024
|
const maxlines = 64 * 1024
|
||||||
f := p.files[file]
|
f := s.files[file]
|
||||||
if f == nil {
|
if f == nil {
|
||||||
f = p.fset.AddFile(file, -1, maxlines)
|
f = s.fset.AddFile(file, -1, maxlines)
|
||||||
p.files[file] = f
|
s.files[file] = f
|
||||||
// Allocate the fake linebreak indices on first use.
|
// Allocate the fake linebreak indices on first use.
|
||||||
// TODO(adonovan): opt: save ~512KB using a more complex scheme?
|
// TODO(adonovan): opt: save ~512KB using a more complex scheme?
|
||||||
fakeLinesOnce.Do(func() {
|
fakeLinesOnce.Do(func() {
|
||||||
@@ -381,7 +398,11 @@ func (t *dddSlice) String() string { return "..." + t.elem.String() }
|
|||||||
// the package currently imported. The parent package is needed for
|
// the package currently imported. The parent package is needed for
|
||||||
// exported struct fields and interface methods which don't contain
|
// exported struct fields and interface methods which don't contain
|
||||||
// explicit package information in the export data.
|
// explicit package information in the export data.
|
||||||
func (p *importer) typ(parent *types.Package) types.Type {
|
//
|
||||||
|
// A non-nil tname is used as the "owner" of the result type; i.e.,
|
||||||
|
// the result type is the underlying type of tname. tname is used
|
||||||
|
// to give interface methods a named receiver type where possible.
|
||||||
|
func (p *importer) typ(parent *types.Package, tname *types.Named) types.Type {
|
||||||
// if the type was seen before, i is its index (>= 0)
|
// if the type was seen before, i is its index (>= 0)
|
||||||
i := p.tagOrIndex()
|
i := p.tagOrIndex()
|
||||||
if i >= 0 {
|
if i >= 0 {
|
||||||
@@ -411,15 +432,15 @@ func (p *importer) typ(parent *types.Package) types.Type {
|
|||||||
t0 := types.NewNamed(obj.(*types.TypeName), nil, nil)
|
t0 := types.NewNamed(obj.(*types.TypeName), nil, nil)
|
||||||
|
|
||||||
// but record the existing type, if any
|
// but record the existing type, if any
|
||||||
t := obj.Type().(*types.Named)
|
tname := obj.Type().(*types.Named) // tname is either t0 or the existing type
|
||||||
p.record(t)
|
p.record(tname)
|
||||||
|
|
||||||
// read underlying type
|
// read underlying type
|
||||||
t0.SetUnderlying(p.typ(parent))
|
t0.SetUnderlying(p.typ(parent, t0))
|
||||||
|
|
||||||
// interfaces don't have associated methods
|
// interfaces don't have associated methods
|
||||||
if types.IsInterface(t0) {
|
if types.IsInterface(t0) {
|
||||||
return t
|
return tname
|
||||||
}
|
}
|
||||||
|
|
||||||
// read associated methods
|
// read associated methods
|
||||||
@@ -440,7 +461,7 @@ func (p *importer) typ(parent *types.Package) types.Type {
|
|||||||
t0.AddMethod(types.NewFunc(pos, parent, name, sig))
|
t0.AddMethod(types.NewFunc(pos, parent, name, sig))
|
||||||
}
|
}
|
||||||
|
|
||||||
return t
|
return tname
|
||||||
|
|
||||||
case arrayTag:
|
case arrayTag:
|
||||||
t := new(types.Array)
|
t := new(types.Array)
|
||||||
@@ -449,7 +470,7 @@ func (p *importer) typ(parent *types.Package) types.Type {
|
|||||||
}
|
}
|
||||||
|
|
||||||
n := p.int64()
|
n := p.int64()
|
||||||
*t = *types.NewArray(p.typ(parent), n)
|
*t = *types.NewArray(p.typ(parent, nil), n)
|
||||||
return t
|
return t
|
||||||
|
|
||||||
case sliceTag:
|
case sliceTag:
|
||||||
@@ -458,7 +479,7 @@ func (p *importer) typ(parent *types.Package) types.Type {
|
|||||||
p.record(t)
|
p.record(t)
|
||||||
}
|
}
|
||||||
|
|
||||||
*t = *types.NewSlice(p.typ(parent))
|
*t = *types.NewSlice(p.typ(parent, nil))
|
||||||
return t
|
return t
|
||||||
|
|
||||||
case dddTag:
|
case dddTag:
|
||||||
@@ -467,7 +488,7 @@ func (p *importer) typ(parent *types.Package) types.Type {
|
|||||||
p.record(t)
|
p.record(t)
|
||||||
}
|
}
|
||||||
|
|
||||||
t.elem = p.typ(parent)
|
t.elem = p.typ(parent, nil)
|
||||||
return t
|
return t
|
||||||
|
|
||||||
case structTag:
|
case structTag:
|
||||||
@@ -485,7 +506,7 @@ func (p *importer) typ(parent *types.Package) types.Type {
|
|||||||
p.record(t)
|
p.record(t)
|
||||||
}
|
}
|
||||||
|
|
||||||
*t = *types.NewPointer(p.typ(parent))
|
*t = *types.NewPointer(p.typ(parent, nil))
|
||||||
return t
|
return t
|
||||||
|
|
||||||
case signatureTag:
|
case signatureTag:
|
||||||
@@ -504,18 +525,20 @@ func (p *importer) typ(parent *types.Package) types.Type {
|
|||||||
// cannot expect the interface type to appear in a cycle, as any
|
// cannot expect the interface type to appear in a cycle, as any
|
||||||
// such cycle must contain a named type which would have been
|
// such cycle must contain a named type which would have been
|
||||||
// first defined earlier.
|
// first defined earlier.
|
||||||
|
// TODO(gri) Is this still true now that we have type aliases?
|
||||||
|
// See issue #23225.
|
||||||
n := len(p.typList)
|
n := len(p.typList)
|
||||||
if p.trackAllTypes {
|
if p.trackAllTypes {
|
||||||
p.record(nil)
|
p.record(nil)
|
||||||
}
|
}
|
||||||
|
|
||||||
var embeddeds []*types.Named
|
var embeddeds []types.Type
|
||||||
for n := p.int(); n > 0; n-- {
|
for n := p.int(); n > 0; n-- {
|
||||||
p.pos()
|
p.pos()
|
||||||
embeddeds = append(embeddeds, p.typ(parent).(*types.Named))
|
embeddeds = append(embeddeds, p.typ(parent, nil))
|
||||||
}
|
}
|
||||||
|
|
||||||
t := types.NewInterface(p.methodList(parent), embeddeds)
|
t := newInterface(p.methodList(parent, tname), embeddeds)
|
||||||
p.interfaceList = append(p.interfaceList, t)
|
p.interfaceList = append(p.interfaceList, t)
|
||||||
if p.trackAllTypes {
|
if p.trackAllTypes {
|
||||||
p.typList[n] = t
|
p.typList[n] = t
|
||||||
@@ -528,8 +551,8 @@ func (p *importer) typ(parent *types.Package) types.Type {
|
|||||||
p.record(t)
|
p.record(t)
|
||||||
}
|
}
|
||||||
|
|
||||||
key := p.typ(parent)
|
key := p.typ(parent, nil)
|
||||||
val := p.typ(parent)
|
val := p.typ(parent, nil)
|
||||||
*t = *types.NewMap(key, val)
|
*t = *types.NewMap(key, val)
|
||||||
return t
|
return t
|
||||||
|
|
||||||
@@ -539,19 +562,8 @@ func (p *importer) typ(parent *types.Package) types.Type {
|
|||||||
p.record(t)
|
p.record(t)
|
||||||
}
|
}
|
||||||
|
|
||||||
var dir types.ChanDir
|
dir := chanDir(p.int())
|
||||||
// tag values must match the constants in cmd/compile/internal/gc/go.go
|
val := p.typ(parent, nil)
|
||||||
switch d := p.int(); d {
|
|
||||||
case 1 /* Crecv */ :
|
|
||||||
dir = types.RecvOnly
|
|
||||||
case 2 /* Csend */ :
|
|
||||||
dir = types.SendOnly
|
|
||||||
case 3 /* Cboth */ :
|
|
||||||
dir = types.SendRecv
|
|
||||||
default:
|
|
||||||
errorf("unexpected channel dir %d", d)
|
|
||||||
}
|
|
||||||
val := p.typ(parent)
|
|
||||||
*t = *types.NewChan(dir, val)
|
*t = *types.NewChan(dir, val)
|
||||||
return t
|
return t
|
||||||
|
|
||||||
@@ -561,6 +573,21 @@ func (p *importer) typ(parent *types.Package) types.Type {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func chanDir(d int) types.ChanDir {
|
||||||
|
// tag values must match the constants in cmd/compile/internal/gc/go.go
|
||||||
|
switch d {
|
||||||
|
case 1 /* Crecv */ :
|
||||||
|
return types.RecvOnly
|
||||||
|
case 2 /* Csend */ :
|
||||||
|
return types.SendOnly
|
||||||
|
case 3 /* Cboth */ :
|
||||||
|
return types.SendRecv
|
||||||
|
default:
|
||||||
|
errorf("unexpected channel dir %d", d)
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func (p *importer) fieldList(parent *types.Package) (fields []*types.Var, tags []string) {
|
func (p *importer) fieldList(parent *types.Package) (fields []*types.Var, tags []string) {
|
||||||
if n := p.int(); n > 0 {
|
if n := p.int(); n > 0 {
|
||||||
fields = make([]*types.Var, n)
|
fields = make([]*types.Var, n)
|
||||||
@@ -575,7 +602,7 @@ func (p *importer) fieldList(parent *types.Package) (fields []*types.Var, tags [
|
|||||||
func (p *importer) field(parent *types.Package) (*types.Var, string) {
|
func (p *importer) field(parent *types.Package) (*types.Var, string) {
|
||||||
pos := p.pos()
|
pos := p.pos()
|
||||||
pkg, name, alias := p.fieldName(parent)
|
pkg, name, alias := p.fieldName(parent)
|
||||||
typ := p.typ(parent)
|
typ := p.typ(parent, nil)
|
||||||
tag := p.string()
|
tag := p.string()
|
||||||
|
|
||||||
anonymous := false
|
anonymous := false
|
||||||
@@ -599,22 +626,30 @@ func (p *importer) field(parent *types.Package) (*types.Var, string) {
|
|||||||
return types.NewField(pos, pkg, name, typ, anonymous), tag
|
return types.NewField(pos, pkg, name, typ, anonymous), tag
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *importer) methodList(parent *types.Package) (methods []*types.Func) {
|
func (p *importer) methodList(parent *types.Package, baseType *types.Named) (methods []*types.Func) {
|
||||||
if n := p.int(); n > 0 {
|
if n := p.int(); n > 0 {
|
||||||
methods = make([]*types.Func, n)
|
methods = make([]*types.Func, n)
|
||||||
for i := range methods {
|
for i := range methods {
|
||||||
methods[i] = p.method(parent)
|
methods[i] = p.method(parent, baseType)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *importer) method(parent *types.Package) *types.Func {
|
func (p *importer) method(parent *types.Package, baseType *types.Named) *types.Func {
|
||||||
pos := p.pos()
|
pos := p.pos()
|
||||||
pkg, name, _ := p.fieldName(parent)
|
pkg, name, _ := p.fieldName(parent)
|
||||||
|
// If we don't have a baseType, use a nil receiver.
|
||||||
|
// A receiver using the actual interface type (which
|
||||||
|
// we don't know yet) will be filled in when we call
|
||||||
|
// types.Interface.Complete.
|
||||||
|
var recv *types.Var
|
||||||
|
if baseType != nil {
|
||||||
|
recv = types.NewVar(token.NoPos, parent, "", baseType)
|
||||||
|
}
|
||||||
params, isddd := p.paramList()
|
params, isddd := p.paramList()
|
||||||
result, _ := p.paramList()
|
result, _ := p.paramList()
|
||||||
sig := types.NewSignature(nil, params, result, isddd)
|
sig := types.NewSignature(recv, params, result, isddd)
|
||||||
return types.NewFunc(pos, pkg, name, sig)
|
return types.NewFunc(pos, pkg, name, sig)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -670,7 +705,7 @@ func (p *importer) paramList() (*types.Tuple, bool) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (p *importer) param(named bool) (*types.Var, bool) {
|
func (p *importer) param(named bool) (*types.Var, bool) {
|
||||||
t := p.typ(nil)
|
t := p.typ(nil, nil)
|
||||||
td, isddd := t.(*dddSlice)
|
td, isddd := t.(*dddSlice)
|
||||||
if isddd {
|
if isddd {
|
||||||
t = types.NewSlice(td.elem)
|
t = types.NewSlice(td.elem)
|
||||||
@@ -941,8 +976,13 @@ const (
|
|||||||
aliasTag
|
aliasTag
|
||||||
)
|
)
|
||||||
|
|
||||||
var predeclared = []types.Type{
|
var predecl []types.Type // initialized lazily
|
||||||
// basic types
|
|
||||||
|
func predeclared() []types.Type {
|
||||||
|
if predecl == nil {
|
||||||
|
// initialize lazily to be sure that all
|
||||||
|
// elements have been initialized before
|
||||||
|
predecl = []types.Type{ // basic types
|
||||||
types.Typ[types.Bool],
|
types.Typ[types.Bool],
|
||||||
types.Typ[types.Int],
|
types.Typ[types.Int],
|
||||||
types.Typ[types.Int8],
|
types.Typ[types.Int8],
|
||||||
@@ -986,6 +1026,9 @@ var predeclared = []types.Type{
|
|||||||
// used internally by gc; never used by this package or in .a files
|
// used internally by gc; never used by this package or in .a files
|
||||||
anyType{},
|
anyType{},
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
return predecl
|
||||||
|
}
|
||||||
|
|
||||||
type anyType struct{}
|
type anyType struct{}
|
||||||
|
|
||||||
@@ -2,17 +2,13 @@
|
|||||||
// Use of this source code is governed by a BSD-style
|
// Use of this source code is governed by a BSD-style
|
||||||
// license that can be found in the LICENSE file.
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
// This file is a copy of $GOROOT/src/go/internal/gcimporter/gcimporter.go,
|
// This file is a modified copy of $GOROOT/src/go/internal/gcimporter/gcimporter.go,
|
||||||
// but it also contains the original source-based importer code for Go1.6.
|
// but it also contains the original source-based importer code for Go1.6.
|
||||||
// Once we stop supporting 1.6, we can remove that code.
|
// Once we stop supporting 1.6, we can remove that code.
|
||||||
|
|
||||||
// Package gcimporter15 provides various functions for reading
|
// Package gcimporter provides various functions for reading
|
||||||
// gc-generated object files that can be used to implement the
|
// gc-generated object files that can be used to implement the
|
||||||
// Importer interface defined by the Go 1.5 standard library package.
|
// Importer interface defined by the Go 1.5 standard library package.
|
||||||
//
|
|
||||||
// Deprecated: this package will be deleted in October 2017.
|
|
||||||
// New code should use golang.org/x/tools/go/gcexportdata.
|
|
||||||
//
|
|
||||||
package gcimporter
|
package gcimporter
|
||||||
|
|
||||||
import (
|
import (
|
||||||
@@ -20,7 +16,7 @@ import (
|
|||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"go/build"
|
"go/build"
|
||||||
exact "go/constant"
|
"go/constant"
|
||||||
"go/token"
|
"go/token"
|
||||||
"go/types"
|
"go/types"
|
||||||
"io"
|
"io"
|
||||||
@@ -59,6 +55,7 @@ func FindPkg(path, srcDir string) (filename, id string) {
|
|||||||
}
|
}
|
||||||
bp, _ := build.Import(path, srcDir, build.FindOnly|build.AllowBinary)
|
bp, _ := build.Import(path, srcDir, build.FindOnly|build.AllowBinary)
|
||||||
if bp.PkgObj == "" {
|
if bp.PkgObj == "" {
|
||||||
|
id = path // make sure we have an id to print in error message
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
noext = strings.TrimSuffix(bp.PkgObj, ".a")
|
noext = strings.TrimSuffix(bp.PkgObj, ".a")
|
||||||
@@ -131,14 +128,33 @@ func ImportData(packages map[string]*types.Package, filename, id string, data io
|
|||||||
// the corresponding package object to the packages map, and returns the object.
|
// the corresponding package object to the packages map, and returns the object.
|
||||||
// The packages map must contain all packages already imported.
|
// The packages map must contain all packages already imported.
|
||||||
//
|
//
|
||||||
func Import(packages map[string]*types.Package, path, srcDir string) (pkg *types.Package, err error) {
|
func Import(packages map[string]*types.Package, path, srcDir string, lookup func(path string) (io.ReadCloser, error)) (pkg *types.Package, err error) {
|
||||||
filename, id := FindPkg(path, srcDir)
|
var rc io.ReadCloser
|
||||||
|
var filename, id string
|
||||||
|
if lookup != nil {
|
||||||
|
// With custom lookup specified, assume that caller has
|
||||||
|
// converted path to a canonical import path for use in the map.
|
||||||
|
if path == "unsafe" {
|
||||||
|
return types.Unsafe, nil
|
||||||
|
}
|
||||||
|
id = path
|
||||||
|
|
||||||
|
// No need to re-import if the package was imported completely before.
|
||||||
|
if pkg = packages[id]; pkg != nil && pkg.Complete() {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
f, err := lookup(path)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
rc = f
|
||||||
|
} else {
|
||||||
|
filename, id = FindPkg(path, srcDir)
|
||||||
if filename == "" {
|
if filename == "" {
|
||||||
if path == "unsafe" {
|
if path == "unsafe" {
|
||||||
return types.Unsafe, nil
|
return types.Unsafe, nil
|
||||||
}
|
}
|
||||||
err = fmt.Errorf("can't find import: %s", id)
|
return nil, fmt.Errorf("can't find import: %q", id)
|
||||||
return
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// no need to re-import if the package was imported completely before
|
// no need to re-import if the package was imported completely before
|
||||||
@@ -149,33 +165,54 @@ func Import(packages map[string]*types.Package, path, srcDir string) (pkg *types
|
|||||||
// open file
|
// open file
|
||||||
f, err := os.Open(filename)
|
f, err := os.Open(filename)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return
|
return nil, err
|
||||||
}
|
}
|
||||||
defer func() {
|
defer func() {
|
||||||
f.Close()
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
// add file name to error
|
// add file name to error
|
||||||
err = fmt.Errorf("reading export data: %s: %v", filename, err)
|
err = fmt.Errorf("%s: %v", filename, err)
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
|
rc = f
|
||||||
|
}
|
||||||
|
defer rc.Close()
|
||||||
|
|
||||||
var hdr string
|
var hdr string
|
||||||
buf := bufio.NewReader(f)
|
buf := bufio.NewReader(rc)
|
||||||
if hdr, err = FindExportData(buf); err != nil {
|
if hdr, err = FindExportData(buf); err != nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
switch hdr {
|
switch hdr {
|
||||||
case "$$\n":
|
case "$$\n":
|
||||||
|
// Work-around if we don't have a filename; happens only if lookup != nil.
|
||||||
|
// Either way, the filename is only needed for importer error messages, so
|
||||||
|
// this is fine.
|
||||||
|
if filename == "" {
|
||||||
|
filename = path
|
||||||
|
}
|
||||||
return ImportData(packages, filename, id, buf)
|
return ImportData(packages, filename, id, buf)
|
||||||
|
|
||||||
case "$$B\n":
|
case "$$B\n":
|
||||||
var data []byte
|
var data []byte
|
||||||
data, err = ioutil.ReadAll(buf)
|
data, err = ioutil.ReadAll(buf)
|
||||||
if err == nil {
|
if err != nil {
|
||||||
fset := token.NewFileSet()
|
break
|
||||||
_, pkg, err = BImportData(fset, packages, data, id)
|
|
||||||
return
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// TODO(gri): allow clients of go/importer to provide a FileSet.
|
||||||
|
// Or, define a new standard go/types/gcexportdata package.
|
||||||
|
fset := token.NewFileSet()
|
||||||
|
|
||||||
|
// The indexed export format starts with an 'i'; the older
|
||||||
|
// binary export format starts with a 'c', 'd', or 'v'
|
||||||
|
// (from "version"). Select appropriate importer.
|
||||||
|
if len(data) > 0 && data[0] == 'i' {
|
||||||
|
_, pkg, err = IImportData(fset, packages, data[1:], id)
|
||||||
|
} else {
|
||||||
|
_, pkg, err = BImportData(fset, packages, data, id)
|
||||||
|
}
|
||||||
|
|
||||||
default:
|
default:
|
||||||
err = fmt.Errorf("unknown export data header: %q", hdr)
|
err = fmt.Errorf("unknown export data header: %q", hdr)
|
||||||
}
|
}
|
||||||
@@ -767,9 +804,9 @@ func (p *parser) parseInt() string {
|
|||||||
|
|
||||||
// number = int_lit [ "p" int_lit ] .
|
// number = int_lit [ "p" int_lit ] .
|
||||||
//
|
//
|
||||||
func (p *parser) parseNumber() (typ *types.Basic, val exact.Value) {
|
func (p *parser) parseNumber() (typ *types.Basic, val constant.Value) {
|
||||||
// mantissa
|
// mantissa
|
||||||
mant := exact.MakeFromLiteral(p.parseInt(), token.INT, 0)
|
mant := constant.MakeFromLiteral(p.parseInt(), token.INT, 0)
|
||||||
if mant == nil {
|
if mant == nil {
|
||||||
panic("invalid mantissa")
|
panic("invalid mantissa")
|
||||||
}
|
}
|
||||||
@@ -782,14 +819,14 @@ func (p *parser) parseNumber() (typ *types.Basic, val exact.Value) {
|
|||||||
p.error(err)
|
p.error(err)
|
||||||
}
|
}
|
||||||
if exp < 0 {
|
if exp < 0 {
|
||||||
denom := exact.MakeInt64(1)
|
denom := constant.MakeInt64(1)
|
||||||
denom = exact.Shift(denom, token.SHL, uint(-exp))
|
denom = constant.Shift(denom, token.SHL, uint(-exp))
|
||||||
typ = types.Typ[types.UntypedFloat]
|
typ = types.Typ[types.UntypedFloat]
|
||||||
val = exact.BinaryOp(mant, token.QUO, denom)
|
val = constant.BinaryOp(mant, token.QUO, denom)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
if exp > 0 {
|
if exp > 0 {
|
||||||
mant = exact.Shift(mant, token.SHL, uint(exp))
|
mant = constant.Shift(mant, token.SHL, uint(exp))
|
||||||
}
|
}
|
||||||
typ = types.Typ[types.UntypedFloat]
|
typ = types.Typ[types.UntypedFloat]
|
||||||
val = mant
|
val = mant
|
||||||
@@ -820,7 +857,7 @@ func (p *parser) parseConstDecl() {
|
|||||||
|
|
||||||
p.expect('=')
|
p.expect('=')
|
||||||
var typ types.Type
|
var typ types.Type
|
||||||
var val exact.Value
|
var val constant.Value
|
||||||
switch p.tok {
|
switch p.tok {
|
||||||
case scanner.Ident:
|
case scanner.Ident:
|
||||||
// bool_lit
|
// bool_lit
|
||||||
@@ -828,7 +865,7 @@ func (p *parser) parseConstDecl() {
|
|||||||
p.error("expected true or false")
|
p.error("expected true or false")
|
||||||
}
|
}
|
||||||
typ = types.Typ[types.UntypedBool]
|
typ = types.Typ[types.UntypedBool]
|
||||||
val = exact.MakeBool(p.lit == "true")
|
val = constant.MakeBool(p.lit == "true")
|
||||||
p.next()
|
p.next()
|
||||||
|
|
||||||
case '-', scanner.Int:
|
case '-', scanner.Int:
|
||||||
@@ -852,18 +889,18 @@ func (p *parser) parseConstDecl() {
|
|||||||
p.expectKeyword("i")
|
p.expectKeyword("i")
|
||||||
p.expect(')')
|
p.expect(')')
|
||||||
typ = types.Typ[types.UntypedComplex]
|
typ = types.Typ[types.UntypedComplex]
|
||||||
val = exact.BinaryOp(re, token.ADD, exact.MakeImag(im))
|
val = constant.BinaryOp(re, token.ADD, constant.MakeImag(im))
|
||||||
|
|
||||||
case scanner.Char:
|
case scanner.Char:
|
||||||
// rune_lit
|
// rune_lit
|
||||||
typ = types.Typ[types.UntypedRune]
|
typ = types.Typ[types.UntypedRune]
|
||||||
val = exact.MakeFromLiteral(p.lit, token.CHAR, 0)
|
val = constant.MakeFromLiteral(p.lit, token.CHAR, 0)
|
||||||
p.next()
|
p.next()
|
||||||
|
|
||||||
case scanner.String:
|
case scanner.String:
|
||||||
// string_lit
|
// string_lit
|
||||||
typ = types.Typ[types.UntypedString]
|
typ = types.Typ[types.UntypedString]
|
||||||
val = exact.MakeFromLiteral(p.lit, token.STRING, 0)
|
val = constant.MakeFromLiteral(p.lit, token.STRING, 0)
|
||||||
p.next()
|
p.next()
|
||||||
|
|
||||||
default:
|
default:
|
||||||
723
vendor/golang.org/x/tools/go/internal/gcimporter/iexport.go
generated
vendored
Normal file
723
vendor/golang.org/x/tools/go/internal/gcimporter/iexport.go
generated
vendored
Normal file
@@ -0,0 +1,723 @@
|
|||||||
|
// Copyright 2019 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// Indexed binary package export.
|
||||||
|
// This file was derived from $GOROOT/src/cmd/compile/internal/gc/iexport.go;
|
||||||
|
// see that file for specification of the format.
|
||||||
|
|
||||||
|
// +build go1.11
|
||||||
|
|
||||||
|
package gcimporter
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/binary"
|
||||||
|
"go/ast"
|
||||||
|
"go/constant"
|
||||||
|
"go/token"
|
||||||
|
"go/types"
|
||||||
|
"io"
|
||||||
|
"math/big"
|
||||||
|
"reflect"
|
||||||
|
"sort"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Current indexed export format version. Increase with each format change.
|
||||||
|
// 0: Go1.11 encoding
|
||||||
|
const iexportVersion = 0
|
||||||
|
|
||||||
|
// IExportData returns the binary export data for pkg.
|
||||||
|
// If no file set is provided, position info will be missing.
|
||||||
|
func IExportData(fset *token.FileSet, pkg *types.Package) (b []byte, err error) {
|
||||||
|
defer func() {
|
||||||
|
if e := recover(); e != nil {
|
||||||
|
if ierr, ok := e.(internalError); ok {
|
||||||
|
err = ierr
|
||||||
|
return
|
||||||
|
}
|
||||||
|
// Not an internal error; panic again.
|
||||||
|
panic(e)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
p := iexporter{
|
||||||
|
out: bytes.NewBuffer(nil),
|
||||||
|
fset: fset,
|
||||||
|
allPkgs: map[*types.Package]bool{},
|
||||||
|
stringIndex: map[string]uint64{},
|
||||||
|
declIndex: map[types.Object]uint64{},
|
||||||
|
typIndex: map[types.Type]uint64{},
|
||||||
|
}
|
||||||
|
|
||||||
|
for i, pt := range predeclared() {
|
||||||
|
p.typIndex[pt] = uint64(i)
|
||||||
|
}
|
||||||
|
if len(p.typIndex) > predeclReserved {
|
||||||
|
panic(internalErrorf("too many predeclared types: %d > %d", len(p.typIndex), predeclReserved))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Initialize work queue with exported declarations.
|
||||||
|
scope := pkg.Scope()
|
||||||
|
for _, name := range scope.Names() {
|
||||||
|
if ast.IsExported(name) {
|
||||||
|
p.pushDecl(scope.Lookup(name))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Loop until no more work.
|
||||||
|
for !p.declTodo.empty() {
|
||||||
|
p.doDecl(p.declTodo.popHead())
|
||||||
|
}
|
||||||
|
|
||||||
|
// Append indices to data0 section.
|
||||||
|
dataLen := uint64(p.data0.Len())
|
||||||
|
w := p.newWriter()
|
||||||
|
w.writeIndex(p.declIndex, pkg)
|
||||||
|
w.flush()
|
||||||
|
|
||||||
|
// Assemble header.
|
||||||
|
var hdr intWriter
|
||||||
|
hdr.WriteByte('i')
|
||||||
|
hdr.uint64(iexportVersion)
|
||||||
|
hdr.uint64(uint64(p.strings.Len()))
|
||||||
|
hdr.uint64(dataLen)
|
||||||
|
|
||||||
|
// Flush output.
|
||||||
|
io.Copy(p.out, &hdr)
|
||||||
|
io.Copy(p.out, &p.strings)
|
||||||
|
io.Copy(p.out, &p.data0)
|
||||||
|
|
||||||
|
return p.out.Bytes(), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// writeIndex writes out an object index. mainIndex indicates whether
|
||||||
|
// we're writing out the main index, which is also read by
|
||||||
|
// non-compiler tools and includes a complete package description
|
||||||
|
// (i.e., name and height).
|
||||||
|
func (w *exportWriter) writeIndex(index map[types.Object]uint64, localpkg *types.Package) {
|
||||||
|
// Build a map from packages to objects from that package.
|
||||||
|
pkgObjs := map[*types.Package][]types.Object{}
|
||||||
|
|
||||||
|
// For the main index, make sure to include every package that
|
||||||
|
// we reference, even if we're not exporting (or reexporting)
|
||||||
|
// any symbols from it.
|
||||||
|
pkgObjs[localpkg] = nil
|
||||||
|
for pkg := range w.p.allPkgs {
|
||||||
|
pkgObjs[pkg] = nil
|
||||||
|
}
|
||||||
|
|
||||||
|
for obj := range index {
|
||||||
|
pkgObjs[obj.Pkg()] = append(pkgObjs[obj.Pkg()], obj)
|
||||||
|
}
|
||||||
|
|
||||||
|
var pkgs []*types.Package
|
||||||
|
for pkg, objs := range pkgObjs {
|
||||||
|
pkgs = append(pkgs, pkg)
|
||||||
|
|
||||||
|
sort.Slice(objs, func(i, j int) bool {
|
||||||
|
return objs[i].Name() < objs[j].Name()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
sort.Slice(pkgs, func(i, j int) bool {
|
||||||
|
return pkgs[i].Path() < pkgs[j].Path()
|
||||||
|
})
|
||||||
|
|
||||||
|
w.uint64(uint64(len(pkgs)))
|
||||||
|
for _, pkg := range pkgs {
|
||||||
|
w.string(pkg.Path())
|
||||||
|
w.string(pkg.Name())
|
||||||
|
w.uint64(uint64(0)) // package height is not needed for go/types
|
||||||
|
|
||||||
|
objs := pkgObjs[pkg]
|
||||||
|
w.uint64(uint64(len(objs)))
|
||||||
|
for _, obj := range objs {
|
||||||
|
w.string(obj.Name())
|
||||||
|
w.uint64(index[obj])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type iexporter struct {
|
||||||
|
fset *token.FileSet
|
||||||
|
out *bytes.Buffer
|
||||||
|
|
||||||
|
// allPkgs tracks all packages that have been referenced by
|
||||||
|
// the export data, so we can ensure to include them in the
|
||||||
|
// main index.
|
||||||
|
allPkgs map[*types.Package]bool
|
||||||
|
|
||||||
|
declTodo objQueue
|
||||||
|
|
||||||
|
strings intWriter
|
||||||
|
stringIndex map[string]uint64
|
||||||
|
|
||||||
|
data0 intWriter
|
||||||
|
declIndex map[types.Object]uint64
|
||||||
|
typIndex map[types.Type]uint64
|
||||||
|
}
|
||||||
|
|
||||||
|
// stringOff returns the offset of s within the string section.
|
||||||
|
// If not already present, it's added to the end.
|
||||||
|
func (p *iexporter) stringOff(s string) uint64 {
|
||||||
|
off, ok := p.stringIndex[s]
|
||||||
|
if !ok {
|
||||||
|
off = uint64(p.strings.Len())
|
||||||
|
p.stringIndex[s] = off
|
||||||
|
|
||||||
|
p.strings.uint64(uint64(len(s)))
|
||||||
|
p.strings.WriteString(s)
|
||||||
|
}
|
||||||
|
return off
|
||||||
|
}
|
||||||
|
|
||||||
|
// pushDecl adds n to the declaration work queue, if not already present.
|
||||||
|
func (p *iexporter) pushDecl(obj types.Object) {
|
||||||
|
// Package unsafe is known to the compiler and predeclared.
|
||||||
|
assert(obj.Pkg() != types.Unsafe)
|
||||||
|
|
||||||
|
if _, ok := p.declIndex[obj]; ok {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
p.declIndex[obj] = ^uint64(0) // mark n present in work queue
|
||||||
|
p.declTodo.pushTail(obj)
|
||||||
|
}
|
||||||
|
|
||||||
|
// exportWriter handles writing out individual data section chunks.
|
||||||
|
type exportWriter struct {
|
||||||
|
p *iexporter
|
||||||
|
|
||||||
|
data intWriter
|
||||||
|
currPkg *types.Package
|
||||||
|
prevFile string
|
||||||
|
prevLine int64
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *iexporter) doDecl(obj types.Object) {
|
||||||
|
w := p.newWriter()
|
||||||
|
w.setPkg(obj.Pkg(), false)
|
||||||
|
|
||||||
|
switch obj := obj.(type) {
|
||||||
|
case *types.Var:
|
||||||
|
w.tag('V')
|
||||||
|
w.pos(obj.Pos())
|
||||||
|
w.typ(obj.Type(), obj.Pkg())
|
||||||
|
|
||||||
|
case *types.Func:
|
||||||
|
sig, _ := obj.Type().(*types.Signature)
|
||||||
|
if sig.Recv() != nil {
|
||||||
|
panic(internalErrorf("unexpected method: %v", sig))
|
||||||
|
}
|
||||||
|
w.tag('F')
|
||||||
|
w.pos(obj.Pos())
|
||||||
|
w.signature(sig)
|
||||||
|
|
||||||
|
case *types.Const:
|
||||||
|
w.tag('C')
|
||||||
|
w.pos(obj.Pos())
|
||||||
|
w.value(obj.Type(), obj.Val())
|
||||||
|
|
||||||
|
case *types.TypeName:
|
||||||
|
if obj.IsAlias() {
|
||||||
|
w.tag('A')
|
||||||
|
w.pos(obj.Pos())
|
||||||
|
w.typ(obj.Type(), obj.Pkg())
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
// Defined type.
|
||||||
|
w.tag('T')
|
||||||
|
w.pos(obj.Pos())
|
||||||
|
|
||||||
|
underlying := obj.Type().Underlying()
|
||||||
|
w.typ(underlying, obj.Pkg())
|
||||||
|
|
||||||
|
t := obj.Type()
|
||||||
|
if types.IsInterface(t) {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
named, ok := t.(*types.Named)
|
||||||
|
if !ok {
|
||||||
|
panic(internalErrorf("%s is not a defined type", t))
|
||||||
|
}
|
||||||
|
|
||||||
|
n := named.NumMethods()
|
||||||
|
w.uint64(uint64(n))
|
||||||
|
for i := 0; i < n; i++ {
|
||||||
|
m := named.Method(i)
|
||||||
|
w.pos(m.Pos())
|
||||||
|
w.string(m.Name())
|
||||||
|
sig, _ := m.Type().(*types.Signature)
|
||||||
|
w.param(sig.Recv())
|
||||||
|
w.signature(sig)
|
||||||
|
}
|
||||||
|
|
||||||
|
default:
|
||||||
|
panic(internalErrorf("unexpected object: %v", obj))
|
||||||
|
}
|
||||||
|
|
||||||
|
p.declIndex[obj] = w.flush()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *exportWriter) tag(tag byte) {
|
||||||
|
w.data.WriteByte(tag)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *exportWriter) pos(pos token.Pos) {
|
||||||
|
p := w.p.fset.Position(pos)
|
||||||
|
file := p.Filename
|
||||||
|
line := int64(p.Line)
|
||||||
|
|
||||||
|
// When file is the same as the last position (common case),
|
||||||
|
// we can save a few bytes by delta encoding just the line
|
||||||
|
// number.
|
||||||
|
//
|
||||||
|
// Note: Because data objects may be read out of order (or not
|
||||||
|
// at all), we can only apply delta encoding within a single
|
||||||
|
// object. This is handled implicitly by tracking prevFile and
|
||||||
|
// prevLine as fields of exportWriter.
|
||||||
|
|
||||||
|
if file == w.prevFile {
|
||||||
|
delta := line - w.prevLine
|
||||||
|
w.int64(delta)
|
||||||
|
if delta == deltaNewFile {
|
||||||
|
w.int64(-1)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
w.int64(deltaNewFile)
|
||||||
|
w.int64(line) // line >= 0
|
||||||
|
w.string(file)
|
||||||
|
w.prevFile = file
|
||||||
|
}
|
||||||
|
w.prevLine = line
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *exportWriter) pkg(pkg *types.Package) {
|
||||||
|
// Ensure any referenced packages are declared in the main index.
|
||||||
|
w.p.allPkgs[pkg] = true
|
||||||
|
|
||||||
|
w.string(pkg.Path())
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *exportWriter) qualifiedIdent(obj types.Object) {
|
||||||
|
// Ensure any referenced declarations are written out too.
|
||||||
|
w.p.pushDecl(obj)
|
||||||
|
|
||||||
|
w.string(obj.Name())
|
||||||
|
w.pkg(obj.Pkg())
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *exportWriter) typ(t types.Type, pkg *types.Package) {
|
||||||
|
w.data.uint64(w.p.typOff(t, pkg))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *iexporter) newWriter() *exportWriter {
|
||||||
|
return &exportWriter{p: p}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *exportWriter) flush() uint64 {
|
||||||
|
off := uint64(w.p.data0.Len())
|
||||||
|
io.Copy(&w.p.data0, &w.data)
|
||||||
|
return off
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *iexporter) typOff(t types.Type, pkg *types.Package) uint64 {
|
||||||
|
off, ok := p.typIndex[t]
|
||||||
|
if !ok {
|
||||||
|
w := p.newWriter()
|
||||||
|
w.doTyp(t, pkg)
|
||||||
|
off = predeclReserved + w.flush()
|
||||||
|
p.typIndex[t] = off
|
||||||
|
}
|
||||||
|
return off
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *exportWriter) startType(k itag) {
|
||||||
|
w.data.uint64(uint64(k))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) {
|
||||||
|
switch t := t.(type) {
|
||||||
|
case *types.Named:
|
||||||
|
w.startType(definedType)
|
||||||
|
w.qualifiedIdent(t.Obj())
|
||||||
|
|
||||||
|
case *types.Pointer:
|
||||||
|
w.startType(pointerType)
|
||||||
|
w.typ(t.Elem(), pkg)
|
||||||
|
|
||||||
|
case *types.Slice:
|
||||||
|
w.startType(sliceType)
|
||||||
|
w.typ(t.Elem(), pkg)
|
||||||
|
|
||||||
|
case *types.Array:
|
||||||
|
w.startType(arrayType)
|
||||||
|
w.uint64(uint64(t.Len()))
|
||||||
|
w.typ(t.Elem(), pkg)
|
||||||
|
|
||||||
|
case *types.Chan:
|
||||||
|
w.startType(chanType)
|
||||||
|
// 1 RecvOnly; 2 SendOnly; 3 SendRecv
|
||||||
|
var dir uint64
|
||||||
|
switch t.Dir() {
|
||||||
|
case types.RecvOnly:
|
||||||
|
dir = 1
|
||||||
|
case types.SendOnly:
|
||||||
|
dir = 2
|
||||||
|
case types.SendRecv:
|
||||||
|
dir = 3
|
||||||
|
}
|
||||||
|
w.uint64(dir)
|
||||||
|
w.typ(t.Elem(), pkg)
|
||||||
|
|
||||||
|
case *types.Map:
|
||||||
|
w.startType(mapType)
|
||||||
|
w.typ(t.Key(), pkg)
|
||||||
|
w.typ(t.Elem(), pkg)
|
||||||
|
|
||||||
|
case *types.Signature:
|
||||||
|
w.startType(signatureType)
|
||||||
|
w.setPkg(pkg, true)
|
||||||
|
w.signature(t)
|
||||||
|
|
||||||
|
case *types.Struct:
|
||||||
|
w.startType(structType)
|
||||||
|
w.setPkg(pkg, true)
|
||||||
|
|
||||||
|
n := t.NumFields()
|
||||||
|
w.uint64(uint64(n))
|
||||||
|
for i := 0; i < n; i++ {
|
||||||
|
f := t.Field(i)
|
||||||
|
w.pos(f.Pos())
|
||||||
|
w.string(f.Name())
|
||||||
|
w.typ(f.Type(), pkg)
|
||||||
|
w.bool(f.Embedded())
|
||||||
|
w.string(t.Tag(i)) // note (or tag)
|
||||||
|
}
|
||||||
|
|
||||||
|
case *types.Interface:
|
||||||
|
w.startType(interfaceType)
|
||||||
|
w.setPkg(pkg, true)
|
||||||
|
|
||||||
|
n := t.NumEmbeddeds()
|
||||||
|
w.uint64(uint64(n))
|
||||||
|
for i := 0; i < n; i++ {
|
||||||
|
f := t.Embedded(i)
|
||||||
|
w.pos(f.Obj().Pos())
|
||||||
|
w.typ(f.Obj().Type(), f.Obj().Pkg())
|
||||||
|
}
|
||||||
|
|
||||||
|
n = t.NumExplicitMethods()
|
||||||
|
w.uint64(uint64(n))
|
||||||
|
for i := 0; i < n; i++ {
|
||||||
|
m := t.ExplicitMethod(i)
|
||||||
|
w.pos(m.Pos())
|
||||||
|
w.string(m.Name())
|
||||||
|
sig, _ := m.Type().(*types.Signature)
|
||||||
|
w.signature(sig)
|
||||||
|
}
|
||||||
|
|
||||||
|
default:
|
||||||
|
panic(internalErrorf("unexpected type: %v, %v", t, reflect.TypeOf(t)))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *exportWriter) setPkg(pkg *types.Package, write bool) {
|
||||||
|
if write {
|
||||||
|
w.pkg(pkg)
|
||||||
|
}
|
||||||
|
|
||||||
|
w.currPkg = pkg
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *exportWriter) signature(sig *types.Signature) {
|
||||||
|
w.paramList(sig.Params())
|
||||||
|
w.paramList(sig.Results())
|
||||||
|
if sig.Params().Len() > 0 {
|
||||||
|
w.bool(sig.Variadic())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *exportWriter) paramList(tup *types.Tuple) {
|
||||||
|
n := tup.Len()
|
||||||
|
w.uint64(uint64(n))
|
||||||
|
for i := 0; i < n; i++ {
|
||||||
|
w.param(tup.At(i))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *exportWriter) param(obj types.Object) {
|
||||||
|
w.pos(obj.Pos())
|
||||||
|
w.localIdent(obj)
|
||||||
|
w.typ(obj.Type(), obj.Pkg())
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *exportWriter) value(typ types.Type, v constant.Value) {
|
||||||
|
w.typ(typ, nil)
|
||||||
|
|
||||||
|
switch v.Kind() {
|
||||||
|
case constant.Bool:
|
||||||
|
w.bool(constant.BoolVal(v))
|
||||||
|
case constant.Int:
|
||||||
|
var i big.Int
|
||||||
|
if i64, exact := constant.Int64Val(v); exact {
|
||||||
|
i.SetInt64(i64)
|
||||||
|
} else if ui64, exact := constant.Uint64Val(v); exact {
|
||||||
|
i.SetUint64(ui64)
|
||||||
|
} else {
|
||||||
|
i.SetString(v.ExactString(), 10)
|
||||||
|
}
|
||||||
|
w.mpint(&i, typ)
|
||||||
|
case constant.Float:
|
||||||
|
f := constantToFloat(v)
|
||||||
|
w.mpfloat(f, typ)
|
||||||
|
case constant.Complex:
|
||||||
|
w.mpfloat(constantToFloat(constant.Real(v)), typ)
|
||||||
|
w.mpfloat(constantToFloat(constant.Imag(v)), typ)
|
||||||
|
case constant.String:
|
||||||
|
w.string(constant.StringVal(v))
|
||||||
|
case constant.Unknown:
|
||||||
|
// package contains type errors
|
||||||
|
default:
|
||||||
|
panic(internalErrorf("unexpected value %v (%T)", v, v))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// constantToFloat converts a constant.Value with kind constant.Float to a
|
||||||
|
// big.Float.
|
||||||
|
func constantToFloat(x constant.Value) *big.Float {
|
||||||
|
assert(x.Kind() == constant.Float)
|
||||||
|
// Use the same floating-point precision (512) as cmd/compile
|
||||||
|
// (see Mpprec in cmd/compile/internal/gc/mpfloat.go).
|
||||||
|
const mpprec = 512
|
||||||
|
var f big.Float
|
||||||
|
f.SetPrec(mpprec)
|
||||||
|
if v, exact := constant.Float64Val(x); exact {
|
||||||
|
// float64
|
||||||
|
f.SetFloat64(v)
|
||||||
|
} else if num, denom := constant.Num(x), constant.Denom(x); num.Kind() == constant.Int {
|
||||||
|
// TODO(gri): add big.Rat accessor to constant.Value.
|
||||||
|
n := valueToRat(num)
|
||||||
|
d := valueToRat(denom)
|
||||||
|
f.SetRat(n.Quo(n, d))
|
||||||
|
} else {
|
||||||
|
// Value too large to represent as a fraction => inaccessible.
|
||||||
|
// TODO(gri): add big.Float accessor to constant.Value.
|
||||||
|
_, ok := f.SetString(x.ExactString())
|
||||||
|
assert(ok)
|
||||||
|
}
|
||||||
|
return &f
|
||||||
|
}
|
||||||
|
|
||||||
|
// mpint exports a multi-precision integer.
|
||||||
|
//
|
||||||
|
// For unsigned types, small values are written out as a single
|
||||||
|
// byte. Larger values are written out as a length-prefixed big-endian
|
||||||
|
// byte string, where the length prefix is encoded as its complement.
|
||||||
|
// For example, bytes 0, 1, and 2 directly represent the integer
|
||||||
|
// values 0, 1, and 2; while bytes 255, 254, and 253 indicate a 1-,
|
||||||
|
// 2-, and 3-byte big-endian string follow.
|
||||||
|
//
|
||||||
|
// Encoding for signed types use the same general approach as for
|
||||||
|
// unsigned types, except small values use zig-zag encoding and the
|
||||||
|
// bottom bit of length prefix byte for large values is reserved as a
|
||||||
|
// sign bit.
|
||||||
|
//
|
||||||
|
// The exact boundary between small and large encodings varies
|
||||||
|
// according to the maximum number of bytes needed to encode a value
|
||||||
|
// of type typ. As a special case, 8-bit types are always encoded as a
|
||||||
|
// single byte.
|
||||||
|
//
|
||||||
|
// TODO(mdempsky): Is this level of complexity really worthwhile?
|
||||||
|
func (w *exportWriter) mpint(x *big.Int, typ types.Type) {
|
||||||
|
basic, ok := typ.Underlying().(*types.Basic)
|
||||||
|
if !ok {
|
||||||
|
panic(internalErrorf("unexpected type %v (%T)", typ.Underlying(), typ.Underlying()))
|
||||||
|
}
|
||||||
|
|
||||||
|
signed, maxBytes := intSize(basic)
|
||||||
|
|
||||||
|
negative := x.Sign() < 0
|
||||||
|
if !signed && negative {
|
||||||
|
panic(internalErrorf("negative unsigned integer; type %v, value %v", typ, x))
|
||||||
|
}
|
||||||
|
|
||||||
|
b := x.Bytes()
|
||||||
|
if len(b) > 0 && b[0] == 0 {
|
||||||
|
panic(internalErrorf("leading zeros"))
|
||||||
|
}
|
||||||
|
if uint(len(b)) > maxBytes {
|
||||||
|
panic(internalErrorf("bad mpint length: %d > %d (type %v, value %v)", len(b), maxBytes, typ, x))
|
||||||
|
}
|
||||||
|
|
||||||
|
maxSmall := 256 - maxBytes
|
||||||
|
if signed {
|
||||||
|
maxSmall = 256 - 2*maxBytes
|
||||||
|
}
|
||||||
|
if maxBytes == 1 {
|
||||||
|
maxSmall = 256
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if x can use small value encoding.
|
||||||
|
if len(b) <= 1 {
|
||||||
|
var ux uint
|
||||||
|
if len(b) == 1 {
|
||||||
|
ux = uint(b[0])
|
||||||
|
}
|
||||||
|
if signed {
|
||||||
|
ux <<= 1
|
||||||
|
if negative {
|
||||||
|
ux--
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if ux < maxSmall {
|
||||||
|
w.data.WriteByte(byte(ux))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
n := 256 - uint(len(b))
|
||||||
|
if signed {
|
||||||
|
n = 256 - 2*uint(len(b))
|
||||||
|
if negative {
|
||||||
|
n |= 1
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if n < maxSmall || n >= 256 {
|
||||||
|
panic(internalErrorf("encoding mistake: %d, %v, %v => %d", len(b), signed, negative, n))
|
||||||
|
}
|
||||||
|
|
||||||
|
w.data.WriteByte(byte(n))
|
||||||
|
w.data.Write(b)
|
||||||
|
}
|
||||||
|
|
||||||
|
// mpfloat exports a multi-precision floating point number.
|
||||||
|
//
|
||||||
|
// The number's value is decomposed into mantissa × 2**exponent, where
|
||||||
|
// mantissa is an integer. The value is written out as mantissa (as a
|
||||||
|
// multi-precision integer) and then the exponent, except exponent is
|
||||||
|
// omitted if mantissa is zero.
|
||||||
|
func (w *exportWriter) mpfloat(f *big.Float, typ types.Type) {
|
||||||
|
if f.IsInf() {
|
||||||
|
panic("infinite constant")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Break into f = mant × 2**exp, with 0.5 <= mant < 1.
|
||||||
|
var mant big.Float
|
||||||
|
exp := int64(f.MantExp(&mant))
|
||||||
|
|
||||||
|
// Scale so that mant is an integer.
|
||||||
|
prec := mant.MinPrec()
|
||||||
|
mant.SetMantExp(&mant, int(prec))
|
||||||
|
exp -= int64(prec)
|
||||||
|
|
||||||
|
manti, acc := mant.Int(nil)
|
||||||
|
if acc != big.Exact {
|
||||||
|
panic(internalErrorf("mantissa scaling failed for %f (%s)", f, acc))
|
||||||
|
}
|
||||||
|
w.mpint(manti, typ)
|
||||||
|
if manti.Sign() != 0 {
|
||||||
|
w.int64(exp)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *exportWriter) bool(b bool) bool {
|
||||||
|
var x uint64
|
||||||
|
if b {
|
||||||
|
x = 1
|
||||||
|
}
|
||||||
|
w.uint64(x)
|
||||||
|
return b
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *exportWriter) int64(x int64) { w.data.int64(x) }
|
||||||
|
func (w *exportWriter) uint64(x uint64) { w.data.uint64(x) }
|
||||||
|
func (w *exportWriter) string(s string) { w.uint64(w.p.stringOff(s)) }
|
||||||
|
|
||||||
|
func (w *exportWriter) localIdent(obj types.Object) {
|
||||||
|
// Anonymous parameters.
|
||||||
|
if obj == nil {
|
||||||
|
w.string("")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
name := obj.Name()
|
||||||
|
if name == "_" {
|
||||||
|
w.string("_")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
w.string(name)
|
||||||
|
}
|
||||||
|
|
||||||
|
type intWriter struct {
|
||||||
|
bytes.Buffer
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *intWriter) int64(x int64) {
|
||||||
|
var buf [binary.MaxVarintLen64]byte
|
||||||
|
n := binary.PutVarint(buf[:], x)
|
||||||
|
w.Write(buf[:n])
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *intWriter) uint64(x uint64) {
|
||||||
|
var buf [binary.MaxVarintLen64]byte
|
||||||
|
n := binary.PutUvarint(buf[:], x)
|
||||||
|
w.Write(buf[:n])
|
||||||
|
}
|
||||||
|
|
||||||
|
func assert(cond bool) {
|
||||||
|
if !cond {
|
||||||
|
panic("internal error: assertion failed")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// The below is copied from go/src/cmd/compile/internal/gc/syntax.go.
|
||||||
|
|
||||||
|
// objQueue is a FIFO queue of types.Object. The zero value of objQueue is
|
||||||
|
// a ready-to-use empty queue.
|
||||||
|
type objQueue struct {
|
||||||
|
ring []types.Object
|
||||||
|
head, tail int
|
||||||
|
}
|
||||||
|
|
||||||
|
// empty returns true if q contains no Nodes.
|
||||||
|
func (q *objQueue) empty() bool {
|
||||||
|
return q.head == q.tail
|
||||||
|
}
|
||||||
|
|
||||||
|
// pushTail appends n to the tail of the queue.
|
||||||
|
func (q *objQueue) pushTail(obj types.Object) {
|
||||||
|
if len(q.ring) == 0 {
|
||||||
|
q.ring = make([]types.Object, 16)
|
||||||
|
} else if q.head+len(q.ring) == q.tail {
|
||||||
|
// Grow the ring.
|
||||||
|
nring := make([]types.Object, len(q.ring)*2)
|
||||||
|
// Copy the old elements.
|
||||||
|
part := q.ring[q.head%len(q.ring):]
|
||||||
|
if q.tail-q.head <= len(part) {
|
||||||
|
part = part[:q.tail-q.head]
|
||||||
|
copy(nring, part)
|
||||||
|
} else {
|
||||||
|
pos := copy(nring, part)
|
||||||
|
copy(nring[pos:], q.ring[:q.tail%len(q.ring)])
|
||||||
|
}
|
||||||
|
q.ring, q.head, q.tail = nring, 0, q.tail-q.head
|
||||||
|
}
|
||||||
|
|
||||||
|
q.ring[q.tail%len(q.ring)] = obj
|
||||||
|
q.tail++
|
||||||
|
}
|
||||||
|
|
||||||
|
// popHead pops a node from the head of the queue. It panics if q is empty.
|
||||||
|
func (q *objQueue) popHead() types.Object {
|
||||||
|
if q.empty() {
|
||||||
|
panic("dequeue empty")
|
||||||
|
}
|
||||||
|
obj := q.ring[q.head%len(q.ring)]
|
||||||
|
q.head++
|
||||||
|
return obj
|
||||||
|
}
|
||||||
606
vendor/golang.org/x/tools/go/internal/gcimporter/iimport.go
generated
vendored
Normal file
606
vendor/golang.org/x/tools/go/internal/gcimporter/iimport.go
generated
vendored
Normal file
@@ -0,0 +1,606 @@
|
|||||||
|
// Copyright 2018 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// Indexed package import.
|
||||||
|
// See cmd/compile/internal/gc/iexport.go for the export data format.
|
||||||
|
|
||||||
|
// This file is a copy of $GOROOT/src/go/internal/gcimporter/iimport.go.
|
||||||
|
|
||||||
|
package gcimporter
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/binary"
|
||||||
|
"fmt"
|
||||||
|
"go/constant"
|
||||||
|
"go/token"
|
||||||
|
"go/types"
|
||||||
|
"io"
|
||||||
|
"sort"
|
||||||
|
)
|
||||||
|
|
||||||
|
type intReader struct {
|
||||||
|
*bytes.Reader
|
||||||
|
path string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *intReader) int64() int64 {
|
||||||
|
i, err := binary.ReadVarint(r.Reader)
|
||||||
|
if err != nil {
|
||||||
|
errorf("import %q: read varint error: %v", r.path, err)
|
||||||
|
}
|
||||||
|
return i
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *intReader) uint64() uint64 {
|
||||||
|
i, err := binary.ReadUvarint(r.Reader)
|
||||||
|
if err != nil {
|
||||||
|
errorf("import %q: read varint error: %v", r.path, err)
|
||||||
|
}
|
||||||
|
return i
|
||||||
|
}
|
||||||
|
|
||||||
|
const predeclReserved = 32
|
||||||
|
|
||||||
|
type itag uint64
|
||||||
|
|
||||||
|
const (
|
||||||
|
// Types
|
||||||
|
definedType itag = iota
|
||||||
|
pointerType
|
||||||
|
sliceType
|
||||||
|
arrayType
|
||||||
|
chanType
|
||||||
|
mapType
|
||||||
|
signatureType
|
||||||
|
structType
|
||||||
|
interfaceType
|
||||||
|
)
|
||||||
|
|
||||||
|
// IImportData imports a package from the serialized package data
|
||||||
|
// and returns the number of bytes consumed and a reference to the package.
|
||||||
|
// If the export data version is not recognized or the format is otherwise
|
||||||
|
// compromised, an error is returned.
|
||||||
|
func IImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (_ int, pkg *types.Package, err error) {
|
||||||
|
const currentVersion = 0
|
||||||
|
version := -1
|
||||||
|
defer func() {
|
||||||
|
if e := recover(); e != nil {
|
||||||
|
if version > currentVersion {
|
||||||
|
err = fmt.Errorf("cannot import %q (%v), export data is newer version - update tool", path, e)
|
||||||
|
} else {
|
||||||
|
err = fmt.Errorf("cannot import %q (%v), possibly version skew - reinstall package", path, e)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
r := &intReader{bytes.NewReader(data), path}
|
||||||
|
|
||||||
|
version = int(r.uint64())
|
||||||
|
switch version {
|
||||||
|
case currentVersion:
|
||||||
|
default:
|
||||||
|
errorf("unknown iexport format version %d", version)
|
||||||
|
}
|
||||||
|
|
||||||
|
sLen := int64(r.uint64())
|
||||||
|
dLen := int64(r.uint64())
|
||||||
|
|
||||||
|
whence, _ := r.Seek(0, io.SeekCurrent)
|
||||||
|
stringData := data[whence : whence+sLen]
|
||||||
|
declData := data[whence+sLen : whence+sLen+dLen]
|
||||||
|
r.Seek(sLen+dLen, io.SeekCurrent)
|
||||||
|
|
||||||
|
p := iimporter{
|
||||||
|
ipath: path,
|
||||||
|
|
||||||
|
stringData: stringData,
|
||||||
|
stringCache: make(map[uint64]string),
|
||||||
|
pkgCache: make(map[uint64]*types.Package),
|
||||||
|
|
||||||
|
declData: declData,
|
||||||
|
pkgIndex: make(map[*types.Package]map[string]uint64),
|
||||||
|
typCache: make(map[uint64]types.Type),
|
||||||
|
|
||||||
|
fake: fakeFileSet{
|
||||||
|
fset: fset,
|
||||||
|
files: make(map[string]*token.File),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for i, pt := range predeclared() {
|
||||||
|
p.typCache[uint64(i)] = pt
|
||||||
|
}
|
||||||
|
|
||||||
|
pkgList := make([]*types.Package, r.uint64())
|
||||||
|
for i := range pkgList {
|
||||||
|
pkgPathOff := r.uint64()
|
||||||
|
pkgPath := p.stringAt(pkgPathOff)
|
||||||
|
pkgName := p.stringAt(r.uint64())
|
||||||
|
_ = r.uint64() // package height; unused by go/types
|
||||||
|
|
||||||
|
if pkgPath == "" {
|
||||||
|
pkgPath = path
|
||||||
|
}
|
||||||
|
pkg := imports[pkgPath]
|
||||||
|
if pkg == nil {
|
||||||
|
pkg = types.NewPackage(pkgPath, pkgName)
|
||||||
|
imports[pkgPath] = pkg
|
||||||
|
} else if pkg.Name() != pkgName {
|
||||||
|
errorf("conflicting names %s and %s for package %q", pkg.Name(), pkgName, path)
|
||||||
|
}
|
||||||
|
|
||||||
|
p.pkgCache[pkgPathOff] = pkg
|
||||||
|
|
||||||
|
nameIndex := make(map[string]uint64)
|
||||||
|
for nSyms := r.uint64(); nSyms > 0; nSyms-- {
|
||||||
|
name := p.stringAt(r.uint64())
|
||||||
|
nameIndex[name] = r.uint64()
|
||||||
|
}
|
||||||
|
|
||||||
|
p.pkgIndex[pkg] = nameIndex
|
||||||
|
pkgList[i] = pkg
|
||||||
|
}
|
||||||
|
var localpkg *types.Package
|
||||||
|
for _, pkg := range pkgList {
|
||||||
|
if pkg.Path() == path {
|
||||||
|
localpkg = pkg
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
names := make([]string, 0, len(p.pkgIndex[localpkg]))
|
||||||
|
for name := range p.pkgIndex[localpkg] {
|
||||||
|
names = append(names, name)
|
||||||
|
}
|
||||||
|
sort.Strings(names)
|
||||||
|
for _, name := range names {
|
||||||
|
p.doDecl(localpkg, name)
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, typ := range p.interfaceList {
|
||||||
|
typ.Complete()
|
||||||
|
}
|
||||||
|
|
||||||
|
// record all referenced packages as imports
|
||||||
|
list := append(([]*types.Package)(nil), pkgList[1:]...)
|
||||||
|
sort.Sort(byPath(list))
|
||||||
|
localpkg.SetImports(list)
|
||||||
|
|
||||||
|
// package was imported completely and without errors
|
||||||
|
localpkg.MarkComplete()
|
||||||
|
|
||||||
|
consumed, _ := r.Seek(0, io.SeekCurrent)
|
||||||
|
return int(consumed), localpkg, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type iimporter struct {
|
||||||
|
ipath string
|
||||||
|
|
||||||
|
stringData []byte
|
||||||
|
stringCache map[uint64]string
|
||||||
|
pkgCache map[uint64]*types.Package
|
||||||
|
|
||||||
|
declData []byte
|
||||||
|
pkgIndex map[*types.Package]map[string]uint64
|
||||||
|
typCache map[uint64]types.Type
|
||||||
|
|
||||||
|
fake fakeFileSet
|
||||||
|
interfaceList []*types.Interface
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *iimporter) doDecl(pkg *types.Package, name string) {
|
||||||
|
// See if we've already imported this declaration.
|
||||||
|
if obj := pkg.Scope().Lookup(name); obj != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
off, ok := p.pkgIndex[pkg][name]
|
||||||
|
if !ok {
|
||||||
|
errorf("%v.%v not in index", pkg, name)
|
||||||
|
}
|
||||||
|
|
||||||
|
r := &importReader{p: p, currPkg: pkg}
|
||||||
|
r.declReader.Reset(p.declData[off:])
|
||||||
|
|
||||||
|
r.obj(name)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *iimporter) stringAt(off uint64) string {
|
||||||
|
if s, ok := p.stringCache[off]; ok {
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
slen, n := binary.Uvarint(p.stringData[off:])
|
||||||
|
if n <= 0 {
|
||||||
|
errorf("varint failed")
|
||||||
|
}
|
||||||
|
spos := off + uint64(n)
|
||||||
|
s := string(p.stringData[spos : spos+slen])
|
||||||
|
p.stringCache[off] = s
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *iimporter) pkgAt(off uint64) *types.Package {
|
||||||
|
if pkg, ok := p.pkgCache[off]; ok {
|
||||||
|
return pkg
|
||||||
|
}
|
||||||
|
path := p.stringAt(off)
|
||||||
|
errorf("missing package %q in %q", path, p.ipath)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *iimporter) typAt(off uint64, base *types.Named) types.Type {
|
||||||
|
if t, ok := p.typCache[off]; ok && (base == nil || !isInterface(t)) {
|
||||||
|
return t
|
||||||
|
}
|
||||||
|
|
||||||
|
if off < predeclReserved {
|
||||||
|
errorf("predeclared type missing from cache: %v", off)
|
||||||
|
}
|
||||||
|
|
||||||
|
r := &importReader{p: p}
|
||||||
|
r.declReader.Reset(p.declData[off-predeclReserved:])
|
||||||
|
t := r.doType(base)
|
||||||
|
|
||||||
|
if base == nil || !isInterface(t) {
|
||||||
|
p.typCache[off] = t
|
||||||
|
}
|
||||||
|
return t
|
||||||
|
}
|
||||||
|
|
||||||
|
type importReader struct {
|
||||||
|
p *iimporter
|
||||||
|
declReader bytes.Reader
|
||||||
|
currPkg *types.Package
|
||||||
|
prevFile string
|
||||||
|
prevLine int64
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *importReader) obj(name string) {
|
||||||
|
tag := r.byte()
|
||||||
|
pos := r.pos()
|
||||||
|
|
||||||
|
switch tag {
|
||||||
|
case 'A':
|
||||||
|
typ := r.typ()
|
||||||
|
|
||||||
|
r.declare(types.NewTypeName(pos, r.currPkg, name, typ))
|
||||||
|
|
||||||
|
case 'C':
|
||||||
|
typ, val := r.value()
|
||||||
|
|
||||||
|
r.declare(types.NewConst(pos, r.currPkg, name, typ, val))
|
||||||
|
|
||||||
|
case 'F':
|
||||||
|
sig := r.signature(nil)
|
||||||
|
|
||||||
|
r.declare(types.NewFunc(pos, r.currPkg, name, sig))
|
||||||
|
|
||||||
|
case 'T':
|
||||||
|
// Types can be recursive. We need to setup a stub
|
||||||
|
// declaration before recursing.
|
||||||
|
obj := types.NewTypeName(pos, r.currPkg, name, nil)
|
||||||
|
named := types.NewNamed(obj, nil, nil)
|
||||||
|
r.declare(obj)
|
||||||
|
|
||||||
|
underlying := r.p.typAt(r.uint64(), named).Underlying()
|
||||||
|
named.SetUnderlying(underlying)
|
||||||
|
|
||||||
|
if !isInterface(underlying) {
|
||||||
|
for n := r.uint64(); n > 0; n-- {
|
||||||
|
mpos := r.pos()
|
||||||
|
mname := r.ident()
|
||||||
|
recv := r.param()
|
||||||
|
msig := r.signature(recv)
|
||||||
|
|
||||||
|
named.AddMethod(types.NewFunc(mpos, r.currPkg, mname, msig))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
case 'V':
|
||||||
|
typ := r.typ()
|
||||||
|
|
||||||
|
r.declare(types.NewVar(pos, r.currPkg, name, typ))
|
||||||
|
|
||||||
|
default:
|
||||||
|
errorf("unexpected tag: %v", tag)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *importReader) declare(obj types.Object) {
|
||||||
|
obj.Pkg().Scope().Insert(obj)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *importReader) value() (typ types.Type, val constant.Value) {
|
||||||
|
typ = r.typ()
|
||||||
|
|
||||||
|
switch b := typ.Underlying().(*types.Basic); b.Info() & types.IsConstType {
|
||||||
|
case types.IsBoolean:
|
||||||
|
val = constant.MakeBool(r.bool())
|
||||||
|
|
||||||
|
case types.IsString:
|
||||||
|
val = constant.MakeString(r.string())
|
||||||
|
|
||||||
|
case types.IsInteger:
|
||||||
|
val = r.mpint(b)
|
||||||
|
|
||||||
|
case types.IsFloat:
|
||||||
|
val = r.mpfloat(b)
|
||||||
|
|
||||||
|
case types.IsComplex:
|
||||||
|
re := r.mpfloat(b)
|
||||||
|
im := r.mpfloat(b)
|
||||||
|
val = constant.BinaryOp(re, token.ADD, constant.MakeImag(im))
|
||||||
|
|
||||||
|
default:
|
||||||
|
if b.Kind() == types.Invalid {
|
||||||
|
val = constant.MakeUnknown()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
errorf("unexpected type %v", typ) // panics
|
||||||
|
panic("unreachable")
|
||||||
|
}
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func intSize(b *types.Basic) (signed bool, maxBytes uint) {
|
||||||
|
if (b.Info() & types.IsUntyped) != 0 {
|
||||||
|
return true, 64
|
||||||
|
}
|
||||||
|
|
||||||
|
switch b.Kind() {
|
||||||
|
case types.Float32, types.Complex64:
|
||||||
|
return true, 3
|
||||||
|
case types.Float64, types.Complex128:
|
||||||
|
return true, 7
|
||||||
|
}
|
||||||
|
|
||||||
|
signed = (b.Info() & types.IsUnsigned) == 0
|
||||||
|
switch b.Kind() {
|
||||||
|
case types.Int8, types.Uint8:
|
||||||
|
maxBytes = 1
|
||||||
|
case types.Int16, types.Uint16:
|
||||||
|
maxBytes = 2
|
||||||
|
case types.Int32, types.Uint32:
|
||||||
|
maxBytes = 4
|
||||||
|
default:
|
||||||
|
maxBytes = 8
|
||||||
|
}
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *importReader) mpint(b *types.Basic) constant.Value {
|
||||||
|
signed, maxBytes := intSize(b)
|
||||||
|
|
||||||
|
maxSmall := 256 - maxBytes
|
||||||
|
if signed {
|
||||||
|
maxSmall = 256 - 2*maxBytes
|
||||||
|
}
|
||||||
|
if maxBytes == 1 {
|
||||||
|
maxSmall = 256
|
||||||
|
}
|
||||||
|
|
||||||
|
n, _ := r.declReader.ReadByte()
|
||||||
|
if uint(n) < maxSmall {
|
||||||
|
v := int64(n)
|
||||||
|
if signed {
|
||||||
|
v >>= 1
|
||||||
|
if n&1 != 0 {
|
||||||
|
v = ^v
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return constant.MakeInt64(v)
|
||||||
|
}
|
||||||
|
|
||||||
|
v := -n
|
||||||
|
if signed {
|
||||||
|
v = -(n &^ 1) >> 1
|
||||||
|
}
|
||||||
|
if v < 1 || uint(v) > maxBytes {
|
||||||
|
errorf("weird decoding: %v, %v => %v", n, signed, v)
|
||||||
|
}
|
||||||
|
|
||||||
|
buf := make([]byte, v)
|
||||||
|
io.ReadFull(&r.declReader, buf)
|
||||||
|
|
||||||
|
// convert to little endian
|
||||||
|
// TODO(gri) go/constant should have a more direct conversion function
|
||||||
|
// (e.g., once it supports a big.Float based implementation)
|
||||||
|
for i, j := 0, len(buf)-1; i < j; i, j = i+1, j-1 {
|
||||||
|
buf[i], buf[j] = buf[j], buf[i]
|
||||||
|
}
|
||||||
|
|
||||||
|
x := constant.MakeFromBytes(buf)
|
||||||
|
if signed && n&1 != 0 {
|
||||||
|
x = constant.UnaryOp(token.SUB, x, 0)
|
||||||
|
}
|
||||||
|
return x
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *importReader) mpfloat(b *types.Basic) constant.Value {
|
||||||
|
x := r.mpint(b)
|
||||||
|
if constant.Sign(x) == 0 {
|
||||||
|
return x
|
||||||
|
}
|
||||||
|
|
||||||
|
exp := r.int64()
|
||||||
|
switch {
|
||||||
|
case exp > 0:
|
||||||
|
x = constant.Shift(x, token.SHL, uint(exp))
|
||||||
|
case exp < 0:
|
||||||
|
d := constant.Shift(constant.MakeInt64(1), token.SHL, uint(-exp))
|
||||||
|
x = constant.BinaryOp(x, token.QUO, d)
|
||||||
|
}
|
||||||
|
return x
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *importReader) ident() string {
|
||||||
|
return r.string()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *importReader) qualifiedIdent() (*types.Package, string) {
|
||||||
|
name := r.string()
|
||||||
|
pkg := r.pkg()
|
||||||
|
return pkg, name
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *importReader) pos() token.Pos {
|
||||||
|
delta := r.int64()
|
||||||
|
if delta != deltaNewFile {
|
||||||
|
r.prevLine += delta
|
||||||
|
} else if l := r.int64(); l == -1 {
|
||||||
|
r.prevLine += deltaNewFile
|
||||||
|
} else {
|
||||||
|
r.prevFile = r.string()
|
||||||
|
r.prevLine = l
|
||||||
|
}
|
||||||
|
|
||||||
|
if r.prevFile == "" && r.prevLine == 0 {
|
||||||
|
return token.NoPos
|
||||||
|
}
|
||||||
|
|
||||||
|
return r.p.fake.pos(r.prevFile, int(r.prevLine))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *importReader) typ() types.Type {
|
||||||
|
return r.p.typAt(r.uint64(), nil)
|
||||||
|
}
|
||||||
|
|
||||||
|
func isInterface(t types.Type) bool {
|
||||||
|
_, ok := t.(*types.Interface)
|
||||||
|
return ok
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *importReader) pkg() *types.Package { return r.p.pkgAt(r.uint64()) }
|
||||||
|
func (r *importReader) string() string { return r.p.stringAt(r.uint64()) }
|
||||||
|
|
||||||
|
func (r *importReader) doType(base *types.Named) types.Type {
|
||||||
|
switch k := r.kind(); k {
|
||||||
|
default:
|
||||||
|
errorf("unexpected kind tag in %q: %v", r.p.ipath, k)
|
||||||
|
return nil
|
||||||
|
|
||||||
|
case definedType:
|
||||||
|
pkg, name := r.qualifiedIdent()
|
||||||
|
r.p.doDecl(pkg, name)
|
||||||
|
return pkg.Scope().Lookup(name).(*types.TypeName).Type()
|
||||||
|
case pointerType:
|
||||||
|
return types.NewPointer(r.typ())
|
||||||
|
case sliceType:
|
||||||
|
return types.NewSlice(r.typ())
|
||||||
|
case arrayType:
|
||||||
|
n := r.uint64()
|
||||||
|
return types.NewArray(r.typ(), int64(n))
|
||||||
|
case chanType:
|
||||||
|
dir := chanDir(int(r.uint64()))
|
||||||
|
return types.NewChan(dir, r.typ())
|
||||||
|
case mapType:
|
||||||
|
return types.NewMap(r.typ(), r.typ())
|
||||||
|
case signatureType:
|
||||||
|
r.currPkg = r.pkg()
|
||||||
|
return r.signature(nil)
|
||||||
|
|
||||||
|
case structType:
|
||||||
|
r.currPkg = r.pkg()
|
||||||
|
|
||||||
|
fields := make([]*types.Var, r.uint64())
|
||||||
|
tags := make([]string, len(fields))
|
||||||
|
for i := range fields {
|
||||||
|
fpos := r.pos()
|
||||||
|
fname := r.ident()
|
||||||
|
ftyp := r.typ()
|
||||||
|
emb := r.bool()
|
||||||
|
tag := r.string()
|
||||||
|
|
||||||
|
fields[i] = types.NewField(fpos, r.currPkg, fname, ftyp, emb)
|
||||||
|
tags[i] = tag
|
||||||
|
}
|
||||||
|
return types.NewStruct(fields, tags)
|
||||||
|
|
||||||
|
case interfaceType:
|
||||||
|
r.currPkg = r.pkg()
|
||||||
|
|
||||||
|
embeddeds := make([]types.Type, r.uint64())
|
||||||
|
for i := range embeddeds {
|
||||||
|
_ = r.pos()
|
||||||
|
embeddeds[i] = r.typ()
|
||||||
|
}
|
||||||
|
|
||||||
|
methods := make([]*types.Func, r.uint64())
|
||||||
|
for i := range methods {
|
||||||
|
mpos := r.pos()
|
||||||
|
mname := r.ident()
|
||||||
|
|
||||||
|
// TODO(mdempsky): Matches bimport.go, but I
|
||||||
|
// don't agree with this.
|
||||||
|
var recv *types.Var
|
||||||
|
if base != nil {
|
||||||
|
recv = types.NewVar(token.NoPos, r.currPkg, "", base)
|
||||||
|
}
|
||||||
|
|
||||||
|
msig := r.signature(recv)
|
||||||
|
methods[i] = types.NewFunc(mpos, r.currPkg, mname, msig)
|
||||||
|
}
|
||||||
|
|
||||||
|
typ := newInterface(methods, embeddeds)
|
||||||
|
r.p.interfaceList = append(r.p.interfaceList, typ)
|
||||||
|
return typ
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *importReader) kind() itag {
|
||||||
|
return itag(r.uint64())
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *importReader) signature(recv *types.Var) *types.Signature {
|
||||||
|
params := r.paramList()
|
||||||
|
results := r.paramList()
|
||||||
|
variadic := params.Len() > 0 && r.bool()
|
||||||
|
return types.NewSignature(recv, params, results, variadic)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *importReader) paramList() *types.Tuple {
|
||||||
|
xs := make([]*types.Var, r.uint64())
|
||||||
|
for i := range xs {
|
||||||
|
xs[i] = r.param()
|
||||||
|
}
|
||||||
|
return types.NewTuple(xs...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *importReader) param() *types.Var {
|
||||||
|
pos := r.pos()
|
||||||
|
name := r.ident()
|
||||||
|
typ := r.typ()
|
||||||
|
return types.NewParam(pos, r.currPkg, name, typ)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *importReader) bool() bool {
|
||||||
|
return r.uint64() != 0
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *importReader) int64() int64 {
|
||||||
|
n, err := binary.ReadVarint(&r.declReader)
|
||||||
|
if err != nil {
|
||||||
|
errorf("readVarint: %v", err)
|
||||||
|
}
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *importReader) uint64() uint64 {
|
||||||
|
n, err := binary.ReadUvarint(&r.declReader)
|
||||||
|
if err != nil {
|
||||||
|
errorf("readUvarint: %v", err)
|
||||||
|
}
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *importReader) byte() byte {
|
||||||
|
x, err := r.declReader.ReadByte()
|
||||||
|
if err != nil {
|
||||||
|
errorf("declReader.ReadByte: %v", err)
|
||||||
|
}
|
||||||
|
return x
|
||||||
|
}
|
||||||
21
vendor/golang.org/x/tools/go/internal/gcimporter/newInterface10.go
generated
vendored
Normal file
21
vendor/golang.org/x/tools/go/internal/gcimporter/newInterface10.go
generated
vendored
Normal file
@@ -0,0 +1,21 @@
|
|||||||
|
// Copyright 2018 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build !go1.11
|
||||||
|
|
||||||
|
package gcimporter
|
||||||
|
|
||||||
|
import "go/types"
|
||||||
|
|
||||||
|
func newInterface(methods []*types.Func, embeddeds []types.Type) *types.Interface {
|
||||||
|
named := make([]*types.Named, len(embeddeds))
|
||||||
|
for i, e := range embeddeds {
|
||||||
|
var ok bool
|
||||||
|
named[i], ok = e.(*types.Named)
|
||||||
|
if !ok {
|
||||||
|
panic("embedding of non-defined interfaces in interfaces is not supported before Go 1.11")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return types.NewInterface(methods, named)
|
||||||
|
}
|
||||||
13
vendor/golang.org/x/tools/go/internal/gcimporter/newInterface11.go
generated
vendored
Normal file
13
vendor/golang.org/x/tools/go/internal/gcimporter/newInterface11.go
generated
vendored
Normal file
@@ -0,0 +1,13 @@
|
|||||||
|
// Copyright 2018 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build go1.11
|
||||||
|
|
||||||
|
package gcimporter
|
||||||
|
|
||||||
|
import "go/types"
|
||||||
|
|
||||||
|
func newInterface(methods []*types.Func, embeddeds []types.Type) *types.Interface {
|
||||||
|
return types.NewInterfaceType(methods, embeddeds)
|
||||||
|
}
|
||||||
23
vendor/golang.org/x/tools/go/internal/packagesdriver/BUILD
generated
vendored
Normal file
23
vendor/golang.org/x/tools/go/internal/packagesdriver/BUILD
generated
vendored
Normal file
@@ -0,0 +1,23 @@
|
|||||||
|
load("@io_bazel_rules_go//go:def.bzl", "go_library")
|
||||||
|
|
||||||
|
go_library(
|
||||||
|
name = "go_default_library",
|
||||||
|
srcs = ["sizes.go"],
|
||||||
|
importmap = "k8s.io/kubernetes/vendor/golang.org/x/tools/go/internal/packagesdriver",
|
||||||
|
importpath = "golang.org/x/tools/go/internal/packagesdriver",
|
||||||
|
visibility = ["//vendor/golang.org/x/tools/go:__subpackages__"],
|
||||||
|
)
|
||||||
|
|
||||||
|
filegroup(
|
||||||
|
name = "package-srcs",
|
||||||
|
srcs = glob(["**"]),
|
||||||
|
tags = ["automanaged"],
|
||||||
|
visibility = ["//visibility:private"],
|
||||||
|
)
|
||||||
|
|
||||||
|
filegroup(
|
||||||
|
name = "all-srcs",
|
||||||
|
srcs = [":package-srcs"],
|
||||||
|
tags = ["automanaged"],
|
||||||
|
visibility = ["//visibility:public"],
|
||||||
|
)
|
||||||
160
vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go
generated
vendored
Normal file
160
vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go
generated
vendored
Normal file
@@ -0,0 +1,160 @@
|
|||||||
|
// Copyright 2018 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// Package packagesdriver fetches type sizes for go/packages and go/analysis.
|
||||||
|
package packagesdriver
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"context"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"go/types"
|
||||||
|
"log"
|
||||||
|
"os"
|
||||||
|
"os/exec"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
var debug = false
|
||||||
|
|
||||||
|
// GetSizes returns the sizes used by the underlying driver with the given parameters.
|
||||||
|
func GetSizes(ctx context.Context, buildFlags, env []string, dir string, usesExportData bool) (types.Sizes, error) {
|
||||||
|
// TODO(matloob): Clean this up. This code is mostly a copy of packages.findExternalDriver.
|
||||||
|
const toolPrefix = "GOPACKAGESDRIVER="
|
||||||
|
tool := ""
|
||||||
|
for _, env := range env {
|
||||||
|
if val := strings.TrimPrefix(env, toolPrefix); val != env {
|
||||||
|
tool = val
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if tool == "" {
|
||||||
|
var err error
|
||||||
|
tool, err = exec.LookPath("gopackagesdriver")
|
||||||
|
if err != nil {
|
||||||
|
// We did not find the driver, so use "go list".
|
||||||
|
tool = "off"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if tool == "off" {
|
||||||
|
return GetSizesGolist(ctx, buildFlags, env, dir, usesExportData)
|
||||||
|
}
|
||||||
|
|
||||||
|
req, err := json.Marshal(struct {
|
||||||
|
Command string `json:"command"`
|
||||||
|
Env []string `json:"env"`
|
||||||
|
BuildFlags []string `json:"build_flags"`
|
||||||
|
}{
|
||||||
|
Command: "sizes",
|
||||||
|
Env: env,
|
||||||
|
BuildFlags: buildFlags,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to encode message to driver tool: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
buf := new(bytes.Buffer)
|
||||||
|
cmd := exec.CommandContext(ctx, tool)
|
||||||
|
cmd.Dir = dir
|
||||||
|
cmd.Env = env
|
||||||
|
cmd.Stdin = bytes.NewReader(req)
|
||||||
|
cmd.Stdout = buf
|
||||||
|
cmd.Stderr = new(bytes.Buffer)
|
||||||
|
if err := cmd.Run(); err != nil {
|
||||||
|
return nil, fmt.Errorf("%v: %v: %s", tool, err, cmd.Stderr)
|
||||||
|
}
|
||||||
|
var response struct {
|
||||||
|
// Sizes, if not nil, is the types.Sizes to use when type checking.
|
||||||
|
Sizes *types.StdSizes
|
||||||
|
}
|
||||||
|
if err := json.Unmarshal(buf.Bytes(), &response); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return response.Sizes, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func GetSizesGolist(ctx context.Context, buildFlags, env []string, dir string, usesExportData bool) (types.Sizes, error) {
|
||||||
|
args := []string{"list", "-f", "{{context.GOARCH}} {{context.Compiler}}"}
|
||||||
|
args = append(args, buildFlags...)
|
||||||
|
args = append(args, "--", "unsafe")
|
||||||
|
stdout, err := InvokeGo(ctx, env, dir, usesExportData, args...)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
fields := strings.Fields(stdout.String())
|
||||||
|
if len(fields) < 2 {
|
||||||
|
return nil, fmt.Errorf("could not determine GOARCH and Go compiler")
|
||||||
|
}
|
||||||
|
goarch := fields[0]
|
||||||
|
compiler := fields[1]
|
||||||
|
return types.SizesFor(compiler, goarch), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// InvokeGo returns the stdout of a go command invocation.
|
||||||
|
func InvokeGo(ctx context.Context, env []string, dir string, usesExportData bool, args ...string) (*bytes.Buffer, error) {
|
||||||
|
if debug {
|
||||||
|
defer func(start time.Time) { log.Printf("%s for %v", time.Since(start), cmdDebugStr(env, args...)) }(time.Now())
|
||||||
|
}
|
||||||
|
stdout := new(bytes.Buffer)
|
||||||
|
stderr := new(bytes.Buffer)
|
||||||
|
cmd := exec.CommandContext(ctx, "go", args...)
|
||||||
|
// On darwin the cwd gets resolved to the real path, which breaks anything that
|
||||||
|
// expects the working directory to keep the original path, including the
|
||||||
|
// go command when dealing with modules.
|
||||||
|
// The Go stdlib has a special feature where if the cwd and the PWD are the
|
||||||
|
// same node then it trusts the PWD, so by setting it in the env for the child
|
||||||
|
// process we fix up all the paths returned by the go command.
|
||||||
|
cmd.Env = append(append([]string{}, env...), "PWD="+dir)
|
||||||
|
cmd.Dir = dir
|
||||||
|
cmd.Stdout = stdout
|
||||||
|
cmd.Stderr = stderr
|
||||||
|
if err := cmd.Run(); err != nil {
|
||||||
|
exitErr, ok := err.(*exec.ExitError)
|
||||||
|
if !ok {
|
||||||
|
// Catastrophic error:
|
||||||
|
// - executable not found
|
||||||
|
// - context cancellation
|
||||||
|
return nil, fmt.Errorf("couldn't exec 'go %v': %s %T", args, err, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Export mode entails a build.
|
||||||
|
// If that build fails, errors appear on stderr
|
||||||
|
// (despite the -e flag) and the Export field is blank.
|
||||||
|
// Do not fail in that case.
|
||||||
|
if !usesExportData {
|
||||||
|
return nil, fmt.Errorf("go %v: %s: %s", args, exitErr, stderr)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// As of writing, go list -export prints some non-fatal compilation
|
||||||
|
// errors to stderr, even with -e set. We would prefer that it put
|
||||||
|
// them in the Package.Error JSON (see https://golang.org/issue/26319).
|
||||||
|
// In the meantime, there's nowhere good to put them, but they can
|
||||||
|
// be useful for debugging. Print them if $GOPACKAGESPRINTGOLISTERRORS
|
||||||
|
// is set.
|
||||||
|
if len(stderr.Bytes()) != 0 && os.Getenv("GOPACKAGESPRINTGOLISTERRORS") != "" {
|
||||||
|
fmt.Fprintf(os.Stderr, "%s stderr: <<%s>>\n", cmdDebugStr(env, args...), stderr)
|
||||||
|
}
|
||||||
|
|
||||||
|
// debugging
|
||||||
|
if false {
|
||||||
|
fmt.Fprintf(os.Stderr, "%s stdout: <<%s>>\n", cmdDebugStr(env, args...), stdout)
|
||||||
|
}
|
||||||
|
|
||||||
|
return stdout, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func cmdDebugStr(envlist []string, args ...string) string {
|
||||||
|
env := make(map[string]string)
|
||||||
|
for _, kv := range envlist {
|
||||||
|
split := strings.Split(kv, "=")
|
||||||
|
k, v := split[0], split[1]
|
||||||
|
env[k] = v
|
||||||
|
}
|
||||||
|
|
||||||
|
return fmt.Sprintf("GOROOT=%v GOPATH=%v GO111MODULE=%v PWD=%v go %v", env["GOROOT"], env["GOPATH"], env["GO111MODULE"], env["PWD"], args)
|
||||||
|
}
|
||||||
39
vendor/golang.org/x/tools/go/packages/BUILD
generated
vendored
Normal file
39
vendor/golang.org/x/tools/go/packages/BUILD
generated
vendored
Normal file
@@ -0,0 +1,39 @@
|
|||||||
|
load("@io_bazel_rules_go//go:def.bzl", "go_library")
|
||||||
|
|
||||||
|
go_library(
|
||||||
|
name = "go_default_library",
|
||||||
|
srcs = [
|
||||||
|
"doc.go",
|
||||||
|
"external.go",
|
||||||
|
"golist.go",
|
||||||
|
"golist_fallback.go",
|
||||||
|
"golist_fallback_testmain.go",
|
||||||
|
"golist_overlay.go",
|
||||||
|
"packages.go",
|
||||||
|
"visit.go",
|
||||||
|
],
|
||||||
|
importmap = "k8s.io/kubernetes/vendor/golang.org/x/tools/go/packages",
|
||||||
|
importpath = "golang.org/x/tools/go/packages",
|
||||||
|
visibility = ["//visibility:public"],
|
||||||
|
deps = [
|
||||||
|
"//vendor/golang.org/x/tools/go/gcexportdata:go_default_library",
|
||||||
|
"//vendor/golang.org/x/tools/go/internal/cgo:go_default_library",
|
||||||
|
"//vendor/golang.org/x/tools/go/internal/packagesdriver:go_default_library",
|
||||||
|
"//vendor/golang.org/x/tools/internal/gopathwalk:go_default_library",
|
||||||
|
"//vendor/golang.org/x/tools/internal/semver:go_default_library",
|
||||||
|
],
|
||||||
|
)
|
||||||
|
|
||||||
|
filegroup(
|
||||||
|
name = "package-srcs",
|
||||||
|
srcs = glob(["**"]),
|
||||||
|
tags = ["automanaged"],
|
||||||
|
visibility = ["//visibility:private"],
|
||||||
|
)
|
||||||
|
|
||||||
|
filegroup(
|
||||||
|
name = "all-srcs",
|
||||||
|
srcs = [":package-srcs"],
|
||||||
|
tags = ["automanaged"],
|
||||||
|
visibility = ["//visibility:public"],
|
||||||
|
)
|
||||||
222
vendor/golang.org/x/tools/go/packages/doc.go
generated
vendored
Normal file
222
vendor/golang.org/x/tools/go/packages/doc.go
generated
vendored
Normal file
@@ -0,0 +1,222 @@
|
|||||||
|
// Copyright 2018 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
/*
|
||||||
|
Package packages loads Go packages for inspection and analysis.
|
||||||
|
|
||||||
|
The Load function takes as input a list of patterns and return a list of Package
|
||||||
|
structs describing individual packages matched by those patterns.
|
||||||
|
The LoadMode controls the amount of detail in the loaded packages.
|
||||||
|
|
||||||
|
Load passes most patterns directly to the underlying build tool,
|
||||||
|
but all patterns with the prefix "query=", where query is a
|
||||||
|
non-empty string of letters from [a-z], are reserved and may be
|
||||||
|
interpreted as query operators.
|
||||||
|
|
||||||
|
Two query operators are currently supported: "file" and "pattern".
|
||||||
|
|
||||||
|
The query "file=path/to/file.go" matches the package or packages enclosing
|
||||||
|
the Go source file path/to/file.go. For example "file=~/go/src/fmt/print.go"
|
||||||
|
might return the packages "fmt" and "fmt [fmt.test]".
|
||||||
|
|
||||||
|
The query "pattern=string" causes "string" to be passed directly to
|
||||||
|
the underlying build tool. In most cases this is unnecessary,
|
||||||
|
but an application can use Load("pattern=" + x) as an escaping mechanism
|
||||||
|
to ensure that x is not interpreted as a query operator if it contains '='.
|
||||||
|
|
||||||
|
All other query operators are reserved for future use and currently
|
||||||
|
cause Load to report an error.
|
||||||
|
|
||||||
|
The Package struct provides basic information about the package, including
|
||||||
|
|
||||||
|
- ID, a unique identifier for the package in the returned set;
|
||||||
|
- GoFiles, the names of the package's Go source files;
|
||||||
|
- Imports, a map from source import strings to the Packages they name;
|
||||||
|
- Types, the type information for the package's exported symbols;
|
||||||
|
- Syntax, the parsed syntax trees for the package's source code; and
|
||||||
|
- TypeInfo, the result of a complete type-check of the package syntax trees.
|
||||||
|
|
||||||
|
(See the documentation for type Package for the complete list of fields
|
||||||
|
and more detailed descriptions.)
|
||||||
|
|
||||||
|
For example,
|
||||||
|
|
||||||
|
Load(nil, "bytes", "unicode...")
|
||||||
|
|
||||||
|
returns four Package structs describing the standard library packages
|
||||||
|
bytes, unicode, unicode/utf16, and unicode/utf8. Note that one pattern
|
||||||
|
can match multiple packages and that a package might be matched by
|
||||||
|
multiple patterns: in general it is not possible to determine which
|
||||||
|
packages correspond to which patterns.
|
||||||
|
|
||||||
|
Note that the list returned by Load contains only the packages matched
|
||||||
|
by the patterns. Their dependencies can be found by walking the import
|
||||||
|
graph using the Imports fields.
|
||||||
|
|
||||||
|
The Load function can be configured by passing a pointer to a Config as
|
||||||
|
the first argument. A nil Config is equivalent to the zero Config, which
|
||||||
|
causes Load to run in LoadFiles mode, collecting minimal information.
|
||||||
|
See the documentation for type Config for details.
|
||||||
|
|
||||||
|
As noted earlier, the Config.Mode controls the amount of detail
|
||||||
|
reported about the loaded packages, with each mode returning all the data of the
|
||||||
|
previous mode with some extra added. See the documentation for type LoadMode
|
||||||
|
for details.
|
||||||
|
|
||||||
|
Most tools should pass their command-line arguments (after any flags)
|
||||||
|
uninterpreted to the loader, so that the loader can interpret them
|
||||||
|
according to the conventions of the underlying build system.
|
||||||
|
See the Example function for typical usage.
|
||||||
|
|
||||||
|
*/
|
||||||
|
package packages
|
||||||
|
|
||||||
|
/*
|
||||||
|
|
||||||
|
Motivation and design considerations
|
||||||
|
|
||||||
|
The new package's design solves problems addressed by two existing
|
||||||
|
packages: go/build, which locates and describes packages, and
|
||||||
|
golang.org/x/tools/go/loader, which loads, parses and type-checks them.
|
||||||
|
The go/build.Package structure encodes too much of the 'go build' way
|
||||||
|
of organizing projects, leaving us in need of a data type that describes a
|
||||||
|
package of Go source code independent of the underlying build system.
|
||||||
|
We wanted something that works equally well with go build and vgo, and
|
||||||
|
also other build systems such as Bazel and Blaze, making it possible to
|
||||||
|
construct analysis tools that work in all these environments.
|
||||||
|
Tools such as errcheck and staticcheck were essentially unavailable to
|
||||||
|
the Go community at Google, and some of Google's internal tools for Go
|
||||||
|
are unavailable externally.
|
||||||
|
This new package provides a uniform way to obtain package metadata by
|
||||||
|
querying each of these build systems, optionally supporting their
|
||||||
|
preferred command-line notations for packages, so that tools integrate
|
||||||
|
neatly with users' build environments. The Metadata query function
|
||||||
|
executes an external query tool appropriate to the current workspace.
|
||||||
|
|
||||||
|
Loading packages always returns the complete import graph "all the way down",
|
||||||
|
even if all you want is information about a single package, because the query
|
||||||
|
mechanisms of all the build systems we currently support ({go,vgo} list, and
|
||||||
|
blaze/bazel aspect-based query) cannot provide detailed information
|
||||||
|
about one package without visiting all its dependencies too, so there is
|
||||||
|
no additional asymptotic cost to providing transitive information.
|
||||||
|
(This property might not be true of a hypothetical 5th build system.)
|
||||||
|
|
||||||
|
In calls to TypeCheck, all initial packages, and any package that
|
||||||
|
transitively depends on one of them, must be loaded from source.
|
||||||
|
Consider A->B->C->D->E: if A,C are initial, A,B,C must be loaded from
|
||||||
|
source; D may be loaded from export data, and E may not be loaded at all
|
||||||
|
(though it's possible that D's export data mentions it, so a
|
||||||
|
types.Package may be created for it and exposed.)
|
||||||
|
|
||||||
|
The old loader had a feature to suppress type-checking of function
|
||||||
|
bodies on a per-package basis, primarily intended to reduce the work of
|
||||||
|
obtaining type information for imported packages. Now that imports are
|
||||||
|
satisfied by export data, the optimization no longer seems necessary.
|
||||||
|
|
||||||
|
Despite some early attempts, the old loader did not exploit export data,
|
||||||
|
instead always using the equivalent of WholeProgram mode. This was due
|
||||||
|
to the complexity of mixing source and export data packages (now
|
||||||
|
resolved by the upward traversal mentioned above), and because export data
|
||||||
|
files were nearly always missing or stale. Now that 'go build' supports
|
||||||
|
caching, all the underlying build systems can guarantee to produce
|
||||||
|
export data in a reasonable (amortized) time.
|
||||||
|
|
||||||
|
Test "main" packages synthesized by the build system are now reported as
|
||||||
|
first-class packages, avoiding the need for clients (such as go/ssa) to
|
||||||
|
reinvent this generation logic.
|
||||||
|
|
||||||
|
One way in which go/packages is simpler than the old loader is in its
|
||||||
|
treatment of in-package tests. In-package tests are packages that
|
||||||
|
consist of all the files of the library under test, plus the test files.
|
||||||
|
The old loader constructed in-package tests by a two-phase process of
|
||||||
|
mutation called "augmentation": first it would construct and type check
|
||||||
|
all the ordinary library packages and type-check the packages that
|
||||||
|
depend on them; then it would add more (test) files to the package and
|
||||||
|
type-check again. This two-phase approach had four major problems:
|
||||||
|
1) in processing the tests, the loader modified the library package,
|
||||||
|
leaving no way for a client application to see both the test
|
||||||
|
package and the library package; one would mutate into the other.
|
||||||
|
2) because test files can declare additional methods on types defined in
|
||||||
|
the library portion of the package, the dispatch of method calls in
|
||||||
|
the library portion was affected by the presence of the test files.
|
||||||
|
This should have been a clue that the packages were logically
|
||||||
|
different.
|
||||||
|
3) this model of "augmentation" assumed at most one in-package test
|
||||||
|
per library package, which is true of projects using 'go build',
|
||||||
|
but not other build systems.
|
||||||
|
4) because of the two-phase nature of test processing, all packages that
|
||||||
|
import the library package had to be processed before augmentation,
|
||||||
|
forcing a "one-shot" API and preventing the client from calling Load
|
||||||
|
several times in sequence as is now possible in WholeProgram mode.
|
||||||
|
(TypeCheck mode has a similar one-shot restriction for a different reason.)
|
||||||
|
|
||||||
|
Early drafts of this package supported "multi-shot" operation.
|
||||||
|
Although it allowed clients to make a sequence of calls (or concurrent
|
||||||
|
calls) to Load, building up the graph of Packages incrementally,
|
||||||
|
it was of marginal value: it complicated the API
|
||||||
|
(since it allowed some options to vary across calls but not others),
|
||||||
|
it complicated the implementation,
|
||||||
|
it cannot be made to work in Types mode, as explained above,
|
||||||
|
and it was less efficient than making one combined call (when this is possible).
|
||||||
|
Among the clients we have inspected, none made multiple calls to load
|
||||||
|
but could not be easily and satisfactorily modified to make only a single call.
|
||||||
|
However, application changes may be required.
|
||||||
|
For example, the ssadump command loads the user-specified packages
|
||||||
|
and in addition the runtime package. It is tempting to simply append
|
||||||
|
"runtime" to the user-provided list, but that does not work if the user
|
||||||
|
specified an ad-hoc package such as [a.go b.go].
|
||||||
|
Instead, ssadump no longer requests the runtime package,
|
||||||
|
but seeks it among the dependencies of the user-specified packages,
|
||||||
|
and emits an error if it is not found.
|
||||||
|
|
||||||
|
Overlays: The Overlay field in the Config allows providing alternate contents
|
||||||
|
for Go source files, by providing a mapping from file path to contents.
|
||||||
|
go/packages will pull in new imports added in overlay files when go/packages
|
||||||
|
is run in LoadImports mode or greater.
|
||||||
|
Overlay support for the go list driver isn't complete yet: if the file doesn't
|
||||||
|
exist on disk, it will only be recognized in an overlay if it is a non-test file
|
||||||
|
and the package would be reported even without the overlay.
|
||||||
|
|
||||||
|
Questions & Tasks
|
||||||
|
|
||||||
|
- Add GOARCH/GOOS?
|
||||||
|
They are not portable concepts, but could be made portable.
|
||||||
|
Our goal has been to allow users to express themselves using the conventions
|
||||||
|
of the underlying build system: if the build system honors GOARCH
|
||||||
|
during a build and during a metadata query, then so should
|
||||||
|
applications built atop that query mechanism.
|
||||||
|
Conversely, if the target architecture of the build is determined by
|
||||||
|
command-line flags, the application can pass the relevant
|
||||||
|
flags through to the build system using a command such as:
|
||||||
|
myapp -query_flag="--cpu=amd64" -query_flag="--os=darwin"
|
||||||
|
However, this approach is low-level, unwieldy, and non-portable.
|
||||||
|
GOOS and GOARCH seem important enough to warrant a dedicated option.
|
||||||
|
|
||||||
|
- How should we handle partial failures such as a mixture of good and
|
||||||
|
malformed patterns, existing and non-existent packages, successful and
|
||||||
|
failed builds, import failures, import cycles, and so on, in a call to
|
||||||
|
Load?
|
||||||
|
|
||||||
|
- Support bazel, blaze, and go1.10 list, not just go1.11 list.
|
||||||
|
|
||||||
|
- Handle (and test) various partial success cases, e.g.
|
||||||
|
a mixture of good packages and:
|
||||||
|
invalid patterns
|
||||||
|
nonexistent packages
|
||||||
|
empty packages
|
||||||
|
packages with malformed package or import declarations
|
||||||
|
unreadable files
|
||||||
|
import cycles
|
||||||
|
other parse errors
|
||||||
|
type errors
|
||||||
|
Make sure we record errors at the correct place in the graph.
|
||||||
|
|
||||||
|
- Missing packages among initial arguments are not reported.
|
||||||
|
Return bogus packages for them, like golist does.
|
||||||
|
|
||||||
|
- "undeclared name" errors (for example) are reported out of source file
|
||||||
|
order. I suspect this is due to the breadth-first resolution now used
|
||||||
|
by go/types. Is that a bug? Discuss with gri.
|
||||||
|
|
||||||
|
*/
|
||||||
79
vendor/golang.org/x/tools/go/packages/external.go
generated
vendored
Normal file
79
vendor/golang.org/x/tools/go/packages/external.go
generated
vendored
Normal file
@@ -0,0 +1,79 @@
|
|||||||
|
// Copyright 2018 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// This file enables an external tool to intercept package requests.
|
||||||
|
// If the tool is present then its results are used in preference to
|
||||||
|
// the go list command.
|
||||||
|
|
||||||
|
package packages
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"os/exec"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Driver
|
||||||
|
type driverRequest struct {
|
||||||
|
Command string `json "command"`
|
||||||
|
Mode LoadMode `json:"mode"`
|
||||||
|
Env []string `json:"env"`
|
||||||
|
BuildFlags []string `json:"build_flags"`
|
||||||
|
Tests bool `json:"tests"`
|
||||||
|
Overlay map[string][]byte `json:"overlay"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// findExternalDriver returns the file path of a tool that supplies
|
||||||
|
// the build system package structure, or "" if not found."
|
||||||
|
// If GOPACKAGESDRIVER is set in the environment findExternalTool returns its
|
||||||
|
// value, otherwise it searches for a binary named gopackagesdriver on the PATH.
|
||||||
|
func findExternalDriver(cfg *Config) driver {
|
||||||
|
const toolPrefix = "GOPACKAGESDRIVER="
|
||||||
|
tool := ""
|
||||||
|
for _, env := range cfg.Env {
|
||||||
|
if val := strings.TrimPrefix(env, toolPrefix); val != env {
|
||||||
|
tool = val
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if tool != "" && tool == "off" {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
if tool == "" {
|
||||||
|
var err error
|
||||||
|
tool, err = exec.LookPath("gopackagesdriver")
|
||||||
|
if err != nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return func(cfg *Config, words ...string) (*driverResponse, error) {
|
||||||
|
req, err := json.Marshal(driverRequest{
|
||||||
|
Mode: cfg.Mode,
|
||||||
|
Env: cfg.Env,
|
||||||
|
BuildFlags: cfg.BuildFlags,
|
||||||
|
Tests: cfg.Tests,
|
||||||
|
Overlay: cfg.Overlay,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to encode message to driver tool: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
buf := new(bytes.Buffer)
|
||||||
|
cmd := exec.CommandContext(cfg.Context, tool, words...)
|
||||||
|
cmd.Dir = cfg.Dir
|
||||||
|
cmd.Env = cfg.Env
|
||||||
|
cmd.Stdin = bytes.NewReader(req)
|
||||||
|
cmd.Stdout = buf
|
||||||
|
cmd.Stderr = new(bytes.Buffer)
|
||||||
|
if err := cmd.Run(); err != nil {
|
||||||
|
return nil, fmt.Errorf("%v: %v: %s", tool, err, cmd.Stderr)
|
||||||
|
}
|
||||||
|
var response driverResponse
|
||||||
|
if err := json.Unmarshal(buf.Bytes(), &response); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return &response, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
809
vendor/golang.org/x/tools/go/packages/golist.go
generated
vendored
Normal file
809
vendor/golang.org/x/tools/go/packages/golist.go
generated
vendored
Normal file
@@ -0,0 +1,809 @@
|
|||||||
|
// Copyright 2018 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package packages
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"go/types"
|
||||||
|
"io/ioutil"
|
||||||
|
"log"
|
||||||
|
"os"
|
||||||
|
"os/exec"
|
||||||
|
"path/filepath"
|
||||||
|
"reflect"
|
||||||
|
"regexp"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"golang.org/x/tools/go/internal/packagesdriver"
|
||||||
|
"golang.org/x/tools/internal/gopathwalk"
|
||||||
|
"golang.org/x/tools/internal/semver"
|
||||||
|
)
|
||||||
|
|
||||||
|
// debug controls verbose logging.
// It is true when GOPACKAGESDEBUG is set to a value strconv.ParseBool
// accepts as true; an unset variable or a parse error leaves it false.
var debug, _ = strconv.ParseBool(os.Getenv("GOPACKAGESDEBUG"))
|
||||||
|
|
||||||
|
// A goTooOldError reports that the go command
// found by exec.LookPath is too old to use the new go list behavior.
// The underlying error is embedded so callers still see its message.
type goTooOldError struct {
	error
}
|
||||||
|
|
||||||
|
// responseDeduper wraps a driverResponse, deduplicating its contents.
type responseDeduper struct {
	// seenRoots records root IDs already present in dr.Roots.
	seenRoots map[string]bool
	// seenPackages maps package ID to the package already present in dr.Packages.
	seenPackages map[string]*Package
	// dr is the accumulated, deduplicated response.
	dr *driverResponse
}
|
||||||
|
|
||||||
|
// init fills in r with a driverResponse.
|
||||||
|
func (r *responseDeduper) init(dr *driverResponse) {
|
||||||
|
r.dr = dr
|
||||||
|
r.seenRoots = map[string]bool{}
|
||||||
|
r.seenPackages = map[string]*Package{}
|
||||||
|
for _, pkg := range dr.Packages {
|
||||||
|
r.seenPackages[pkg.ID] = pkg
|
||||||
|
}
|
||||||
|
for _, root := range dr.Roots {
|
||||||
|
r.seenRoots[root] = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *responseDeduper) addPackage(p *Package) {
|
||||||
|
if r.seenPackages[p.ID] != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
r.seenPackages[p.ID] = p
|
||||||
|
r.dr.Packages = append(r.dr.Packages, p)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *responseDeduper) addRoot(id string) {
|
||||||
|
if r.seenRoots[id] {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
r.seenRoots[id] = true
|
||||||
|
r.dr.Roots = append(r.dr.Roots, id)
|
||||||
|
}
|
||||||
|
|
||||||
|
// goListDriver uses the go list command to interpret the patterns and produce
|
||||||
|
// the build system package structure.
|
||||||
|
// See driver for more details.
|
||||||
|
func goListDriver(cfg *Config, patterns ...string) (*driverResponse, error) {
|
||||||
|
var sizes types.Sizes
|
||||||
|
var sizeserr error
|
||||||
|
var sizeswg sync.WaitGroup
|
||||||
|
if cfg.Mode >= LoadTypes {
|
||||||
|
sizeswg.Add(1)
|
||||||
|
go func() {
|
||||||
|
sizes, sizeserr = getSizes(cfg)
|
||||||
|
sizeswg.Done()
|
||||||
|
}()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Determine files requested in contains patterns
|
||||||
|
var containFiles []string
|
||||||
|
var packagesNamed []string
|
||||||
|
restPatterns := make([]string, 0, len(patterns))
|
||||||
|
// Extract file= and other [querytype]= patterns. Report an error if querytype
|
||||||
|
// doesn't exist.
|
||||||
|
extractQueries:
|
||||||
|
for _, pattern := range patterns {
|
||||||
|
eqidx := strings.Index(pattern, "=")
|
||||||
|
if eqidx < 0 {
|
||||||
|
restPatterns = append(restPatterns, pattern)
|
||||||
|
} else {
|
||||||
|
query, value := pattern[:eqidx], pattern[eqidx+len("="):]
|
||||||
|
switch query {
|
||||||
|
case "file":
|
||||||
|
containFiles = append(containFiles, value)
|
||||||
|
case "pattern":
|
||||||
|
restPatterns = append(restPatterns, value)
|
||||||
|
case "iamashamedtousethedisabledqueryname":
|
||||||
|
packagesNamed = append(packagesNamed, value)
|
||||||
|
case "": // not a reserved query
|
||||||
|
restPatterns = append(restPatterns, pattern)
|
||||||
|
default:
|
||||||
|
for _, rune := range query {
|
||||||
|
if rune < 'a' || rune > 'z' { // not a reserved query
|
||||||
|
restPatterns = append(restPatterns, pattern)
|
||||||
|
continue extractQueries
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Reject all other patterns containing "="
|
||||||
|
return nil, fmt.Errorf("invalid query type %q in query pattern %q", query, pattern)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TODO(matloob): Remove the definition of listfunc and just use golistPackages once go1.12 is released.
|
||||||
|
var listfunc driver
|
||||||
|
var isFallback bool
|
||||||
|
listfunc = func(cfg *Config, words ...string) (*driverResponse, error) {
|
||||||
|
response, err := golistDriverCurrent(cfg, words...)
|
||||||
|
if _, ok := err.(goTooOldError); ok {
|
||||||
|
isFallback = true
|
||||||
|
listfunc = golistDriverFallback
|
||||||
|
return listfunc(cfg, words...)
|
||||||
|
}
|
||||||
|
listfunc = golistDriverCurrent
|
||||||
|
return response, err
|
||||||
|
}
|
||||||
|
|
||||||
|
response := &responseDeduper{}
|
||||||
|
var err error
|
||||||
|
|
||||||
|
// See if we have any patterns to pass through to go list. Zero initial
|
||||||
|
// patterns also requires a go list call, since it's the equivalent of
|
||||||
|
// ".".
|
||||||
|
if len(restPatterns) > 0 || len(patterns) == 0 {
|
||||||
|
dr, err := listfunc(cfg, restPatterns...)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
response.init(dr)
|
||||||
|
} else {
|
||||||
|
response.init(&driverResponse{})
|
||||||
|
}
|
||||||
|
|
||||||
|
sizeswg.Wait()
|
||||||
|
if sizeserr != nil {
|
||||||
|
return nil, sizeserr
|
||||||
|
}
|
||||||
|
// types.SizesFor always returns nil or a *types.StdSizes
|
||||||
|
response.dr.Sizes, _ = sizes.(*types.StdSizes)
|
||||||
|
|
||||||
|
var containsCandidates []string
|
||||||
|
|
||||||
|
if len(containFiles) != 0 {
|
||||||
|
if err := runContainsQueries(cfg, listfunc, isFallback, response, containFiles); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(packagesNamed) != 0 {
|
||||||
|
if err := runNamedQueries(cfg, listfunc, response, packagesNamed); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
modifiedPkgs, needPkgs, err := processGolistOverlay(cfg, response.dr)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if len(containFiles) > 0 {
|
||||||
|
containsCandidates = append(containsCandidates, modifiedPkgs...)
|
||||||
|
containsCandidates = append(containsCandidates, needPkgs...)
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(needPkgs) > 0 {
|
||||||
|
addNeededOverlayPackages(cfg, listfunc, response, needPkgs)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Check candidate packages for containFiles.
|
||||||
|
if len(containFiles) > 0 {
|
||||||
|
for _, id := range containsCandidates {
|
||||||
|
pkg := response.seenPackages[id]
|
||||||
|
for _, f := range containFiles {
|
||||||
|
for _, g := range pkg.GoFiles {
|
||||||
|
if sameFile(f, g) {
|
||||||
|
response.addRoot(id)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return response.dr, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func addNeededOverlayPackages(cfg *Config, driver driver, response *responseDeduper, pkgs []string) error {
|
||||||
|
dr, err := driver(cfg, pkgs...)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
for _, pkg := range dr.Packages {
|
||||||
|
response.addPackage(pkg)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func runContainsQueries(cfg *Config, driver driver, isFallback bool, response *responseDeduper, queries []string) error {
|
||||||
|
for _, query := range queries {
|
||||||
|
// TODO(matloob): Do only one query per directory.
|
||||||
|
fdir := filepath.Dir(query)
|
||||||
|
// Pass absolute path of directory to go list so that it knows to treat it as a directory,
|
||||||
|
// not a package path.
|
||||||
|
pattern, err := filepath.Abs(fdir)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("could not determine absolute path of file= query path %q: %v", query, err)
|
||||||
|
}
|
||||||
|
if isFallback {
|
||||||
|
pattern = "."
|
||||||
|
cfg.Dir = fdir
|
||||||
|
}
|
||||||
|
|
||||||
|
dirResponse, err := driver(cfg, pattern)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
isRoot := make(map[string]bool, len(dirResponse.Roots))
|
||||||
|
for _, root := range dirResponse.Roots {
|
||||||
|
isRoot[root] = true
|
||||||
|
}
|
||||||
|
for _, pkg := range dirResponse.Packages {
|
||||||
|
// Add any new packages to the main set
|
||||||
|
// We don't bother to filter packages that will be dropped by the changes of roots,
|
||||||
|
// that will happen anyway during graph construction outside this function.
|
||||||
|
// Over-reporting packages is not a problem.
|
||||||
|
response.addPackage(pkg)
|
||||||
|
// if the package was not a root one, it cannot have the file
|
||||||
|
if !isRoot[pkg.ID] {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
for _, pkgFile := range pkg.GoFiles {
|
||||||
|
if filepath.Base(query) == filepath.Base(pkgFile) {
|
||||||
|
response.addRoot(pkg.ID)
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// modCacheRegexp splits a path in a module cache into module, module version, and package.
// Submatch 1 is the module path, submatch 2 is the version (the text after the
// last '@' excluding path separators), and submatch 3 is the package path
// within the module.
var modCacheRegexp = regexp.MustCompile(`(.*)@([^/\\]*)(.*)`)
|
||||||
|
|
||||||
|
func runNamedQueries(cfg *Config, driver driver, response *responseDeduper, queries []string) error {
|
||||||
|
// calling `go env` isn't free; bail out if there's nothing to do.
|
||||||
|
if len(queries) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
// Determine which directories are relevant to scan.
|
||||||
|
roots, modRoot, err := roots(cfg)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Scan the selected directories. Simple matches, from GOPATH/GOROOT
|
||||||
|
// or the local module, can simply be "go list"ed. Matches from the
|
||||||
|
// module cache need special treatment.
|
||||||
|
var matchesMu sync.Mutex
|
||||||
|
var simpleMatches, modCacheMatches []string
|
||||||
|
add := func(root gopathwalk.Root, dir string) {
|
||||||
|
// Walk calls this concurrently; protect the result slices.
|
||||||
|
matchesMu.Lock()
|
||||||
|
defer matchesMu.Unlock()
|
||||||
|
|
||||||
|
path := dir
|
||||||
|
if dir != root.Path {
|
||||||
|
path = dir[len(root.Path)+1:]
|
||||||
|
}
|
||||||
|
if pathMatchesQueries(path, queries) {
|
||||||
|
switch root.Type {
|
||||||
|
case gopathwalk.RootModuleCache:
|
||||||
|
modCacheMatches = append(modCacheMatches, path)
|
||||||
|
case gopathwalk.RootCurrentModule:
|
||||||
|
// We'd need to read go.mod to find the full
|
||||||
|
// import path. Relative's easier.
|
||||||
|
rel, err := filepath.Rel(cfg.Dir, dir)
|
||||||
|
if err != nil {
|
||||||
|
// This ought to be impossible, since
|
||||||
|
// we found dir in the current module.
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
simpleMatches = append(simpleMatches, "./"+rel)
|
||||||
|
case gopathwalk.RootGOPATH, gopathwalk.RootGOROOT:
|
||||||
|
simpleMatches = append(simpleMatches, path)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
startWalk := time.Now()
|
||||||
|
gopathwalk.Walk(roots, add, gopathwalk.Options{ModulesEnabled: modRoot != "", Debug: debug})
|
||||||
|
if debug {
|
||||||
|
log.Printf("%v for walk", time.Since(startWalk))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Weird special case: the top-level package in a module will be in
|
||||||
|
// whatever directory the user checked the repository out into. It's
|
||||||
|
// more reasonable for that to not match the package name. So, if there
|
||||||
|
// are any Go files in the mod root, query it just to be safe.
|
||||||
|
if modRoot != "" {
|
||||||
|
rel, err := filepath.Rel(cfg.Dir, modRoot)
|
||||||
|
if err != nil {
|
||||||
|
panic(err) // See above.
|
||||||
|
}
|
||||||
|
|
||||||
|
files, err := ioutil.ReadDir(modRoot)
|
||||||
|
for _, f := range files {
|
||||||
|
if strings.HasSuffix(f.Name(), ".go") {
|
||||||
|
simpleMatches = append(simpleMatches, rel)
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
addResponse := func(r *driverResponse) {
|
||||||
|
for _, pkg := range r.Packages {
|
||||||
|
response.addPackage(pkg)
|
||||||
|
for _, name := range queries {
|
||||||
|
if pkg.Name == name {
|
||||||
|
response.addRoot(pkg.ID)
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(simpleMatches) != 0 {
|
||||||
|
resp, err := driver(cfg, simpleMatches...)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
addResponse(resp)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Module cache matches are tricky. We want to avoid downloading new
|
||||||
|
// versions of things, so we need to use the ones present in the cache.
|
||||||
|
// go list doesn't accept version specifiers, so we have to write out a
|
||||||
|
// temporary module, and do the list in that module.
|
||||||
|
if len(modCacheMatches) != 0 {
|
||||||
|
// Collect all the matches, deduplicating by major version
|
||||||
|
// and preferring the newest.
|
||||||
|
type modInfo struct {
|
||||||
|
mod string
|
||||||
|
major string
|
||||||
|
}
|
||||||
|
mods := make(map[modInfo]string)
|
||||||
|
var imports []string
|
||||||
|
for _, modPath := range modCacheMatches {
|
||||||
|
matches := modCacheRegexp.FindStringSubmatch(modPath)
|
||||||
|
mod, ver := filepath.ToSlash(matches[1]), matches[2]
|
||||||
|
importPath := filepath.ToSlash(filepath.Join(matches[1], matches[3]))
|
||||||
|
|
||||||
|
major := semver.Major(ver)
|
||||||
|
if prevVer, ok := mods[modInfo{mod, major}]; !ok || semver.Compare(ver, prevVer) > 0 {
|
||||||
|
mods[modInfo{mod, major}] = ver
|
||||||
|
}
|
||||||
|
|
||||||
|
imports = append(imports, importPath)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Build the temporary module.
|
||||||
|
var gomod bytes.Buffer
|
||||||
|
gomod.WriteString("module modquery\nrequire (\n")
|
||||||
|
for mod, version := range mods {
|
||||||
|
gomod.WriteString("\t" + mod.mod + " " + version + "\n")
|
||||||
|
}
|
||||||
|
gomod.WriteString(")\n")
|
||||||
|
|
||||||
|
tmpCfg := *cfg
|
||||||
|
|
||||||
|
// We're only trying to look at stuff in the module cache, so
|
||||||
|
// disable the network. This should speed things up, and has
|
||||||
|
// prevented errors in at least one case, #28518.
|
||||||
|
tmpCfg.Env = append(append([]string{"GOPROXY=off"}, cfg.Env...))
|
||||||
|
|
||||||
|
var err error
|
||||||
|
tmpCfg.Dir, err = ioutil.TempDir("", "gopackages-modquery")
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer os.RemoveAll(tmpCfg.Dir)
|
||||||
|
|
||||||
|
if err := ioutil.WriteFile(filepath.Join(tmpCfg.Dir, "go.mod"), gomod.Bytes(), 0777); err != nil {
|
||||||
|
return fmt.Errorf("writing go.mod for module cache query: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Run the query, using the import paths calculated from the matches above.
|
||||||
|
resp, err := driver(&tmpCfg, imports...)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("querying module cache matches: %v", err)
|
||||||
|
}
|
||||||
|
addResponse(resp)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// getSizes returns the types.Sizes to use for this build configuration by
// delegating to the packagesdriver helper, which shells out to the go tool.
// Export-data usage (usesExportData) changes how sizes are determined.
func getSizes(cfg *Config) (types.Sizes, error) {
	return packagesdriver.GetSizesGolist(cfg.Context, cfg.BuildFlags, cfg.Env, cfg.Dir, usesExportData(cfg))
}
|
||||||
|
|
||||||
|
// roots selects the appropriate paths to walk based on the passed-in configuration,
// particularly the environment and the presence of a go.mod in cfg.Dir's parents.
//
// It returns the list of directories to scan plus the module root directory
// (empty when modules are disabled). The second return value is "" in GOPATH mode.
func roots(cfg *Config) ([]gopathwalk.Root, string, error) {
	// Ask the go tool for the three values in one invocation; they come back
	// newline-separated in the same order they were requested.
	stdout, err := invokeGo(cfg, "env", "GOROOT", "GOPATH", "GOMOD")
	if err != nil {
		return nil, "", err
	}

	fields := strings.Split(stdout.String(), "\n")
	// Expect exactly three lines plus a trailing newline (i.e. an empty 4th field).
	if len(fields) != 4 || len(fields[3]) != 0 {
		return nil, "", fmt.Errorf("go env returned unexpected output: %q", stdout.String())
	}
	goroot, gopath, gomod := fields[0], filepath.SplitList(fields[1]), fields[2]
	var modDir string
	if gomod != "" {
		// GOMOD is the go.mod file path; its directory is the module root.
		modDir = filepath.Dir(gomod)
	}

	var roots []gopathwalk.Root
	// Always add GOROOT.
	roots = append(roots, gopathwalk.Root{filepath.Join(goroot, "/src"), gopathwalk.RootGOROOT})
	// If modules are enabled, scan the module dir.
	if modDir != "" {
		roots = append(roots, gopathwalk.Root{modDir, gopathwalk.RootCurrentModule})
	}
	// Add either GOPATH/src or GOPATH/pkg/mod, depending on module mode.
	for _, p := range gopath {
		if modDir != "" {
			roots = append(roots, gopathwalk.Root{filepath.Join(p, "/pkg/mod"), gopathwalk.RootModuleCache})
		} else {
			roots = append(roots, gopathwalk.Root{filepath.Join(p, "/src"), gopathwalk.RootGOPATH})
		}
	}

	return roots, modDir, nil
}
|
||||||
|
|
||||||
|
// These functions were copied from goimports. See further documentation there.
|
||||||
|
|
||||||
|
// pathMatchesQueries is adapted from pkgIsCandidate.
|
||||||
|
// TODO: is it reasonable to do Contains here, rather than an exact match on a path component?
|
||||||
|
func pathMatchesQueries(path string, queries []string) bool {
|
||||||
|
lastTwo := lastTwoComponents(path)
|
||||||
|
for _, query := range queries {
|
||||||
|
if strings.Contains(lastTwo, query) {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
if hasHyphenOrUpperASCII(lastTwo) && !hasHyphenOrUpperASCII(query) {
|
||||||
|
lastTwo = lowerASCIIAndRemoveHyphen(lastTwo)
|
||||||
|
if strings.Contains(lastTwo, query) {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// lastTwoComponents returns at most the last two path components
// of v, using either / or \ as the path separator. The returned
// string keeps the leading separator (e.g. "/b/c" for "a/b/c").
func lastTwoComponents(v string) string {
	seen := 0
	for i := len(v); i > 0; {
		i--
		c := v[i]
		if c != '/' && c != '\\' {
			continue
		}
		seen++
		if seen == 2 {
			// Second separator from the end: everything after it is
			// the last two components.
			return v[i:]
		}
	}
	// Fewer than two separators: the whole string qualifies.
	return v
}
|
||||||
|
|
||||||
|
// hasHyphenOrUpperASCII reports whether s contains a '-' or an
// upper-case ASCII letter. Non-ASCII bytes never match either test.
func hasHyphenOrUpperASCII(s string) bool {
	for i := len(s) - 1; i >= 0; i-- {
		c := s[i]
		if c == '-' {
			return true
		}
		if 'A' <= c && c <= 'Z' {
			return true
		}
	}
	return false
}
|
||||||
|
|
||||||
|
// lowerASCIIAndRemoveHyphen returns s with every '-' dropped and every
// upper-case ASCII letter lowered; all other bytes pass through unchanged.
func lowerASCIIAndRemoveHyphen(s string) (ret string) {
	out := make([]byte, 0, len(s))
	for i := 0; i < len(s); i++ {
		c := s[i]
		if c == '-' {
			continue
		}
		if 'A' <= c && c <= 'Z' {
			c += 'a' - 'A'
		}
		out = append(out, c)
	}
	return string(out)
}
|
||||||
|
|
||||||
|
// jsonPackage is the JSON shape emitted per package by "go list -json".
// Fields must match go list;
// see $GOROOT/src/cmd/go/internal/load/pkg.go.
type jsonPackage struct {
	ImportPath      string // unique identifier; may include "[q.test]" suffixes
	Dir             string // directory containing the package sources
	Name            string // package name as declared in source
	Export          string // export data file (may be relative to Dir — see caller)
	GoFiles         []string
	CompiledGoFiles []string // files presented to the compiler (includes cgo output)
	CFiles          []string
	CgoFiles        []string
	CXXFiles        []string
	MFiles          []string
	HFiles          []string
	FFiles          []string
	SFiles          []string
	SwigFiles       []string
	SwigCXXFiles    []string
	SysoFiles       []string
	Imports         []string          // import IDs
	ImportMap       map[string]string // import path -> ID, only where they differ
	Deps            []string          // transitive dependency IDs
	TestGoFiles     []string
	TestImports     []string
	XTestGoFiles    []string // external (package p_test) test files
	XTestImports    []string
	ForTest         string // q in a "p [q.test]" package, else ""
	DepOnly         bool   // true if listed only as a dependency, not a root

	Error *jsonPackageError // non-nil when go list reports an error for this package
}
|
||||||
|
|
||||||
|
// jsonPackageError mirrors the Error object in "go list -json" output.
type jsonPackageError struct {
	ImportStack []string // import chain leading to the error
	Pos         string   // position of the error, if any
	Err         string   // error message text
}
|
||||||
|
|
||||||
|
func otherFiles(p *jsonPackage) [][]string {
|
||||||
|
return [][]string{p.CFiles, p.CXXFiles, p.MFiles, p.HFiles, p.FFiles, p.SFiles, p.SwigFiles, p.SwigCXXFiles, p.SysoFiles}
|
||||||
|
}
|
||||||
|
|
||||||
|
// golistDriverCurrent uses the "go list" command to expand the
// pattern words and return metadata for the specified packages.
// dir may be "" and env may be nil, as per os/exec.Command.
func golistDriverCurrent(cfg *Config, words ...string) (*driverResponse, error) {
	// go list uses the following identifiers in ImportPath and Imports:
	//
	// 	"p"			-- importable package or main (command)
	//	"q.test"		-- q's test executable
	// 	"p [q.test]"		-- variant of p as built for q's test executable
	//	"q_test [q.test]"	-- q's external test package
	//
	// The packages p that are built differently for a test q.test
	// are q itself, plus any helpers used by the external test q_test,
	// typically including "testing" and all its dependencies.

	// Run "go list" for complete
	// information on the specified packages.
	buf, err := invokeGo(cfg, golistargs(cfg, words)...)
	if err != nil {
		return nil, err
	}
	// seen guards against go list emitting the same ImportPath twice.
	seen := make(map[string]*jsonPackage)
	// Decode the JSON and convert it to Package form.
	var response driverResponse
	for dec := json.NewDecoder(buf); dec.More(); {
		p := new(jsonPackage)
		if err := dec.Decode(p); err != nil {
			return nil, fmt.Errorf("JSON decoding failed: %v", err)
		}

		if p.ImportPath == "" {
			// The documentation for go list says that “[e]rroneous packages will have
			// a non-empty ImportPath”. If for some reason it comes back empty, we
			// prefer to error out rather than silently discarding data or handing
			// back a package without any way to refer to it.
			if p.Error != nil {
				return nil, Error{
					Pos: p.Error.Pos,
					Msg: p.Error.Err,
				}
			}
			return nil, fmt.Errorf("package missing import path: %+v", p)
		}

		if old, found := seen[p.ImportPath]; found {
			// A repeat is only tolerated when it is byte-for-byte identical.
			if !reflect.DeepEqual(p, old) {
				return nil, fmt.Errorf("go list repeated package %v with different values", p.ImportPath)
			}
			// skip the duplicate
			continue
		}
		seen[p.ImportPath] = p

		pkg := &Package{
			Name:            p.Name,
			ID:              p.ImportPath,
			GoFiles:         absJoin(p.Dir, p.GoFiles, p.CgoFiles),
			CompiledGoFiles: absJoin(p.Dir, p.CompiledGoFiles),
			OtherFiles:      absJoin(p.Dir, otherFiles(p)...),
		}

		// Workaround for https://golang.org/issue/28749:
		// go list can include assembly files in CompiledGoFiles; drop them.
		// TODO(adonovan): delete before go1.12 release.
		out := pkg.CompiledGoFiles[:0]
		for _, f := range pkg.CompiledGoFiles {
			if strings.HasSuffix(f, ".s") {
				continue
			}
			out = append(out, f)
		}
		pkg.CompiledGoFiles = out

		// Extract the PkgPath from the package's ID: the ID may carry a
		// " [q.test]" suffix; the PkgPath is everything before the space.
		if i := strings.IndexByte(pkg.ID, ' '); i >= 0 {
			pkg.PkgPath = pkg.ID[:i]
		} else {
			pkg.PkgPath = pkg.ID
		}

		if pkg.PkgPath == "unsafe" {
			pkg.GoFiles = nil // ignore fake unsafe.go file
		}

		// Assume go list emits only absolute paths for Dir.
		if p.Dir != "" && !filepath.IsAbs(p.Dir) {
			log.Fatalf("internal error: go list returned non-absolute Package.Dir: %s", p.Dir)
		}

		// Export may be relative to Dir; make it absolute.
		if p.Export != "" && !filepath.IsAbs(p.Export) {
			pkg.ExportFile = filepath.Join(p.Dir, p.Export)
		} else {
			pkg.ExportFile = p.Export
		}

		// imports
		//
		// Imports contains the IDs of all imported packages.
		// ImportsMap records (path, ID) only where they differ.
		ids := make(map[string]bool)
		for _, id := range p.Imports {
			ids[id] = true
		}
		pkg.Imports = make(map[string]*Package)
		for path, id := range p.ImportMap {
			pkg.Imports[path] = &Package{ID: id} // non-identity import
			delete(ids, id)
		}
		for id := range ids {
			if id == "C" {
				// "C" is a pseudo-package for cgo; it has no real ID.
				continue
			}

			pkg.Imports[id] = &Package{ID: id} // identity import
		}
		if !p.DepOnly {
			// Packages named directly by the query (not mere dependencies)
			// become roots of the response.
			response.Roots = append(response.Roots, pkg.ID)
		}

		// Work around for pre-go.1.11 versions of go list.
		// TODO(matloob): they should be handled by the fallback.
		// Can we delete this?
		if len(pkg.CompiledGoFiles) == 0 {
			pkg.CompiledGoFiles = pkg.GoFiles
		}

		if p.Error != nil {
			pkg.Errors = append(pkg.Errors, Error{
				Pos: p.Error.Pos,
				Msg: p.Error.Err,
			})
		}

		response.Packages = append(response.Packages, pkg)
	}

	return &response, nil
}
|
||||||
|
|
||||||
|
// absJoin flattens the given lists of files into a single slice, joining
// any relative entry onto dir so every returned path is absolute.
// Already-absolute entries are passed through unchanged.
func absJoin(dir string, fileses ...[]string) (res []string) {
	for _, list := range fileses {
		for _, f := range list {
			abs := f
			if !filepath.IsAbs(abs) {
				abs = filepath.Join(dir, abs)
			}
			res = append(res, abs)
		}
	}
	return
}
|
||||||
|
|
||||||
|
func golistargs(cfg *Config, words []string) []string {
|
||||||
|
fullargs := []string{
|
||||||
|
"list", "-e", "-json", "-compiled",
|
||||||
|
fmt.Sprintf("-test=%t", cfg.Tests),
|
||||||
|
fmt.Sprintf("-export=%t", usesExportData(cfg)),
|
||||||
|
fmt.Sprintf("-deps=%t", cfg.Mode >= LoadImports),
|
||||||
|
// go list doesn't let you pass -test and -find together,
|
||||||
|
// probably because you'd just get the TestMain.
|
||||||
|
fmt.Sprintf("-find=%t", cfg.Mode < LoadImports && !cfg.Tests),
|
||||||
|
}
|
||||||
|
fullargs = append(fullargs, cfg.BuildFlags...)
|
||||||
|
fullargs = append(fullargs, "--")
|
||||||
|
fullargs = append(fullargs, words...)
|
||||||
|
return fullargs
|
||||||
|
}
|
||||||
|
|
||||||
|
// invokeGo returns the stdout of a go command invocation.
// Errors from the go tool are triaged: "flag provided but not defined" maps
// to goTooOldError, while export-data and ad-hoc-file invocations tolerate a
// non-zero exit (see comments below).
func invokeGo(cfg *Config, args ...string) (*bytes.Buffer, error) {
	if debug {
		// Log how long this invocation took when debug tracing is on.
		defer func(start time.Time) { log.Printf("%s for %v", time.Since(start), cmdDebugStr(cfg, args...)) }(time.Now())
	}
	stdout := new(bytes.Buffer)
	stderr := new(bytes.Buffer)
	cmd := exec.CommandContext(cfg.Context, "go", args...)
	// On darwin the cwd gets resolved to the real path, which breaks anything that
	// expects the working directory to keep the original path, including the
	// go command when dealing with modules.
	// The Go stdlib has a special feature where if the cwd and the PWD are the
	// same node then it trusts the PWD, so by setting it in the env for the child
	// process we fix up all the paths returned by the go command.
	cmd.Env = append(append([]string{}, cfg.Env...), "PWD="+cfg.Dir)
	cmd.Dir = cfg.Dir
	cmd.Stdout = stdout
	cmd.Stderr = stderr
	if err := cmd.Run(); err != nil {
		exitErr, ok := err.(*exec.ExitError)
		if !ok {
			// Catastrophic error:
			// - executable not found
			// - context cancellation
			return nil, fmt.Errorf("couldn't exec 'go %v': %s %T", args, err, err)
		}

		// Old go version?
		if strings.Contains(stderr.String(), "flag provided but not defined") {
			return nil, goTooOldError{fmt.Errorf("unsupported version of go: %s: %s", exitErr, stderr)}
		}

		// Export mode entails a build.
		// If that build fails, errors appear on stderr
		// (despite the -e flag) and the Export field is blank.
		// Do not fail in that case.
		// The same is true if an ad-hoc package given to go list doesn't exist.
		// TODO(matloob): Remove these once we can depend on go list to exit with a zero status with -e even when
		// packages don't exist or a build fails.
		if !usesExportData(cfg) && !containsGoFile(args) {
			return nil, fmt.Errorf("go %v: %s: %s", args, exitErr, stderr)
		}
	}

	// As of writing, go list -export prints some non-fatal compilation
	// errors to stderr, even with -e set. We would prefer that it put
	// them in the Package.Error JSON (see https://golang.org/issue/26319).
	// In the meantime, there's nowhere good to put them, but they can
	// be useful for debugging. Print them if $GOPACKAGESPRINTGOLISTERRORS
	// is set.
	if len(stderr.Bytes()) != 0 && os.Getenv("GOPACKAGESPRINTGOLISTERRORS") != "" {
		fmt.Fprintf(os.Stderr, "%s stderr: <<%s>>\n", cmdDebugStr(cfg, args...), stderr)
	}

	// debugging
	if false {
		fmt.Fprintf(os.Stderr, "%s stdout: <<%s>>\n", cmdDebugStr(cfg, args...), stdout)
	}

	return stdout, nil
}
|
||||||
|
|
||||||
|
// containsGoFile reports whether any element of s ends in ".go".
func containsGoFile(s []string) bool {
	for i := range s {
		if strings.HasSuffix(s[i], ".go") {
			return true
		}
	}
	return false
}
|
||||||
|
|
||||||
|
func cmdDebugStr(cfg *Config, args ...string) string {
|
||||||
|
env := make(map[string]string)
|
||||||
|
for _, kv := range cfg.Env {
|
||||||
|
split := strings.Split(kv, "=")
|
||||||
|
k, v := split[0], split[1]
|
||||||
|
env[k] = v
|
||||||
|
}
|
||||||
|
|
||||||
|
return fmt.Sprintf("GOROOT=%v GOPATH=%v GO111MODULE=%v PWD=%v go %v", env["GOROOT"], env["GOPATH"], env["GO111MODULE"], env["PWD"], args)
|
||||||
|
}
|
||||||
450
vendor/golang.org/x/tools/go/packages/golist_fallback.go
generated
vendored
Normal file
450
vendor/golang.org/x/tools/go/packages/golist_fallback.go
generated
vendored
Normal file
@@ -0,0 +1,450 @@
|
|||||||
|
// Copyright 2018 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package packages
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"go/build"
|
||||||
|
"io/ioutil"
|
||||||
|
"os"
|
||||||
|
"os/exec"
|
||||||
|
"path/filepath"
|
||||||
|
"sort"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"golang.org/x/tools/go/internal/cgo"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TODO(matloob): Delete this file once Go 1.12 is released.
|
||||||
|
|
||||||
|
// This file provides backwards compatibility support for
|
||||||
|
// loading for versions of Go earlier than 1.11. This support is meant to
|
||||||
|
// assist with migration to the Package API until there's
|
||||||
|
// widespread adoption of these newer Go versions.
|
||||||
|
// This support will be removed once Go 1.12 is released
|
||||||
|
// in Q1 2019.
|
||||||
|
|
||||||
|
// golistDriverFallback emulates the modern "go list -compiled -test" driver
// for pre-1.11 go toolchains: it lists the requested packages, runs cgo
// manually, and synthesizes test / xtest / testmain packages itself.
func golistDriverFallback(cfg *Config, words ...string) (*driverResponse, error) {
	// Turn absolute paths into GOROOT and GOPATH-relative paths to provide to go list.
	// This will have surprising behavior if GOROOT or GOPATH contain multiple packages with the same
	// path and a user provides an absolute path to a directory that's shadowed by an earlier
	// directory in GOROOT or GOPATH with the same package path.
	words = cleanAbsPaths(cfg, words)

	original, deps, err := getDeps(cfg, words...)
	if err != nil {
		return nil, err
	}

	var tmpdir string // used for generated cgo files
	// NOTE(review): tmpdir does not appear to be removed anywhere in this
	// function — confirm whether cleanup is intentional or handled elsewhere.
	var needsTestVariant []struct {
		pkg, xtestPkg *Package
	}

	var response driverResponse
	allPkgs := make(map[string]bool) // IDs already converted, to dedupe addPackage calls
	// addPackage converts one jsonPackage into Package form and appends it
	// (plus any synthesized test packages, when isRoot and cfg.Tests) to response.
	addPackage := func(p *jsonPackage, isRoot bool) {
		id := p.ImportPath

		if allPkgs[id] {
			return
		}
		allPkgs[id] = true

		pkgpath := id

		if pkgpath == "unsafe" {
			p.GoFiles = nil // ignore fake unsafe.go file
		}

		// importMap builds the Imports map for an import list, expanding the
		// cgo pseudo-import "C" into its implied real packages and
		// devendorizing the keys.
		importMap := func(importlist []string) map[string]*Package {
			importMap := make(map[string]*Package)
			for _, id := range importlist {

				if id == "C" {
					for _, path := range []string{"unsafe", "syscall", "runtime/cgo"} {
						if pkgpath != path && importMap[path] == nil {
							importMap[path] = &Package{ID: path}
						}
					}
					continue
				}
				importMap[vendorlessPath(id)] = &Package{ID: id}
			}
			return importMap
		}
		compiledGoFiles := absJoin(p.Dir, p.GoFiles)
		// Use a function to simplify control flow. It's just a bunch of gotos.
		var cgoErrors []error
		var outdir string
		// getOutdir lazily creates a per-package scratch directory (under a
		// shared tmpdir) for generated cgo files and the testmain.
		getOutdir := func() (string, error) {
			if outdir != "" {
				return outdir, nil
			}
			if tmpdir == "" {
				if tmpdir, err = ioutil.TempDir("", "gopackages"); err != nil {
					return "", err
				}
			}
			outdir = filepath.Join(tmpdir, strings.Replace(p.ImportPath, "/", "_", -1))
			if err := os.MkdirAll(outdir, 0755); err != nil {
				outdir = ""
				return "", err
			}
			return outdir, nil
		}
		// processCgo runs cgo for this package; it returns false (and records
		// the error) on failure so the caller can fall back to the raw files.
		processCgo := func() bool {
			// Suppress any cgo errors. Any relevant errors will show up in typechecking.
			// TODO(matloob): Skip running cgo if Mode < LoadTypes.
			outdir, err := getOutdir()
			if err != nil {
				cgoErrors = append(cgoErrors, err)
				return false
			}
			files, _, err := runCgo(p.Dir, outdir, cfg.Env)
			if err != nil {
				cgoErrors = append(cgoErrors, err)
				return false
			}
			compiledGoFiles = append(compiledGoFiles, files...)
			return true
		}
		if len(p.CgoFiles) == 0 || !processCgo() {
			compiledGoFiles = append(compiledGoFiles, absJoin(p.Dir, p.CgoFiles)...) // Punt to typechecker.
		}
		if isRoot {
			response.Roots = append(response.Roots, id)
		}
		pkg := &Package{
			ID:              id,
			Name:            p.Name,
			GoFiles:         absJoin(p.Dir, p.GoFiles, p.CgoFiles),
			CompiledGoFiles: compiledGoFiles,
			OtherFiles:      absJoin(p.Dir, otherFiles(p)...),
			PkgPath:         pkgpath,
			Imports:         importMap(p.Imports),
			// TODO(matloob): set errors on the Package to cgoErrors
		}
		if p.Error != nil {
			pkg.Errors = append(pkg.Errors, Error{
				Pos: p.Error.Pos,
				Msg: p.Error.Err,
			})
		}
		response.Packages = append(response.Packages, pkg)
		if cfg.Tests && isRoot {
			// Synthesize the three test-related packages that go list -test
			// would have produced: "p [p.test]", "p_test [p.test]", "p.test".
			testID := fmt.Sprintf("%s [%s.test]", id, id)
			if len(p.TestGoFiles) > 0 || len(p.XTestGoFiles) > 0 {
				response.Roots = append(response.Roots, testID)
				testPkg := &Package{
					ID:              testID,
					Name:            p.Name,
					GoFiles:         absJoin(p.Dir, p.GoFiles, p.CgoFiles, p.TestGoFiles),
					CompiledGoFiles: append(compiledGoFiles, absJoin(p.Dir, p.TestGoFiles)...),
					OtherFiles:      absJoin(p.Dir, otherFiles(p)...),
					PkgPath:         pkgpath,
					Imports:         importMap(append(p.Imports, p.TestImports...)),
					// TODO(matloob): set errors on the Package to cgoErrors
				}
				response.Packages = append(response.Packages, testPkg)
				var xtestPkg *Package
				if len(p.XTestGoFiles) > 0 {
					xtestID := fmt.Sprintf("%s_test [%s.test]", id, id)
					response.Roots = append(response.Roots, xtestID)
					// Generate test variants for all packages q where a path exists
					// such that xtestPkg -> ... -> q -> ... -> p (where p is the package under test)
					// and rewrite all import map entries of p to point to testPkg (the test variant of
					// p), and of each q to point to the test variant of that q.
					xtestPkg = &Package{
						ID:              xtestID,
						Name:            p.Name + "_test",
						GoFiles:         absJoin(p.Dir, p.XTestGoFiles),
						CompiledGoFiles: absJoin(p.Dir, p.XTestGoFiles),
						PkgPath:         pkgpath + "_test",
						Imports:         importMap(p.XTestImports),
					}
					// Add to list of packages we need to rewrite imports for to refer to test variants.
					// We may need to create a test variant of a package that hasn't been loaded yet, so
					// the test variants need to be created later.
					needsTestVariant = append(needsTestVariant, struct{ pkg, xtestPkg *Package }{pkg, xtestPkg})
					response.Packages = append(response.Packages, xtestPkg)
				}
				// testmain package
				testmainID := id + ".test"
				response.Roots = append(response.Roots, testmainID)
				imports := map[string]*Package{}
				imports[testPkg.PkgPath] = &Package{ID: testPkg.ID}
				if xtestPkg != nil {
					imports[xtestPkg.PkgPath] = &Package{ID: xtestPkg.ID}
				}
				testmainPkg := &Package{
					ID:      testmainID,
					Name:    "main",
					PkgPath: testmainID,
					Imports: imports,
				}
				response.Packages = append(response.Packages, testmainPkg)
				outdir, err := getOutdir()
				if err != nil {
					testmainPkg.Errors = append(testmainPkg.Errors, Error{
						Pos:  "-",
						Msg:  fmt.Sprintf("failed to generate testmain: %v", err),
						Kind: ListError,
					})
					return
				}
				// Don't use a .go extension on the file, so that the tests think the file is inside GOCACHE.
				// This allows the same test to test the pre- and post-Go 1.11 go list logic because the Go 1.11
				// go list generates test mains in the cache, and the test code knows not to rely on paths in the
				// cache to stay stable.
				testmain := filepath.Join(outdir, "testmain-go")
				extraimports, extradeps, err := generateTestmain(testmain, testPkg, xtestPkg)
				if err != nil {
					testmainPkg.Errors = append(testmainPkg.Errors, Error{
						Pos:  "-",
						Msg:  fmt.Sprintf("failed to generate testmain: %v", err),
						Kind: ListError,
					})
				}
				deps = append(deps, extradeps...)
				for _, imp := range extraimports { // testing, testing/internal/testdeps, and maybe os
					imports[imp] = &Package{ID: imp}
				}
				testmainPkg.GoFiles = []string{testmain}
				testmainPkg.CompiledGoFiles = []string{testmain}
			}
		}
	}

	// Convert the directly-requested packages first (as roots).
	for _, pkg := range original {
		addPackage(pkg, true)
	}
	if cfg.Mode < LoadImports || len(deps) == 0 {
		return &response, nil
	}

	// List all the dependencies in a second invocation and convert them too.
	buf, err := invokeGo(cfg, golistArgsFallback(cfg, deps)...)
	if err != nil {
		return nil, err
	}

	// Decode the JSON and convert it to Package form.
	for dec := json.NewDecoder(buf); dec.More(); {
		p := new(jsonPackage)
		if err := dec.Decode(p); err != nil {
			return nil, fmt.Errorf("JSON decoding failed: %v", err)
		}

		addPackage(p, false)
	}

	// Now that every package exists, rewrite imports to test variants.
	for _, v := range needsTestVariant {
		createTestVariants(&response, v.pkg, v.xtestPkg)
	}

	return &response, nil
}
|
||||||
|
|
||||||
|
// createTestVariants clones every package on an import path between xtestPkg
// and pkgUnderTest into a "[p.test]" variant, and rewrites the relevant
// Imports entries to point at those variants. The clones are appended to
// response.Packages.
func createTestVariants(response *driverResponse, pkgUnderTest, xtestPkg *Package) {
	allPkgs := make(map[string]*Package)
	for _, pkg := range response.Packages {
		allPkgs[pkg.ID] = pkg
	}
	needsTestVariant := make(map[string]bool)
	needsTestVariant[pkgUnderTest.ID] = true
	// needsVariantRec reports whether p (transitively) imports pkgUnderTest;
	// if so it also creates p's test variant as a side effect.
	// NOTE(review): the recursion has no visited-set guard — it appears to
	// rely on the import graph being acyclic; confirm cycles cannot occur.
	var needsVariantRec func(p *Package) bool
	needsVariantRec = func(p *Package) bool {
		if needsTestVariant[p.ID] {
			return true
		}
		for _, imp := range p.Imports {
			if needsVariantRec(allPkgs[imp.ID]) {
				// Don't break because we want to make sure all dependencies
				// have been processed, and all required test variants of our dependencies
				// exist.
				needsTestVariant[p.ID] = true
			}
		}
		if !needsTestVariant[p.ID] {
			return false
		}
		// Create a clone of the package. It will share the same strings and lists of source files,
		// but that's okay. It's only necessary for the Imports map to have a separate identity.
		testVariant := *p
		testVariant.ID = fmt.Sprintf("%s [%s.test]", p.ID, pkgUnderTest.ID)
		testVariant.Imports = make(map[string]*Package)
		for imp, pkg := range p.Imports {
			testVariant.Imports[imp] = pkg
			if needsTestVariant[pkg.ID] {
				// Redirect this edge to the test variant of the dependency.
				testVariant.Imports[imp] = &Package{ID: fmt.Sprintf("%s [%s.test]", pkg.ID, pkgUnderTest.ID)}
			}
		}
		response.Packages = append(response.Packages, &testVariant)
		return needsTestVariant[p.ID]
	}
	// finally, update the xtest package's imports
	for imp, pkg := range xtestPkg.Imports {
		if allPkgs[pkg.ID] == nil {
			// NOTE(review): this prints straight to stdout — looks like leftover
			// debug output rather than intentional user-facing diagnostics.
			fmt.Printf("for %s: package %s doesn't exist\n", xtestPkg.ID, pkg.ID)
		}
		if needsVariantRec(allPkgs[pkg.ID]) {
			xtestPkg.Imports[imp] = &Package{ID: fmt.Sprintf("%s [%s.test]", pkg.ID, pkgUnderTest.ID)}
		}
	}
}
|
||||||
|
|
||||||
|
// cleanAbsPaths replaces all absolute paths with GOPATH- and GOROOT-relative
// paths. If an absolute path is not GOPATH- or GOROOT- relative, it is left as an
// absolute path so an error can be returned later.
//
// The GOPATH/GOROOT search paths are computed lazily (by shelling out to
// "go env") the first time an absolute directory path is encountered.
func cleanAbsPaths(cfg *Config, words []string) []string {
	var searchpaths []string
	var cleaned = make([]string, len(words))
	for i := range cleaned {
		cleaned[i] = words[i]
		// Ignore relative directory paths (they must already be goroot-relative) and Go source files
		// (absolute source files are already allowed for ad-hoc packages).
		// TODO(matloob): Can there be non-.go files in ad-hoc packages.
		if !filepath.IsAbs(cleaned[i]) || strings.HasSuffix(cleaned[i], ".go") {
			continue
		}
		// otherwise, it's an absolute path. Search GOPATH and GOROOT to find it.
		if searchpaths == nil {
			cmd := exec.Command("go", "env", "GOPATH", "GOROOT")
			cmd.Env = cfg.Env
			out, err := cmd.Output()
			if err != nil {
				// Leave searchpaths non-nil (but empty) so we don't re-run
				// the command for every remaining word.
				searchpaths = []string{}
				continue // suppress the error, it will show up again when running go list
			}
			lines := strings.Split(string(out), "\n")
			// Expect exactly two values plus a trailing newline.
			// NOTE(review): on malformed output this leaves searchpaths nil,
			// so the command is re-executed for each later absolute word —
			// presumably harmless, but worth confirming.
			if len(lines) != 3 || lines[0] == "" || lines[1] == "" || lines[2] != "" {
				continue // suppress error
			}
			// first line is GOPATH
			for _, path := range filepath.SplitList(lines[0]) {
				searchpaths = append(searchpaths, filepath.Join(path, "src"))
			}
			// second line is GOROOT
			searchpaths = append(searchpaths, filepath.Join(lines[1], "src"))
		}
		// Strip the first matching search-path prefix (plus any leading
		// separators) to make the word a package path.
		for _, sp := range searchpaths {
			if strings.HasPrefix(cleaned[i], sp) {
				cleaned[i] = strings.TrimPrefix(cleaned[i], sp)
				cleaned[i] = strings.TrimLeft(cleaned[i], string(filepath.Separator))
			}
		}
	}
	return cleaned
}
|
||||||
|
|
||||||
|
// vendorlessPath returns the devendorized version of the import path ipath,
// e.g. "foo/bar/vendor/a/b" becomes "a/b".
// Copied from golang.org/x/tools/imports/fix.go.
func vendorlessPath(ipath string) string {
	const sep = "/vendor/"
	// Strip everything up to and including the LAST interior vendor segment.
	if i := strings.LastIndex(ipath, sep); i >= 0 {
		return ipath[i+len(sep):]
	}
	// A leading top-level "vendor/" also counts; otherwise return unchanged.
	return strings.TrimPrefix(ipath, "vendor/")
}
|
||||||
|
|
||||||
|
// getDeps runs an initial go list to determine all the dependency packages.
// It returns the directly matched packages (initial) and a sorted,
// deduplicated list of their transitive dependency import paths (deps),
// excluding the initial packages themselves. When cfg.Tests is set, the
// dependencies of the packages' test imports are folded in via a second
// go list invocation.
func getDeps(cfg *Config, words ...string) (initial []*jsonPackage, deps []string, err error) {
	buf, err := invokeGo(cfg, golistArgsFallback(cfg, words)...)
	if err != nil {
		return nil, nil, err
	}

	depsSet := make(map[string]bool)
	var testImports []string

	// Extract deps from the JSON.
	for dec := json.NewDecoder(buf); dec.More(); {
		p := new(jsonPackage)
		if err := dec.Decode(p); err != nil {
			return nil, nil, fmt.Errorf("JSON decoding failed: %v", err)
		}

		initial = append(initial, p)
		for _, dep := range p.Deps {
			depsSet[dep] = true
		}
		if cfg.Tests {
			// collect the additional imports of the test packages.
			// NOTE(review): append may reuse p.TestImports' backing array if it
			// has spare capacity; p is not read again afterwards, so this looks
			// harmless — confirm if p ever outlives this loop.
			pkgTestImports := append(p.TestImports, p.XTestImports...)
			for _, imp := range pkgTestImports {
				if depsSet[imp] {
					continue
				}
				depsSet[imp] = true
				testImports = append(testImports, imp)
			}
		}
	}
	// Get the deps of the packages imported by tests.
	if len(testImports) > 0 {
		buf, err = invokeGo(cfg, golistArgsFallback(cfg, testImports)...)
		if err != nil {
			return nil, nil, err
		}
		// Extract deps from the JSON.
		for dec := json.NewDecoder(buf); dec.More(); {
			p := new(jsonPackage)
			if err := dec.Decode(p); err != nil {
				return nil, nil, fmt.Errorf("JSON decoding failed: %v", err)
			}
			for _, dep := range p.Deps {
				depsSet[dep] = true
			}
		}
	}

	// The initial packages are roots, not dependencies of themselves.
	for _, orig := range initial {
		delete(depsSet, orig.ImportPath)
	}

	deps = make([]string, 0, len(depsSet))
	for dep := range depsSet {
		deps = append(deps, dep)
	}
	sort.Strings(deps) // ensure output is deterministic
	return initial, deps, nil
}
|
|
||||||
|
func golistArgsFallback(cfg *Config, words []string) []string {
|
||||||
|
fullargs := []string{"list", "-e", "-json"}
|
||||||
|
fullargs = append(fullargs, cfg.BuildFlags...)
|
||||||
|
fullargs = append(fullargs, "--")
|
||||||
|
fullargs = append(fullargs, words...)
|
||||||
|
return fullargs
|
||||||
|
}
|
||||||
|
|
||||||
|
func runCgo(pkgdir, tmpdir string, env []string) (files, displayfiles []string, err error) {
|
||||||
|
// Use go/build to open cgo files and determine the cgo flags, etc, from them.
|
||||||
|
// This is tricky so it's best to avoid reimplementing as much as we can, and
|
||||||
|
// we plan to delete this support once Go 1.12 is released anyways.
|
||||||
|
// TODO(matloob): This isn't completely correct because we're using the Default
|
||||||
|
// context. Perhaps we should more accurately fill in the context.
|
||||||
|
bp, err := build.ImportDir(pkgdir, build.ImportMode(0))
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
for _, ev := range env {
|
||||||
|
if v := strings.TrimPrefix(ev, "CGO_CPPFLAGS"); v != ev {
|
||||||
|
bp.CgoCPPFLAGS = append(bp.CgoCPPFLAGS, strings.Fields(v)...)
|
||||||
|
} else if v := strings.TrimPrefix(ev, "CGO_CFLAGS"); v != ev {
|
||||||
|
bp.CgoCFLAGS = append(bp.CgoCFLAGS, strings.Fields(v)...)
|
||||||
|
} else if v := strings.TrimPrefix(ev, "CGO_CXXFLAGS"); v != ev {
|
||||||
|
bp.CgoCXXFLAGS = append(bp.CgoCXXFLAGS, strings.Fields(v)...)
|
||||||
|
} else if v := strings.TrimPrefix(ev, "CGO_LDFLAGS"); v != ev {
|
||||||
|
bp.CgoLDFLAGS = append(bp.CgoLDFLAGS, strings.Fields(v)...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return cgo.Run(bp, pkgdir, tmpdir, true)
|
||||||
|
}
|
||||||
318
vendor/golang.org/x/tools/go/packages/golist_fallback_testmain.go
generated
vendored
Normal file
318
vendor/golang.org/x/tools/go/packages/golist_fallback_testmain.go
generated
vendored
Normal file
@@ -0,0 +1,318 @@
|
|||||||
|
// Copyright 2018 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// This file is largely based on the Go 1.10-era cmd/go/internal/test/test.go
|
||||||
|
// testmain generation code.
|
||||||
|
|
||||||
|
package packages
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"go/ast"
|
||||||
|
"go/doc"
|
||||||
|
"go/parser"
|
||||||
|
"go/token"
|
||||||
|
"os"
|
||||||
|
"sort"
|
||||||
|
"strings"
|
||||||
|
"text/template"
|
||||||
|
"unicode"
|
||||||
|
"unicode/utf8"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TODO(matloob): Delete this file once Go 1.12 is released.
|
||||||
|
|
||||||
|
// This file complements golist_fallback.go by providing
|
||||||
|
// support for generating testmains.
|
||||||
|
|
||||||
|
// generateTestmain writes a testmain source file for testPkg/xtestPkg to the
// file named out (via writeTestmain). It returns the extra imports the
// generated main package introduces and the extra transitive dependencies
// the caller must also load.
func generateTestmain(out string, testPkg, xtestPkg *Package) (extraimports, extradeps []string, err error) {
	testFuncs, err := loadTestFuncs(testPkg, xtestPkg)
	if err != nil {
		return nil, nil, err
	}
	extraimports = []string{"testing", "testing/internal/testdeps"}
	if testFuncs.TestMain == nil {
		// The template imports "os" only when there is no user TestMain
		// (it calls os.Exit(m.Run()) itself in that case).
		extraimports = append(extraimports, "os")
	}
	// Transitive dependencies of ("testing", "testing/internal/testdeps").
	// os is part of the transitive closure so it and its transitive dependencies are
	// included regardless of whether it's imported in the template below.
	extradeps = []string{
		"errors",
		"internal/cpu",
		"unsafe",
		"internal/bytealg",
		"internal/race",
		"runtime/internal/atomic",
		"runtime/internal/sys",
		"runtime",
		"sync/atomic",
		"sync",
		"io",
		"unicode",
		"unicode/utf8",
		"bytes",
		"math",
		"syscall",
		"time",
		"internal/poll",
		"internal/syscall/unix",
		"internal/testlog",
		"os",
		"math/bits",
		"strconv",
		"reflect",
		"fmt",
		"sort",
		"strings",
		"flag",
		"runtime/debug",
		"context",
		"runtime/trace",
		"testing",
		"bufio",
		"regexp/syntax",
		"regexp",
		"compress/flate",
		"encoding/binary",
		"hash",
		"hash/crc32",
		"compress/gzip",
		"path/filepath",
		"io/ioutil",
		"text/tabwriter",
		"runtime/pprof",
		"testing/internal/testdeps",
	}
	return extraimports, extradeps, writeTestmain(out, testFuncs)
}
|
|
||||||
|
// The following is adapted from the cmd/go testmain generation code.
|
||||||
|
|
||||||
|
// isTestFunc tells whether fn has the type of a testing function. arg
|
||||||
|
// specifies the parameter type we look for: B, M or T.
|
||||||
|
func isTestFunc(fn *ast.FuncDecl, arg string) bool {
|
||||||
|
if fn.Type.Results != nil && len(fn.Type.Results.List) > 0 ||
|
||||||
|
fn.Type.Params.List == nil ||
|
||||||
|
len(fn.Type.Params.List) != 1 ||
|
||||||
|
len(fn.Type.Params.List[0].Names) > 1 {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
ptr, ok := fn.Type.Params.List[0].Type.(*ast.StarExpr)
|
||||||
|
if !ok {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
// We can't easily check that the type is *testing.M
|
||||||
|
// because we don't know how testing has been imported,
|
||||||
|
// but at least check that it's *M or *something.M.
|
||||||
|
// Same applies for B and T.
|
||||||
|
if name, ok := ptr.X.(*ast.Ident); ok && name.Name == arg {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
if sel, ok := ptr.X.(*ast.SelectorExpr); ok && sel.Sel.Name == arg {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// isTest tells whether name looks like a test (or benchmark, according to prefix).
// It is a Test (say) if there is a character after Test that is not a lower-case letter.
// We don't want TesticularCancer.
func isTest(name, prefix string) bool {
	if !strings.HasPrefix(name, prefix) {
		return false
	}
	if len(name) == len(prefix) { // "Test" is ok
		return true
	}
	// Renamed from "rune": the original shadowed the predeclared type identifier.
	r, _ := utf8.DecodeRuneInString(name[len(prefix):])
	return !unicode.IsLower(r)
}
|
|
||||||
|
// loadTestFuncs returns the testFuncs describing the tests that will be run.
// ptest is the package under test (its GoFiles may mix test and non-test
// sources; only *_test.go files are scanned); pxtest is the external _test
// package and may be nil.
func loadTestFuncs(ptest, pxtest *Package) (*testFuncs, error) {
	t := &testFuncs{
		TestPackage:  ptest,
		XTestPackage: pxtest,
	}
	for _, file := range ptest.GoFiles {
		if !strings.HasSuffix(file, "_test.go") {
			continue
		}
		if err := t.load(file, "_test", &t.ImportTest, &t.NeedTest); err != nil {
			return nil, err
		}
	}
	if pxtest != nil {
		for _, file := range pxtest.GoFiles {
			if err := t.load(file, "_xtest", &t.ImportXtest, &t.NeedXtest); err != nil {
				return nil, err
			}
		}
	}
	return t, nil
}
|
|
||||||
|
// writeTestmain writes the _testmain.go file for t to the file named out.
|
||||||
|
func writeTestmain(out string, t *testFuncs) error {
|
||||||
|
f, err := os.Create(out)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer f.Close()
|
||||||
|
|
||||||
|
if err := testmainTmpl.Execute(f, t); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// testFuncs collects the test functions discovered in a package's test files
// and is the data passed to the testmain template.
type testFuncs struct {
	Tests        []testFunc
	Benchmarks   []testFunc
	Examples     []testFunc
	TestMain     *testFunc // user-declared TestMain(*testing.M), if any
	TestPackage  *Package
	XTestPackage *Package
	ImportTest   bool // testmain must import the internal test package
	NeedTest     bool // internal test package is imported by name, not blank (_)
	ImportXtest  bool // testmain must import the external (_test) package
	NeedXtest    bool // external test package is imported by name, not blank (_)
}

// Tested returns the name of the package being tested.
func (t *testFuncs) Tested() string {
	return t.TestPackage.Name
}

// testFunc describes a single Test/Benchmark/Example/TestMain function
// referenced from the generated testmain.
type testFunc struct {
	Package   string // imported package name (_test or _xtest)
	Name      string // function name
	Output    string // output, for examples
	Unordered bool   // output is allowed to be unordered.
}
|
|
||||||
|
// load parses the test file filename (to be imported under the package alias
// pkg, "_test" or "_xtest") and records the Test/Benchmark/Example/TestMain
// functions it declares in t. *doImport is set when the file must be imported
// by the generated testmain; *seen is set when something from it will run.
func (t *testFuncs) load(filename, pkg string, doImport, seen *bool) error {
	var fset = token.NewFileSet()

	f, err := parser.ParseFile(fset, filename, nil, parser.ParseComments)
	if err != nil {
		return errors.New("failed to parse test file " + filename)
	}
	for _, d := range f.Decls {
		n, ok := d.(*ast.FuncDecl)
		if !ok {
			continue
		}
		// Methods cannot be test entry points; only package-level functions.
		if n.Recv != nil {
			continue
		}
		name := n.Name.String()
		switch {
		case name == "TestMain":
			if isTestFunc(n, "T") {
				// TestMain(t *testing.T) is an ordinary test, not the entry point.
				t.Tests = append(t.Tests, testFunc{pkg, name, "", false})
				*doImport, *seen = true, true
				continue
			}
			err := checkTestFunc(fset, n, "M")
			if err != nil {
				return err
			}
			if t.TestMain != nil {
				return errors.New("multiple definitions of TestMain")
			}
			t.TestMain = &testFunc{pkg, name, "", false}
			*doImport, *seen = true, true
		case isTest(name, "Test"):
			err := checkTestFunc(fset, n, "T")
			if err != nil {
				return err
			}
			t.Tests = append(t.Tests, testFunc{pkg, name, "", false})
			*doImport, *seen = true, true
		case isTest(name, "Benchmark"):
			err := checkTestFunc(fset, n, "B")
			if err != nil {
				return err
			}
			t.Benchmarks = append(t.Benchmarks, testFunc{pkg, name, "", false})
			*doImport, *seen = true, true
		}
	}
	// Examples are collected separately via go/doc, in source order.
	ex := doc.Examples(f)
	sort.Slice(ex, func(i, j int) bool { return ex[i].Order < ex[j].Order })
	for _, e := range ex {
		*doImport = true // import test file whether executed or not
		if e.Output == "" && !e.EmptyOutput {
			// Don't run examples with no output.
			continue
		}
		t.Examples = append(t.Examples, testFunc{pkg, "Example" + e.Name, e.Output, e.Unordered})
		*seen = true
	}
	return nil
}
|
|
||||||
|
func checkTestFunc(fset *token.FileSet, fn *ast.FuncDecl, arg string) error {
|
||||||
|
if !isTestFunc(fn, arg) {
|
||||||
|
name := fn.Name.String()
|
||||||
|
pos := fset.Position(fn.Pos())
|
||||||
|
return fmt.Errorf("%s: wrong signature for %s, must be: func %s(%s *testing.%s)", pos, name, name, strings.ToLower(arg), arg)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// testmainTmpl renders the generated _testmain.go. The template body is a
// runtime string and must not be edited casually: testmain behavior (named
// vs. blank test-package imports, TestMain dispatch vs. os.Exit(m.Run()))
// is encoded here.
var testmainTmpl = template.Must(template.New("main").Parse(`
package main

import (
{{if not .TestMain}}
	"os"
{{end}}
	"testing"
	"testing/internal/testdeps"

{{if .ImportTest}}
	{{if .NeedTest}}_test{{else}}_{{end}} {{.TestPackage.PkgPath | printf "%q"}}
{{end}}
{{if .ImportXtest}}
	{{if .NeedXtest}}_xtest{{else}}_{{end}} {{.XTestPackage.PkgPath | printf "%q"}}
{{end}}
)

var tests = []testing.InternalTest{
{{range .Tests}}
	{"{{.Name}}", {{.Package}}.{{.Name}}},
{{end}}
}

var benchmarks = []testing.InternalBenchmark{
{{range .Benchmarks}}
	{"{{.Name}}", {{.Package}}.{{.Name}}},
{{end}}
}

var examples = []testing.InternalExample{
{{range .Examples}}
	{"{{.Name}}", {{.Package}}.{{.Name}}, {{.Output | printf "%q"}}, {{.Unordered}}},
{{end}}
}

func init() {
	testdeps.ImportPath = {{.TestPackage.PkgPath | printf "%q"}}
}

func main() {
	m := testing.MainStart(testdeps.TestDeps{}, tests, benchmarks, examples)
{{with .TestMain}}
	{{.Package}}.{{.Name}}(m)
{{else}}
	os.Exit(m.Run())
{{end}}
}

`))
104
vendor/golang.org/x/tools/go/packages/golist_overlay.go
generated
vendored
Normal file
104
vendor/golang.org/x/tools/go/packages/golist_overlay.go
generated
vendored
Normal file
@@ -0,0 +1,104 @@
|
|||||||
|
package packages
|
||||||
|
|
||||||
|
import (
|
||||||
|
"go/parser"
|
||||||
|
"go/token"
|
||||||
|
"path/filepath"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// processGolistOverlay provides rudimentary support for adding
// files that don't exist on disk to an overlay. The results can be
// sometimes incorrect.
// It returns the IDs of packages whose file lists were modified and the
// import paths of packages referenced by overlay files but absent from the
// response.
// TODO(matloob): Handle unsupported cases, including the following:
// - test files
// - adding test and non-test files to test variants of packages
// - determining the correct package to add given a new import path
// - creating packages that don't exist
func processGolistOverlay(cfg *Config, response *driverResponse) (modifiedPkgs, needPkgs []string, err error) {
	havePkgs := make(map[string]string) // importPath -> non-test package ID
	needPkgsSet := make(map[string]bool)
	modifiedPkgsSet := make(map[string]bool)

	for _, pkg := range response.Packages {
		// This is an approximation of import path to id. This can be
		// wrong for tests, vendored packages, and a number of other cases.
		havePkgs[pkg.PkgPath] = pkg.ID
	}

outer:
	for path, contents := range cfg.Overlay {
		base := filepath.Base(path)
		if strings.HasSuffix(path, "_test.go") {
			// Overlays don't support adding new test files yet.
			// TODO(matloob): support adding new test files.
			continue
		}
		dir := filepath.Dir(path)
		// Attach the overlay file to the first package living in its directory.
		for _, pkg := range response.Packages {
			var dirContains, fileExists bool
			for _, f := range pkg.GoFiles {
				if sameFile(filepath.Dir(f), dir) {
					dirContains = true
				}
				if filepath.Base(f) == base {
					fileExists = true
				}
			}
			if dirContains {
				if !fileExists {
					pkg.GoFiles = append(pkg.GoFiles, path) // TODO(matloob): should the file just be added to GoFiles?
					pkg.CompiledGoFiles = append(pkg.CompiledGoFiles, path)
					modifiedPkgsSet[pkg.ID] = true
				}
				imports, err := extractImports(path, contents)
				if err != nil {
					// Let the parser or type checker report errors later.
					continue outer
				}
				for _, imp := range imports {
					_, found := pkg.Imports[imp]
					if !found {
						needPkgsSet[imp] = true
						// TODO(matloob): Handle cases when the following block isn't correct.
						// These include imports of test variants, imports of vendored packages, etc.
						id, ok := havePkgs[imp]
						if !ok {
							id = imp
						}
						// Stub import carrying only the ID; connected in a later pass.
						pkg.Imports[imp] = &Package{ID: id}
					}
				}
				continue outer
			}
		}
	}

	needPkgs = make([]string, 0, len(needPkgsSet))
	for pkg := range needPkgsSet {
		needPkgs = append(needPkgs, pkg)
	}
	modifiedPkgs = make([]string, 0, len(modifiedPkgsSet))
	for pkg := range modifiedPkgsSet {
		modifiedPkgs = append(modifiedPkgs, pkg)
	}
	return modifiedPkgs, needPkgs, err
}
|
|
||||||
|
// extractImports parses the given contents as a Go source file (imports
// only) and returns the list of import paths it declares, unquoted.
func extractImports(filename string, contents []byte) ([]string, error) {
	f, err := parser.ParseFile(token.NewFileSet(), filename, contents, parser.ImportsOnly) // TODO(matloob): reuse fileset?
	if err != nil {
		return nil, err
	}
	var res []string
	for _, spec := range f.Imports {
		path, err := strconv.Unquote(spec.Path.Value)
		if err != nil {
			return nil, err
		}
		res = append(res, path)
	}
	return res, nil
}
955
vendor/golang.org/x/tools/go/packages/packages.go
generated
vendored
Normal file
955
vendor/golang.org/x/tools/go/packages/packages.go
generated
vendored
Normal file
@@ -0,0 +1,955 @@
|
|||||||
|
// Copyright 2018 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package packages
|
||||||
|
|
||||||
|
// See doc.go for package documentation and implementation notes.
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"go/ast"
|
||||||
|
"go/parser"
|
||||||
|
"go/scanner"
|
||||||
|
"go/token"
|
||||||
|
"go/types"
|
||||||
|
"io/ioutil"
|
||||||
|
"log"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
"golang.org/x/tools/go/gcexportdata"
|
||||||
|
)
|
||||||
|
|
||||||
|
// A LoadMode specifies the amount of detail to return when loading.
// Higher-numbered modes cause Load to return more information,
// but may be slower. Load may return more information than requested.
type LoadMode int

// The modes are ordered by increasing detail (iota): each later constant
// documents the Package fields it adds on top of the earlier ones.
const (
	// LoadFiles finds the packages and computes their source file lists.
	// Package fields: ID, Name, Errors, GoFiles, and OtherFiles.
	LoadFiles LoadMode = iota

	// LoadImports adds import information for each package
	// and its dependencies.
	// Package fields added: Imports.
	LoadImports

	// LoadTypes adds type information for package-level
	// declarations in the packages matching the patterns.
	// Package fields added: Types, Fset, and IllTyped.
	// This mode uses type information provided by the build system when
	// possible, and may fill in the ExportFile field.
	LoadTypes

	// LoadSyntax adds typed syntax trees for the packages matching the patterns.
	// Package fields added: Syntax, and TypesInfo, for direct pattern matches only.
	LoadSyntax

	// LoadAllSyntax adds typed syntax trees for the packages matching the patterns
	// and all dependencies.
	// Package fields added: Types, Fset, IllTyped, Syntax, and TypesInfo,
	// for all packages in the import graph.
	LoadAllSyntax
)
|
|
||||||
|
// A Config specifies details about how packages should be loaded.
// The zero value is a valid configuration.
// Calls to Load do not modify this struct.
type Config struct {
	// Mode controls the level of information returned for each package.
	Mode LoadMode

	// Context specifies the context for the load operation.
	// If the context is cancelled, the loader may stop early
	// and return an ErrCancelled error.
	// If Context is nil, the load cannot be cancelled.
	Context context.Context

	// Dir is the directory in which to run the build system's query tool
	// that provides information about the packages.
	// If Dir is empty, the tool is run in the current directory.
	Dir string

	// Env is the environment to use when invoking the build system's query tool.
	// If Env is nil, the current environment is used.
	// As in os/exec's Cmd, only the last value in the slice for
	// each environment key is used. To specify the setting of only
	// a few variables, append to the current environment, as in:
	//
	//	opt.Env = append(os.Environ(), "GOOS=plan9", "GOARCH=386")
	//
	Env []string

	// BuildFlags is a list of command-line flags to be passed through to
	// the build system's query tool.
	BuildFlags []string

	// Fset provides source position information for syntax trees and types.
	// If Fset is nil, the loader will create a new FileSet.
	Fset *token.FileSet

	// ParseFile is called to read and parse each file
	// when preparing a package's type-checked syntax tree.
	// It must be safe to call ParseFile simultaneously from multiple goroutines.
	// If ParseFile is nil, the loader will use parser.ParseFile.
	//
	// ParseFile should parse the source from src and use filename only for
	// recording position information.
	//
	// An application may supply a custom implementation of ParseFile
	// to change the effective file contents or the behavior of the parser,
	// or to modify the syntax tree. For example, selectively eliminating
	// unwanted function bodies can significantly accelerate type checking.
	ParseFile func(fset *token.FileSet, filename string, src []byte) (*ast.File, error)

	// If Tests is set, the loader includes not just the packages
	// matching a particular pattern but also any related test packages,
	// including test-only variants of the package and the test executable.
	//
	// For example, when using the go command, loading "fmt" with Tests=true
	// returns four packages, with IDs "fmt" (the standard package),
	// "fmt [fmt.test]" (the package as compiled for the test),
	// "fmt_test" (the test functions from source files in package fmt_test),
	// and "fmt.test" (the test binary).
	//
	// In build systems with explicit names for tests,
	// setting Tests may have no effect.
	Tests bool

	// Overlay provides a mapping of absolute file paths to file contents.
	// If the file with the given path already exists, the parser will use the
	// alternative file contents provided by the map.
	//
	// Overlays provide incomplete support for when a given file doesn't
	// already exist on disk. See the package doc above for more details.
	Overlay map[string][]byte
}
|
|
||||||
|
// driver is the type for functions that query the build system for the
// packages named by the patterns.
type driver func(cfg *Config, patterns ...string) (*driverResponse, error)

// driverResponse contains the results for a driver query.
type driverResponse struct {
	// Sizes, if not nil, is the types.Sizes to use when type checking.
	Sizes *types.StdSizes

	// Roots is the set of package IDs that make up the root packages.
	// We have to encode this separately because when we encode a single package
	// we cannot know if it is one of the roots as that requires knowledge of the
	// graph it is part of.
	Roots []string `json:",omitempty"`

	// Packages is the full set of packages in the graph.
	// The packages are not connected into a graph.
	// The Imports if populated will be stubs that only have their ID set.
	// Imports will be connected and then type and syntax information added in a
	// later pass (see refine).
	Packages []*Package
}
|
|
||||||
|
// Load loads and returns the Go packages named by the given patterns.
//
// Config specifies loading options;
// nil behaves the same as an empty Config.
//
// Load returns an error if any of the patterns was invalid
// as defined by the underlying build system.
// It may return an empty list of packages without an error,
// for instance for an empty expansion of a valid wildcard.
// Errors associated with a particular package are recorded in the
// corresponding Package's Errors list, and do not cause Load to
// return an error. Clients may need to handle such errors before
// proceeding with further analysis. The PrintErrors function is
// provided for convenient display of all errors.
func Load(cfg *Config, patterns ...string) ([]*Package, error) {
	l := newLoader(cfg)
	// Query the build system, then connect/refine the flat package list
	// into a graph with whatever type/syntax detail the mode requests.
	response, err := defaultDriver(&l.Config, patterns...)
	if err != nil {
		return nil, err
	}
	l.sizes = response.Sizes
	return l.refine(response.Roots, response.Packages...)
}
|
|
||||||
|
// defaultDriver is a driver that looks for an external driver binary, and if
|
||||||
|
// it does not find it falls back to the built in go list driver.
|
||||||
|
func defaultDriver(cfg *Config, patterns ...string) (*driverResponse, error) {
|
||||||
|
driver := findExternalDriver(cfg)
|
||||||
|
if driver == nil {
|
||||||
|
driver = goListDriver
|
||||||
|
}
|
||||||
|
return driver(cfg, patterns...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// A Package describes a loaded Go package.
// Which fields are populated depends on the LoadMode requested; see the
// LoadMode constants for the field sets each mode guarantees.
type Package struct {
	// ID is a unique identifier for a package,
	// in a syntax provided by the underlying build system.
	//
	// Because the syntax varies based on the build system,
	// clients should treat IDs as opaque and not attempt to
	// interpret them.
	ID string

	// Name is the package name as it appears in the package source code.
	Name string

	// PkgPath is the package path as used by the go/types package.
	PkgPath string

	// Errors contains any errors encountered querying the metadata
	// of the package, or while parsing or type-checking its files.
	Errors []Error

	// GoFiles lists the absolute file paths of the package's Go source files.
	GoFiles []string

	// CompiledGoFiles lists the absolute file paths of the package's source
	// files that were presented to the compiler.
	// This may differ from GoFiles if files are processed before compilation.
	CompiledGoFiles []string

	// OtherFiles lists the absolute file paths of the package's non-Go source files,
	// including assembly, C, C++, Fortran, Objective-C, SWIG, and so on.
	OtherFiles []string

	// ExportFile is the absolute path to a file containing type
	// information for the package as provided by the build system.
	ExportFile string

	// Imports maps import paths appearing in the package's Go source files
	// to corresponding loaded Packages.
	Imports map[string]*Package

	// Types provides type information for the package.
	// Modes LoadTypes and above set this field for packages matching the
	// patterns; type information for dependencies may be missing or incomplete.
	// Mode LoadAllSyntax sets this field for all packages, including dependencies.
	Types *types.Package

	// Fset provides position information for Types, TypesInfo, and Syntax.
	// It is set only when Types is set.
	Fset *token.FileSet

	// IllTyped indicates whether the package or any dependency contains errors.
	// It is set only when Types is set.
	IllTyped bool

	// Syntax is the package's syntax trees, for the files listed in CompiledGoFiles.
	//
	// Mode LoadSyntax sets this field for packages matching the patterns.
	// Mode LoadAllSyntax sets this field for all packages, including dependencies.
	Syntax []*ast.File

	// TypesInfo provides type information about the package's syntax trees.
	// It is set only when Syntax is set.
	TypesInfo *types.Info

	// TypesSizes provides the effective size function for types in TypesInfo.
	TypesSizes types.Sizes
}
|
|
||||||
|
// An Error describes a problem with a package's metadata, syntax, or types.
|
||||||
|
type Error struct {
|
||||||
|
Pos string // "file:line:col" or "file:line" or "" or "-"
|
||||||
|
Msg string
|
||||||
|
Kind ErrorKind
|
||||||
|
}
|
||||||
|
|
||||||
|
// ErrorKind describes the source of the error, allowing the user to
|
||||||
|
// differentiate between errors generated by the driver, the parser, or the
|
||||||
|
// type-checker.
|
||||||
|
type ErrorKind int
|
||||||
|
|
||||||
|
const (
|
||||||
|
UnknownError ErrorKind = iota
|
||||||
|
ListError
|
||||||
|
ParseError
|
||||||
|
TypeError
|
||||||
|
)
|
||||||
|
|
||||||
|
func (err Error) Error() string {
|
||||||
|
pos := err.Pos
|
||||||
|
if pos == "" {
|
||||||
|
pos = "-" // like token.Position{}.String()
|
||||||
|
}
|
||||||
|
return pos + ": " + err.Msg
|
||||||
|
}
|
||||||
|
|
||||||
|
// flatPackage is the JSON form of Package
|
||||||
|
// It drops all the type and syntax fields, and transforms the Imports
|
||||||
|
//
|
||||||
|
// TODO(adonovan): identify this struct with Package, effectively
|
||||||
|
// publishing the JSON protocol.
|
||||||
|
type flatPackage struct {
|
||||||
|
ID string
|
||||||
|
Name string `json:",omitempty"`
|
||||||
|
PkgPath string `json:",omitempty"`
|
||||||
|
Errors []Error `json:",omitempty"`
|
||||||
|
GoFiles []string `json:",omitempty"`
|
||||||
|
CompiledGoFiles []string `json:",omitempty"`
|
||||||
|
OtherFiles []string `json:",omitempty"`
|
||||||
|
ExportFile string `json:",omitempty"`
|
||||||
|
Imports map[string]string `json:",omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// MarshalJSON returns the Package in its JSON form.
|
||||||
|
// For the most part, the structure fields are written out unmodified, and
|
||||||
|
// the type and syntax fields are skipped.
|
||||||
|
// The imports are written out as just a map of path to package id.
|
||||||
|
// The errors are written using a custom type that tries to preserve the
|
||||||
|
// structure of error types we know about.
|
||||||
|
//
|
||||||
|
// This method exists to enable support for additional build systems. It is
|
||||||
|
// not intended for use by clients of the API and we may change the format.
|
||||||
|
func (p *Package) MarshalJSON() ([]byte, error) {
|
||||||
|
flat := &flatPackage{
|
||||||
|
ID: p.ID,
|
||||||
|
Name: p.Name,
|
||||||
|
PkgPath: p.PkgPath,
|
||||||
|
Errors: p.Errors,
|
||||||
|
GoFiles: p.GoFiles,
|
||||||
|
CompiledGoFiles: p.CompiledGoFiles,
|
||||||
|
OtherFiles: p.OtherFiles,
|
||||||
|
ExportFile: p.ExportFile,
|
||||||
|
}
|
||||||
|
if len(p.Imports) > 0 {
|
||||||
|
flat.Imports = make(map[string]string, len(p.Imports))
|
||||||
|
for path, ipkg := range p.Imports {
|
||||||
|
flat.Imports[path] = ipkg.ID
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return json.Marshal(flat)
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalJSON reads in a Package from its JSON format.
|
||||||
|
// See MarshalJSON for details about the format accepted.
|
||||||
|
func (p *Package) UnmarshalJSON(b []byte) error {
|
||||||
|
flat := &flatPackage{}
|
||||||
|
if err := json.Unmarshal(b, &flat); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
*p = Package{
|
||||||
|
ID: flat.ID,
|
||||||
|
Name: flat.Name,
|
||||||
|
PkgPath: flat.PkgPath,
|
||||||
|
Errors: flat.Errors,
|
||||||
|
GoFiles: flat.GoFiles,
|
||||||
|
CompiledGoFiles: flat.CompiledGoFiles,
|
||||||
|
OtherFiles: flat.OtherFiles,
|
||||||
|
ExportFile: flat.ExportFile,
|
||||||
|
}
|
||||||
|
if len(flat.Imports) > 0 {
|
||||||
|
p.Imports = make(map[string]*Package, len(flat.Imports))
|
||||||
|
for path, id := range flat.Imports {
|
||||||
|
p.Imports[path] = &Package{ID: id}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *Package) String() string { return p.ID }
|
||||||
|
|
||||||
|
// loaderPackage augments Package with state used during the loading phase
|
||||||
|
type loaderPackage struct {
|
||||||
|
*Package
|
||||||
|
importErrors map[string]error // maps each bad import to its error
|
||||||
|
loadOnce sync.Once
|
||||||
|
color uint8 // for cycle detection
|
||||||
|
needsrc bool // load from source (Mode >= LoadTypes)
|
||||||
|
needtypes bool // type information is either requested or depended on
|
||||||
|
initial bool // package was matched by a pattern
|
||||||
|
}
|
||||||
|
|
||||||
|
// loader holds the working state of a single call to load.
|
||||||
|
type loader struct {
|
||||||
|
pkgs map[string]*loaderPackage
|
||||||
|
Config
|
||||||
|
sizes types.Sizes
|
||||||
|
exportMu sync.Mutex // enforces mutual exclusion of exportdata operations
|
||||||
|
}
|
||||||
|
|
||||||
|
func newLoader(cfg *Config) *loader {
|
||||||
|
ld := &loader{}
|
||||||
|
if cfg != nil {
|
||||||
|
ld.Config = *cfg
|
||||||
|
}
|
||||||
|
if ld.Config.Env == nil {
|
||||||
|
ld.Config.Env = os.Environ()
|
||||||
|
}
|
||||||
|
if ld.Context == nil {
|
||||||
|
ld.Context = context.Background()
|
||||||
|
}
|
||||||
|
if ld.Dir == "" {
|
||||||
|
if dir, err := os.Getwd(); err == nil {
|
||||||
|
ld.Dir = dir
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if ld.Mode >= LoadTypes {
|
||||||
|
if ld.Fset == nil {
|
||||||
|
ld.Fset = token.NewFileSet()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ParseFile is required even in LoadTypes mode
|
||||||
|
// because we load source if export data is missing.
|
||||||
|
if ld.ParseFile == nil {
|
||||||
|
ld.ParseFile = func(fset *token.FileSet, filename string, src []byte) (*ast.File, error) {
|
||||||
|
var isrc interface{}
|
||||||
|
if src != nil {
|
||||||
|
isrc = src
|
||||||
|
}
|
||||||
|
const mode = parser.AllErrors | parser.ParseComments
|
||||||
|
return parser.ParseFile(fset, filename, isrc, mode)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return ld
|
||||||
|
}
|
||||||
|
|
||||||
|
// refine connects the supplied packages into a graph and then adds type and
|
||||||
|
// and syntax information as requested by the LoadMode.
|
||||||
|
func (ld *loader) refine(roots []string, list ...*Package) ([]*Package, error) {
|
||||||
|
rootMap := make(map[string]int, len(roots))
|
||||||
|
for i, root := range roots {
|
||||||
|
rootMap[root] = i
|
||||||
|
}
|
||||||
|
ld.pkgs = make(map[string]*loaderPackage)
|
||||||
|
// first pass, fixup and build the map and roots
|
||||||
|
var initial = make([]*loaderPackage, len(roots))
|
||||||
|
for _, pkg := range list {
|
||||||
|
rootIndex := -1
|
||||||
|
if i, found := rootMap[pkg.ID]; found {
|
||||||
|
rootIndex = i
|
||||||
|
}
|
||||||
|
lpkg := &loaderPackage{
|
||||||
|
Package: pkg,
|
||||||
|
needtypes: ld.Mode >= LoadAllSyntax ||
|
||||||
|
ld.Mode >= LoadTypes && rootIndex >= 0,
|
||||||
|
needsrc: ld.Mode >= LoadAllSyntax ||
|
||||||
|
ld.Mode >= LoadSyntax && rootIndex >= 0 ||
|
||||||
|
len(ld.Overlay) > 0 || // Overlays can invalidate export data. TODO(matloob): make this check fine-grained based on dependencies on overlaid files
|
||||||
|
pkg.ExportFile == "" && pkg.PkgPath != "unsafe",
|
||||||
|
}
|
||||||
|
ld.pkgs[lpkg.ID] = lpkg
|
||||||
|
if rootIndex >= 0 {
|
||||||
|
initial[rootIndex] = lpkg
|
||||||
|
lpkg.initial = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for i, root := range roots {
|
||||||
|
if initial[i] == nil {
|
||||||
|
return nil, fmt.Errorf("root package %v is missing", root)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Materialize the import graph.
|
||||||
|
|
||||||
|
const (
|
||||||
|
white = 0 // new
|
||||||
|
grey = 1 // in progress
|
||||||
|
black = 2 // complete
|
||||||
|
)
|
||||||
|
|
||||||
|
// visit traverses the import graph, depth-first,
|
||||||
|
// and materializes the graph as Packages.Imports.
|
||||||
|
//
|
||||||
|
// Valid imports are saved in the Packages.Import map.
|
||||||
|
// Invalid imports (cycles and missing nodes) are saved in the importErrors map.
|
||||||
|
// Thus, even in the presence of both kinds of errors, the Import graph remains a DAG.
|
||||||
|
//
|
||||||
|
// visit returns whether the package needs src or has a transitive
|
||||||
|
// dependency on a package that does. These are the only packages
|
||||||
|
// for which we load source code.
|
||||||
|
var stack []*loaderPackage
|
||||||
|
var visit func(lpkg *loaderPackage) bool
|
||||||
|
var srcPkgs []*loaderPackage
|
||||||
|
visit = func(lpkg *loaderPackage) bool {
|
||||||
|
switch lpkg.color {
|
||||||
|
case black:
|
||||||
|
return lpkg.needsrc
|
||||||
|
case grey:
|
||||||
|
panic("internal error: grey node")
|
||||||
|
}
|
||||||
|
lpkg.color = grey
|
||||||
|
stack = append(stack, lpkg) // push
|
||||||
|
stubs := lpkg.Imports // the structure form has only stubs with the ID in the Imports
|
||||||
|
lpkg.Imports = make(map[string]*Package, len(stubs))
|
||||||
|
for importPath, ipkg := range stubs {
|
||||||
|
var importErr error
|
||||||
|
imp := ld.pkgs[ipkg.ID]
|
||||||
|
if imp == nil {
|
||||||
|
// (includes package "C" when DisableCgo)
|
||||||
|
importErr = fmt.Errorf("missing package: %q", ipkg.ID)
|
||||||
|
} else if imp.color == grey {
|
||||||
|
importErr = fmt.Errorf("import cycle: %s", stack)
|
||||||
|
}
|
||||||
|
if importErr != nil {
|
||||||
|
if lpkg.importErrors == nil {
|
||||||
|
lpkg.importErrors = make(map[string]error)
|
||||||
|
}
|
||||||
|
lpkg.importErrors[importPath] = importErr
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if visit(imp) {
|
||||||
|
lpkg.needsrc = true
|
||||||
|
}
|
||||||
|
lpkg.Imports[importPath] = imp.Package
|
||||||
|
}
|
||||||
|
if lpkg.needsrc {
|
||||||
|
srcPkgs = append(srcPkgs, lpkg)
|
||||||
|
}
|
||||||
|
stack = stack[:len(stack)-1] // pop
|
||||||
|
lpkg.color = black
|
||||||
|
|
||||||
|
return lpkg.needsrc
|
||||||
|
}
|
||||||
|
|
||||||
|
if ld.Mode < LoadImports {
|
||||||
|
//we do this to drop the stub import packages that we are not even going to try to resolve
|
||||||
|
for _, lpkg := range initial {
|
||||||
|
lpkg.Imports = nil
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// For each initial package, create its import DAG.
|
||||||
|
for _, lpkg := range initial {
|
||||||
|
visit(lpkg)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for _, lpkg := range srcPkgs {
|
||||||
|
// Complete type information is required for the
|
||||||
|
// immediate dependencies of each source package.
|
||||||
|
for _, ipkg := range lpkg.Imports {
|
||||||
|
imp := ld.pkgs[ipkg.ID]
|
||||||
|
imp.needtypes = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Load type data if needed, starting at
|
||||||
|
// the initial packages (roots of the import DAG).
|
||||||
|
if ld.Mode >= LoadTypes {
|
||||||
|
var wg sync.WaitGroup
|
||||||
|
for _, lpkg := range initial {
|
||||||
|
wg.Add(1)
|
||||||
|
go func(lpkg *loaderPackage) {
|
||||||
|
ld.loadRecursive(lpkg)
|
||||||
|
wg.Done()
|
||||||
|
}(lpkg)
|
||||||
|
}
|
||||||
|
wg.Wait()
|
||||||
|
}
|
||||||
|
|
||||||
|
result := make([]*Package, len(initial))
|
||||||
|
for i, lpkg := range initial {
|
||||||
|
result[i] = lpkg.Package
|
||||||
|
}
|
||||||
|
return result, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// loadRecursive loads the specified package and its dependencies,
|
||||||
|
// recursively, in parallel, in topological order.
|
||||||
|
// It is atomic and idempotent.
|
||||||
|
// Precondition: ld.Mode >= LoadTypes.
|
||||||
|
func (ld *loader) loadRecursive(lpkg *loaderPackage) {
|
||||||
|
lpkg.loadOnce.Do(func() {
|
||||||
|
// Load the direct dependencies, in parallel.
|
||||||
|
var wg sync.WaitGroup
|
||||||
|
for _, ipkg := range lpkg.Imports {
|
||||||
|
imp := ld.pkgs[ipkg.ID]
|
||||||
|
wg.Add(1)
|
||||||
|
go func(imp *loaderPackage) {
|
||||||
|
ld.loadRecursive(imp)
|
||||||
|
wg.Done()
|
||||||
|
}(imp)
|
||||||
|
}
|
||||||
|
wg.Wait()
|
||||||
|
|
||||||
|
ld.loadPackage(lpkg)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// loadPackage loads the specified package.
|
||||||
|
// It must be called only once per Package,
|
||||||
|
// after immediate dependencies are loaded.
|
||||||
|
// Precondition: ld.Mode >= LoadTypes.
|
||||||
|
func (ld *loader) loadPackage(lpkg *loaderPackage) {
|
||||||
|
if lpkg.PkgPath == "unsafe" {
|
||||||
|
// Fill in the blanks to avoid surprises.
|
||||||
|
lpkg.Types = types.Unsafe
|
||||||
|
lpkg.Fset = ld.Fset
|
||||||
|
lpkg.Syntax = []*ast.File{}
|
||||||
|
lpkg.TypesInfo = new(types.Info)
|
||||||
|
lpkg.TypesSizes = ld.sizes
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Call NewPackage directly with explicit name.
|
||||||
|
// This avoids skew between golist and go/types when the files'
|
||||||
|
// package declarations are inconsistent.
|
||||||
|
lpkg.Types = types.NewPackage(lpkg.PkgPath, lpkg.Name)
|
||||||
|
lpkg.Fset = ld.Fset
|
||||||
|
|
||||||
|
// Subtle: we populate all Types fields with an empty Package
|
||||||
|
// before loading export data so that export data processing
|
||||||
|
// never has to create a types.Package for an indirect dependency,
|
||||||
|
// which would then require that such created packages be explicitly
|
||||||
|
// inserted back into the Import graph as a final step after export data loading.
|
||||||
|
// The Diamond test exercises this case.
|
||||||
|
if !lpkg.needtypes {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if !lpkg.needsrc {
|
||||||
|
ld.loadFromExportData(lpkg)
|
||||||
|
return // not a source package, don't get syntax trees
|
||||||
|
}
|
||||||
|
|
||||||
|
appendError := func(err error) {
|
||||||
|
// Convert various error types into the one true Error.
|
||||||
|
var errs []Error
|
||||||
|
switch err := err.(type) {
|
||||||
|
case Error:
|
||||||
|
// from driver
|
||||||
|
errs = append(errs, err)
|
||||||
|
|
||||||
|
case *os.PathError:
|
||||||
|
// from parser
|
||||||
|
errs = append(errs, Error{
|
||||||
|
Pos: err.Path + ":1",
|
||||||
|
Msg: err.Err.Error(),
|
||||||
|
Kind: ParseError,
|
||||||
|
})
|
||||||
|
|
||||||
|
case scanner.ErrorList:
|
||||||
|
// from parser
|
||||||
|
for _, err := range err {
|
||||||
|
errs = append(errs, Error{
|
||||||
|
Pos: err.Pos.String(),
|
||||||
|
Msg: err.Msg,
|
||||||
|
Kind: ParseError,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
case types.Error:
|
||||||
|
// from type checker
|
||||||
|
errs = append(errs, Error{
|
||||||
|
Pos: err.Fset.Position(err.Pos).String(),
|
||||||
|
Msg: err.Msg,
|
||||||
|
Kind: TypeError,
|
||||||
|
})
|
||||||
|
|
||||||
|
default:
|
||||||
|
// unexpected impoverished error from parser?
|
||||||
|
errs = append(errs, Error{
|
||||||
|
Pos: "-",
|
||||||
|
Msg: err.Error(),
|
||||||
|
Kind: UnknownError,
|
||||||
|
})
|
||||||
|
|
||||||
|
// If you see this error message, please file a bug.
|
||||||
|
log.Printf("internal error: error %q (%T) without position", err, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
lpkg.Errors = append(lpkg.Errors, errs...)
|
||||||
|
}
|
||||||
|
|
||||||
|
files, errs := ld.parseFiles(lpkg.CompiledGoFiles)
|
||||||
|
for _, err := range errs {
|
||||||
|
appendError(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
lpkg.Syntax = files
|
||||||
|
|
||||||
|
lpkg.TypesInfo = &types.Info{
|
||||||
|
Types: make(map[ast.Expr]types.TypeAndValue),
|
||||||
|
Defs: make(map[*ast.Ident]types.Object),
|
||||||
|
Uses: make(map[*ast.Ident]types.Object),
|
||||||
|
Implicits: make(map[ast.Node]types.Object),
|
||||||
|
Scopes: make(map[ast.Node]*types.Scope),
|
||||||
|
Selections: make(map[*ast.SelectorExpr]*types.Selection),
|
||||||
|
}
|
||||||
|
lpkg.TypesSizes = ld.sizes
|
||||||
|
|
||||||
|
importer := importerFunc(func(path string) (*types.Package, error) {
|
||||||
|
if path == "unsafe" {
|
||||||
|
return types.Unsafe, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// The imports map is keyed by import path.
|
||||||
|
ipkg := lpkg.Imports[path]
|
||||||
|
if ipkg == nil {
|
||||||
|
if err := lpkg.importErrors[path]; err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
// There was skew between the metadata and the
|
||||||
|
// import declarations, likely due to an edit
|
||||||
|
// race, or because the ParseFile feature was
|
||||||
|
// used to supply alternative file contents.
|
||||||
|
return nil, fmt.Errorf("no metadata for %s", path)
|
||||||
|
}
|
||||||
|
|
||||||
|
if ipkg.Types != nil && ipkg.Types.Complete() {
|
||||||
|
return ipkg.Types, nil
|
||||||
|
}
|
||||||
|
log.Fatalf("internal error: nil Pkg importing %q from %q", path, lpkg)
|
||||||
|
panic("unreachable")
|
||||||
|
})
|
||||||
|
|
||||||
|
// type-check
|
||||||
|
tc := &types.Config{
|
||||||
|
Importer: importer,
|
||||||
|
|
||||||
|
// Type-check bodies of functions only in non-initial packages.
|
||||||
|
// Example: for import graph A->B->C and initial packages {A,C},
|
||||||
|
// we can ignore function bodies in B.
|
||||||
|
IgnoreFuncBodies: ld.Mode < LoadAllSyntax && !lpkg.initial,
|
||||||
|
|
||||||
|
Error: appendError,
|
||||||
|
Sizes: ld.sizes,
|
||||||
|
}
|
||||||
|
types.NewChecker(tc, ld.Fset, lpkg.Types, lpkg.TypesInfo).Files(lpkg.Syntax)
|
||||||
|
|
||||||
|
lpkg.importErrors = nil // no longer needed
|
||||||
|
|
||||||
|
// If !Cgo, the type-checker uses FakeImportC mode, so
|
||||||
|
// it doesn't invoke the importer for import "C",
|
||||||
|
// nor report an error for the import,
|
||||||
|
// or for any undefined C.f reference.
|
||||||
|
// We must detect this explicitly and correctly
|
||||||
|
// mark the package as IllTyped (by reporting an error).
|
||||||
|
// TODO(adonovan): if these errors are annoying,
|
||||||
|
// we could just set IllTyped quietly.
|
||||||
|
if tc.FakeImportC {
|
||||||
|
outer:
|
||||||
|
for _, f := range lpkg.Syntax {
|
||||||
|
for _, imp := range f.Imports {
|
||||||
|
if imp.Path.Value == `"C"` {
|
||||||
|
err := types.Error{Fset: ld.Fset, Pos: imp.Pos(), Msg: `import "C" ignored`}
|
||||||
|
appendError(err)
|
||||||
|
break outer
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Record accumulated errors.
|
||||||
|
illTyped := len(lpkg.Errors) > 0
|
||||||
|
if !illTyped {
|
||||||
|
for _, imp := range lpkg.Imports {
|
||||||
|
if imp.IllTyped {
|
||||||
|
illTyped = true
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
lpkg.IllTyped = illTyped
|
||||||
|
}
|
||||||
|
|
||||||
|
// An importFunc is an implementation of the single-method
|
||||||
|
// types.Importer interface based on a function value.
|
||||||
|
type importerFunc func(path string) (*types.Package, error)
|
||||||
|
|
||||||
|
func (f importerFunc) Import(path string) (*types.Package, error) { return f(path) }
|
||||||
|
|
||||||
|
// We use a counting semaphore to limit
|
||||||
|
// the number of parallel I/O calls per process.
|
||||||
|
var ioLimit = make(chan bool, 20)
|
||||||
|
|
||||||
|
// parseFiles reads and parses the Go source files and returns the ASTs
|
||||||
|
// of the ones that could be at least partially parsed, along with a
|
||||||
|
// list of I/O and parse errors encountered.
|
||||||
|
//
|
||||||
|
// Because files are scanned in parallel, the token.Pos
|
||||||
|
// positions of the resulting ast.Files are not ordered.
|
||||||
|
//
|
||||||
|
func (ld *loader) parseFiles(filenames []string) ([]*ast.File, []error) {
|
||||||
|
var wg sync.WaitGroup
|
||||||
|
n := len(filenames)
|
||||||
|
parsed := make([]*ast.File, n)
|
||||||
|
errors := make([]error, n)
|
||||||
|
for i, file := range filenames {
|
||||||
|
if ld.Config.Context.Err() != nil {
|
||||||
|
parsed[i] = nil
|
||||||
|
errors[i] = ld.Config.Context.Err()
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
wg.Add(1)
|
||||||
|
go func(i int, filename string) {
|
||||||
|
ioLimit <- true // wait
|
||||||
|
// ParseFile may return both an AST and an error.
|
||||||
|
var src []byte
|
||||||
|
for f, contents := range ld.Config.Overlay {
|
||||||
|
if sameFile(f, filename) {
|
||||||
|
src = contents
|
||||||
|
}
|
||||||
|
}
|
||||||
|
var err error
|
||||||
|
if src == nil {
|
||||||
|
src, err = ioutil.ReadFile(filename)
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
parsed[i], errors[i] = nil, err
|
||||||
|
} else {
|
||||||
|
parsed[i], errors[i] = ld.ParseFile(ld.Fset, filename, src)
|
||||||
|
}
|
||||||
|
<-ioLimit // signal
|
||||||
|
wg.Done()
|
||||||
|
}(i, file)
|
||||||
|
}
|
||||||
|
wg.Wait()
|
||||||
|
|
||||||
|
// Eliminate nils, preserving order.
|
||||||
|
var o int
|
||||||
|
for _, f := range parsed {
|
||||||
|
if f != nil {
|
||||||
|
parsed[o] = f
|
||||||
|
o++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
parsed = parsed[:o]
|
||||||
|
|
||||||
|
o = 0
|
||||||
|
for _, err := range errors {
|
||||||
|
if err != nil {
|
||||||
|
errors[o] = err
|
||||||
|
o++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
errors = errors[:o]
|
||||||
|
|
||||||
|
return parsed, errors
|
||||||
|
}
|
||||||
|
|
||||||
|
// sameFile returns true if x and y have the same basename and denote
|
||||||
|
// the same file.
|
||||||
|
//
|
||||||
|
func sameFile(x, y string) bool {
|
||||||
|
if x == y {
|
||||||
|
// It could be the case that y doesn't exist.
|
||||||
|
// For instance, it may be an overlay file that
|
||||||
|
// hasn't been written to disk. To handle that case
|
||||||
|
// let x == y through. (We added the exact absolute path
|
||||||
|
// string to the CompiledGoFiles list, so the unwritten
|
||||||
|
// overlay case implies x==y.)
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
if filepath.Base(x) == filepath.Base(y) { // (optimisation)
|
||||||
|
if xi, err := os.Stat(x); err == nil {
|
||||||
|
if yi, err := os.Stat(y); err == nil {
|
||||||
|
return os.SameFile(xi, yi)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// loadFromExportData returns type information for the specified
|
||||||
|
// package, loading it from an export data file on the first request.
|
||||||
|
func (ld *loader) loadFromExportData(lpkg *loaderPackage) (*types.Package, error) {
|
||||||
|
if lpkg.PkgPath == "" {
|
||||||
|
log.Fatalf("internal error: Package %s has no PkgPath", lpkg)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Because gcexportdata.Read has the potential to create or
|
||||||
|
// modify the types.Package for each node in the transitive
|
||||||
|
// closure of dependencies of lpkg, all exportdata operations
|
||||||
|
// must be sequential. (Finer-grained locking would require
|
||||||
|
// changes to the gcexportdata API.)
|
||||||
|
//
|
||||||
|
// The exportMu lock guards the Package.Pkg field and the
|
||||||
|
// types.Package it points to, for each Package in the graph.
|
||||||
|
//
|
||||||
|
// Not all accesses to Package.Pkg need to be protected by exportMu:
|
||||||
|
// graph ordering ensures that direct dependencies of source
|
||||||
|
// packages are fully loaded before the importer reads their Pkg field.
|
||||||
|
ld.exportMu.Lock()
|
||||||
|
defer ld.exportMu.Unlock()
|
||||||
|
|
||||||
|
if tpkg := lpkg.Types; tpkg != nil && tpkg.Complete() {
|
||||||
|
return tpkg, nil // cache hit
|
||||||
|
}
|
||||||
|
|
||||||
|
lpkg.IllTyped = true // fail safe
|
||||||
|
|
||||||
|
if lpkg.ExportFile == "" {
|
||||||
|
// Errors while building export data will have been printed to stderr.
|
||||||
|
return nil, fmt.Errorf("no export data file")
|
||||||
|
}
|
||||||
|
f, err := os.Open(lpkg.ExportFile)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
defer f.Close()
|
||||||
|
|
||||||
|
// Read gc export data.
|
||||||
|
//
|
||||||
|
// We don't currently support gccgo export data because all
|
||||||
|
// underlying workspaces use the gc toolchain. (Even build
|
||||||
|
// systems that support gccgo don't use it for workspace
|
||||||
|
// queries.)
|
||||||
|
r, err := gcexportdata.NewReader(f)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("reading %s: %v", lpkg.ExportFile, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Build the view.
|
||||||
|
//
|
||||||
|
// The gcexportdata machinery has no concept of package ID.
|
||||||
|
// It identifies packages by their PkgPath, which although not
|
||||||
|
// globally unique is unique within the scope of one invocation
|
||||||
|
// of the linker, type-checker, or gcexportdata.
|
||||||
|
//
|
||||||
|
// So, we must build a PkgPath-keyed view of the global
|
||||||
|
// (conceptually ID-keyed) cache of packages and pass it to
|
||||||
|
// gcexportdata. The view must contain every existing
|
||||||
|
// package that might possibly be mentioned by the
|
||||||
|
// current package---its transitive closure.
|
||||||
|
//
|
||||||
|
// In loadPackage, we unconditionally create a types.Package for
|
||||||
|
// each dependency so that export data loading does not
|
||||||
|
// create new ones.
|
||||||
|
//
|
||||||
|
// TODO(adonovan): it would be simpler and more efficient
|
||||||
|
// if the export data machinery invoked a callback to
|
||||||
|
// get-or-create a package instead of a map.
|
||||||
|
//
|
||||||
|
view := make(map[string]*types.Package) // view seen by gcexportdata
|
||||||
|
seen := make(map[*loaderPackage]bool) // all visited packages
|
||||||
|
var visit func(pkgs map[string]*Package)
|
||||||
|
visit = func(pkgs map[string]*Package) {
|
||||||
|
for _, p := range pkgs {
|
||||||
|
lpkg := ld.pkgs[p.ID]
|
||||||
|
if !seen[lpkg] {
|
||||||
|
seen[lpkg] = true
|
||||||
|
view[lpkg.PkgPath] = lpkg.Types
|
||||||
|
visit(lpkg.Imports)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
visit(lpkg.Imports)
|
||||||
|
|
||||||
|
viewLen := len(view) + 1 // adding the self package
|
||||||
|
// Parse the export data.
|
||||||
|
// (May modify incomplete packages in view but not create new ones.)
|
||||||
|
tpkg, err := gcexportdata.Read(r, ld.Fset, view, lpkg.PkgPath)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("reading %s: %v", lpkg.ExportFile, err)
|
||||||
|
}
|
||||||
|
if viewLen != len(view) {
|
||||||
|
log.Fatalf("Unexpected package creation during export data loading")
|
||||||
|
}
|
||||||
|
|
||||||
|
lpkg.Types = tpkg
|
||||||
|
lpkg.IllTyped = false
|
||||||
|
|
||||||
|
return tpkg, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func usesExportData(cfg *Config) bool {
|
||||||
|
return LoadTypes <= cfg.Mode && cfg.Mode < LoadAllSyntax
|
||||||
|
}
|
||||||
55
vendor/golang.org/x/tools/go/packages/visit.go
generated
vendored
Normal file
55
vendor/golang.org/x/tools/go/packages/visit.go
generated
vendored
Normal file
@@ -0,0 +1,55 @@
|
|||||||
|
package packages
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"sort"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Visit visits all the packages in the import graph whose roots are
|
||||||
|
// pkgs, calling the optional pre function the first time each package
|
||||||
|
// is encountered (preorder), and the optional post function after a
|
||||||
|
// package's dependencies have been visited (postorder).
|
||||||
|
// The boolean result of pre(pkg) determines whether
|
||||||
|
// the imports of package pkg are visited.
|
||||||
|
func Visit(pkgs []*Package, pre func(*Package) bool, post func(*Package)) {
|
||||||
|
seen := make(map[*Package]bool)
|
||||||
|
var visit func(*Package)
|
||||||
|
visit = func(pkg *Package) {
|
||||||
|
if !seen[pkg] {
|
||||||
|
seen[pkg] = true
|
||||||
|
|
||||||
|
if pre == nil || pre(pkg) {
|
||||||
|
paths := make([]string, 0, len(pkg.Imports))
|
||||||
|
for path := range pkg.Imports {
|
||||||
|
paths = append(paths, path)
|
||||||
|
}
|
||||||
|
sort.Strings(paths) // Imports is a map, this makes visit stable
|
||||||
|
for _, path := range paths {
|
||||||
|
visit(pkg.Imports[path])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if post != nil {
|
||||||
|
post(pkg)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for _, pkg := range pkgs {
|
||||||
|
visit(pkg)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// PrintErrors prints to os.Stderr the accumulated errors of all
|
||||||
|
// packages in the import graph rooted at pkgs, dependencies first.
|
||||||
|
// PrintErrors returns the number of errors printed.
|
||||||
|
func PrintErrors(pkgs []*Package) int {
|
||||||
|
var n int
|
||||||
|
Visit(pkgs, nil, func(pkg *Package) {
|
||||||
|
for _, err := range pkg.Errors {
|
||||||
|
fmt.Fprintln(os.Stderr, err)
|
||||||
|
n++
|
||||||
|
}
|
||||||
|
})
|
||||||
|
return n
|
||||||
|
}
|
||||||
77
vendor/golang.org/x/tools/go/vcs/vcs.go
generated
vendored
77
vendor/golang.org/x/tools/go/vcs/vcs.go
generated
vendored
@@ -2,6 +2,16 @@
|
|||||||
// Use of this source code is governed by a BSD-style
|
// Use of this source code is governed by a BSD-style
|
||||||
// license that can be found in the LICENSE file.
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// Package vcs exposes functions for resolving import paths
|
||||||
|
// and using version control systems, which can be used to
|
||||||
|
// implement behavior similar to the standard "go get" command.
|
||||||
|
//
|
||||||
|
// This package is a copy of internal code in package cmd/go/internal/get,
|
||||||
|
// modified to make the identifiers exported. It's provided here
|
||||||
|
// for developers who want to write tools with similar semantics.
|
||||||
|
// It needs to be manually kept in sync with upstream when changes are
|
||||||
|
// made to cmd/go/internal/get; see https://golang.org/issue/11490.
|
||||||
|
//
|
||||||
package vcs
|
package vcs
|
||||||
|
|
||||||
import (
|
import (
|
||||||
@@ -10,6 +20,7 @@ import (
|
|||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"log"
|
"log"
|
||||||
|
"net/url"
|
||||||
"os"
|
"os"
|
||||||
"os/exec"
|
"os/exec"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
@@ -344,11 +355,28 @@ func FromDir(dir, srcRoot string) (vcs *Cmd, root string, err error) {
|
|||||||
return nil, "", fmt.Errorf("directory %q is outside source root %q", dir, srcRoot)
|
return nil, "", fmt.Errorf("directory %q is outside source root %q", dir, srcRoot)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
var vcsRet *Cmd
|
||||||
|
var rootRet string
|
||||||
|
|
||||||
origDir := dir
|
origDir := dir
|
||||||
for len(dir) > len(srcRoot) {
|
for len(dir) > len(srcRoot) {
|
||||||
for _, vcs := range vcsList {
|
for _, vcs := range vcsList {
|
||||||
if _, err := os.Stat(filepath.Join(dir, "."+vcs.Cmd)); err == nil {
|
if _, err := os.Stat(filepath.Join(dir, "."+vcs.Cmd)); err == nil {
|
||||||
return vcs, filepath.ToSlash(dir[len(srcRoot)+1:]), nil
|
root := filepath.ToSlash(dir[len(srcRoot)+1:])
|
||||||
|
// Record first VCS we find, but keep looking,
|
||||||
|
// to detect mistakes like one kind of VCS inside another.
|
||||||
|
if vcsRet == nil {
|
||||||
|
vcsRet = vcs
|
||||||
|
rootRet = root
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
// Allow .git inside .git, which can arise due to submodules.
|
||||||
|
if vcsRet == vcs && vcs.Cmd == "git" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
// Otherwise, we have one VCS inside a different VCS.
|
||||||
|
return nil, "", fmt.Errorf("directory %q uses %s, but parent %q uses %s",
|
||||||
|
filepath.Join(srcRoot, rootRet), vcsRet.Cmd, filepath.Join(srcRoot, root), vcs.Cmd)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -361,6 +389,10 @@ func FromDir(dir, srcRoot string) (vcs *Cmd, root string, err error) {
|
|||||||
dir = ndir
|
dir = ndir
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if vcsRet != nil {
|
||||||
|
return vcsRet, rootRet, nil
|
||||||
|
}
|
||||||
|
|
||||||
return nil, "", fmt.Errorf("directory %q is not using a known version control system", origDir)
|
return nil, "", fmt.Errorf("directory %q is not using a known version control system", origDir)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -535,8 +567,8 @@ func RepoRootForImportDynamic(importPath string, verbose bool) (*RepoRoot, error
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if !strings.Contains(metaImport.RepoRoot, "://") {
|
if err := validateRepoRoot(metaImport.RepoRoot); err != nil {
|
||||||
return nil, fmt.Errorf("%s: invalid repo root %q; no scheme", urlStr, metaImport.RepoRoot)
|
return nil, fmt.Errorf("%s: invalid repo root %q: %v", urlStr, metaImport.RepoRoot, err)
|
||||||
}
|
}
|
||||||
rr := &RepoRoot{
|
rr := &RepoRoot{
|
||||||
VCS: ByCmd(metaImport.VCS),
|
VCS: ByCmd(metaImport.VCS),
|
||||||
@@ -549,6 +581,19 @@ func RepoRootForImportDynamic(importPath string, verbose bool) (*RepoRoot, error
|
|||||||
return rr, nil
|
return rr, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// validateRepoRoot returns an error if repoRoot does not seem to be
|
||||||
|
// a valid URL with scheme.
|
||||||
|
func validateRepoRoot(repoRoot string) error {
|
||||||
|
url, err := url.Parse(repoRoot)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if url.Scheme == "" {
|
||||||
|
return errors.New("no scheme")
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
// metaImport represents the parsed <meta name="go-import"
|
// metaImport represents the parsed <meta name="go-import"
|
||||||
// content="prefix vcs reporoot" /> tags from HTML files.
|
// content="prefix vcs reporoot" /> tags from HTML files.
|
||||||
type metaImport struct {
|
type metaImport struct {
|
||||||
@@ -558,15 +603,28 @@ type metaImport struct {
|
|||||||
// errNoMatch is returned from matchGoImport when there's no applicable match.
|
// errNoMatch is returned from matchGoImport when there's no applicable match.
|
||||||
var errNoMatch = errors.New("no import match")
|
var errNoMatch = errors.New("no import match")
|
||||||
|
|
||||||
|
// pathPrefix reports whether sub is a prefix of s,
|
||||||
|
// only considering entire path components.
|
||||||
|
func pathPrefix(s, sub string) bool {
|
||||||
|
// strings.HasPrefix is necessary but not sufficient.
|
||||||
|
if !strings.HasPrefix(s, sub) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
// The remainder after the prefix must either be empty or start with a slash.
|
||||||
|
rem := s[len(sub):]
|
||||||
|
return rem == "" || rem[0] == '/'
|
||||||
|
}
|
||||||
|
|
||||||
// matchGoImport returns the metaImport from imports matching importPath.
|
// matchGoImport returns the metaImport from imports matching importPath.
|
||||||
// An error is returned if there are multiple matches.
|
// An error is returned if there are multiple matches.
|
||||||
// errNoMatch is returned if none match.
|
// errNoMatch is returned if none match.
|
||||||
func matchGoImport(imports []metaImport, importPath string) (_ metaImport, err error) {
|
func matchGoImport(imports []metaImport, importPath string) (_ metaImport, err error) {
|
||||||
match := -1
|
match := -1
|
||||||
for i, im := range imports {
|
for i, im := range imports {
|
||||||
if !strings.HasPrefix(importPath, im.Prefix) {
|
if !pathPrefix(importPath, im.Prefix) {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
if match != -1 {
|
if match != -1 {
|
||||||
err = fmt.Errorf("multiple meta tags match import path %q", importPath)
|
err = fmt.Errorf("multiple meta tags match import path %q", importPath)
|
||||||
return
|
return
|
||||||
@@ -590,15 +648,6 @@ func expand(match map[string]string, s string) string {
|
|||||||
|
|
||||||
// vcsPaths lists the known vcs paths.
|
// vcsPaths lists the known vcs paths.
|
||||||
var vcsPaths = []*vcsPath{
|
var vcsPaths = []*vcsPath{
|
||||||
// go.googlesource.com
|
|
||||||
{
|
|
||||||
prefix: "go.googlesource.com",
|
|
||||||
re: `^(?P<root>go\.googlesource\.com/[A-Za-z0-9_.\-]+/?)$`,
|
|
||||||
vcs: "git",
|
|
||||||
repo: "https://{root}",
|
|
||||||
check: noVCSSuffix,
|
|
||||||
},
|
|
||||||
|
|
||||||
// Github
|
// Github
|
||||||
{
|
{
|
||||||
prefix: "github.com/",
|
prefix: "github.com/",
|
||||||
@@ -673,7 +722,7 @@ func bitbucketVCS(match map[string]string) error {
|
|||||||
var resp struct {
|
var resp struct {
|
||||||
SCM string `json:"scm"`
|
SCM string `json:"scm"`
|
||||||
}
|
}
|
||||||
url := expand(match, "https://api.bitbucket.org/1.0/repositories/{bitname}")
|
url := expand(match, "https://api.bitbucket.org/2.0/repositories/{bitname}?fields=scm")
|
||||||
data, err := httpGET(url)
|
data, err := httpGET(url)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
|
|||||||
13
vendor/golang.org/x/tools/imports/BUILD
generated
vendored
13
vendor/golang.org/x/tools/imports/BUILD
generated
vendored
@@ -3,20 +3,21 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library")
|
|||||||
go_library(
|
go_library(
|
||||||
name = "go_default_library",
|
name = "go_default_library",
|
||||||
srcs = [
|
srcs = [
|
||||||
"fastwalk.go",
|
|
||||||
"fastwalk_dirent_fileno.go",
|
|
||||||
"fastwalk_dirent_ino.go",
|
|
||||||
"fastwalk_portable.go",
|
|
||||||
"fastwalk_unix.go",
|
|
||||||
"fix.go",
|
"fix.go",
|
||||||
"imports.go",
|
"imports.go",
|
||||||
|
"mod.go",
|
||||||
"sortimports.go",
|
"sortimports.go",
|
||||||
"zstdlib.go",
|
"zstdlib.go",
|
||||||
],
|
],
|
||||||
importmap = "k8s.io/kubernetes/vendor/golang.org/x/tools/imports",
|
importmap = "k8s.io/kubernetes/vendor/golang.org/x/tools/imports",
|
||||||
importpath = "golang.org/x/tools/imports",
|
importpath = "golang.org/x/tools/imports",
|
||||||
visibility = ["//visibility:public"],
|
visibility = ["//visibility:public"],
|
||||||
deps = ["//vendor/golang.org/x/tools/go/ast/astutil:go_default_library"],
|
deps = [
|
||||||
|
"//vendor/golang.org/x/tools/go/ast/astutil:go_default_library",
|
||||||
|
"//vendor/golang.org/x/tools/go/packages:go_default_library",
|
||||||
|
"//vendor/golang.org/x/tools/internal/gopathwalk:go_default_library",
|
||||||
|
"//vendor/golang.org/x/tools/internal/module:go_default_library",
|
||||||
|
],
|
||||||
)
|
)
|
||||||
|
|
||||||
filegroup(
|
filegroup(
|
||||||
|
|||||||
1349
vendor/golang.org/x/tools/imports/fix.go
generated
vendored
1349
vendor/golang.org/x/tools/imports/fix.go
generated
vendored
File diff suppressed because it is too large
Load Diff
56
vendor/golang.org/x/tools/imports/imports.go
generated
vendored
56
vendor/golang.org/x/tools/imports/imports.go
generated
vendored
@@ -13,11 +13,13 @@ import (
|
|||||||
"bytes"
|
"bytes"
|
||||||
"fmt"
|
"fmt"
|
||||||
"go/ast"
|
"go/ast"
|
||||||
|
"go/build"
|
||||||
"go/format"
|
"go/format"
|
||||||
"go/parser"
|
"go/parser"
|
||||||
"go/printer"
|
"go/printer"
|
||||||
"go/token"
|
"go/token"
|
||||||
"io"
|
"io"
|
||||||
|
"io/ioutil"
|
||||||
"regexp"
|
"regexp"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
@@ -44,9 +46,21 @@ type Options struct {
|
|||||||
// so it is important that filename be accurate.
|
// so it is important that filename be accurate.
|
||||||
// To process data ``as if'' it were in filename, pass the data as a non-nil src.
|
// To process data ``as if'' it were in filename, pass the data as a non-nil src.
|
||||||
func Process(filename string, src []byte, opt *Options) ([]byte, error) {
|
func Process(filename string, src []byte, opt *Options) ([]byte, error) {
|
||||||
|
env := &fixEnv{GOPATH: build.Default.GOPATH, GOROOT: build.Default.GOROOT}
|
||||||
|
return process(filename, src, opt, env)
|
||||||
|
}
|
||||||
|
|
||||||
|
func process(filename string, src []byte, opt *Options, env *fixEnv) ([]byte, error) {
|
||||||
if opt == nil {
|
if opt == nil {
|
||||||
opt = &Options{Comments: true, TabIndent: true, TabWidth: 8}
|
opt = &Options{Comments: true, TabIndent: true, TabWidth: 8}
|
||||||
}
|
}
|
||||||
|
if src == nil {
|
||||||
|
b, err := ioutil.ReadFile(filename)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
src = b
|
||||||
|
}
|
||||||
|
|
||||||
fileSet := token.NewFileSet()
|
fileSet := token.NewFileSet()
|
||||||
file, adjust, err := parse(fileSet, filename, src, opt)
|
file, adjust, err := parse(fileSet, filename, src, opt)
|
||||||
@@ -55,15 +69,13 @@ func Process(filename string, src []byte, opt *Options) ([]byte, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if !opt.FormatOnly {
|
if !opt.FormatOnly {
|
||||||
_, err = fixImports(fileSet, file, filename)
|
if err := fixImports(fileSet, file, filename, env); err != nil {
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
sortImports(fileSet, file)
|
sortImports(fileSet, file)
|
||||||
imps := astutil.Imports(fileSet, file)
|
imps := astutil.Imports(fileSet, file)
|
||||||
|
|
||||||
var spacesBefore []string // import paths we need spaces before
|
var spacesBefore []string // import paths we need spaces before
|
||||||
for _, impSection := range imps {
|
for _, impSection := range imps {
|
||||||
// Within each block of contiguous imports, see if any
|
// Within each block of contiguous imports, see if any
|
||||||
@@ -98,7 +110,10 @@ func Process(filename string, src []byte, opt *Options) ([]byte, error) {
|
|||||||
out = adjust(src, out)
|
out = adjust(src, out)
|
||||||
}
|
}
|
||||||
if len(spacesBefore) > 0 {
|
if len(spacesBefore) > 0 {
|
||||||
out = addImportSpaces(bytes.NewReader(out), spacesBefore)
|
out, err = addImportSpaces(bytes.NewReader(out), spacesBefore)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
out, err = format.Source(out)
|
out, err = format.Source(out)
|
||||||
@@ -133,11 +148,18 @@ func parse(fset *token.FileSet, filename string, src []byte, opt *Options) (*ast
|
|||||||
|
|
||||||
// If this is a declaration list, make it a source file
|
// If this is a declaration list, make it a source file
|
||||||
// by inserting a package clause.
|
// by inserting a package clause.
|
||||||
// Insert using a ;, not a newline, so that the line numbers
|
// Insert using a ;, not a newline, so that parse errors are on
|
||||||
// in psrc match the ones in src.
|
// the correct line.
|
||||||
psrc := append([]byte("package main;"), src...)
|
const prefix = "package main;"
|
||||||
|
psrc := append([]byte(prefix), src...)
|
||||||
file, err = parser.ParseFile(fset, filename, psrc, parserMode)
|
file, err = parser.ParseFile(fset, filename, psrc, parserMode)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
|
// Gofmt will turn the ; into a \n.
|
||||||
|
// Do that ourselves now and update the file contents,
|
||||||
|
// so that positions and line numbers are correct going forward.
|
||||||
|
psrc[len(prefix)-1] = '\n'
|
||||||
|
fset.File(file.Package).SetLinesForContent(psrc)
|
||||||
|
|
||||||
// If a main function exists, we will assume this is a main
|
// If a main function exists, we will assume this is a main
|
||||||
// package and leave the file.
|
// package and leave the file.
|
||||||
if containsMainFunc(file) {
|
if containsMainFunc(file) {
|
||||||
@@ -146,8 +168,7 @@ func parse(fset *token.FileSet, filename string, src []byte, opt *Options) (*ast
|
|||||||
|
|
||||||
adjust := func(orig, src []byte) []byte {
|
adjust := func(orig, src []byte) []byte {
|
||||||
// Remove the package clause.
|
// Remove the package clause.
|
||||||
// Gofmt has turned the ; into a \n.
|
src = src[len(prefix):]
|
||||||
src = src[len("package main\n"):]
|
|
||||||
return matchSpace(orig, src)
|
return matchSpace(orig, src)
|
||||||
}
|
}
|
||||||
return file, adjust, nil
|
return file, adjust, nil
|
||||||
@@ -256,13 +277,18 @@ func matchSpace(orig []byte, src []byte) []byte {
|
|||||||
|
|
||||||
var impLine = regexp.MustCompile(`^\s+(?:[\w\.]+\s+)?"(.+)"`)
|
var impLine = regexp.MustCompile(`^\s+(?:[\w\.]+\s+)?"(.+)"`)
|
||||||
|
|
||||||
func addImportSpaces(r io.Reader, breaks []string) []byte {
|
func addImportSpaces(r io.Reader, breaks []string) ([]byte, error) {
|
||||||
var out bytes.Buffer
|
var out bytes.Buffer
|
||||||
sc := bufio.NewScanner(r)
|
in := bufio.NewReader(r)
|
||||||
inImports := false
|
inImports := false
|
||||||
done := false
|
done := false
|
||||||
for sc.Scan() {
|
for {
|
||||||
s := sc.Text()
|
s, err := in.ReadString('\n')
|
||||||
|
if err == io.EOF {
|
||||||
|
break
|
||||||
|
} else if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
if !inImports && !done && strings.HasPrefix(s, "import") {
|
if !inImports && !done && strings.HasPrefix(s, "import") {
|
||||||
inImports = true
|
inImports = true
|
||||||
@@ -283,7 +309,7 @@ func addImportSpaces(r io.Reader, breaks []string) []byte {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fmt.Fprintln(&out, s)
|
fmt.Fprint(&out, s)
|
||||||
}
|
}
|
||||||
return out.Bytes()
|
return out.Bytes(), nil
|
||||||
}
|
}
|
||||||
|
|||||||
56
vendor/golang.org/x/tools/imports/mkstdlib.go
generated
vendored
56
vendor/golang.org/x/tools/imports/mkstdlib.go
generated
vendored
@@ -14,9 +14,9 @@ import (
|
|||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
"log"
|
"log"
|
||||||
"os"
|
"os"
|
||||||
"path"
|
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"regexp"
|
"regexp"
|
||||||
|
"runtime"
|
||||||
"sort"
|
"sort"
|
||||||
"strings"
|
"strings"
|
||||||
)
|
)
|
||||||
@@ -30,11 +30,13 @@ func mustOpen(name string) io.Reader {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func api(base string) string {
|
func api(base string) string {
|
||||||
return filepath.Join(os.Getenv("GOROOT"), "api", base)
|
return filepath.Join(runtime.GOROOT(), "api", base)
|
||||||
}
|
}
|
||||||
|
|
||||||
var sym = regexp.MustCompile(`^pkg (\S+).*?, (?:var|func|type|const) ([A-Z]\w*)`)
|
var sym = regexp.MustCompile(`^pkg (\S+).*?, (?:var|func|type|const) ([A-Z]\w*)`)
|
||||||
|
|
||||||
|
var unsafeSyms = map[string]bool{"Alignof": true, "ArbitraryType": true, "Offsetof": true, "Pointer": true, "Sizeof": true}
|
||||||
|
|
||||||
func main() {
|
func main() {
|
||||||
var buf bytes.Buffer
|
var buf bytes.Buffer
|
||||||
outf := func(format string, args ...interface{}) {
|
outf := func(format string, args ...interface{}) {
|
||||||
@@ -42,7 +44,7 @@ func main() {
|
|||||||
}
|
}
|
||||||
outf("// Code generated by mkstdlib.go. DO NOT EDIT.\n\n")
|
outf("// Code generated by mkstdlib.go. DO NOT EDIT.\n\n")
|
||||||
outf("package imports\n")
|
outf("package imports\n")
|
||||||
outf("var stdlib = map[string]string{\n")
|
outf("var stdlib = map[string]map[string]bool{\n")
|
||||||
f := io.MultiReader(
|
f := io.MultiReader(
|
||||||
mustOpen(api("go1.txt")),
|
mustOpen(api("go1.txt")),
|
||||||
mustOpen(api("go1.1.txt")),
|
mustOpen(api("go1.1.txt")),
|
||||||
@@ -53,11 +55,18 @@ func main() {
|
|||||||
mustOpen(api("go1.6.txt")),
|
mustOpen(api("go1.6.txt")),
|
||||||
mustOpen(api("go1.7.txt")),
|
mustOpen(api("go1.7.txt")),
|
||||||
mustOpen(api("go1.8.txt")),
|
mustOpen(api("go1.8.txt")),
|
||||||
|
mustOpen(api("go1.9.txt")),
|
||||||
|
mustOpen(api("go1.10.txt")),
|
||||||
|
mustOpen(api("go1.11.txt")),
|
||||||
|
mustOpen(api("go1.12.txt")),
|
||||||
)
|
)
|
||||||
sc := bufio.NewScanner(f)
|
sc := bufio.NewScanner(f)
|
||||||
fullImport := map[string]string{} // "zip.NewReader" => "archive/zip"
|
|
||||||
ambiguous := map[string]bool{}
|
pkgs := map[string]map[string]bool{
|
||||||
var keys []string
|
"unsafe": unsafeSyms,
|
||||||
|
}
|
||||||
|
paths := []string{"unsafe"}
|
||||||
|
|
||||||
for sc.Scan() {
|
for sc.Scan() {
|
||||||
l := sc.Text()
|
l := sc.Text()
|
||||||
has := func(v string) bool { return strings.Contains(l, v) }
|
has := func(v string) bool { return strings.Contains(l, v) }
|
||||||
@@ -65,32 +74,31 @@ func main() {
|
|||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if m := sym.FindStringSubmatch(l); m != nil {
|
if m := sym.FindStringSubmatch(l); m != nil {
|
||||||
full := m[1]
|
path, sym := m[1], m[2]
|
||||||
key := path.Base(full) + "." + m[2]
|
|
||||||
if exist, ok := fullImport[key]; ok {
|
if _, ok := pkgs[path]; !ok {
|
||||||
if exist != full {
|
pkgs[path] = map[string]bool{}
|
||||||
ambiguous[key] = true
|
paths = append(paths, path)
|
||||||
}
|
|
||||||
} else {
|
|
||||||
fullImport[key] = full
|
|
||||||
keys = append(keys, key)
|
|
||||||
}
|
}
|
||||||
|
pkgs[path][sym] = true
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if err := sc.Err(); err != nil {
|
if err := sc.Err(); err != nil {
|
||||||
log.Fatal(err)
|
log.Fatal(err)
|
||||||
}
|
}
|
||||||
sort.Strings(keys)
|
sort.Strings(paths)
|
||||||
for _, key := range keys {
|
for _, path := range paths {
|
||||||
if ambiguous[key] {
|
outf("\t%q: map[string]bool{\n", path)
|
||||||
outf("\t// %q is ambiguous\n", key)
|
pkg := pkgs[path]
|
||||||
} else {
|
var syms []string
|
||||||
outf("\t%q: %q,\n", key, fullImport[key])
|
for sym := range pkg {
|
||||||
|
syms = append(syms, sym)
|
||||||
}
|
}
|
||||||
|
sort.Strings(syms)
|
||||||
|
for _, sym := range syms {
|
||||||
|
outf("\t\t%q: true,\n", sym)
|
||||||
}
|
}
|
||||||
outf("\n")
|
outf("},\n")
|
||||||
for _, sym := range [...]string{"Alignof", "ArbitraryType", "Offsetof", "Pointer", "Sizeof"} {
|
|
||||||
outf("\t%q: %q,\n", "unsafe."+sym, "unsafe")
|
|
||||||
}
|
}
|
||||||
outf("}\n")
|
outf("}\n")
|
||||||
fmtbuf, err := format.Source(buf.Bytes())
|
fmtbuf, err := format.Source(buf.Bytes())
|
||||||
|
|||||||
351
vendor/golang.org/x/tools/imports/mod.go
generated
vendored
Normal file
351
vendor/golang.org/x/tools/imports/mod.go
generated
vendored
Normal file
@@ -0,0 +1,351 @@
|
|||||||
|
package imports
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/json"
|
||||||
|
"io/ioutil"
|
||||||
|
"log"
|
||||||
|
"os"
|
||||||
|
"path"
|
||||||
|
"path/filepath"
|
||||||
|
"regexp"
|
||||||
|
"sort"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"golang.org/x/tools/internal/gopathwalk"
|
||||||
|
"golang.org/x/tools/internal/module"
|
||||||
|
)
|
||||||
|
|
||||||
|
// moduleResolver implements resolver for modules using the go command as little
|
||||||
|
// as feasible.
|
||||||
|
type moduleResolver struct {
|
||||||
|
env *fixEnv
|
||||||
|
|
||||||
|
main *moduleJSON
|
||||||
|
modsByModPath []*moduleJSON // All modules, ordered by # of path components in module Path...
|
||||||
|
modsByDir []*moduleJSON // ...or Dir.
|
||||||
|
}
|
||||||
|
|
||||||
|
type moduleJSON struct {
|
||||||
|
Path string // module path
|
||||||
|
Version string // module version
|
||||||
|
Versions []string // available module versions (with -versions)
|
||||||
|
Replace *moduleJSON // replaced by this module
|
||||||
|
Time *time.Time // time version was created
|
||||||
|
Update *moduleJSON // available update, if any (with -u)
|
||||||
|
Main bool // is this the main module?
|
||||||
|
Indirect bool // is this module only an indirect dependency of main module?
|
||||||
|
Dir string // directory holding files for this module, if any
|
||||||
|
GoMod string // path to go.mod file for this module, if any
|
||||||
|
Error *moduleErrorJSON // error loading module
|
||||||
|
}
|
||||||
|
|
||||||
|
type moduleErrorJSON struct {
|
||||||
|
Err string // the error itself
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *moduleResolver) init() error {
|
||||||
|
if r.main != nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
stdout, err := r.env.invokeGo("list", "-m", "-json", "...")
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
for dec := json.NewDecoder(stdout); dec.More(); {
|
||||||
|
mod := &moduleJSON{}
|
||||||
|
if err := dec.Decode(mod); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if mod.Dir == "" {
|
||||||
|
if Debug {
|
||||||
|
log.Printf("module %v has not been downloaded and will be ignored", mod.Path)
|
||||||
|
}
|
||||||
|
// Can't do anything with a module that's not downloaded.
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
r.modsByModPath = append(r.modsByModPath, mod)
|
||||||
|
r.modsByDir = append(r.modsByDir, mod)
|
||||||
|
if mod.Main {
|
||||||
|
r.main = mod
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
sort.Slice(r.modsByModPath, func(i, j int) bool {
|
||||||
|
count := func(x int) int {
|
||||||
|
return strings.Count(r.modsByModPath[x].Path, "/")
|
||||||
|
}
|
||||||
|
return count(j) < count(i) // descending order
|
||||||
|
})
|
||||||
|
sort.Slice(r.modsByDir, func(i, j int) bool {
|
||||||
|
count := func(x int) int {
|
||||||
|
return strings.Count(r.modsByDir[x].Dir, "/")
|
||||||
|
}
|
||||||
|
return count(j) < count(i) // descending order
|
||||||
|
})
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// findPackage returns the module and directory that contains the package at
|
||||||
|
// the given import path, or returns nil, "" if no module is in scope.
|
||||||
|
func (r *moduleResolver) findPackage(importPath string) (*moduleJSON, string) {
|
||||||
|
for _, m := range r.modsByModPath {
|
||||||
|
if !strings.HasPrefix(importPath, m.Path) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
pathInModule := importPath[len(m.Path):]
|
||||||
|
pkgDir := filepath.Join(m.Dir, pathInModule)
|
||||||
|
if dirIsNestedModule(pkgDir, m) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
pkgFiles, err := ioutil.ReadDir(pkgDir)
|
||||||
|
if err != nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// A module only contains a package if it has buildable go
|
||||||
|
// files in that directory. If not, it could be provided by an
|
||||||
|
// outer module. See #29736.
|
||||||
|
for _, fi := range pkgFiles {
|
||||||
|
if ok, _ := r.env.buildContext().MatchFile(pkgDir, fi.Name()); ok {
|
||||||
|
return m, pkgDir
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil, ""
|
||||||
|
}
|
||||||
|
|
||||||
|
// findModuleByDir returns the module that contains dir, or nil if no such
|
||||||
|
// module is in scope.
|
||||||
|
func (r *moduleResolver) findModuleByDir(dir string) *moduleJSON {
|
||||||
|
// This is quite tricky and may not be correct. dir could be:
|
||||||
|
// - a package in the main module.
|
||||||
|
// - a replace target underneath the main module's directory.
|
||||||
|
// - a nested module in the above.
|
||||||
|
// - a replace target somewhere totally random.
|
||||||
|
// - a nested module in the above.
|
||||||
|
// - in the mod cache.
|
||||||
|
// - in /vendor/ in -mod=vendor mode.
|
||||||
|
// - nested module? Dunno.
|
||||||
|
// Rumor has it that replace targets cannot contain other replace targets.
|
||||||
|
for _, m := range r.modsByDir {
|
||||||
|
if !strings.HasPrefix(dir, m.Dir) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if dirIsNestedModule(dir, m) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
return m
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// dirIsNestedModule reports if dir is contained in a nested module underneath
|
||||||
|
// mod, not actually in mod.
|
||||||
|
func dirIsNestedModule(dir string, mod *moduleJSON) bool {
|
||||||
|
if !strings.HasPrefix(dir, mod.Dir) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
mf := findModFile(dir)
|
||||||
|
if mf == "" {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return filepath.Dir(mf) != mod.Dir
|
||||||
|
}
|
||||||
|
|
||||||
|
func findModFile(dir string) string {
|
||||||
|
for {
|
||||||
|
f := filepath.Join(dir, "go.mod")
|
||||||
|
info, err := os.Stat(f)
|
||||||
|
if err == nil && !info.IsDir() {
|
||||||
|
return f
|
||||||
|
}
|
||||||
|
d := filepath.Dir(dir)
|
||||||
|
if len(d) >= len(dir) {
|
||||||
|
return "" // reached top of file system, no go.mod
|
||||||
|
}
|
||||||
|
dir = d
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *moduleResolver) loadPackageNames(importPaths []string, srcDir string) (map[string]string, error) {
|
||||||
|
if err := r.init(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
names := map[string]string{}
|
||||||
|
for _, path := range importPaths {
|
||||||
|
_, packageDir := r.findPackage(path)
|
||||||
|
if packageDir == "" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
name, err := packageDirToName(packageDir)
|
||||||
|
if err != nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
names[path] = name
|
||||||
|
}
|
||||||
|
return names, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *moduleResolver) scan(_ references) ([]*pkg, error) {
|
||||||
|
if err := r.init(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Walk GOROOT, GOPATH/pkg/mod, and the main module.
|
||||||
|
roots := []gopathwalk.Root{
|
||||||
|
{filepath.Join(r.env.GOROOT, "/src"), gopathwalk.RootGOROOT},
|
||||||
|
{r.main.Dir, gopathwalk.RootCurrentModule},
|
||||||
|
}
|
||||||
|
for _, p := range filepath.SplitList(r.env.GOPATH) {
|
||||||
|
roots = append(roots, gopathwalk.Root{filepath.Join(p, "/pkg/mod"), gopathwalk.RootModuleCache})
|
||||||
|
}
|
||||||
|
|
||||||
|
// Walk replace targets, just in case they're not in any of the above.
|
||||||
|
for _, mod := range r.modsByModPath {
|
||||||
|
if mod.Replace != nil {
|
||||||
|
roots = append(roots, gopathwalk.Root{mod.Dir, gopathwalk.RootOther})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var result []*pkg
|
||||||
|
dupCheck := make(map[string]bool)
|
||||||
|
var mu sync.Mutex
|
||||||
|
|
||||||
|
gopathwalk.Walk(roots, func(root gopathwalk.Root, dir string) {
|
||||||
|
mu.Lock()
|
||||||
|
defer mu.Unlock()
|
||||||
|
|
||||||
|
if _, dup := dupCheck[dir]; dup {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
dupCheck[dir] = true
|
||||||
|
|
||||||
|
subdir := ""
|
||||||
|
if dir != root.Path {
|
||||||
|
subdir = dir[len(root.Path)+len("/"):]
|
||||||
|
}
|
||||||
|
importPath := filepath.ToSlash(subdir)
|
||||||
|
if strings.HasPrefix(importPath, "vendor/") {
|
||||||
|
// Ignore vendor dirs. If -mod=vendor is on, then things
|
||||||
|
// should mostly just work, but when it's not vendor/
|
||||||
|
// is a mess. There's no easy way to tell if it's on.
|
||||||
|
// We can still find things in the mod cache and
|
||||||
|
// map them into /vendor when -mod=vendor is on.
|
||||||
|
return
|
||||||
|
}
|
||||||
|
switch root.Type {
|
||||||
|
case gopathwalk.RootCurrentModule:
|
||||||
|
importPath = path.Join(r.main.Path, filepath.ToSlash(subdir))
|
||||||
|
case gopathwalk.RootModuleCache:
|
||||||
|
matches := modCacheRegexp.FindStringSubmatch(subdir)
|
||||||
|
modPath, err := module.DecodePath(filepath.ToSlash(matches[1]))
|
||||||
|
if err != nil {
|
||||||
|
if Debug {
|
||||||
|
log.Printf("decoding module cache path %q: %v", subdir, err)
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
importPath = path.Join(modPath, filepath.ToSlash(matches[3]))
|
||||||
|
case gopathwalk.RootGOROOT:
|
||||||
|
importPath = subdir
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if the directory is underneath a module that's in scope.
|
||||||
|
if mod := r.findModuleByDir(dir); mod != nil {
|
||||||
|
// It is. If dir is the target of a replace directive,
|
||||||
|
// our guessed import path is wrong. Use the real one.
|
||||||
|
if mod.Dir == dir {
|
||||||
|
importPath = mod.Path
|
||||||
|
} else {
|
||||||
|
dirInMod := dir[len(mod.Dir)+len("/"):]
|
||||||
|
importPath = path.Join(mod.Path, filepath.ToSlash(dirInMod))
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// The package is in an unknown module. Check that it's
|
||||||
|
// not obviously impossible to import.
|
||||||
|
var modFile string
|
||||||
|
switch root.Type {
|
||||||
|
case gopathwalk.RootModuleCache:
|
||||||
|
matches := modCacheRegexp.FindStringSubmatch(subdir)
|
||||||
|
modFile = filepath.Join(matches[1], "@", matches[2], "go.mod")
|
||||||
|
default:
|
||||||
|
modFile = findModFile(dir)
|
||||||
|
}
|
||||||
|
|
||||||
|
modBytes, err := ioutil.ReadFile(modFile)
|
||||||
|
if err == nil && !strings.HasPrefix(importPath, modulePath(modBytes)) {
|
||||||
|
// The module's declared path does not match
|
||||||
|
// its expected path. It probably needs a
|
||||||
|
// replace directive we don't have.
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// We may have discovered a package that has a different version
|
||||||
|
// in scope already. Canonicalize to that one if possible.
|
||||||
|
if _, canonicalDir := r.findPackage(importPath); canonicalDir != "" {
|
||||||
|
dir = canonicalDir
|
||||||
|
}
|
||||||
|
|
||||||
|
result = append(result, &pkg{
|
||||||
|
importPathShort: VendorlessPath(importPath),
|
||||||
|
dir: dir,
|
||||||
|
})
|
||||||
|
}, gopathwalk.Options{Debug: Debug, ModulesEnabled: true})
|
||||||
|
return result, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// modCacheRegexp splits a path in a module cache into module, module version, and package.
|
||||||
|
var modCacheRegexp = regexp.MustCompile(`(.*)@([^/\\]*)(.*)`)
|
||||||
|
|
||||||
|
var (
|
||||||
|
slashSlash = []byte("//")
|
||||||
|
moduleStr = []byte("module")
|
||||||
|
)
|
||||||
|
|
||||||
|
// modulePath returns the module path from the gomod file text.
|
||||||
|
// If it cannot find a module path, it returns an empty string.
|
||||||
|
// It is tolerant of unrelated problems in the go.mod file.
|
||||||
|
//
|
||||||
|
// Copied from cmd/go/internal/modfile.
|
||||||
|
func modulePath(mod []byte) string {
|
||||||
|
for len(mod) > 0 {
|
||||||
|
line := mod
|
||||||
|
mod = nil
|
||||||
|
if i := bytes.IndexByte(line, '\n'); i >= 0 {
|
||||||
|
line, mod = line[:i], line[i+1:]
|
||||||
|
}
|
||||||
|
if i := bytes.Index(line, slashSlash); i >= 0 {
|
||||||
|
line = line[:i]
|
||||||
|
}
|
||||||
|
line = bytes.TrimSpace(line)
|
||||||
|
if !bytes.HasPrefix(line, moduleStr) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
line = line[len(moduleStr):]
|
||||||
|
n := len(line)
|
||||||
|
line = bytes.TrimSpace(line)
|
||||||
|
if len(line) == n || len(line) == 0 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if line[0] == '"' || line[0] == '`' {
|
||||||
|
p, err := strconv.Unquote(string(line))
|
||||||
|
if err != nil {
|
||||||
|
return "" // malformed quoted string or multiline module path
|
||||||
|
}
|
||||||
|
return p
|
||||||
|
}
|
||||||
|
|
||||||
|
return string(line)
|
||||||
|
}
|
||||||
|
return "" // missing module path
|
||||||
|
}
|
||||||
18
vendor/golang.org/x/tools/imports/sortimports.go
generated
vendored
18
vendor/golang.org/x/tools/imports/sortimports.go
generated
vendored
@@ -167,15 +167,33 @@ func sortSpecs(fset *token.FileSet, f *ast.File, specs []ast.Spec) []ast.Spec {
|
|||||||
}
|
}
|
||||||
s.Path.ValuePos = pos[i].Start
|
s.Path.ValuePos = pos[i].Start
|
||||||
s.EndPos = pos[i].End
|
s.EndPos = pos[i].End
|
||||||
|
nextSpecPos := pos[i].End
|
||||||
|
|
||||||
for _, g := range importComment[s] {
|
for _, g := range importComment[s] {
|
||||||
for _, c := range g.List {
|
for _, c := range g.List {
|
||||||
c.Slash = pos[i].End
|
c.Slash = pos[i].End
|
||||||
|
nextSpecPos = c.End()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
if i < len(specs)-1 {
|
||||||
|
pos[i+1].Start = nextSpecPos
|
||||||
|
pos[i+1].End = nextSpecPos
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
sort.Sort(byCommentPos(comments))
|
sort.Sort(byCommentPos(comments))
|
||||||
|
|
||||||
|
// Fixup comments can insert blank lines, because import specs are on different lines.
|
||||||
|
// We remove those blank lines here by merging import spec to the first import spec line.
|
||||||
|
firstSpecLine := fset.Position(specs[0].Pos()).Line
|
||||||
|
for _, s := range specs[1:] {
|
||||||
|
p := s.Pos()
|
||||||
|
line := fset.File(p).Line(p)
|
||||||
|
for previousLine := line - 1; previousLine >= firstSpecLine; {
|
||||||
|
fset.File(p).MergeLine(previousLine)
|
||||||
|
previousLine--
|
||||||
|
}
|
||||||
|
}
|
||||||
return specs
|
return specs
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
19668
vendor/golang.org/x/tools/imports/zstdlib.go
generated
vendored
19668
vendor/golang.org/x/tools/imports/zstdlib.go
generated
vendored
File diff suppressed because it is too large
Load Diff
31
vendor/golang.org/x/tools/internal/fastwalk/BUILD
generated
vendored
Normal file
31
vendor/golang.org/x/tools/internal/fastwalk/BUILD
generated
vendored
Normal file
@@ -0,0 +1,31 @@
|
|||||||
|
load("@io_bazel_rules_go//go:def.bzl", "go_library")
|
||||||
|
|
||||||
|
go_library(
|
||||||
|
name = "go_default_library",
|
||||||
|
srcs = [
|
||||||
|
"fastwalk.go",
|
||||||
|
"fastwalk_dirent_fileno.go",
|
||||||
|
"fastwalk_dirent_ino.go",
|
||||||
|
"fastwalk_dirent_namlen_bsd.go",
|
||||||
|
"fastwalk_dirent_namlen_linux.go",
|
||||||
|
"fastwalk_portable.go",
|
||||||
|
"fastwalk_unix.go",
|
||||||
|
],
|
||||||
|
importmap = "k8s.io/kubernetes/vendor/golang.org/x/tools/internal/fastwalk",
|
||||||
|
importpath = "golang.org/x/tools/internal/fastwalk",
|
||||||
|
visibility = ["//vendor/golang.org/x/tools:__subpackages__"],
|
||||||
|
)
|
||||||
|
|
||||||
|
filegroup(
|
||||||
|
name = "package-srcs",
|
||||||
|
srcs = glob(["**"]),
|
||||||
|
tags = ["automanaged"],
|
||||||
|
visibility = ["//visibility:private"],
|
||||||
|
)
|
||||||
|
|
||||||
|
filegroup(
|
||||||
|
name = "all-srcs",
|
||||||
|
srcs = [":package-srcs"],
|
||||||
|
tags = ["automanaged"],
|
||||||
|
visibility = ["//visibility:public"],
|
||||||
|
)
|
||||||
@@ -2,17 +2,9 @@
|
|||||||
// Use of this source code is governed by a BSD-style
|
// Use of this source code is governed by a BSD-style
|
||||||
// license that can be found in the LICENSE file.
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
// A faster implementation of filepath.Walk.
|
// Package fastwalk provides a faster version of filepath.Walk for file system
|
||||||
//
|
// scanning tools.
|
||||||
// filepath.Walk's design necessarily calls os.Lstat on each file,
|
package fastwalk
|
||||||
// even if the caller needs less info. And goimports only need to know
|
|
||||||
// the type of each file. The kernel interface provides the type in
|
|
||||||
// the Readdir call but the standard library ignored it.
|
|
||||||
// fastwalk_unix.go contains a fork of the syscall routines.
|
|
||||||
//
|
|
||||||
// See golang.org/issue/16399
|
|
||||||
|
|
||||||
package imports
|
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"errors"
|
"errors"
|
||||||
@@ -22,10 +14,27 @@ import (
|
|||||||
"sync"
|
"sync"
|
||||||
)
|
)
|
||||||
|
|
||||||
// traverseLink is a sentinel error for fastWalk, similar to filepath.SkipDir.
|
// TraverseLink is used as a return value from WalkFuncs to indicate that the
|
||||||
var traverseLink = errors.New("traverse symlink, assuming target is a directory")
|
// symlink named in the call may be traversed.
|
||||||
|
var TraverseLink = errors.New("fastwalk: traverse symlink, assuming target is a directory")
|
||||||
|
|
||||||
// fastWalk walks the file tree rooted at root, calling walkFn for
|
// SkipFiles is a used as a return value from WalkFuncs to indicate that the
|
||||||
|
// callback should not be called for any other files in the current directory.
|
||||||
|
// Child directories will still be traversed.
|
||||||
|
var SkipFiles = errors.New("fastwalk: skip remaining files in directory")
|
||||||
|
|
||||||
|
// Walk is a faster implementation of filepath.Walk.
|
||||||
|
//
|
||||||
|
// filepath.Walk's design necessarily calls os.Lstat on each file,
|
||||||
|
// even if the caller needs less info.
|
||||||
|
// Many tools need only the type of each file.
|
||||||
|
// On some platforms, this information is provided directly by the readdir
|
||||||
|
// system call, avoiding the need to stat each file individually.
|
||||||
|
// fastwalk_unix.go contains a fork of the syscall routines.
|
||||||
|
//
|
||||||
|
// See golang.org/issue/16399
|
||||||
|
//
|
||||||
|
// Walk walks the file tree rooted at root, calling walkFn for
|
||||||
// each file or directory in the tree, including root.
|
// each file or directory in the tree, including root.
|
||||||
//
|
//
|
||||||
// If fastWalk returns filepath.SkipDir, the directory is skipped.
|
// If fastWalk returns filepath.SkipDir, the directory is skipped.
|
||||||
@@ -36,10 +45,10 @@ var traverseLink = errors.New("traverse symlink, assuming target is a directory"
|
|||||||
// any permission bits.
|
// any permission bits.
|
||||||
// * multiple goroutines stat the filesystem concurrently. The provided
|
// * multiple goroutines stat the filesystem concurrently. The provided
|
||||||
// walkFn must be safe for concurrent use.
|
// walkFn must be safe for concurrent use.
|
||||||
// * fastWalk can follow symlinks if walkFn returns the traverseLink
|
// * fastWalk can follow symlinks if walkFn returns the TraverseLink
|
||||||
// sentinel error. It is the walkFn's responsibility to prevent
|
// sentinel error. It is the walkFn's responsibility to prevent
|
||||||
// fastWalk from going into symlink cycles.
|
// fastWalk from going into symlink cycles.
|
||||||
func fastWalk(root string, walkFn func(path string, typ os.FileMode) error) error {
|
func Walk(root string, walkFn func(path string, typ os.FileMode) error) error {
|
||||||
// TODO(bradfitz): make numWorkers configurable? We used a
|
// TODO(bradfitz): make numWorkers configurable? We used a
|
||||||
// minimum of 4 to give the kernel more info about multiple
|
// minimum of 4 to give the kernel more info about multiple
|
||||||
// things we want, in hopes its I/O scheduling can take
|
// things we want, in hopes its I/O scheduling can take
|
||||||
@@ -158,7 +167,7 @@ func (w *walker) onDirEnt(dirName, baseName string, typ os.FileMode) error {
|
|||||||
|
|
||||||
err := w.fn(joined, typ)
|
err := w.fn(joined, typ)
|
||||||
if typ == os.ModeSymlink {
|
if typ == os.ModeSymlink {
|
||||||
if err == traverseLink {
|
if err == TraverseLink {
|
||||||
// Set callbackDone so we don't call it twice for both the
|
// Set callbackDone so we don't call it twice for both the
|
||||||
// symlink-as-symlink and the symlink-as-directory later:
|
// symlink-as-symlink and the symlink-as-directory later:
|
||||||
w.enqueue(walkItem{dir: joined, callbackDone: true})
|
w.enqueue(walkItem{dir: joined, callbackDone: true})
|
||||||
@@ -4,7 +4,7 @@
|
|||||||
|
|
||||||
// +build freebsd openbsd netbsd
|
// +build freebsd openbsd netbsd
|
||||||
|
|
||||||
package imports
|
package fastwalk
|
||||||
|
|
||||||
import "syscall"
|
import "syscall"
|
||||||
|
|
||||||
@@ -2,9 +2,10 @@
|
|||||||
// Use of this source code is governed by a BSD-style
|
// Use of this source code is governed by a BSD-style
|
||||||
// license that can be found in the LICENSE file.
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
// +build linux,!appengine darwin
|
// +build linux darwin
|
||||||
|
// +build !appengine
|
||||||
|
|
||||||
package imports
|
package fastwalk
|
||||||
|
|
||||||
import "syscall"
|
import "syscall"
|
||||||
|
|
||||||
13
vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_namlen_bsd.go
generated
vendored
Normal file
13
vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_namlen_bsd.go
generated
vendored
Normal file
@@ -0,0 +1,13 @@
|
|||||||
|
// Copyright 2018 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build darwin freebsd openbsd netbsd
|
||||||
|
|
||||||
|
package fastwalk
|
||||||
|
|
||||||
|
import "syscall"
|
||||||
|
|
||||||
|
func direntNamlen(dirent *syscall.Dirent) uint64 {
|
||||||
|
return uint64(dirent.Namlen)
|
||||||
|
}
|
||||||
29
vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_namlen_linux.go
generated
vendored
Normal file
29
vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_namlen_linux.go
generated
vendored
Normal file
@@ -0,0 +1,29 @@
|
|||||||
|
// Copyright 2018 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build linux
|
||||||
|
// +build !appengine
|
||||||
|
|
||||||
|
package fastwalk
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"syscall"
|
||||||
|
"unsafe"
|
||||||
|
)
|
||||||
|
|
||||||
|
func direntNamlen(dirent *syscall.Dirent) uint64 {
|
||||||
|
const fixedHdr = uint16(unsafe.Offsetof(syscall.Dirent{}.Name))
|
||||||
|
nameBuf := (*[unsafe.Sizeof(dirent.Name)]byte)(unsafe.Pointer(&dirent.Name[0]))
|
||||||
|
const nameBufLen = uint16(len(nameBuf))
|
||||||
|
limit := dirent.Reclen - fixedHdr
|
||||||
|
if limit > nameBufLen {
|
||||||
|
limit = nameBufLen
|
||||||
|
}
|
||||||
|
nameLen := bytes.IndexByte(nameBuf[:limit], 0)
|
||||||
|
if nameLen < 0 {
|
||||||
|
panic("failed to find terminating 0 byte in dirent")
|
||||||
|
}
|
||||||
|
return uint64(nameLen)
|
||||||
|
}
|
||||||
@@ -4,7 +4,7 @@
|
|||||||
|
|
||||||
// +build appengine !linux,!darwin,!freebsd,!openbsd,!netbsd
|
// +build appengine !linux,!darwin,!freebsd,!openbsd,!netbsd
|
||||||
|
|
||||||
package imports
|
package fastwalk
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
@@ -20,8 +20,16 @@ func readDir(dirName string, fn func(dirName, entName string, typ os.FileMode) e
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
skipFiles := false
|
||||||
for _, fi := range fis {
|
for _, fi := range fis {
|
||||||
|
if fi.Mode().IsRegular() && skipFiles {
|
||||||
|
continue
|
||||||
|
}
|
||||||
if err := fn(dirName, fi.Name(), fi.Mode()&os.ModeType); err != nil {
|
if err := fn(dirName, fi.Name(), fi.Mode()&os.ModeType); err != nil {
|
||||||
|
if err == SkipFiles {
|
||||||
|
skipFiles = true
|
||||||
|
continue
|
||||||
|
}
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -2,12 +2,12 @@
|
|||||||
// Use of this source code is governed by a BSD-style
|
// Use of this source code is governed by a BSD-style
|
||||||
// license that can be found in the LICENSE file.
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
// +build linux,!appengine darwin freebsd openbsd netbsd
|
// +build linux darwin freebsd openbsd netbsd
|
||||||
|
// +build !appengine
|
||||||
|
|
||||||
package imports
|
package fastwalk
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
|
||||||
"fmt"
|
"fmt"
|
||||||
"os"
|
"os"
|
||||||
"syscall"
|
"syscall"
|
||||||
@@ -23,7 +23,7 @@ const unknownFileMode os.FileMode = os.ModeNamedPipe | os.ModeSocket | os.ModeDe
|
|||||||
func readDir(dirName string, fn func(dirName, entName string, typ os.FileMode) error) error {
|
func readDir(dirName string, fn func(dirName, entName string, typ os.FileMode) error) error {
|
||||||
fd, err := syscall.Open(dirName, 0, 0)
|
fd, err := syscall.Open(dirName, 0, 0)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return &os.PathError{Op: "open", Path: dirName, Err: err}
|
||||||
}
|
}
|
||||||
defer syscall.Close(fd)
|
defer syscall.Close(fd)
|
||||||
|
|
||||||
@@ -31,6 +31,7 @@ func readDir(dirName string, fn func(dirName, entName string, typ os.FileMode) e
|
|||||||
buf := make([]byte, blockSize) // stack-allocated; doesn't escape
|
buf := make([]byte, blockSize) // stack-allocated; doesn't escape
|
||||||
bufp := 0 // starting read position in buf
|
bufp := 0 // starting read position in buf
|
||||||
nbuf := 0 // end valid data in buf
|
nbuf := 0 // end valid data in buf
|
||||||
|
skipFiles := false
|
||||||
for {
|
for {
|
||||||
if bufp >= nbuf {
|
if bufp >= nbuf {
|
||||||
bufp = 0
|
bufp = 0
|
||||||
@@ -61,7 +62,14 @@ func readDir(dirName string, fn func(dirName, entName string, typ os.FileMode) e
|
|||||||
}
|
}
|
||||||
typ = fi.Mode() & os.ModeType
|
typ = fi.Mode() & os.ModeType
|
||||||
}
|
}
|
||||||
|
if skipFiles && typ.IsRegular() {
|
||||||
|
continue
|
||||||
|
}
|
||||||
if err := fn(dirName, name, typ); err != nil {
|
if err := fn(dirName, name, typ); err != nil {
|
||||||
|
if err == SkipFiles {
|
||||||
|
skipFiles = true
|
||||||
|
continue
|
||||||
|
}
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -105,10 +113,7 @@ func parseDirEnt(buf []byte) (consumed int, name string, typ os.FileMode) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
nameBuf := (*[unsafe.Sizeof(dirent.Name)]byte)(unsafe.Pointer(&dirent.Name[0]))
|
nameBuf := (*[unsafe.Sizeof(dirent.Name)]byte)(unsafe.Pointer(&dirent.Name[0]))
|
||||||
nameLen := bytes.IndexByte(nameBuf[:], 0)
|
nameLen := direntNamlen(dirent)
|
||||||
if nameLen < 0 {
|
|
||||||
panic("failed to find terminating 0 byte in dirent")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Special cases for common things:
|
// Special cases for common things:
|
||||||
if nameLen == 1 && nameBuf[0] == '.' {
|
if nameLen == 1 && nameBuf[0] == '.' {
|
||||||
24
vendor/golang.org/x/tools/internal/gopathwalk/BUILD
generated
vendored
Normal file
24
vendor/golang.org/x/tools/internal/gopathwalk/BUILD
generated
vendored
Normal file
@@ -0,0 +1,24 @@
|
|||||||
|
load("@io_bazel_rules_go//go:def.bzl", "go_library")
|
||||||
|
|
||||||
|
go_library(
|
||||||
|
name = "go_default_library",
|
||||||
|
srcs = ["walk.go"],
|
||||||
|
importmap = "k8s.io/kubernetes/vendor/golang.org/x/tools/internal/gopathwalk",
|
||||||
|
importpath = "golang.org/x/tools/internal/gopathwalk",
|
||||||
|
visibility = ["//vendor/golang.org/x/tools:__subpackages__"],
|
||||||
|
deps = ["//vendor/golang.org/x/tools/internal/fastwalk:go_default_library"],
|
||||||
|
)
|
||||||
|
|
||||||
|
filegroup(
|
||||||
|
name = "package-srcs",
|
||||||
|
srcs = glob(["**"]),
|
||||||
|
tags = ["automanaged"],
|
||||||
|
visibility = ["//visibility:private"],
|
||||||
|
)
|
||||||
|
|
||||||
|
filegroup(
|
||||||
|
name = "all-srcs",
|
||||||
|
srcs = [":package-srcs"],
|
||||||
|
tags = ["automanaged"],
|
||||||
|
visibility = ["//visibility:public"],
|
||||||
|
)
|
||||||
250
vendor/golang.org/x/tools/internal/gopathwalk/walk.go
generated
vendored
Normal file
250
vendor/golang.org/x/tools/internal/gopathwalk/walk.go
generated
vendored
Normal file
@@ -0,0 +1,250 @@
|
|||||||
|
// Copyright 2018 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// Package gopathwalk is like filepath.Walk but specialized for finding Go
|
||||||
|
// packages, particularly in $GOPATH and $GOROOT.
|
||||||
|
package gopathwalk
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
"go/build"
|
||||||
|
"io/ioutil"
|
||||||
|
"log"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"golang.org/x/tools/internal/fastwalk"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Options controls the behavior of a Walk call.
|
||||||
|
type Options struct {
|
||||||
|
Debug bool // Enable debug logging
|
||||||
|
ModulesEnabled bool // Search module caches. Also disables legacy goimports ignore rules.
|
||||||
|
}
|
||||||
|
|
||||||
|
// RootType indicates the type of a Root.
|
||||||
|
type RootType int
|
||||||
|
|
||||||
|
const (
|
||||||
|
RootUnknown RootType = iota
|
||||||
|
RootGOROOT
|
||||||
|
RootGOPATH
|
||||||
|
RootCurrentModule
|
||||||
|
RootModuleCache
|
||||||
|
RootOther
|
||||||
|
)
|
||||||
|
|
||||||
|
// A Root is a starting point for a Walk.
|
||||||
|
type Root struct {
|
||||||
|
Path string
|
||||||
|
Type RootType
|
||||||
|
}
|
||||||
|
|
||||||
|
// SrcDirsRoots returns the roots from build.Default.SrcDirs(). Not modules-compatible.
|
||||||
|
func SrcDirsRoots(ctx *build.Context) []Root {
|
||||||
|
var roots []Root
|
||||||
|
roots = append(roots, Root{filepath.Join(ctx.GOROOT, "src"), RootGOROOT})
|
||||||
|
for _, p := range filepath.SplitList(ctx.GOPATH) {
|
||||||
|
roots = append(roots, Root{filepath.Join(p, "src"), RootGOPATH})
|
||||||
|
}
|
||||||
|
return roots
|
||||||
|
}
|
||||||
|
|
||||||
|
// Walk walks Go source directories ($GOROOT, $GOPATH, etc) to find packages.
|
||||||
|
// For each package found, add will be called (concurrently) with the absolute
|
||||||
|
// paths of the containing source directory and the package directory.
|
||||||
|
// add will be called concurrently.
|
||||||
|
func Walk(roots []Root, add func(root Root, dir string), opts Options) {
|
||||||
|
for _, root := range roots {
|
||||||
|
walkDir(root, add, opts)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func walkDir(root Root, add func(Root, string), opts Options) {
|
||||||
|
if _, err := os.Stat(root.Path); os.IsNotExist(err) {
|
||||||
|
if opts.Debug {
|
||||||
|
log.Printf("skipping nonexistant directory: %v", root.Path)
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if opts.Debug {
|
||||||
|
log.Printf("scanning %s", root.Path)
|
||||||
|
}
|
||||||
|
w := &walker{
|
||||||
|
root: root,
|
||||||
|
add: add,
|
||||||
|
opts: opts,
|
||||||
|
}
|
||||||
|
w.init()
|
||||||
|
if err := fastwalk.Walk(root.Path, w.walk); err != nil {
|
||||||
|
log.Printf("gopathwalk: scanning directory %v: %v", root.Path, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if opts.Debug {
|
||||||
|
log.Printf("scanned %s", root.Path)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// walker is the callback for fastwalk.Walk.
|
||||||
|
type walker struct {
|
||||||
|
root Root // The source directory to scan.
|
||||||
|
add func(Root, string) // The callback that will be invoked for every possible Go package dir.
|
||||||
|
opts Options // Options passed to Walk by the user.
|
||||||
|
|
||||||
|
ignoredDirs []os.FileInfo // The ignored directories, loaded from .goimportsignore files.
|
||||||
|
}
|
||||||
|
|
||||||
|
// init initializes the walker based on its Options.
|
||||||
|
func (w *walker) init() {
|
||||||
|
var ignoredPaths []string
|
||||||
|
if w.root.Type == RootModuleCache {
|
||||||
|
ignoredPaths = []string{"cache"}
|
||||||
|
}
|
||||||
|
if !w.opts.ModulesEnabled && w.root.Type == RootGOPATH {
|
||||||
|
ignoredPaths = w.getIgnoredDirs(w.root.Path)
|
||||||
|
ignoredPaths = append(ignoredPaths, "v", "mod")
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, p := range ignoredPaths {
|
||||||
|
full := filepath.Join(w.root.Path, p)
|
||||||
|
if fi, err := os.Stat(full); err == nil {
|
||||||
|
w.ignoredDirs = append(w.ignoredDirs, fi)
|
||||||
|
if w.opts.Debug {
|
||||||
|
log.Printf("Directory added to ignore list: %s", full)
|
||||||
|
}
|
||||||
|
} else if w.opts.Debug {
|
||||||
|
log.Printf("Error statting ignored directory: %v", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// getIgnoredDirs reads an optional config file at <path>/.goimportsignore
|
||||||
|
// of relative directories to ignore when scanning for go files.
|
||||||
|
// The provided path is one of the $GOPATH entries with "src" appended.
|
||||||
|
func (w *walker) getIgnoredDirs(path string) []string {
|
||||||
|
file := filepath.Join(path, ".goimportsignore")
|
||||||
|
slurp, err := ioutil.ReadFile(file)
|
||||||
|
if w.opts.Debug {
|
||||||
|
if err != nil {
|
||||||
|
log.Print(err)
|
||||||
|
} else {
|
||||||
|
log.Printf("Read %s", file)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var ignoredDirs []string
|
||||||
|
bs := bufio.NewScanner(bytes.NewReader(slurp))
|
||||||
|
for bs.Scan() {
|
||||||
|
line := strings.TrimSpace(bs.Text())
|
||||||
|
if line == "" || strings.HasPrefix(line, "#") {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
ignoredDirs = append(ignoredDirs, line)
|
||||||
|
}
|
||||||
|
return ignoredDirs
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *walker) shouldSkipDir(fi os.FileInfo) bool {
|
||||||
|
for _, ignoredDir := range w.ignoredDirs {
|
||||||
|
if os.SameFile(fi, ignoredDir) {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *walker) walk(path string, typ os.FileMode) error {
|
||||||
|
dir := filepath.Dir(path)
|
||||||
|
if typ.IsRegular() {
|
||||||
|
if dir == w.root.Path && (w.root.Type == RootGOROOT || w.root.Type == RootGOPATH) {
|
||||||
|
// Doesn't make sense to have regular files
|
||||||
|
// directly in your $GOPATH/src or $GOROOT/src.
|
||||||
|
return fastwalk.SkipFiles
|
||||||
|
}
|
||||||
|
if !strings.HasSuffix(path, ".go") {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
w.add(w.root, dir)
|
||||||
|
return fastwalk.SkipFiles
|
||||||
|
}
|
||||||
|
if typ == os.ModeDir {
|
||||||
|
base := filepath.Base(path)
|
||||||
|
if base == "" || base[0] == '.' || base[0] == '_' ||
|
||||||
|
base == "testdata" ||
|
||||||
|
(w.root.Type == RootGOROOT && w.opts.ModulesEnabled && base == "vendor") ||
|
||||||
|
(!w.opts.ModulesEnabled && base == "node_modules") {
|
||||||
|
return filepath.SkipDir
|
||||||
|
}
|
||||||
|
fi, err := os.Lstat(path)
|
||||||
|
if err == nil && w.shouldSkipDir(fi) {
|
||||||
|
return filepath.SkipDir
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
if typ == os.ModeSymlink {
|
||||||
|
base := filepath.Base(path)
|
||||||
|
if strings.HasPrefix(base, ".#") {
|
||||||
|
// Emacs noise.
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
fi, err := os.Lstat(path)
|
||||||
|
if err != nil {
|
||||||
|
// Just ignore it.
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
if w.shouldTraverse(dir, fi) {
|
||||||
|
return fastwalk.TraverseLink
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// shouldTraverse reports whether the symlink fi, found in dir,
|
||||||
|
// should be followed. It makes sure symlinks were never visited
|
||||||
|
// before to avoid symlink loops.
|
||||||
|
func (w *walker) shouldTraverse(dir string, fi os.FileInfo) bool {
|
||||||
|
path := filepath.Join(dir, fi.Name())
|
||||||
|
target, err := filepath.EvalSymlinks(path)
|
||||||
|
if err != nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
ts, err := os.Stat(target)
|
||||||
|
if err != nil {
|
||||||
|
fmt.Fprintln(os.Stderr, err)
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if !ts.IsDir() {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if w.shouldSkipDir(ts) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
// Check for symlink loops by statting each directory component
|
||||||
|
// and seeing if any are the same file as ts.
|
||||||
|
for {
|
||||||
|
parent := filepath.Dir(path)
|
||||||
|
if parent == path {
|
||||||
|
// Made it to the root without seeing a cycle.
|
||||||
|
// Use this symlink.
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
parentInfo, err := os.Stat(parent)
|
||||||
|
if err != nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if os.SameFile(ts, parentInfo) {
|
||||||
|
// Cycle. Don't traverse.
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
path = parent
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
24
vendor/golang.org/x/tools/internal/module/BUILD
generated
vendored
Normal file
24
vendor/golang.org/x/tools/internal/module/BUILD
generated
vendored
Normal file
@@ -0,0 +1,24 @@
|
|||||||
|
load("@io_bazel_rules_go//go:def.bzl", "go_library")
|
||||||
|
|
||||||
|
go_library(
|
||||||
|
name = "go_default_library",
|
||||||
|
srcs = ["module.go"],
|
||||||
|
importmap = "k8s.io/kubernetes/vendor/golang.org/x/tools/internal/module",
|
||||||
|
importpath = "golang.org/x/tools/internal/module",
|
||||||
|
visibility = ["//vendor/golang.org/x/tools:__subpackages__"],
|
||||||
|
deps = ["//vendor/golang.org/x/tools/internal/semver:go_default_library"],
|
||||||
|
)
|
||||||
|
|
||||||
|
filegroup(
|
||||||
|
name = "package-srcs",
|
||||||
|
srcs = glob(["**"]),
|
||||||
|
tags = ["automanaged"],
|
||||||
|
visibility = ["//visibility:private"],
|
||||||
|
)
|
||||||
|
|
||||||
|
filegroup(
|
||||||
|
name = "all-srcs",
|
||||||
|
srcs = [":package-srcs"],
|
||||||
|
tags = ["automanaged"],
|
||||||
|
visibility = ["//visibility:public"],
|
||||||
|
)
|
||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user